 Documentation/devicetree/bindings/iommu/arm,smmu.txt      |    6
 Documentation/devicetree/bindings/iommu/ti,omap-iommu.txt |   26
 arch/arm/mach-omap2/omap-iommu.c                           |    5
 drivers/iommu/Kconfig                                      |    2
 drivers/iommu/amd_iommu.c                                  |    8
 drivers/iommu/amd_iommu_init.c                             |   16
 drivers/iommu/amd_iommu_types.h                            |   11
 drivers/iommu/arm-smmu.c                                   |  105
 drivers/iommu/dmar.c                                       |  513
 drivers/iommu/intel-iommu.c                                | 1610
 drivers/iommu/intel_irq_remapping.c                        |  108
 drivers/iommu/iova.c                                       |   64
 drivers/iommu/omap-iommu.c                                 |  162
 drivers/iommu/omap-iommu.h                                 |    5
 drivers/iommu/omap-iommu2.c                                |    3
 include/acpi/actbl2.h                                      |   15
 include/linux/dmar.h                                       |   82
 include/linux/intel-iommu.h                                |    1
 include/linux/iova.h                                       |    2
 19 files changed, 1754 insertions(+), 990 deletions(-)
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
index e34c6cdd8ba8..f284b99402bc 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
| @@ -48,6 +48,12 @@ conditions. | |||
| 48 | from the mmu-masters towards memory) node for this | 48 | from the mmu-masters towards memory) node for this |
| 49 | SMMU. | 49 | SMMU. |
| 50 | 50 | ||
| 51 | - calxeda,smmu-secure-config-access : Enable proper handling of buggy | ||
| 52 | implementations that always use secure access to | ||
| 53 | SMMU configuration registers. In this case non-secure | ||
| 54 | aliases of secure registers have to be used during | ||
| 55 | SMMU configuration. | ||
| 56 | |||
| 51 | Example: | 57 | Example: |
| 52 | 58 | ||
| 53 | smmu { | 59 | smmu { |
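Editorial note on the property above: the driver detects a boolean DT property like this with of_property_read_bool() and, when the quirk is present, switches to the non-secure register aliases at offset 0x400. A minimal sketch of that pattern (not the literal driver code; the corresponding change to drivers/iommu/arm-smmu.c appears further down in this diff):

```c
#include <linux/of.h>

/* Sketch only: choose the SMMU global register offset based on the
 * "calxeda,smmu-secure-config-access" quirk described above. */
static unsigned long smmu_gr0_cfg_offset(const struct device_node *np)
{
	if (of_property_read_bool(np, "calxeda,smmu-secure-config-access"))
		return 0x400;	/* non-secure aliases of the config registers */
	return 0;		/* normal GR0 space */
}
```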
diff --git a/Documentation/devicetree/bindings/iommu/ti,omap-iommu.txt b/Documentation/devicetree/bindings/iommu/ti,omap-iommu.txt
new file mode 100644
index 000000000000..42531dc387aa
--- /dev/null
+++ b/Documentation/devicetree/bindings/iommu/ti,omap-iommu.txt
| @@ -0,0 +1,26 @@ | |||
| 1 | OMAP2+ IOMMU | ||
| 2 | |||
| 3 | Required properties: | ||
| 4 | - compatible : Should be one of, | ||
| 5 | "ti,omap2-iommu" for OMAP2/OMAP3 IOMMU instances | ||
| 6 | "ti,omap4-iommu" for OMAP4/OMAP5 IOMMU instances | ||
| 7 | "ti,dra7-iommu" for DRA7xx IOMMU instances | ||
| 8 | - ti,hwmods : Name of the hwmod associated with the IOMMU instance | ||
| 9 | - reg : Address space for the configuration registers | ||
| 10 | - interrupts : Interrupt specifier for the IOMMU instance | ||
| 11 | |||
| 12 | Optional properties: | ||
| 13 | - ti,#tlb-entries : Number of entries in the translation look-aside buffer. | ||
| 14 | Should be either 8 or 32 (default: 32) | ||
| 15 | - ti,iommu-bus-err-back : Indicates the IOMMU instance supports throwing | ||
| 16 | back a bus error response on MMU faults. | ||
| 17 | |||
| 18 | Example: | ||
| 19 | /* OMAP3 ISP MMU */ | ||
| 20 | mmu_isp: mmu@480bd400 { | ||
| 21 | compatible = "ti,omap2-iommu"; | ||
| 22 | reg = <0x480bd400 0x80>; | ||
| 23 | interrupts = <24>; | ||
| 24 | ti,hwmods = "mmu_isp"; | ||
| 25 | ti,#tlb-entries = <8>; | ||
| 26 | }; | ||
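For context, these properties are consumed in the driver's probe path with the standard of_* helpers; a minimal sketch, with the function name invented for illustration:

```c
#include <linux/of.h>

/* Illustrative only: read the optional properties documented above,
 * falling back to the documented default of 32 TLB entries. */
static void example_read_omap_iommu_props(struct device_node *np,
					  u32 *nr_tlb_entries,
					  bool *bus_err_back)
{
	if (of_property_read_u32(np, "ti,#tlb-entries", nr_tlb_entries))
		*nr_tlb_entries = 32;
	*bus_err_back = of_property_read_bool(np, "ti,iommu-bus-err-back");
}
```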
diff --git a/arch/arm/mach-omap2/omap-iommu.c b/arch/arm/mach-omap2/omap-iommu.c
index f6daae821ebb..f1fab5684a24 100644
--- a/arch/arm/mach-omap2/omap-iommu.c
+++ b/arch/arm/mach-omap2/omap-iommu.c
| @@ -10,6 +10,7 @@ | |||
| 10 | * published by the Free Software Foundation. | 10 | * published by the Free Software Foundation. |
| 11 | */ | 11 | */ |
| 12 | 12 | ||
| 13 | #include <linux/of.h> | ||
| 13 | #include <linux/module.h> | 14 | #include <linux/module.h> |
| 14 | #include <linux/platform_device.h> | 15 | #include <linux/platform_device.h> |
| 15 | #include <linux/err.h> | 16 | #include <linux/err.h> |
| @@ -58,6 +59,10 @@ static int __init omap_iommu_dev_init(struct omap_hwmod *oh, void *unused) | |||
| 58 | 59 | ||
| 59 | static int __init omap_iommu_init(void) | 60 | static int __init omap_iommu_init(void) |
| 60 | { | 61 | { |
| 62 | /* If dtb is there, the devices will be created dynamically */ | ||
| 63 | if (of_have_populated_dt()) | ||
| 64 | return -ENODEV; | ||
| 65 | |||
| 61 | return omap_hwmod_for_each_by_class("mmu", omap_iommu_dev_init, NULL); | 66 | return omap_hwmod_for_each_by_class("mmu", omap_iommu_dev_init, NULL); |
| 62 | } | 67 | } |
| 63 | /* must be ready before omap3isp is probed */ | 68 | /* must be ready before omap3isp is probed */ |
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 79bbc21c1d01..df56e4c74a7e 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
| @@ -207,7 +207,7 @@ config SHMOBILE_IOMMU | |||
| 207 | bool "IOMMU for Renesas IPMMU/IPMMUI" | 207 | bool "IOMMU for Renesas IPMMU/IPMMUI" |
| 208 | default n | 208 | default n |
| 209 | depends on ARM | 209 | depends on ARM |
| 210 | depends on SH_MOBILE || COMPILE_TEST | 210 | depends on ARCH_SHMOBILE || COMPILE_TEST |
| 211 | select IOMMU_API | 211 | select IOMMU_API |
| 212 | select ARM_DMA_USE_IOMMU | 212 | select ARM_DMA_USE_IOMMU |
| 213 | select SHMOBILE_IPMMU | 213 | select SHMOBILE_IPMMU |
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index faf0da4bb3a2..c949520bd196 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
| @@ -963,7 +963,7 @@ static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, int pasid, | |||
| 963 | 963 | ||
| 964 | address &= ~(0xfffULL); | 964 | address &= ~(0xfffULL); |
| 965 | 965 | ||
| 966 | cmd->data[0] = pasid & PASID_MASK; | 966 | cmd->data[0] = pasid; |
| 967 | cmd->data[1] = domid; | 967 | cmd->data[1] = domid; |
| 968 | cmd->data[2] = lower_32_bits(address); | 968 | cmd->data[2] = lower_32_bits(address); |
| 969 | cmd->data[3] = upper_32_bits(address); | 969 | cmd->data[3] = upper_32_bits(address); |
| @@ -982,10 +982,10 @@ static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid, | |||
| 982 | address &= ~(0xfffULL); | 982 | address &= ~(0xfffULL); |
| 983 | 983 | ||
| 984 | cmd->data[0] = devid; | 984 | cmd->data[0] = devid; |
| 985 | cmd->data[0] |= (pasid & 0xff) << 16; | 985 | cmd->data[0] |= ((pasid >> 8) & 0xff) << 16; |
| 986 | cmd->data[0] |= (qdep & 0xff) << 24; | 986 | cmd->data[0] |= (qdep & 0xff) << 24; |
| 987 | cmd->data[1] = devid; | 987 | cmd->data[1] = devid; |
| 988 | cmd->data[1] |= ((pasid >> 8) & 0xfff) << 16; | 988 | cmd->data[1] |= (pasid & 0xff) << 16; |
| 989 | cmd->data[2] = lower_32_bits(address); | 989 | cmd->data[2] = lower_32_bits(address); |
| 990 | cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK; | 990 | cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK; |
| 991 | cmd->data[3] = upper_32_bits(address); | 991 | cmd->data[3] = upper_32_bits(address); |
| @@ -1001,7 +1001,7 @@ static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, int pasid, | |||
| 1001 | 1001 | ||
| 1002 | cmd->data[0] = devid; | 1002 | cmd->data[0] = devid; |
| 1003 | if (gn) { | 1003 | if (gn) { |
| 1004 | cmd->data[1] = pasid & PASID_MASK; | 1004 | cmd->data[1] = pasid; |
| 1005 | cmd->data[2] = CMD_INV_IOMMU_PAGES_GN_MASK; | 1005 | cmd->data[2] = CMD_INV_IOMMU_PAGES_GN_MASK; |
| 1006 | } | 1006 | } |
| 1007 | cmd->data[3] = tag & 0x1ff; | 1007 | cmd->data[3] = tag & 0x1ff; |
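To make the build_inv_iotlb_pasid() change above concrete, here is a small stand-alone program (illustration only, not kernel code) that packs a sample PASID the way the new code does: PASID[15:8] into bits 23:16 of data[0], PASID[7:0] into bits 23:16 of data[1]:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t data0, data1;
	uint16_t devid = 0x1234, qdep = 4, pasid = 0xabcd;

	data0  = devid;
	data0 |= ((pasid >> 8) & 0xff) << 16;	/* PASID[15:8] */
	data0 |= (qdep & 0xff) << 24;
	data1  = devid;
	data1 |= (pasid & 0xff) << 16;		/* PASID[7:0]  */

	/* prints data[0]=04ab1234 data[1]=00cd1234 */
	printf("data[0]=%08x data[1]=%08x\n", data0, data1);
	return 0;
}
```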
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 28b4bea7c109..b76c58dbe30c 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
| @@ -150,7 +150,7 @@ int amd_iommus_present; | |||
| 150 | bool amd_iommu_np_cache __read_mostly; | 150 | bool amd_iommu_np_cache __read_mostly; |
| 151 | bool amd_iommu_iotlb_sup __read_mostly = true; | 151 | bool amd_iommu_iotlb_sup __read_mostly = true; |
| 152 | 152 | ||
| 153 | u32 amd_iommu_max_pasids __read_mostly = ~0; | 153 | u32 amd_iommu_max_pasid __read_mostly = ~0; |
| 154 | 154 | ||
| 155 | bool amd_iommu_v2_present __read_mostly; | 155 | bool amd_iommu_v2_present __read_mostly; |
| 156 | bool amd_iommu_pc_present __read_mostly; | 156 | bool amd_iommu_pc_present __read_mostly; |
| @@ -1231,14 +1231,16 @@ static int iommu_init_pci(struct amd_iommu *iommu) | |||
| 1231 | 1231 | ||
| 1232 | if (iommu_feature(iommu, FEATURE_GT)) { | 1232 | if (iommu_feature(iommu, FEATURE_GT)) { |
| 1233 | int glxval; | 1233 | int glxval; |
| 1234 | u32 pasids; | 1234 | u32 max_pasid; |
| 1235 | u64 shift; | 1235 | u64 pasmax; |
| 1236 | 1236 | ||
| 1237 | shift = iommu->features & FEATURE_PASID_MASK; | 1237 | pasmax = iommu->features & FEATURE_PASID_MASK; |
| 1238 | shift >>= FEATURE_PASID_SHIFT; | 1238 | pasmax >>= FEATURE_PASID_SHIFT; |
| 1239 | pasids = (1 << shift); | 1239 | max_pasid = (1 << (pasmax + 1)) - 1; |
| 1240 | 1240 | ||
| 1241 | amd_iommu_max_pasids = min(amd_iommu_max_pasids, pasids); | 1241 | amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid); |
| 1242 | |||
| 1243 | BUG_ON(amd_iommu_max_pasid & ~PASID_MASK); | ||
| 1242 | 1244 | ||
| 1243 | glxval = iommu->features & FEATURE_GLXVAL_MASK; | 1245 | glxval = iommu->features & FEATURE_GLXVAL_MASK; |
| 1244 | glxval >>= FEATURE_GLXVAL_SHIFT; | 1246 | glxval >>= FEATURE_GLXVAL_SHIFT; |
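A quick worked example of the new maximum-PASID computation above, assuming (for illustration) a FEATURE_PASID field value of 15, i.e. 16 implemented PASID bits:

```c
/* pasmax is the raw FEATURE_PASID field value; the largest valid PASID is
 * 2^(pasmax + 1) - 1. For pasmax == 15 this yields 0xffff, whereas the
 * old "pasids = 1 << shift" computation produced 0x8000. */
static inline unsigned int example_max_pasid(unsigned int pasmax)
{
	return (1U << (pasmax + 1)) - 1;
}
```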
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index cff039df056e..f1a5abf11acf 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
| @@ -99,7 +99,12 @@ | |||
| 99 | #define FEATURE_GLXVAL_SHIFT 14 | 99 | #define FEATURE_GLXVAL_SHIFT 14 |
| 100 | #define FEATURE_GLXVAL_MASK (0x03ULL << FEATURE_GLXVAL_SHIFT) | 100 | #define FEATURE_GLXVAL_MASK (0x03ULL << FEATURE_GLXVAL_SHIFT) |
| 101 | 101 | ||
| 102 | #define PASID_MASK 0x000fffff | 102 | /* Note: |
| 103 | * The current driver only supports 16-bit PASIDs. | ||
| 104 | * Currently, hardware only implements up to 16-bit PASIDs | ||
| 105 | * even though the spec says it could have up to 20 bits. | ||
| 106 | */ | ||
| 107 | #define PASID_MASK 0x0000ffff | ||
| 103 | 108 | ||
| 104 | /* MMIO status bits */ | 109 | /* MMIO status bits */ |
| 105 | #define MMIO_STATUS_EVT_INT_MASK (1 << 1) | 110 | #define MMIO_STATUS_EVT_INT_MASK (1 << 1) |
| @@ -697,8 +702,8 @@ extern unsigned long *amd_iommu_pd_alloc_bitmap; | |||
| 697 | */ | 702 | */ |
| 698 | extern u32 amd_iommu_unmap_flush; | 703 | extern u32 amd_iommu_unmap_flush; |
| 699 | 704 | ||
| 700 | /* Smallest number of PASIDs supported by any IOMMU in the system */ | 705 | /* Smallest max PASID supported by any IOMMU in the system */ |
| 701 | extern u32 amd_iommu_max_pasids; | 706 | extern u32 amd_iommu_max_pasid; |
| 702 | 707 | ||
| 703 | extern bool amd_iommu_v2_present; | 708 | extern bool amd_iommu_v2_present; |
| 704 | 709 | ||
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 1d9ab39af29f..8b89e33a89fe 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
| @@ -48,7 +48,7 @@ | |||
| 48 | #include <asm/pgalloc.h> | 48 | #include <asm/pgalloc.h> |
| 49 | 49 | ||
| 50 | /* Maximum number of stream IDs assigned to a single device */ | 50 | /* Maximum number of stream IDs assigned to a single device */ |
| 51 | #define MAX_MASTER_STREAMIDS 8 | 51 | #define MAX_MASTER_STREAMIDS MAX_PHANDLE_ARGS |
| 52 | 52 | ||
| 53 | /* Maximum number of context banks per SMMU */ | 53 | /* Maximum number of context banks per SMMU */ |
| 54 | #define ARM_SMMU_MAX_CBS 128 | 54 | #define ARM_SMMU_MAX_CBS 128 |
| @@ -60,6 +60,16 @@ | |||
| 60 | #define ARM_SMMU_GR0(smmu) ((smmu)->base) | 60 | #define ARM_SMMU_GR0(smmu) ((smmu)->base) |
| 61 | #define ARM_SMMU_GR1(smmu) ((smmu)->base + (smmu)->pagesize) | 61 | #define ARM_SMMU_GR1(smmu) ((smmu)->base + (smmu)->pagesize) |
| 62 | 62 | ||
| 63 | /* | ||
| 64 | * SMMU global address space with conditional offset to access secure | ||
| 65 | * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448, | ||
| 66 | * nsGFSYNR0: 0x450) | ||
| 67 | */ | ||
| 68 | #define ARM_SMMU_GR0_NS(smmu) \ | ||
| 69 | ((smmu)->base + \ | ||
| 70 | ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \ | ||
| 71 | ? 0x400 : 0)) | ||
| 72 | |||
| 63 | /* Page table bits */ | 73 | /* Page table bits */ |
| 64 | #define ARM_SMMU_PTE_XN (((pteval_t)3) << 53) | 74 | #define ARM_SMMU_PTE_XN (((pteval_t)3) << 53) |
| 65 | #define ARM_SMMU_PTE_CONT (((pteval_t)1) << 52) | 75 | #define ARM_SMMU_PTE_CONT (((pteval_t)1) << 52) |
| @@ -351,6 +361,9 @@ struct arm_smmu_device { | |||
| 351 | #define ARM_SMMU_FEAT_TRANS_S2 (1 << 3) | 361 | #define ARM_SMMU_FEAT_TRANS_S2 (1 << 3) |
| 352 | #define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4) | 362 | #define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4) |
| 353 | u32 features; | 363 | u32 features; |
| 364 | |||
| 365 | #define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0) | ||
| 366 | u32 options; | ||
| 354 | int version; | 367 | int version; |
| 355 | 368 | ||
| 356 | u32 num_context_banks; | 369 | u32 num_context_banks; |
| @@ -401,6 +414,29 @@ struct arm_smmu_domain { | |||
| 401 | static DEFINE_SPINLOCK(arm_smmu_devices_lock); | 414 | static DEFINE_SPINLOCK(arm_smmu_devices_lock); |
| 402 | static LIST_HEAD(arm_smmu_devices); | 415 | static LIST_HEAD(arm_smmu_devices); |
| 403 | 416 | ||
| 417 | struct arm_smmu_option_prop { | ||
| 418 | u32 opt; | ||
| 419 | const char *prop; | ||
| 420 | }; | ||
| 421 | |||
| 422 | static struct arm_smmu_option_prop arm_smmu_options [] = { | ||
| 423 | { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" }, | ||
| 424 | { 0, NULL}, | ||
| 425 | }; | ||
| 426 | |||
| 427 | static void parse_driver_options(struct arm_smmu_device *smmu) | ||
| 428 | { | ||
| 429 | int i = 0; | ||
| 430 | do { | ||
| 431 | if (of_property_read_bool(smmu->dev->of_node, | ||
| 432 | arm_smmu_options[i].prop)) { | ||
| 433 | smmu->options |= arm_smmu_options[i].opt; | ||
| 434 | dev_notice(smmu->dev, "option %s\n", | ||
| 435 | arm_smmu_options[i].prop); | ||
| 436 | } | ||
| 437 | } while (arm_smmu_options[++i].opt); | ||
| 438 | } | ||
| 439 | |||
| 404 | static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu, | 440 | static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu, |
| 405 | struct device_node *dev_node) | 441 | struct device_node *dev_node) |
| 406 | { | 442 | { |
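The option table above is written so further implementation quirks can simply be appended; a hypothetical second entry would look like the following (both ARM_SMMU_OPT_EXAMPLE_QUIRK and the property name are invented for illustration):

```c
#define ARM_SMMU_OPT_EXAMPLE_QUIRK	(1 << 1)	/* hypothetical */

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ ARM_SMMU_OPT_EXAMPLE_QUIRK, "vendor,smmu-example-quirk" },
	{ 0, NULL },
};
```

parse_driver_options() walks the table until it hits the { 0, NULL } terminator, so no other change would be needed to honour the new entry.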
| @@ -614,16 +650,16 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev) | |||
| 614 | { | 650 | { |
| 615 | u32 gfsr, gfsynr0, gfsynr1, gfsynr2; | 651 | u32 gfsr, gfsynr0, gfsynr1, gfsynr2; |
| 616 | struct arm_smmu_device *smmu = dev; | 652 | struct arm_smmu_device *smmu = dev; |
| 617 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | 653 | void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu); |
| 618 | 654 | ||
| 619 | gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR); | 655 | gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR); |
| 620 | if (!gfsr) | ||
| 621 | return IRQ_NONE; | ||
| 622 | |||
| 623 | gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0); | 656 | gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0); |
| 624 | gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1); | 657 | gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1); |
| 625 | gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2); | 658 | gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2); |
| 626 | 659 | ||
| 660 | if (!gfsr) | ||
| 661 | return IRQ_NONE; | ||
| 662 | |||
| 627 | dev_err_ratelimited(smmu->dev, | 663 | dev_err_ratelimited(smmu->dev, |
| 628 | "Unexpected global fault, this could be serious\n"); | 664 | "Unexpected global fault, this could be serious\n"); |
| 629 | dev_err_ratelimited(smmu->dev, | 665 | dev_err_ratelimited(smmu->dev, |
| @@ -642,7 +678,7 @@ static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr, | |||
| 642 | 678 | ||
| 643 | /* Ensure new page tables are visible to the hardware walker */ | 679 | /* Ensure new page tables are visible to the hardware walker */ |
| 644 | if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) { | 680 | if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) { |
| 645 | dsb(); | 681 | dsb(ishst); |
| 646 | } else { | 682 | } else { |
| 647 | /* | 683 | /* |
| 648 | * If the SMMU can't walk tables in the CPU caches, treat them | 684 | * If the SMMU can't walk tables in the CPU caches, treat them |
| @@ -990,9 +1026,8 @@ static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain) | |||
| 990 | 1026 | ||
| 991 | /* | 1027 | /* |
| 992 | * Recursively free the page tables for this domain. We don't | 1028 | * Recursively free the page tables for this domain. We don't |
| 993 | * care about speculative TLB filling, because the TLB will be | 1029 | * care about speculative TLB filling because the tables should |
| 994 | * nuked next time this context bank is re-allocated and no devices | 1030 | * not be active in any context bank at this point (SCTLR.M is 0). |
| 995 | * currently map to these tables. | ||
| 996 | */ | 1031 | */ |
| 997 | pgd = pgd_base; | 1032 | pgd = pgd_base; |
| 998 | for (i = 0; i < PTRS_PER_PGD; ++i) { | 1033 | for (i = 0; i < PTRS_PER_PGD; ++i) { |
| @@ -1218,7 +1253,7 @@ static bool arm_smmu_pte_is_contiguous_range(unsigned long addr, | |||
| 1218 | 1253 | ||
| 1219 | static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd, | 1254 | static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd, |
| 1220 | unsigned long addr, unsigned long end, | 1255 | unsigned long addr, unsigned long end, |
| 1221 | unsigned long pfn, int flags, int stage) | 1256 | unsigned long pfn, int prot, int stage) |
| 1222 | { | 1257 | { |
| 1223 | pte_t *pte, *start; | 1258 | pte_t *pte, *start; |
| 1224 | pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN; | 1259 | pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN; |
| @@ -1240,28 +1275,28 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd, | |||
| 1240 | 1275 | ||
| 1241 | if (stage == 1) { | 1276 | if (stage == 1) { |
| 1242 | pteval |= ARM_SMMU_PTE_AP_UNPRIV | ARM_SMMU_PTE_nG; | 1277 | pteval |= ARM_SMMU_PTE_AP_UNPRIV | ARM_SMMU_PTE_nG; |
| 1243 | if (!(flags & IOMMU_WRITE) && (flags & IOMMU_READ)) | 1278 | if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ)) |
| 1244 | pteval |= ARM_SMMU_PTE_AP_RDONLY; | 1279 | pteval |= ARM_SMMU_PTE_AP_RDONLY; |
| 1245 | 1280 | ||
| 1246 | if (flags & IOMMU_CACHE) | 1281 | if (prot & IOMMU_CACHE) |
| 1247 | pteval |= (MAIR_ATTR_IDX_CACHE << | 1282 | pteval |= (MAIR_ATTR_IDX_CACHE << |
| 1248 | ARM_SMMU_PTE_ATTRINDX_SHIFT); | 1283 | ARM_SMMU_PTE_ATTRINDX_SHIFT); |
| 1249 | } else { | 1284 | } else { |
| 1250 | pteval |= ARM_SMMU_PTE_HAP_FAULT; | 1285 | pteval |= ARM_SMMU_PTE_HAP_FAULT; |
| 1251 | if (flags & IOMMU_READ) | 1286 | if (prot & IOMMU_READ) |
| 1252 | pteval |= ARM_SMMU_PTE_HAP_READ; | 1287 | pteval |= ARM_SMMU_PTE_HAP_READ; |
| 1253 | if (flags & IOMMU_WRITE) | 1288 | if (prot & IOMMU_WRITE) |
| 1254 | pteval |= ARM_SMMU_PTE_HAP_WRITE; | 1289 | pteval |= ARM_SMMU_PTE_HAP_WRITE; |
| 1255 | if (flags & IOMMU_CACHE) | 1290 | if (prot & IOMMU_CACHE) |
| 1256 | pteval |= ARM_SMMU_PTE_MEMATTR_OIWB; | 1291 | pteval |= ARM_SMMU_PTE_MEMATTR_OIWB; |
| 1257 | else | 1292 | else |
| 1258 | pteval |= ARM_SMMU_PTE_MEMATTR_NC; | 1293 | pteval |= ARM_SMMU_PTE_MEMATTR_NC; |
| 1259 | } | 1294 | } |
| 1260 | 1295 | ||
| 1261 | /* If no access, create a faulting entry to avoid TLB fills */ | 1296 | /* If no access, create a faulting entry to avoid TLB fills */ |
| 1262 | if (flags & IOMMU_EXEC) | 1297 | if (prot & IOMMU_EXEC) |
| 1263 | pteval &= ~ARM_SMMU_PTE_XN; | 1298 | pteval &= ~ARM_SMMU_PTE_XN; |
| 1264 | else if (!(flags & (IOMMU_READ | IOMMU_WRITE))) | 1299 | else if (!(prot & (IOMMU_READ | IOMMU_WRITE))) |
| 1265 | pteval &= ~ARM_SMMU_PTE_PAGE; | 1300 | pteval &= ~ARM_SMMU_PTE_PAGE; |
| 1266 | 1301 | ||
| 1267 | pteval |= ARM_SMMU_PTE_SH_IS; | 1302 | pteval |= ARM_SMMU_PTE_SH_IS; |
| @@ -1323,7 +1358,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd, | |||
| 1323 | 1358 | ||
| 1324 | static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud, | 1359 | static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud, |
| 1325 | unsigned long addr, unsigned long end, | 1360 | unsigned long addr, unsigned long end, |
| 1326 | phys_addr_t phys, int flags, int stage) | 1361 | phys_addr_t phys, int prot, int stage) |
| 1327 | { | 1362 | { |
| 1328 | int ret; | 1363 | int ret; |
| 1329 | pmd_t *pmd; | 1364 | pmd_t *pmd; |
| @@ -1347,7 +1382,7 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud, | |||
| 1347 | do { | 1382 | do { |
| 1348 | next = pmd_addr_end(addr, end); | 1383 | next = pmd_addr_end(addr, end); |
| 1349 | ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, end, pfn, | 1384 | ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, end, pfn, |
| 1350 | flags, stage); | 1385 | prot, stage); |
| 1351 | phys += next - addr; | 1386 | phys += next - addr; |
| 1352 | } while (pmd++, addr = next, addr < end); | 1387 | } while (pmd++, addr = next, addr < end); |
| 1353 | 1388 | ||
| @@ -1356,7 +1391,7 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud, | |||
| 1356 | 1391 | ||
| 1357 | static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd, | 1392 | static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd, |
| 1358 | unsigned long addr, unsigned long end, | 1393 | unsigned long addr, unsigned long end, |
| 1359 | phys_addr_t phys, int flags, int stage) | 1394 | phys_addr_t phys, int prot, int stage) |
| 1360 | { | 1395 | { |
| 1361 | int ret = 0; | 1396 | int ret = 0; |
| 1362 | pud_t *pud; | 1397 | pud_t *pud; |
| @@ -1380,7 +1415,7 @@ static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd, | |||
| 1380 | do { | 1415 | do { |
| 1381 | next = pud_addr_end(addr, end); | 1416 | next = pud_addr_end(addr, end); |
| 1382 | ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys, | 1417 | ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys, |
| 1383 | flags, stage); | 1418 | prot, stage); |
| 1384 | phys += next - addr; | 1419 | phys += next - addr; |
| 1385 | } while (pud++, addr = next, addr < end); | 1420 | } while (pud++, addr = next, addr < end); |
| 1386 | 1421 | ||
| @@ -1389,7 +1424,7 @@ static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd, | |||
| 1389 | 1424 | ||
| 1390 | static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain, | 1425 | static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain, |
| 1391 | unsigned long iova, phys_addr_t paddr, | 1426 | unsigned long iova, phys_addr_t paddr, |
| 1392 | size_t size, int flags) | 1427 | size_t size, int prot) |
| 1393 | { | 1428 | { |
| 1394 | int ret, stage; | 1429 | int ret, stage; |
| 1395 | unsigned long end; | 1430 | unsigned long end; |
| @@ -1397,7 +1432,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain, | |||
| 1397 | struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; | 1432 | struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; |
| 1398 | pgd_t *pgd = root_cfg->pgd; | 1433 | pgd_t *pgd = root_cfg->pgd; |
| 1399 | struct arm_smmu_device *smmu = root_cfg->smmu; | 1434 | struct arm_smmu_device *smmu = root_cfg->smmu; |
| 1400 | unsigned long irqflags; | 1435 | unsigned long flags; |
| 1401 | 1436 | ||
| 1402 | if (root_cfg->cbar == CBAR_TYPE_S2_TRANS) { | 1437 | if (root_cfg->cbar == CBAR_TYPE_S2_TRANS) { |
| 1403 | stage = 2; | 1438 | stage = 2; |
| @@ -1420,14 +1455,14 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain, | |||
| 1420 | if (paddr & ~output_mask) | 1455 | if (paddr & ~output_mask) |
| 1421 | return -ERANGE; | 1456 | return -ERANGE; |
| 1422 | 1457 | ||
| 1423 | spin_lock_irqsave(&smmu_domain->lock, irqflags); | 1458 | spin_lock_irqsave(&smmu_domain->lock, flags); |
| 1424 | pgd += pgd_index(iova); | 1459 | pgd += pgd_index(iova); |
| 1425 | end = iova + size; | 1460 | end = iova + size; |
| 1426 | do { | 1461 | do { |
| 1427 | unsigned long next = pgd_addr_end(iova, end); | 1462 | unsigned long next = pgd_addr_end(iova, end); |
| 1428 | 1463 | ||
| 1429 | ret = arm_smmu_alloc_init_pud(smmu, pgd, iova, next, paddr, | 1464 | ret = arm_smmu_alloc_init_pud(smmu, pgd, iova, next, paddr, |
| 1430 | flags, stage); | 1465 | prot, stage); |
| 1431 | if (ret) | 1466 | if (ret) |
| 1432 | goto out_unlock; | 1467 | goto out_unlock; |
| 1433 | 1468 | ||
| @@ -1436,13 +1471,13 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain, | |||
| 1436 | } while (pgd++, iova != end); | 1471 | } while (pgd++, iova != end); |
| 1437 | 1472 | ||
| 1438 | out_unlock: | 1473 | out_unlock: |
| 1439 | spin_unlock_irqrestore(&smmu_domain->lock, irqflags); | 1474 | spin_unlock_irqrestore(&smmu_domain->lock, flags); |
| 1440 | 1475 | ||
| 1441 | return ret; | 1476 | return ret; |
| 1442 | } | 1477 | } |
| 1443 | 1478 | ||
| 1444 | static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, | 1479 | static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, |
| 1445 | phys_addr_t paddr, size_t size, int flags) | 1480 | phys_addr_t paddr, size_t size, int prot) |
| 1446 | { | 1481 | { |
| 1447 | struct arm_smmu_domain *smmu_domain = domain->priv; | 1482 | struct arm_smmu_domain *smmu_domain = domain->priv; |
| 1448 | 1483 | ||
| @@ -1453,7 +1488,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, | |||
| 1453 | if ((phys_addr_t)iova & ~smmu_domain->output_mask) | 1488 | if ((phys_addr_t)iova & ~smmu_domain->output_mask) |
| 1454 | return -ERANGE; | 1489 | return -ERANGE; |
| 1455 | 1490 | ||
| 1456 | return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, flags); | 1491 | return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, prot); |
| 1457 | } | 1492 | } |
| 1458 | 1493 | ||
| 1459 | static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, | 1494 | static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, |
| @@ -1597,9 +1632,9 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) | |||
| 1597 | int i = 0; | 1632 | int i = 0; |
| 1598 | u32 reg; | 1633 | u32 reg; |
| 1599 | 1634 | ||
| 1600 | /* Clear Global FSR */ | 1635 | /* clear global FSR */ |
| 1601 | reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR); | 1636 | reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR); |
| 1602 | writel(reg, gr0_base + ARM_SMMU_GR0_sGFSR); | 1637 | writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR); |
| 1603 | 1638 | ||
| 1604 | /* Mark all SMRn as invalid and all S2CRn as bypass */ | 1639 | /* Mark all SMRn as invalid and all S2CRn as bypass */ |
| 1605 | for (i = 0; i < smmu->num_mapping_groups; ++i) { | 1640 | for (i = 0; i < smmu->num_mapping_groups; ++i) { |
| @@ -1619,7 +1654,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) | |||
| 1619 | writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH); | 1654 | writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH); |
| 1620 | writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH); | 1655 | writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH); |
| 1621 | 1656 | ||
| 1622 | reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sCR0); | 1657 | reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); |
| 1623 | 1658 | ||
| 1624 | /* Enable fault reporting */ | 1659 | /* Enable fault reporting */ |
| 1625 | reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE); | 1660 | reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE); |
| @@ -1638,7 +1673,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) | |||
| 1638 | 1673 | ||
| 1639 | /* Push the button */ | 1674 | /* Push the button */ |
| 1640 | arm_smmu_tlb_sync(smmu); | 1675 | arm_smmu_tlb_sync(smmu); |
| 1641 | writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sCR0); | 1676 | writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); |
| 1642 | } | 1677 | } |
| 1643 | 1678 | ||
| 1644 | static int arm_smmu_id_size_to_bits(int size) | 1679 | static int arm_smmu_id_size_to_bits(int size) |
| @@ -1885,6 +1920,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) | |||
| 1885 | if (err) | 1920 | if (err) |
| 1886 | goto out_put_parent; | 1921 | goto out_put_parent; |
| 1887 | 1922 | ||
| 1923 | parse_driver_options(smmu); | ||
| 1924 | |||
| 1888 | if (smmu->version > 1 && | 1925 | if (smmu->version > 1 && |
| 1889 | smmu->num_context_banks != smmu->num_context_irqs) { | 1926 | smmu->num_context_banks != smmu->num_context_irqs) { |
| 1890 | dev_err(dev, | 1927 | dev_err(dev, |
| @@ -1969,7 +2006,7 @@ static int arm_smmu_device_remove(struct platform_device *pdev) | |||
| 1969 | free_irq(smmu->irqs[i], smmu); | 2006 | free_irq(smmu->irqs[i], smmu); |
| 1970 | 2007 | ||
| 1971 | /* Turn the thing off */ | 2008 | /* Turn the thing off */ |
| 1972 | writel_relaxed(sCR0_CLIENTPD, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_sCR0); | 2009 | writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); |
| 1973 | return 0; | 2010 | return 0; |
| 1974 | } | 2011 | } |
| 1975 | 2012 | ||
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 158156543410..f445c10df8df 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
| @@ -43,14 +43,24 @@ | |||
| 43 | 43 | ||
| 44 | #include "irq_remapping.h" | 44 | #include "irq_remapping.h" |
| 45 | 45 | ||
| 46 | /* No locks are needed as DMA remapping hardware unit | 46 | /* |
| 47 | * list is constructed at boot time and hotplug of | 47 | * Assumptions: |
| 48 | * these units are not supported by the architecture. | 48 | * 1) The hotplug framework guarentees that DMAR unit will be hot-added |
| 49 | * before IO devices managed by that unit. | ||
| 50 | * 2) The hotplug framework guarantees that DMAR unit will be hot-removed | ||
| 51 | * after IO devices managed by that unit. | ||
| 52 | * 3) Hotplug events are rare. | ||
| 53 | * | ||
| 54 | * Locking rules for DMA and interrupt remapping related global data structures: | ||
| 55 | * 1) Use dmar_global_lock in process context | ||
| 56 | * 2) Use RCU in interrupt context | ||
| 49 | */ | 57 | */ |
| 58 | DECLARE_RWSEM(dmar_global_lock); | ||
| 50 | LIST_HEAD(dmar_drhd_units); | 59 | LIST_HEAD(dmar_drhd_units); |
| 51 | 60 | ||
| 52 | struct acpi_table_header * __initdata dmar_tbl; | 61 | struct acpi_table_header * __initdata dmar_tbl; |
| 53 | static acpi_size dmar_tbl_size; | 62 | static acpi_size dmar_tbl_size; |
| 63 | static int dmar_dev_scope_status = 1; | ||
| 54 | 64 | ||
| 55 | static int alloc_iommu(struct dmar_drhd_unit *drhd); | 65 | static int alloc_iommu(struct dmar_drhd_unit *drhd); |
| 56 | static void free_iommu(struct intel_iommu *iommu); | 66 | static void free_iommu(struct intel_iommu *iommu); |
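The locking rules spelled out in the new comment boil down to two access patterns; a short sketch of each (the helper functions are illustrative and assume the declarations exported via include/linux/dmar.h):

```c
#include <linux/dmar.h>
#include <linux/rculist.h>
#include <linux/rwsem.h>

/* Process context: may sleep, so take the global rwsem. */
static void example_walk_drhd_units_sleepable(void)
{
	struct dmar_drhd_unit *drhd;

	down_read(&dmar_global_lock);
	list_for_each_entry(drhd, &dmar_drhd_units, list) {
		/* safe to sleep while inspecting drhd here */
	}
	up_read(&dmar_global_lock);
}

/* Interrupt/atomic context: rely on RCU instead of the rwsem. */
static void example_walk_drhd_units_atomic(void)
{
	struct dmar_drhd_unit *drhd;

	rcu_read_lock();
	list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) {
		/* must not sleep; entries may be unlinked concurrently */
	}
	rcu_read_unlock();
}
```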
| @@ -62,73 +72,20 @@ static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd) | |||
| 62 | * the very end. | 72 | * the very end. |
| 63 | */ | 73 | */ |
| 64 | if (drhd->include_all) | 74 | if (drhd->include_all) |
| 65 | list_add_tail(&drhd->list, &dmar_drhd_units); | 75 | list_add_tail_rcu(&drhd->list, &dmar_drhd_units); |
| 66 | else | 76 | else |
| 67 | list_add(&drhd->list, &dmar_drhd_units); | 77 | list_add_rcu(&drhd->list, &dmar_drhd_units); |
| 68 | } | 78 | } |
| 69 | 79 | ||
| 70 | static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope, | 80 | void *dmar_alloc_dev_scope(void *start, void *end, int *cnt) |
| 71 | struct pci_dev **dev, u16 segment) | ||
| 72 | { | ||
| 73 | struct pci_bus *bus; | ||
| 74 | struct pci_dev *pdev = NULL; | ||
| 75 | struct acpi_dmar_pci_path *path; | ||
| 76 | int count; | ||
| 77 | |||
| 78 | bus = pci_find_bus(segment, scope->bus); | ||
| 79 | path = (struct acpi_dmar_pci_path *)(scope + 1); | ||
| 80 | count = (scope->length - sizeof(struct acpi_dmar_device_scope)) | ||
| 81 | / sizeof(struct acpi_dmar_pci_path); | ||
| 82 | |||
| 83 | while (count) { | ||
| 84 | if (pdev) | ||
| 85 | pci_dev_put(pdev); | ||
| 86 | /* | ||
| 87 | * Some BIOSes list non-exist devices in DMAR table, just | ||
| 88 | * ignore it | ||
| 89 | */ | ||
| 90 | if (!bus) { | ||
| 91 | pr_warn("Device scope bus [%d] not found\n", scope->bus); | ||
| 92 | break; | ||
| 93 | } | ||
| 94 | pdev = pci_get_slot(bus, PCI_DEVFN(path->device, path->function)); | ||
| 95 | if (!pdev) { | ||
| 96 | /* warning will be printed below */ | ||
| 97 | break; | ||
| 98 | } | ||
| 99 | path ++; | ||
| 100 | count --; | ||
| 101 | bus = pdev->subordinate; | ||
| 102 | } | ||
| 103 | if (!pdev) { | ||
| 104 | pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n", | ||
| 105 | segment, scope->bus, path->device, path->function); | ||
| 106 | return 0; | ||
| 107 | } | ||
| 108 | if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && \ | ||
| 109 | pdev->subordinate) || (scope->entry_type == \ | ||
| 110 | ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) { | ||
| 111 | pci_dev_put(pdev); | ||
| 112 | pr_warn("Device scope type does not match for %s\n", | ||
| 113 | pci_name(pdev)); | ||
| 114 | return -EINVAL; | ||
| 115 | } | ||
| 116 | *dev = pdev; | ||
| 117 | return 0; | ||
| 118 | } | ||
| 119 | |||
| 120 | int __init dmar_parse_dev_scope(void *start, void *end, int *cnt, | ||
| 121 | struct pci_dev ***devices, u16 segment) | ||
| 122 | { | 81 | { |
| 123 | struct acpi_dmar_device_scope *scope; | 82 | struct acpi_dmar_device_scope *scope; |
| 124 | void * tmp = start; | ||
| 125 | int index; | ||
| 126 | int ret; | ||
| 127 | 83 | ||
| 128 | *cnt = 0; | 84 | *cnt = 0; |
| 129 | while (start < end) { | 85 | while (start < end) { |
| 130 | scope = start; | 86 | scope = start; |
| 131 | if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT || | 87 | if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ACPI || |
| 88 | scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT || | ||
| 132 | scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) | 89 | scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) |
| 133 | (*cnt)++; | 90 | (*cnt)++; |
| 134 | else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC && | 91 | else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC && |
| @@ -138,43 +95,236 @@ int __init dmar_parse_dev_scope(void *start, void *end, int *cnt, | |||
| 138 | start += scope->length; | 95 | start += scope->length; |
| 139 | } | 96 | } |
| 140 | if (*cnt == 0) | 97 | if (*cnt == 0) |
| 141 | return 0; | 98 | return NULL; |
| 142 | 99 | ||
| 143 | *devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL); | 100 | return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL); |
| 144 | if (!*devices) | 101 | } |
| 145 | return -ENOMEM; | ||
| 146 | 102 | ||
| 147 | start = tmp; | 103 | void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt) |
| 148 | index = 0; | 104 | { |
| 149 | while (start < end) { | 105 | int i; |
| 106 | struct device *tmp_dev; | ||
| 107 | |||
| 108 | if (*devices && *cnt) { | ||
| 109 | for_each_active_dev_scope(*devices, *cnt, i, tmp_dev) | ||
| 110 | put_device(tmp_dev); | ||
| 111 | kfree(*devices); | ||
| 112 | } | ||
| 113 | |||
| 114 | *devices = NULL; | ||
| 115 | *cnt = 0; | ||
| 116 | } | ||
| 117 | |||
| 118 | /* Optimize out kzalloc()/kfree() for normal cases */ | ||
| 119 | static char dmar_pci_notify_info_buf[64]; | ||
| 120 | |||
| 121 | static struct dmar_pci_notify_info * | ||
| 122 | dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event) | ||
| 123 | { | ||
| 124 | int level = 0; | ||
| 125 | size_t size; | ||
| 126 | struct pci_dev *tmp; | ||
| 127 | struct dmar_pci_notify_info *info; | ||
| 128 | |||
| 129 | BUG_ON(dev->is_virtfn); | ||
| 130 | |||
| 131 | /* Only generate path[] for device addition event */ | ||
| 132 | if (event == BUS_NOTIFY_ADD_DEVICE) | ||
| 133 | for (tmp = dev; tmp; tmp = tmp->bus->self) | ||
| 134 | level++; | ||
| 135 | |||
| 136 | size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path); | ||
| 137 | if (size <= sizeof(dmar_pci_notify_info_buf)) { | ||
| 138 | info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf; | ||
| 139 | } else { | ||
| 140 | info = kzalloc(size, GFP_KERNEL); | ||
| 141 | if (!info) { | ||
| 142 | pr_warn("Out of memory when allocating notify_info " | ||
| 143 | "for %s.\n", pci_name(dev)); | ||
| 144 | if (dmar_dev_scope_status == 0) | ||
| 145 | dmar_dev_scope_status = -ENOMEM; | ||
| 146 | return NULL; | ||
| 147 | } | ||
| 148 | } | ||
| 149 | |||
| 150 | info->event = event; | ||
| 151 | info->dev = dev; | ||
| 152 | info->seg = pci_domain_nr(dev->bus); | ||
| 153 | info->level = level; | ||
| 154 | if (event == BUS_NOTIFY_ADD_DEVICE) { | ||
| 155 | for (tmp = dev, level--; tmp; tmp = tmp->bus->self) { | ||
| 156 | info->path[level].device = PCI_SLOT(tmp->devfn); | ||
| 157 | info->path[level].function = PCI_FUNC(tmp->devfn); | ||
| 158 | if (pci_is_root_bus(tmp->bus)) | ||
| 159 | info->bus = tmp->bus->number; | ||
| 160 | } | ||
| 161 | } | ||
| 162 | |||
| 163 | return info; | ||
| 164 | } | ||
| 165 | |||
| 166 | static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info) | ||
| 167 | { | ||
| 168 | if ((void *)info != dmar_pci_notify_info_buf) | ||
| 169 | kfree(info); | ||
| 170 | } | ||
| 171 | |||
| 172 | static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus, | ||
| 173 | struct acpi_dmar_pci_path *path, int count) | ||
| 174 | { | ||
| 175 | int i; | ||
| 176 | |||
| 177 | if (info->bus != bus) | ||
| 178 | return false; | ||
| 179 | if (info->level != count) | ||
| 180 | return false; | ||
| 181 | |||
| 182 | for (i = 0; i < count; i++) { | ||
| 183 | if (path[i].device != info->path[i].device || | ||
| 184 | path[i].function != info->path[i].function) | ||
| 185 | return false; | ||
| 186 | } | ||
| 187 | |||
| 188 | return true; | ||
| 189 | } | ||
| 190 | |||
| 191 | /* Return: > 0 if match found, 0 if no match found, < 0 if error happens */ | ||
| 192 | int dmar_insert_dev_scope(struct dmar_pci_notify_info *info, | ||
| 193 | void *start, void*end, u16 segment, | ||
| 194 | struct dmar_dev_scope *devices, | ||
| 195 | int devices_cnt) | ||
| 196 | { | ||
| 197 | int i, level; | ||
| 198 | struct device *tmp, *dev = &info->dev->dev; | ||
| 199 | struct acpi_dmar_device_scope *scope; | ||
| 200 | struct acpi_dmar_pci_path *path; | ||
| 201 | |||
| 202 | if (segment != info->seg) | ||
| 203 | return 0; | ||
| 204 | |||
| 205 | for (; start < end; start += scope->length) { | ||
| 150 | scope = start; | 206 | scope = start; |
| 151 | if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT || | 207 | if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT && |
| 152 | scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) { | 208 | scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE) |
| 153 | ret = dmar_parse_one_dev_scope(scope, | 209 | continue; |
| 154 | &(*devices)[index], segment); | 210 | |
| 155 | if (ret) { | 211 | path = (struct acpi_dmar_pci_path *)(scope + 1); |
| 156 | dmar_free_dev_scope(devices, cnt); | 212 | level = (scope->length - sizeof(*scope)) / sizeof(*path); |
| 157 | return ret; | 213 | if (!dmar_match_pci_path(info, scope->bus, path, level)) |
| 158 | } | 214 | continue; |
| 159 | index ++; | 215 | |
| 216 | if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT) ^ | ||
| 217 | (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL)) { | ||
| 218 | pr_warn("Device scope type does not match for %s\n", | ||
| 219 | pci_name(info->dev)); | ||
| 220 | return -EINVAL; | ||
| 160 | } | 221 | } |
| 161 | start += scope->length; | 222 | |
| 223 | for_each_dev_scope(devices, devices_cnt, i, tmp) | ||
| 224 | if (tmp == NULL) { | ||
| 225 | devices[i].bus = info->dev->bus->number; | ||
| 226 | devices[i].devfn = info->dev->devfn; | ||
| 227 | rcu_assign_pointer(devices[i].dev, | ||
| 228 | get_device(dev)); | ||
| 229 | return 1; | ||
| 230 | } | ||
| 231 | BUG_ON(i >= devices_cnt); | ||
| 162 | } | 232 | } |
| 163 | 233 | ||
| 164 | return 0; | 234 | return 0; |
| 165 | } | 235 | } |
| 166 | 236 | ||
| 167 | void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt) | 237 | int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment, |
| 238 | struct dmar_dev_scope *devices, int count) | ||
| 168 | { | 239 | { |
| 169 | if (*devices && *cnt) { | 240 | int index; |
| 170 | while (--*cnt >= 0) | 241 | struct device *tmp; |
| 171 | pci_dev_put((*devices)[*cnt]); | 242 | |
| 172 | kfree(*devices); | 243 | if (info->seg != segment) |
| 173 | *devices = NULL; | 244 | return 0; |
| 174 | *cnt = 0; | 245 | |
| 246 | for_each_active_dev_scope(devices, count, index, tmp) | ||
| 247 | if (tmp == &info->dev->dev) { | ||
| 248 | rcu_assign_pointer(devices[index].dev, NULL); | ||
| 249 | synchronize_rcu(); | ||
| 250 | put_device(tmp); | ||
| 251 | return 1; | ||
| 252 | } | ||
| 253 | |||
| 254 | return 0; | ||
| 255 | } | ||
| 256 | |||
| 257 | static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info) | ||
| 258 | { | ||
| 259 | int ret = 0; | ||
| 260 | struct dmar_drhd_unit *dmaru; | ||
| 261 | struct acpi_dmar_hardware_unit *drhd; | ||
| 262 | |||
| 263 | for_each_drhd_unit(dmaru) { | ||
| 264 | if (dmaru->include_all) | ||
| 265 | continue; | ||
| 266 | |||
| 267 | drhd = container_of(dmaru->hdr, | ||
| 268 | struct acpi_dmar_hardware_unit, header); | ||
| 269 | ret = dmar_insert_dev_scope(info, (void *)(drhd + 1), | ||
| 270 | ((void *)drhd) + drhd->header.length, | ||
| 271 | dmaru->segment, | ||
| 272 | dmaru->devices, dmaru->devices_cnt); | ||
| 273 | if (ret != 0) | ||
| 274 | break; | ||
| 175 | } | 275 | } |
| 276 | if (ret >= 0) | ||
| 277 | ret = dmar_iommu_notify_scope_dev(info); | ||
| 278 | if (ret < 0 && dmar_dev_scope_status == 0) | ||
| 279 | dmar_dev_scope_status = ret; | ||
| 280 | |||
| 281 | return ret; | ||
| 176 | } | 282 | } |
| 177 | 283 | ||
| 284 | static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info) | ||
| 285 | { | ||
| 286 | struct dmar_drhd_unit *dmaru; | ||
| 287 | |||
| 288 | for_each_drhd_unit(dmaru) | ||
| 289 | if (dmar_remove_dev_scope(info, dmaru->segment, | ||
| 290 | dmaru->devices, dmaru->devices_cnt)) | ||
| 291 | break; | ||
| 292 | dmar_iommu_notify_scope_dev(info); | ||
| 293 | } | ||
| 294 | |||
| 295 | static int dmar_pci_bus_notifier(struct notifier_block *nb, | ||
| 296 | unsigned long action, void *data) | ||
| 297 | { | ||
| 298 | struct pci_dev *pdev = to_pci_dev(data); | ||
| 299 | struct dmar_pci_notify_info *info; | ||
| 300 | |||
| 301 | /* Only care about add/remove events for physical functions */ | ||
| 302 | if (pdev->is_virtfn) | ||
| 303 | return NOTIFY_DONE; | ||
| 304 | if (action != BUS_NOTIFY_ADD_DEVICE && action != BUS_NOTIFY_DEL_DEVICE) | ||
| 305 | return NOTIFY_DONE; | ||
| 306 | |||
| 307 | info = dmar_alloc_pci_notify_info(pdev, action); | ||
| 308 | if (!info) | ||
| 309 | return NOTIFY_DONE; | ||
| 310 | |||
| 311 | down_write(&dmar_global_lock); | ||
| 312 | if (action == BUS_NOTIFY_ADD_DEVICE) | ||
| 313 | dmar_pci_bus_add_dev(info); | ||
| 314 | else if (action == BUS_NOTIFY_DEL_DEVICE) | ||
| 315 | dmar_pci_bus_del_dev(info); | ||
| 316 | up_write(&dmar_global_lock); | ||
| 317 | |||
| 318 | dmar_free_pci_notify_info(info); | ||
| 319 | |||
| 320 | return NOTIFY_OK; | ||
| 321 | } | ||
| 322 | |||
| 323 | static struct notifier_block dmar_pci_bus_nb = { | ||
| 324 | .notifier_call = dmar_pci_bus_notifier, | ||
| 325 | .priority = INT_MIN, | ||
| 326 | }; | ||
| 327 | |||
| 178 | /** | 328 | /** |
| 179 | * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition | 329 | * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition |
| 180 | * structure which uniquely represent one DMA remapping hardware unit | 330 | * structure which uniquely represent one DMA remapping hardware unit |
| @@ -196,9 +346,18 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header) | |||
| 196 | dmaru->reg_base_addr = drhd->address; | 346 | dmaru->reg_base_addr = drhd->address; |
| 197 | dmaru->segment = drhd->segment; | 347 | dmaru->segment = drhd->segment; |
| 198 | dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */ | 348 | dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */ |
| 349 | dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1), | ||
| 350 | ((void *)drhd) + drhd->header.length, | ||
| 351 | &dmaru->devices_cnt); | ||
| 352 | if (dmaru->devices_cnt && dmaru->devices == NULL) { | ||
| 353 | kfree(dmaru); | ||
| 354 | return -ENOMEM; | ||
| 355 | } | ||
| 199 | 356 | ||
| 200 | ret = alloc_iommu(dmaru); | 357 | ret = alloc_iommu(dmaru); |
| 201 | if (ret) { | 358 | if (ret) { |
| 359 | dmar_free_dev_scope(&dmaru->devices, | ||
| 360 | &dmaru->devices_cnt); | ||
| 202 | kfree(dmaru); | 361 | kfree(dmaru); |
| 203 | return ret; | 362 | return ret; |
| 204 | } | 363 | } |
| @@ -215,19 +374,24 @@ static void dmar_free_drhd(struct dmar_drhd_unit *dmaru) | |||
| 215 | kfree(dmaru); | 374 | kfree(dmaru); |
| 216 | } | 375 | } |
| 217 | 376 | ||
| 218 | static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru) | 377 | static int __init dmar_parse_one_andd(struct acpi_dmar_header *header) |
| 219 | { | 378 | { |
| 220 | struct acpi_dmar_hardware_unit *drhd; | 379 | struct acpi_dmar_andd *andd = (void *)header; |
| 221 | 380 | ||
| 222 | drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr; | 381 | /* Check for NUL termination within the designated length */ |
| 223 | 382 | if (strnlen(andd->object_name, header->length - 8) == header->length - 8) { | |
| 224 | if (dmaru->include_all) | 383 | WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND, |
| 225 | return 0; | 384 | "Your BIOS is broken; ANDD object name is not NUL-terminated\n" |
| 385 | "BIOS vendor: %s; Ver: %s; Product Version: %s\n", | ||
| 386 | dmi_get_system_info(DMI_BIOS_VENDOR), | ||
| 387 | dmi_get_system_info(DMI_BIOS_VERSION), | ||
| 388 | dmi_get_system_info(DMI_PRODUCT_VERSION)); | ||
| 389 | return -EINVAL; | ||
| 390 | } | ||
| 391 | pr_info("ANDD device: %x name: %s\n", andd->device_number, | ||
| 392 | andd->object_name); | ||
| 226 | 393 | ||
| 227 | return dmar_parse_dev_scope((void *)(drhd + 1), | 394 | return 0; |
| 228 | ((void *)drhd) + drhd->header.length, | ||
| 229 | &dmaru->devices_cnt, &dmaru->devices, | ||
| 230 | drhd->segment); | ||
| 231 | } | 395 | } |
| 232 | 396 | ||
| 233 | #ifdef CONFIG_ACPI_NUMA | 397 | #ifdef CONFIG_ACPI_NUMA |
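The strnlen() test in dmar_parse_one_andd() above is easier to follow with the layout in mind: an ANDD entry is 8 fixed bytes followed by the object name, so the name field spans header->length - 8 bytes and must contain a terminating NUL within that window. A stand-alone restatement of the check (illustration only):

```c
#include <stdbool.h>
#include <string.h>

/* Returns true when the ANDD object name carries a NUL terminator within
 * its field; strnlen() == field_len means no NUL was found. */
static bool andd_name_terminated(const char *name, unsigned int entry_len)
{
	unsigned int field_len = entry_len - 8;	/* 8 fixed header bytes */

	return strnlen(name, field_len) < field_len;
}
```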
| @@ -293,6 +457,10 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header) | |||
| 293 | (unsigned long long)rhsa->base_address, | 457 | (unsigned long long)rhsa->base_address, |
| 294 | rhsa->proximity_domain); | 458 | rhsa->proximity_domain); |
| 295 | break; | 459 | break; |
| 460 | case ACPI_DMAR_TYPE_ANDD: | ||
| 461 | /* We don't print this here because we need to sanity-check | ||
| 462 | it first. So print it in dmar_parse_one_andd() instead. */ | ||
| 463 | break; | ||
| 296 | } | 464 | } |
| 297 | } | 465 | } |
| 298 | 466 | ||
| @@ -378,6 +546,9 @@ parse_dmar_table(void) | |||
| 378 | ret = dmar_parse_one_rhsa(entry_header); | 546 | ret = dmar_parse_one_rhsa(entry_header); |
| 379 | #endif | 547 | #endif |
| 380 | break; | 548 | break; |
| 549 | case ACPI_DMAR_TYPE_ANDD: | ||
| 550 | ret = dmar_parse_one_andd(entry_header); | ||
| 551 | break; | ||
| 381 | default: | 552 | default: |
| 382 | pr_warn("Unknown DMAR structure type %d\n", | 553 | pr_warn("Unknown DMAR structure type %d\n", |
| 383 | entry_header->type); | 554 | entry_header->type); |
| @@ -394,14 +565,15 @@ parse_dmar_table(void) | |||
| 394 | return ret; | 565 | return ret; |
| 395 | } | 566 | } |
| 396 | 567 | ||
| 397 | static int dmar_pci_device_match(struct pci_dev *devices[], int cnt, | 568 | static int dmar_pci_device_match(struct dmar_dev_scope devices[], |
| 398 | struct pci_dev *dev) | 569 | int cnt, struct pci_dev *dev) |
| 399 | { | 570 | { |
| 400 | int index; | 571 | int index; |
| 572 | struct device *tmp; | ||
| 401 | 573 | ||
| 402 | while (dev) { | 574 | while (dev) { |
| 403 | for (index = 0; index < cnt; index++) | 575 | for_each_active_dev_scope(devices, cnt, index, tmp) |
| 404 | if (dev == devices[index]) | 576 | if (dev_is_pci(tmp) && dev == to_pci_dev(tmp)) |
| 405 | return 1; | 577 | return 1; |
| 406 | 578 | ||
| 407 | /* Check our parent */ | 579 | /* Check our parent */ |
| @@ -414,11 +586,12 @@ static int dmar_pci_device_match(struct pci_dev *devices[], int cnt, | |||
| 414 | struct dmar_drhd_unit * | 586 | struct dmar_drhd_unit * |
| 415 | dmar_find_matched_drhd_unit(struct pci_dev *dev) | 587 | dmar_find_matched_drhd_unit(struct pci_dev *dev) |
| 416 | { | 588 | { |
| 417 | struct dmar_drhd_unit *dmaru = NULL; | 589 | struct dmar_drhd_unit *dmaru; |
| 418 | struct acpi_dmar_hardware_unit *drhd; | 590 | struct acpi_dmar_hardware_unit *drhd; |
| 419 | 591 | ||
| 420 | dev = pci_physfn(dev); | 592 | dev = pci_physfn(dev); |
| 421 | 593 | ||
| 594 | rcu_read_lock(); | ||
| 422 | for_each_drhd_unit(dmaru) { | 595 | for_each_drhd_unit(dmaru) { |
| 423 | drhd = container_of(dmaru->hdr, | 596 | drhd = container_of(dmaru->hdr, |
| 424 | struct acpi_dmar_hardware_unit, | 597 | struct acpi_dmar_hardware_unit, |
| @@ -426,44 +599,128 @@ dmar_find_matched_drhd_unit(struct pci_dev *dev) | |||
| 426 | 599 | ||
| 427 | if (dmaru->include_all && | 600 | if (dmaru->include_all && |
| 428 | drhd->segment == pci_domain_nr(dev->bus)) | 601 | drhd->segment == pci_domain_nr(dev->bus)) |
| 429 | return dmaru; | 602 | goto out; |
| 430 | 603 | ||
| 431 | if (dmar_pci_device_match(dmaru->devices, | 604 | if (dmar_pci_device_match(dmaru->devices, |
| 432 | dmaru->devices_cnt, dev)) | 605 | dmaru->devices_cnt, dev)) |
| 433 | return dmaru; | 606 | goto out; |
| 434 | } | 607 | } |
| 608 | dmaru = NULL; | ||
| 609 | out: | ||
| 610 | rcu_read_unlock(); | ||
| 435 | 611 | ||
| 436 | return NULL; | 612 | return dmaru; |
| 437 | } | 613 | } |
| 438 | 614 | ||
| 439 | int __init dmar_dev_scope_init(void) | 615 | static void __init dmar_acpi_insert_dev_scope(u8 device_number, |
| 616 | struct acpi_device *adev) | ||
| 440 | { | 617 | { |
| 441 | static int dmar_dev_scope_initialized; | 618 | struct dmar_drhd_unit *dmaru; |
| 442 | struct dmar_drhd_unit *drhd; | 619 | struct acpi_dmar_hardware_unit *drhd; |
| 443 | int ret = -ENODEV; | 620 | struct acpi_dmar_device_scope *scope; |
| 444 | 621 | struct device *tmp; | |
| 445 | if (dmar_dev_scope_initialized) | 622 | int i; |
| 446 | return dmar_dev_scope_initialized; | 623 | struct acpi_dmar_pci_path *path; |
| 447 | 624 | ||
| 448 | if (list_empty(&dmar_drhd_units)) | 625 | for_each_drhd_unit(dmaru) { |
| 449 | goto fail; | 626 | drhd = container_of(dmaru->hdr, |
| 627 | struct acpi_dmar_hardware_unit, | ||
| 628 | header); | ||
| 450 | 629 | ||
| 451 | list_for_each_entry(drhd, &dmar_drhd_units, list) { | 630 | for (scope = (void *)(drhd + 1); |
| 452 | ret = dmar_parse_dev(drhd); | 631 | (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length; |
| 453 | if (ret) | 632 | scope = ((void *)scope) + scope->length) { |
| 454 | goto fail; | 633 | if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ACPI) |
| 634 | continue; | ||
| 635 | if (scope->enumeration_id != device_number) | ||
| 636 | continue; | ||
| 637 | |||
| 638 | path = (void *)(scope + 1); | ||
| 639 | pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n", | ||
| 640 | dev_name(&adev->dev), dmaru->reg_base_addr, | ||
| 641 | scope->bus, path->device, path->function); | ||
| 642 | for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp) | ||
| 643 | if (tmp == NULL) { | ||
| 644 | dmaru->devices[i].bus = scope->bus; | ||
| 645 | dmaru->devices[i].devfn = PCI_DEVFN(path->device, | ||
| 646 | path->function); | ||
| 647 | rcu_assign_pointer(dmaru->devices[i].dev, | ||
| 648 | get_device(&adev->dev)); | ||
| 649 | return; | ||
| 650 | } | ||
| 651 | BUG_ON(i >= dmaru->devices_cnt); | ||
| 652 | } | ||
| 455 | } | 653 | } |
| 654 | pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n", | ||
| 655 | device_number, dev_name(&adev->dev)); | ||
| 656 | } | ||
| 456 | 657 | ||
| 457 | ret = dmar_parse_rmrr_atsr_dev(); | 658 | static int __init dmar_acpi_dev_scope_init(void) |
| 458 | if (ret) | 659 | { |
| 459 | goto fail; | 660 | struct acpi_dmar_andd *andd; |
| 661 | |||
| 662 | if (dmar_tbl == NULL) | ||
| 663 | return -ENODEV; | ||
| 460 | 664 | ||
| 461 | dmar_dev_scope_initialized = 1; | 665 | for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar); |
| 666 | ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length; | ||
| 667 | andd = ((void *)andd) + andd->header.length) { | ||
| 668 | if (andd->header.type == ACPI_DMAR_TYPE_ANDD) { | ||
| 669 | acpi_handle h; | ||
| 670 | struct acpi_device *adev; | ||
| 671 | |||
| 672 | if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, | ||
| 673 | andd->object_name, | ||
| 674 | &h))) { | ||
| 675 | pr_err("Failed to find handle for ACPI object %s\n", | ||
| 676 | andd->object_name); | ||
| 677 | continue; | ||
| 678 | } | ||
| 679 | acpi_bus_get_device(h, &adev); | ||
| 680 | if (!adev) { | ||
| 681 | pr_err("Failed to get device for ACPI object %s\n", | ||
| 682 | andd->object_name); | ||
| 683 | continue; | ||
| 684 | } | ||
| 685 | dmar_acpi_insert_dev_scope(andd->device_number, adev); | ||
| 686 | } | ||
| 687 | } | ||
| 462 | return 0; | 688 | return 0; |
| 689 | } | ||
| 463 | 690 | ||
| 464 | fail: | 691 | int __init dmar_dev_scope_init(void) |
| 465 | dmar_dev_scope_initialized = ret; | 692 | { |
| 466 | return ret; | 693 | struct pci_dev *dev = NULL; |
| 694 | struct dmar_pci_notify_info *info; | ||
| 695 | |||
| 696 | if (dmar_dev_scope_status != 1) | ||
| 697 | return dmar_dev_scope_status; | ||
| 698 | |||
| 699 | if (list_empty(&dmar_drhd_units)) { | ||
| 700 | dmar_dev_scope_status = -ENODEV; | ||
| 701 | } else { | ||
| 702 | dmar_dev_scope_status = 0; | ||
| 703 | |||
| 704 | dmar_acpi_dev_scope_init(); | ||
| 705 | |||
| 706 | for_each_pci_dev(dev) { | ||
| 707 | if (dev->is_virtfn) | ||
| 708 | continue; | ||
| 709 | |||
| 710 | info = dmar_alloc_pci_notify_info(dev, | ||
| 711 | BUS_NOTIFY_ADD_DEVICE); | ||
| 712 | if (!info) { | ||
| 713 | return dmar_dev_scope_status; | ||
| 714 | } else { | ||
| 715 | dmar_pci_bus_add_dev(info); | ||
| 716 | dmar_free_pci_notify_info(info); | ||
| 717 | } | ||
| 718 | } | ||
| 719 | |||
| 720 | bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb); | ||
| 721 | } | ||
| 722 | |||
| 723 | return dmar_dev_scope_status; | ||
| 467 | } | 724 | } |
| 468 | 725 | ||
| 469 | 726 | ||
| @@ -557,6 +814,7 @@ int __init detect_intel_iommu(void) | |||
| 557 | { | 814 | { |
| 558 | int ret; | 815 | int ret; |
| 559 | 816 | ||
| 817 | down_write(&dmar_global_lock); | ||
| 560 | ret = dmar_table_detect(); | 818 | ret = dmar_table_detect(); |
| 561 | if (ret) | 819 | if (ret) |
| 562 | ret = check_zero_address(); | 820 | ret = check_zero_address(); |
| @@ -574,6 +832,7 @@ int __init detect_intel_iommu(void) | |||
| 574 | } | 832 | } |
| 575 | early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size); | 833 | early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size); |
| 576 | dmar_tbl = NULL; | 834 | dmar_tbl = NULL; |
| 835 | up_write(&dmar_global_lock); | ||
| 577 | 836 | ||
| 578 | return ret ? 1 : -ENODEV; | 837 | return ret ? 1 : -ENODEV; |
| 579 | } | 838 | } |
| @@ -696,6 +955,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) | |||
| 696 | } | 955 | } |
| 697 | iommu->agaw = agaw; | 956 | iommu->agaw = agaw; |
| 698 | iommu->msagaw = msagaw; | 957 | iommu->msagaw = msagaw; |
| 958 | iommu->segment = drhd->segment; | ||
| 699 | 959 | ||
| 700 | iommu->node = -1; | 960 | iommu->node = -1; |
| 701 | 961 | ||
| @@ -1386,10 +1646,15 @@ static int __init dmar_free_unused_resources(void) | |||
| 1386 | if (irq_remapping_enabled || intel_iommu_enabled) | 1646 | if (irq_remapping_enabled || intel_iommu_enabled) |
| 1387 | return 0; | 1647 | return 0; |
| 1388 | 1648 | ||
| 1649 | if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units)) | ||
| 1650 | bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb); | ||
| 1651 | |||
| 1652 | down_write(&dmar_global_lock); | ||
| 1389 | list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) { | 1653 | list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) { |
| 1390 | list_del(&dmaru->list); | 1654 | list_del(&dmaru->list); |
| 1391 | dmar_free_drhd(dmaru); | 1655 | dmar_free_drhd(dmaru); |
| 1392 | } | 1656 | } |
| 1657 | up_write(&dmar_global_lock); | ||
| 1393 | 1658 | ||
| 1394 | return 0; | 1659 | return 0; |
| 1395 | } | 1660 | } |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index a22c86c867fa..69fa7da5e48b 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
| @@ -1,5 +1,5 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Copyright (c) 2006, Intel Corporation. | 2 | * Copyright © 2006-2014 Intel Corporation. |
| 3 | * | 3 | * |
| 4 | * This program is free software; you can redistribute it and/or modify it | 4 | * This program is free software; you can redistribute it and/or modify it |
| 5 | * under the terms and conditions of the GNU General Public License, | 5 | * under the terms and conditions of the GNU General Public License, |
| @@ -10,15 +10,11 @@ | |||
| 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
| 11 | * more details. | 11 | * more details. |
| 12 | * | 12 | * |
| 13 | * You should have received a copy of the GNU General Public License along with | 13 | * Authors: David Woodhouse <dwmw2@infradead.org>, |
| 14 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | 14 | * Ashok Raj <ashok.raj@intel.com>, |
| 15 | * Place - Suite 330, Boston, MA 02111-1307 USA. | 15 | * Shaohua Li <shaohua.li@intel.com>, |
| 16 | * | 16 | * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>, |
| 17 | * Copyright (C) 2006-2008 Intel Corporation | 17 | * Fenghua Yu <fenghua.yu@intel.com> |
| 18 | * Author: Ashok Raj <ashok.raj@intel.com> | ||
| 19 | * Author: Shaohua Li <shaohua.li@intel.com> | ||
| 20 | * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> | ||
| 21 | * Author: Fenghua Yu <fenghua.yu@intel.com> | ||
| 22 | */ | 18 | */ |
| 23 | 19 | ||
| 24 | #include <linux/init.h> | 20 | #include <linux/init.h> |
| @@ -33,6 +29,7 @@ | |||
| 33 | #include <linux/dmar.h> | 29 | #include <linux/dmar.h> |
| 34 | #include <linux/dma-mapping.h> | 30 | #include <linux/dma-mapping.h> |
| 35 | #include <linux/mempool.h> | 31 | #include <linux/mempool.h> |
| 32 | #include <linux/memory.h> | ||
| 36 | #include <linux/timer.h> | 33 | #include <linux/timer.h> |
| 37 | #include <linux/iova.h> | 34 | #include <linux/iova.h> |
| 38 | #include <linux/iommu.h> | 35 | #include <linux/iommu.h> |
| @@ -372,14 +369,36 @@ struct dmar_domain { | |||
| 372 | struct device_domain_info { | 369 | struct device_domain_info { |
| 373 | struct list_head link; /* link to domain siblings */ | 370 | struct list_head link; /* link to domain siblings */ |
| 374 | struct list_head global; /* link to global list */ | 371 | struct list_head global; /* link to global list */ |
| 375 | int segment; /* PCI domain */ | ||
| 376 | u8 bus; /* PCI bus number */ | 372 | u8 bus; /* PCI bus number */ |
| 377 | u8 devfn; /* PCI devfn number */ | 373 | u8 devfn; /* PCI devfn number */ |
| 378 | struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */ | 374 | struct device *dev; /* it's NULL for PCIe-to-PCI bridge */ |
| 379 | struct intel_iommu *iommu; /* IOMMU used by this device */ | 375 | struct intel_iommu *iommu; /* IOMMU used by this device */ |
| 380 | struct dmar_domain *domain; /* pointer to domain */ | 376 | struct dmar_domain *domain; /* pointer to domain */ |
| 381 | }; | 377 | }; |
| 382 | 378 | ||
| 379 | struct dmar_rmrr_unit { | ||
| 380 | struct list_head list; /* list of rmrr units */ | ||
| 381 | struct acpi_dmar_header *hdr; /* ACPI header */ | ||
| 382 | u64 base_address; /* reserved base address*/ | ||
| 383 | u64 end_address; /* reserved end address */ | ||
| 384 | struct dmar_dev_scope *devices; /* target devices */ | ||
| 385 | int devices_cnt; /* target device count */ | ||
| 386 | }; | ||
| 387 | |||
| 388 | struct dmar_atsr_unit { | ||
| 389 | struct list_head list; /* list of ATSR units */ | ||
| 390 | struct acpi_dmar_header *hdr; /* ACPI header */ | ||
| 391 | struct dmar_dev_scope *devices; /* target devices */ | ||
| 392 | int devices_cnt; /* target device count */ | ||
| 393 | u8 include_all:1; /* include all ports */ | ||
| 394 | }; | ||
| 395 | |||
| 396 | static LIST_HEAD(dmar_atsr_units); | ||
| 397 | static LIST_HEAD(dmar_rmrr_units); | ||
| 398 | |||
| 399 | #define for_each_rmrr_units(rmrr) \ | ||
| 400 | list_for_each_entry(rmrr, &dmar_rmrr_units, list) | ||
| 401 | |||
| 383 | static void flush_unmaps_timeout(unsigned long data); | 402 | static void flush_unmaps_timeout(unsigned long data); |
| 384 | 403 | ||
| 385 | static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0); | 404 | static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0); |
| @@ -389,6 +408,7 @@ struct deferred_flush_tables { | |||
| 389 | int next; | 408 | int next; |
| 390 | struct iova *iova[HIGH_WATER_MARK]; | 409 | struct iova *iova[HIGH_WATER_MARK]; |
| 391 | struct dmar_domain *domain[HIGH_WATER_MARK]; | 410 | struct dmar_domain *domain[HIGH_WATER_MARK]; |
| 411 | struct page *freelist[HIGH_WATER_MARK]; | ||
| 392 | }; | 412 | }; |
| 393 | 413 | ||
| 394 | static struct deferred_flush_tables *deferred_flush; | 414 | static struct deferred_flush_tables *deferred_flush; |
| @@ -402,7 +422,12 @@ static LIST_HEAD(unmaps_to_do); | |||
| 402 | static int timer_on; | 422 | static int timer_on; |
| 403 | static long list_size; | 423 | static long list_size; |
| 404 | 424 | ||
| 425 | static void domain_exit(struct dmar_domain *domain); | ||
| 405 | static void domain_remove_dev_info(struct dmar_domain *domain); | 426 | static void domain_remove_dev_info(struct dmar_domain *domain); |
| 427 | static void domain_remove_one_dev_info(struct dmar_domain *domain, | ||
| 428 | struct device *dev); | ||
| 429 | static void iommu_detach_dependent_devices(struct intel_iommu *iommu, | ||
| 430 | struct device *dev); | ||
| 406 | 431 | ||
| 407 | #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON | 432 | #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON |
| 408 | int dmar_disabled = 0; | 433 | int dmar_disabled = 0; |
| @@ -566,18 +591,31 @@ static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain) | |||
| 566 | 591 | ||
| 567 | static void domain_update_iommu_coherency(struct dmar_domain *domain) | 592 | static void domain_update_iommu_coherency(struct dmar_domain *domain) |
| 568 | { | 593 | { |
| 569 | int i; | 594 | struct dmar_drhd_unit *drhd; |
| 570 | 595 | struct intel_iommu *iommu; | |
| 571 | i = find_first_bit(domain->iommu_bmp, g_num_of_iommus); | 596 | int i, found = 0; |
| 572 | 597 | ||
| 573 | domain->iommu_coherency = i < g_num_of_iommus ? 1 : 0; | 598 | domain->iommu_coherency = 1; |
| 574 | 599 | ||
| 575 | for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) { | 600 | for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) { |
| 601 | found = 1; | ||
| 576 | if (!ecap_coherent(g_iommus[i]->ecap)) { | 602 | if (!ecap_coherent(g_iommus[i]->ecap)) { |
| 577 | domain->iommu_coherency = 0; | 603 | domain->iommu_coherency = 0; |
| 578 | break; | 604 | break; |
| 579 | } | 605 | } |
| 580 | } | 606 | } |
| 607 | if (found) | ||
| 608 | return; | ||
| 609 | |||
| 610 | /* No hardware attached; use lowest common denominator */ | ||
| 611 | rcu_read_lock(); | ||
| 612 | for_each_active_iommu(iommu, drhd) { | ||
| 613 | if (!ecap_coherent(iommu->ecap)) { | ||
| 614 | domain->iommu_coherency = 0; | ||
| 615 | break; | ||
| 616 | } | ||
| 617 | } | ||
| 618 | rcu_read_unlock(); | ||
| 581 | } | 619 | } |
| 582 | 620 | ||
| 583 | static void domain_update_iommu_snooping(struct dmar_domain *domain) | 621 | static void domain_update_iommu_snooping(struct dmar_domain *domain) |
| @@ -606,12 +644,15 @@ static void domain_update_iommu_superpage(struct dmar_domain *domain) | |||
| 606 | } | 644 | } |
| 607 | 645 | ||
| 608 | /* set iommu_superpage to the smallest common denominator */ | 646 | /* set iommu_superpage to the smallest common denominator */ |
| 647 | rcu_read_lock(); | ||
| 609 | for_each_active_iommu(iommu, drhd) { | 648 | for_each_active_iommu(iommu, drhd) { |
| 610 | mask &= cap_super_page_val(iommu->cap); | 649 | mask &= cap_super_page_val(iommu->cap); |
| 611 | if (!mask) { | 650 | if (!mask) { |
| 612 | break; | 651 | break; |
| 613 | } | 652 | } |
| 614 | } | 653 | } |
| 654 | rcu_read_unlock(); | ||
| 655 | |||
| 615 | domain->iommu_superpage = fls(mask); | 656 | domain->iommu_superpage = fls(mask); |
| 616 | } | 657 | } |
| 617 | 658 | ||
| @@ -623,32 +664,56 @@ static void domain_update_iommu_cap(struct dmar_domain *domain) | |||
| 623 | domain_update_iommu_superpage(domain); | 664 | domain_update_iommu_superpage(domain); |
| 624 | } | 665 | } |
| 625 | 666 | ||
| 626 | static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn) | 667 | static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn) |
| 627 | { | 668 | { |
| 628 | struct dmar_drhd_unit *drhd = NULL; | 669 | struct dmar_drhd_unit *drhd = NULL; |
| 670 | struct intel_iommu *iommu; | ||
| 671 | struct device *tmp; | ||
| 672 | struct pci_dev *ptmp, *pdev = NULL; | ||
| 673 | u16 segment; | ||
| 629 | int i; | 674 | int i; |
| 630 | 675 | ||
| 631 | for_each_active_drhd_unit(drhd) { | 676 | if (dev_is_pci(dev)) { |
| 632 | if (segment != drhd->segment) | 677 | pdev = to_pci_dev(dev); |
| 678 | segment = pci_domain_nr(pdev->bus); | ||
| 679 | } else if (ACPI_COMPANION(dev)) | ||
| 680 | dev = &ACPI_COMPANION(dev)->dev; | ||
| 681 | |||
| 682 | rcu_read_lock(); | ||
| 683 | for_each_active_iommu(iommu, drhd) { | ||
| 684 | if (pdev && segment != drhd->segment) | ||
| 633 | continue; | 685 | continue; |
| 634 | 686 | ||
| 635 | for (i = 0; i < drhd->devices_cnt; i++) { | 687 | for_each_active_dev_scope(drhd->devices, |
| 636 | if (drhd->devices[i] && | 688 | drhd->devices_cnt, i, tmp) { |
| 637 | drhd->devices[i]->bus->number == bus && | 689 | if (tmp == dev) { |
| 638 | drhd->devices[i]->devfn == devfn) | 690 | *bus = drhd->devices[i].bus; |
| 639 | return drhd->iommu; | 691 | *devfn = drhd->devices[i].devfn; |
| 640 | if (drhd->devices[i] && | 692 | goto out; |
| 641 | drhd->devices[i]->subordinate && | 693 | } |
| 642 | drhd->devices[i]->subordinate->number <= bus && | 694 | |
| 643 | drhd->devices[i]->subordinate->busn_res.end >= bus) | 695 | if (!pdev || !dev_is_pci(tmp)) |
| 644 | return drhd->iommu; | 696 | continue; |
| 697 | |||
| 698 | ptmp = to_pci_dev(tmp); | ||
| 699 | if (ptmp->subordinate && | ||
| 700 | ptmp->subordinate->number <= pdev->bus->number && | ||
| 701 | ptmp->subordinate->busn_res.end >= pdev->bus->number) | ||
| 702 | goto got_pdev; | ||
| 645 | } | 703 | } |
| 646 | 704 | ||
| 647 | if (drhd->include_all) | 705 | if (pdev && drhd->include_all) { |
| 648 | return drhd->iommu; | 706 | got_pdev: |
| 707 | *bus = pdev->bus->number; | ||
| 708 | *devfn = pdev->devfn; | ||
| 709 | goto out; | ||
| 710 | } | ||
| 649 | } | 711 | } |
| 712 | iommu = NULL; | ||
| 713 | out: | ||
| 714 | rcu_read_unlock(); | ||
| 650 | 715 | ||
| 651 | return NULL; | 716 | return iommu; |
| 652 | } | 717 | } |
| 653 | 718 | ||
| 654 | static void domain_flush_cache(struct dmar_domain *domain, | 719 | static void domain_flush_cache(struct dmar_domain *domain, |
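The reworked device_to_iommu() above walks each DRHD unit's device scope under the RCU read lock, matching the struct device directly (or, for PCI, a bridge whose secondary bus range covers it), and falls back to a unit flagged include_all. A condensed, lock-free userspace sketch of the "explicit scope first, catch-all last" lookup follows; the unit and device names are invented for illustration and the simplification drops the bridge/bus-range cases.

```c
#include <stdio.h>
#include <string.h>

struct unit {
	const char *scope[4];	/* explicitly listed devices */
	int include_all;	/* catch-all unit for everything else */
	const char *name;
};

static struct unit units[] = {
	{ { "gfx", NULL }, 0, "iommu0" },
	{ { NULL },        1, "iommu1" },	/* INCLUDE_PCI_ALL-style unit */
};

static const char *device_to_unit(const char *dev)
{
	const struct unit *fallback = NULL;
	size_t i, j;

	for (i = 0; i < sizeof(units) / sizeof(units[0]); i++) {
		for (j = 0; units[i].scope[j]; j++)
			if (!strcmp(units[i].scope[j], dev))
				return units[i].name;	/* explicit match wins */
		if (units[i].include_all)
			fallback = &units[i];		/* remember the catch-all */
	}
	return fallback ? fallback->name : NULL;
}

int main(void)
{
	printf("gfx -> %s\n", device_to_unit("gfx"));	/* iommu0 */
	printf("nic -> %s\n", device_to_unit("nic"));	/* iommu1 */
	return 0;
}
```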
| @@ -748,7 +813,7 @@ out: | |||
| 748 | } | 813 | } |
| 749 | 814 | ||
| 750 | static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, | 815 | static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, |
| 751 | unsigned long pfn, int target_level) | 816 | unsigned long pfn, int *target_level) |
| 752 | { | 817 | { |
| 753 | int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; | 818 | int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; |
| 754 | struct dma_pte *parent, *pte = NULL; | 819 | struct dma_pte *parent, *pte = NULL; |
| @@ -763,14 +828,14 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, | |||
| 763 | 828 | ||
| 764 | parent = domain->pgd; | 829 | parent = domain->pgd; |
| 765 | 830 | ||
| 766 | while (level > 0) { | 831 | while (1) { |
| 767 | void *tmp_page; | 832 | void *tmp_page; |
| 768 | 833 | ||
| 769 | offset = pfn_level_offset(pfn, level); | 834 | offset = pfn_level_offset(pfn, level); |
| 770 | pte = &parent[offset]; | 835 | pte = &parent[offset]; |
| 771 | if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte))) | 836 | if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte))) |
| 772 | break; | 837 | break; |
| 773 | if (level == target_level) | 838 | if (level == *target_level) |
| 774 | break; | 839 | break; |
| 775 | 840 | ||
| 776 | if (!dma_pte_present(pte)) { | 841 | if (!dma_pte_present(pte)) { |
| @@ -791,10 +856,16 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, | |||
| 791 | domain_flush_cache(domain, pte, sizeof(*pte)); | 856 | domain_flush_cache(domain, pte, sizeof(*pte)); |
| 792 | } | 857 | } |
| 793 | } | 858 | } |
| 859 | if (level == 1) | ||
| 860 | break; | ||
| 861 | |||
| 794 | parent = phys_to_virt(dma_pte_addr(pte)); | 862 | parent = phys_to_virt(dma_pte_addr(pte)); |
| 795 | level--; | 863 | level--; |
| 796 | } | 864 | } |
| 797 | 865 | ||
| 866 | if (!*target_level) | ||
| 867 | *target_level = level; | ||
| 868 | |||
| 798 | return pte; | 869 | return pte; |
| 799 | } | 870 | } |
| 800 | 871 | ||
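With this change pfn_to_dma_pte() takes the target level by pointer: a caller that passes *target_level == 0 accepts whatever level the walk stops at (an existing superpage or a non-present entry) and gets that level written back for later use. The toy radix-tree walk below shows only the in/out-parameter convention; the fixed 9-bit levels and node layout are illustrative and not the real page-table format.

```c
#include <stdio.h>
#include <stdlib.h>

#define LVL_BITS	9
#define LVL_ENTRIES	(1 << LVL_BITS)

struct node {
	struct node *slot[LVL_ENTRIES];	/* non-leaf: child pointers */
	int leaf;			/* marks a "superpage"-style leaf */
};

/*
 * Walk from 'top' towards level 1.  If *level is 0 on entry, stop at the
 * first leaf or empty slot and report where we stopped; otherwise descend
 * (allocating as needed) until exactly *level.
 */
static struct node **walk(struct node *top, int top_level,
			  unsigned long pfn, int *level)
{
	struct node *parent = top;
	struct node **slot = NULL;
	int lvl = top_level;

	for (;;) {
		int idx = (pfn >> ((lvl - 1) * LVL_BITS)) & (LVL_ENTRIES - 1);

		slot = &parent->slot[idx];
		if (!*level && (!*slot || (*slot)->leaf))
			break;			/* caller takes what it finds */
		if (lvl == *level || lvl == 1)
			break;			/* reached the requested level */
		if (!*slot)
			*slot = calloc(1, sizeof(**slot));
		parent = *slot;
		lvl--;
	}

	if (!*level)
		*level = lvl;			/* report where we stopped */
	return slot;
}

int main(void)
{
	struct node *top = calloc(1, sizeof(*top));
	int level = 0;				/* "don't care": in/out */

	walk(top, 3, 0x12345, &level);
	printf("walk stopped at level %d\n", level);	/* 3: nothing mapped yet */
	return 0;
}
```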
| @@ -832,7 +903,7 @@ static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain, | |||
| 832 | } | 903 | } |
| 833 | 904 | ||
| 834 | /* clear last level pte, a tlb flush should be followed */ | 905 | /* clear last level pte, a tlb flush should be followed */ |
| 835 | static int dma_pte_clear_range(struct dmar_domain *domain, | 906 | static void dma_pte_clear_range(struct dmar_domain *domain, |
| 836 | unsigned long start_pfn, | 907 | unsigned long start_pfn, |
| 837 | unsigned long last_pfn) | 908 | unsigned long last_pfn) |
| 838 | { | 909 | { |
| @@ -862,8 +933,6 @@ static int dma_pte_clear_range(struct dmar_domain *domain, | |||
| 862 | (void *)pte - (void *)first_pte); | 933 | (void *)pte - (void *)first_pte); |
| 863 | 934 | ||
| 864 | } while (start_pfn && start_pfn <= last_pfn); | 935 | } while (start_pfn && start_pfn <= last_pfn); |
| 865 | |||
| 866 | return min_t(int, (large_page - 1) * 9, MAX_AGAW_PFN_WIDTH); | ||
| 867 | } | 936 | } |
| 868 | 937 | ||
| 869 | static void dma_pte_free_level(struct dmar_domain *domain, int level, | 938 | static void dma_pte_free_level(struct dmar_domain *domain, int level, |
| @@ -921,6 +990,123 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain, | |||
| 921 | } | 990 | } |
| 922 | } | 991 | } |
| 923 | 992 | ||
| 993 | /* When a page at a given level is being unlinked from its parent, we don't | ||
| 994 | need to *modify* it at all. All we need to do is make a list of all the | ||
| 995 | pages which can be freed just as soon as we've flushed the IOTLB and we | ||
| 996 | know the hardware page-walk will no longer touch them. | ||
| 997 | The 'pte' argument is the *parent* PTE, pointing to the page that is to | ||
| 998 | be freed. */ | ||
| 999 | static struct page *dma_pte_list_pagetables(struct dmar_domain *domain, | ||
| 1000 | int level, struct dma_pte *pte, | ||
| 1001 | struct page *freelist) | ||
| 1002 | { | ||
| 1003 | struct page *pg; | ||
| 1004 | |||
| 1005 | pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT); | ||
| 1006 | pg->freelist = freelist; | ||
| 1007 | freelist = pg; | ||
| 1008 | |||
| 1009 | if (level == 1) | ||
| 1010 | return freelist; | ||
| 1011 | |||
| 1012 | for (pte = page_address(pg); !first_pte_in_page(pte); pte++) { | ||
| 1013 | if (dma_pte_present(pte) && !dma_pte_superpage(pte)) | ||
| 1014 | freelist = dma_pte_list_pagetables(domain, level - 1, | ||
| 1015 | pte, freelist); | ||
| 1016 | } | ||
| 1017 | |||
| 1018 | return freelist; | ||
| 1019 | } | ||
| 1020 | |||
| 1021 | static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level, | ||
| 1022 | struct dma_pte *pte, unsigned long pfn, | ||
| 1023 | unsigned long start_pfn, | ||
| 1024 | unsigned long last_pfn, | ||
| 1025 | struct page *freelist) | ||
| 1026 | { | ||
| 1027 | struct dma_pte *first_pte = NULL, *last_pte = NULL; | ||
| 1028 | |||
| 1029 | pfn = max(start_pfn, pfn); | ||
| 1030 | pte = &pte[pfn_level_offset(pfn, level)]; | ||
| 1031 | |||
| 1032 | do { | ||
| 1033 | unsigned long level_pfn; | ||
| 1034 | |||
| 1035 | if (!dma_pte_present(pte)) | ||
| 1036 | goto next; | ||
| 1037 | |||
| 1038 | level_pfn = pfn & level_mask(level); | ||
| 1039 | |||
| 1040 | /* If range covers entire pagetable, free it */ | ||
| 1041 | if (start_pfn <= level_pfn && | ||
| 1042 | last_pfn >= level_pfn + level_size(level) - 1) { | ||
| 1043 | /* These subordinate page tables are going away entirely. Don't | ||
| 1044 | bother to clear them; we're just going to *free* them. */ | ||
| 1045 | if (level > 1 && !dma_pte_superpage(pte)) | ||
| 1046 | freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist); | ||
| 1047 | |||
| 1048 | dma_clear_pte(pte); | ||
| 1049 | if (!first_pte) | ||
| 1050 | first_pte = pte; | ||
| 1051 | last_pte = pte; | ||
| 1052 | } else if (level > 1) { | ||
| 1053 | /* Recurse down into a level that isn't *entirely* obsolete */ | ||
| 1054 | freelist = dma_pte_clear_level(domain, level - 1, | ||
| 1055 | phys_to_virt(dma_pte_addr(pte)), | ||
| 1056 | level_pfn, start_pfn, last_pfn, | ||
| 1057 | freelist); | ||
| 1058 | } | ||
| 1059 | next: | ||
| 1060 | pfn += level_size(level); | ||
| 1061 | } while (!first_pte_in_page(++pte) && pfn <= last_pfn); | ||
| 1062 | |||
| 1063 | if (first_pte) | ||
| 1064 | domain_flush_cache(domain, first_pte, | ||
| 1065 | (void *)++last_pte - (void *)first_pte); | ||
| 1066 | |||
| 1067 | return freelist; | ||
| 1068 | } | ||
| 1069 | |||
| 1070 | /* We can't just free the pages because the IOMMU may still be walking | ||
| 1071 | the page tables, and may have cached the intermediate levels. The | ||
| 1072 | pages can only be freed after the IOTLB flush has been done. */ | ||
| 1073 | struct page *domain_unmap(struct dmar_domain *domain, | ||
| 1074 | unsigned long start_pfn, | ||
| 1075 | unsigned long last_pfn) | ||
| 1076 | { | ||
| 1077 | int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; | ||
| 1078 | struct page *freelist = NULL; | ||
| 1079 | |||
| 1080 | BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); | ||
| 1081 | BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); | ||
| 1082 | BUG_ON(start_pfn > last_pfn); | ||
| 1083 | |||
| 1084 | /* we don't need lock here; nobody else touches the iova range */ | ||
| 1085 | freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw), | ||
| 1086 | domain->pgd, 0, start_pfn, last_pfn, NULL); | ||
| 1087 | |||
| 1088 | /* free pgd */ | ||
| 1089 | if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { | ||
| 1090 | struct page *pgd_page = virt_to_page(domain->pgd); | ||
| 1091 | pgd_page->freelist = freelist; | ||
| 1092 | freelist = pgd_page; | ||
| 1093 | |||
| 1094 | domain->pgd = NULL; | ||
| 1095 | } | ||
| 1096 | |||
| 1097 | return freelist; | ||
| 1098 | } | ||
| 1099 | |||
| 1100 | void dma_free_pagelist(struct page *freelist) | ||
| 1101 | { | ||
| 1102 | struct page *pg; | ||
| 1103 | |||
| 1104 | while ((pg = freelist)) { | ||
| 1105 | freelist = pg->freelist; | ||
| 1106 | free_pgtable_page(page_address(pg)); | ||
| 1107 | } | ||
| 1108 | } | ||
| 1109 | |||
| 924 | /* iommu handling */ | 1110 | /* iommu handling */ |
| 925 | static int iommu_alloc_root_entry(struct intel_iommu *iommu) | 1111 | static int iommu_alloc_root_entry(struct intel_iommu *iommu) |
| 926 | { | 1112 | { |
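The additions above never free page-table pages directly: domain_unmap() clears only the top-level entries and threads every page that became unreachable onto an intrusive singly-linked list (reusing page->freelist), and dma_free_pagelist() releases them only after the IOTLB flush, since the hardware walker may still hold cached intermediate entries. Here is a small userspace sketch of that collect-now/free-later pattern; a plain next pointer and a 4-slot toy table stand in for struct page and the real layout.

```c
#include <stdio.h>
#include <stdlib.h>

struct pt_page {
	struct pt_page *child[4];	/* toy page table: 4 slots per level */
	struct pt_page *freelist;	/* link used only while queued for freeing */
};

/* Unlink a subtree: don't free anything, just chain the pages together. */
static struct pt_page *collect(struct pt_page *pg, struct pt_page *freelist)
{
	int i;

	if (!pg)
		return freelist;
	for (i = 0; i < 4; i++)
		freelist = collect(pg->child[i], freelist);
	pg->freelist = freelist;	/* push this page onto the list */
	return pg;
}

static void flush_iotlb(void)
{
	/* Placeholder for the hardware TLB flush that must happen first. */
	puts("iotlb flushed");
}

static void free_pagelist(struct pt_page *freelist)
{
	while (freelist) {
		struct pt_page *pg = freelist;

		freelist = pg->freelist;
		free(pg);
	}
}

int main(void)
{
	struct pt_page *root = calloc(1, sizeof(*root));
	struct pt_page *freelist;

	root->child[0] = calloc(1, sizeof(*root));
	root->child[0]->child[2] = calloc(1, sizeof(*root));

	/* 1. Detach: pages are unreachable from the table but still allocated. */
	freelist = collect(root, NULL);

	/* 2. Make sure no walker can still reach them. */
	flush_iotlb();

	/* 3. Only now is it safe to hand the memory back. */
	free_pagelist(freelist);
	return 0;
}
```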
| @@ -1030,7 +1216,7 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, | |||
| 1030 | break; | 1216 | break; |
| 1031 | case DMA_TLB_PSI_FLUSH: | 1217 | case DMA_TLB_PSI_FLUSH: |
| 1032 | val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did); | 1218 | val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did); |
| 1033 | /* Note: always flush non-leaf currently */ | 1219 | /* IH bit is passed in as part of address */ |
| 1034 | val_iva = size_order | addr; | 1220 | val_iva = size_order | addr; |
| 1035 | break; | 1221 | break; |
| 1036 | default: | 1222 | default: |
| @@ -1069,13 +1255,14 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, | |||
| 1069 | (unsigned long long)DMA_TLB_IAIG(val)); | 1255 | (unsigned long long)DMA_TLB_IAIG(val)); |
| 1070 | } | 1256 | } |
| 1071 | 1257 | ||
| 1072 | static struct device_domain_info *iommu_support_dev_iotlb( | 1258 | static struct device_domain_info * |
| 1073 | struct dmar_domain *domain, int segment, u8 bus, u8 devfn) | 1259 | iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu, |
| 1260 | u8 bus, u8 devfn) | ||
| 1074 | { | 1261 | { |
| 1075 | int found = 0; | 1262 | int found = 0; |
| 1076 | unsigned long flags; | 1263 | unsigned long flags; |
| 1077 | struct device_domain_info *info; | 1264 | struct device_domain_info *info; |
| 1078 | struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn); | 1265 | struct pci_dev *pdev; |
| 1079 | 1266 | ||
| 1080 | if (!ecap_dev_iotlb_support(iommu->ecap)) | 1267 | if (!ecap_dev_iotlb_support(iommu->ecap)) |
| 1081 | return NULL; | 1268 | return NULL; |
| @@ -1091,34 +1278,35 @@ static struct device_domain_info *iommu_support_dev_iotlb( | |||
| 1091 | } | 1278 | } |
| 1092 | spin_unlock_irqrestore(&device_domain_lock, flags); | 1279 | spin_unlock_irqrestore(&device_domain_lock, flags); |
| 1093 | 1280 | ||
| 1094 | if (!found || !info->dev) | 1281 | if (!found || !info->dev || !dev_is_pci(info->dev)) |
| 1095 | return NULL; | 1282 | return NULL; |
| 1096 | 1283 | ||
| 1097 | if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS)) | 1284 | pdev = to_pci_dev(info->dev); |
| 1098 | return NULL; | ||
| 1099 | 1285 | ||
| 1100 | if (!dmar_find_matched_atsr_unit(info->dev)) | 1286 | if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS)) |
| 1101 | return NULL; | 1287 | return NULL; |
| 1102 | 1288 | ||
| 1103 | info->iommu = iommu; | 1289 | if (!dmar_find_matched_atsr_unit(pdev)) |
| 1290 | return NULL; | ||
| 1104 | 1291 | ||
| 1105 | return info; | 1292 | return info; |
| 1106 | } | 1293 | } |
| 1107 | 1294 | ||
| 1108 | static void iommu_enable_dev_iotlb(struct device_domain_info *info) | 1295 | static void iommu_enable_dev_iotlb(struct device_domain_info *info) |
| 1109 | { | 1296 | { |
| 1110 | if (!info) | 1297 | if (!info || !dev_is_pci(info->dev)) |
| 1111 | return; | 1298 | return; |
| 1112 | 1299 | ||
| 1113 | pci_enable_ats(info->dev, VTD_PAGE_SHIFT); | 1300 | pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT); |
| 1114 | } | 1301 | } |
| 1115 | 1302 | ||
| 1116 | static void iommu_disable_dev_iotlb(struct device_domain_info *info) | 1303 | static void iommu_disable_dev_iotlb(struct device_domain_info *info) |
| 1117 | { | 1304 | { |
| 1118 | if (!info->dev || !pci_ats_enabled(info->dev)) | 1305 | if (!info->dev || !dev_is_pci(info->dev) || |
| 1306 | !pci_ats_enabled(to_pci_dev(info->dev))) | ||
| 1119 | return; | 1307 | return; |
| 1120 | 1308 | ||
| 1121 | pci_disable_ats(info->dev); | 1309 | pci_disable_ats(to_pci_dev(info->dev)); |
| 1122 | } | 1310 | } |
| 1123 | 1311 | ||
| 1124 | static void iommu_flush_dev_iotlb(struct dmar_domain *domain, | 1312 | static void iommu_flush_dev_iotlb(struct dmar_domain *domain, |
| @@ -1130,24 +1318,31 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain, | |||
| 1130 | 1318 | ||
| 1131 | spin_lock_irqsave(&device_domain_lock, flags); | 1319 | spin_lock_irqsave(&device_domain_lock, flags); |
| 1132 | list_for_each_entry(info, &domain->devices, link) { | 1320 | list_for_each_entry(info, &domain->devices, link) { |
| 1133 | if (!info->dev || !pci_ats_enabled(info->dev)) | 1321 | struct pci_dev *pdev; |
| 1322 | if (!info->dev || !dev_is_pci(info->dev)) | ||
| 1323 | continue; | ||
| 1324 | |||
| 1325 | pdev = to_pci_dev(info->dev); | ||
| 1326 | if (!pci_ats_enabled(pdev)) | ||
| 1134 | continue; | 1327 | continue; |
| 1135 | 1328 | ||
| 1136 | sid = info->bus << 8 | info->devfn; | 1329 | sid = info->bus << 8 | info->devfn; |
| 1137 | qdep = pci_ats_queue_depth(info->dev); | 1330 | qdep = pci_ats_queue_depth(pdev); |
| 1138 | qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask); | 1331 | qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask); |
| 1139 | } | 1332 | } |
| 1140 | spin_unlock_irqrestore(&device_domain_lock, flags); | 1333 | spin_unlock_irqrestore(&device_domain_lock, flags); |
| 1141 | } | 1334 | } |
| 1142 | 1335 | ||
| 1143 | static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, | 1336 | static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, |
| 1144 | unsigned long pfn, unsigned int pages, int map) | 1337 | unsigned long pfn, unsigned int pages, int ih, int map) |
| 1145 | { | 1338 | { |
| 1146 | unsigned int mask = ilog2(__roundup_pow_of_two(pages)); | 1339 | unsigned int mask = ilog2(__roundup_pow_of_two(pages)); |
| 1147 | uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT; | 1340 | uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT; |
| 1148 | 1341 | ||
| 1149 | BUG_ON(pages == 0); | 1342 | BUG_ON(pages == 0); |
| 1150 | 1343 | ||
| 1344 | if (ih) | ||
| 1345 | ih = 1 << 6; | ||
| 1151 | /* | 1346 | /* |
| 1152 | * Fallback to domain selective flush if no PSI support or the size is | 1347 | * Fallback to domain selective flush if no PSI support or the size is |
| 1153 | * too big. | 1348 | * too big. |
| @@ -1158,7 +1353,7 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, | |||
| 1158 | iommu->flush.flush_iotlb(iommu, did, 0, 0, | 1353 | iommu->flush.flush_iotlb(iommu, did, 0, 0, |
| 1159 | DMA_TLB_DSI_FLUSH); | 1354 | DMA_TLB_DSI_FLUSH); |
| 1160 | else | 1355 | else |
| 1161 | iommu->flush.flush_iotlb(iommu, did, addr, mask, | 1356 | iommu->flush.flush_iotlb(iommu, did, addr | ih, mask, |
| 1162 | DMA_TLB_PSI_FLUSH); | 1357 | DMA_TLB_PSI_FLUSH); |
| 1163 | 1358 | ||
| 1164 | /* | 1359 | /* |
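These two hunks fold the invalidation hint into the address that iommu_flush_iotlb_psi() passes down: the target address is page aligned, so the low bits are free to carry the address-mask order and, when leaf entries were merely changed rather than cleared, an IH flag at bit 6. The sketch below only mirrors the bit packing as the hunks use it; treat the exact bit positions as the patch's convention rather than something verified against the spec here.

```c
#include <stdio.h>
#include <stdint.h>

#define VTD_PAGE_SHIFT	12

/*
 * Compose a page-selective invalidation address field: low bits carry the
 * address-mask order, bit 6 the invalidation hint, bits 12 and up the
 * page-frame address.
 */
static uint64_t psi_addr(uint64_t pfn, unsigned int size_order, int ih)
{
	uint64_t addr = pfn << VTD_PAGE_SHIFT;

	if (ih)
		addr |= 1ULL << 6;	/* leaf PTEs changed, not cleared */
	return addr | size_order;	/* number of pages = 1 << size_order */
}

int main(void)
{
	/* Invalidate 8 pages (order 3) starting at pfn 0x1234, with the hint. */
	printf("0x%llx\n", (unsigned long long)psi_addr(0x1234, 3, 1));
	return 0;
}
```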
| @@ -1261,10 +1456,6 @@ static int iommu_init_domains(struct intel_iommu *iommu) | |||
| 1261 | return 0; | 1456 | return 0; |
| 1262 | } | 1457 | } |
| 1263 | 1458 | ||
| 1264 | |||
| 1265 | static void domain_exit(struct dmar_domain *domain); | ||
| 1266 | static void vm_domain_exit(struct dmar_domain *domain); | ||
| 1267 | |||
| 1268 | static void free_dmar_iommu(struct intel_iommu *iommu) | 1459 | static void free_dmar_iommu(struct intel_iommu *iommu) |
| 1269 | { | 1460 | { |
| 1270 | struct dmar_domain *domain; | 1461 | struct dmar_domain *domain; |
| @@ -1273,18 +1464,21 @@ static void free_dmar_iommu(struct intel_iommu *iommu) | |||
| 1273 | 1464 | ||
| 1274 | if ((iommu->domains) && (iommu->domain_ids)) { | 1465 | if ((iommu->domains) && (iommu->domain_ids)) { |
| 1275 | for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) { | 1466 | for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) { |
| 1467 | /* | ||
| 1468 | * Domain id 0 is reserved for invalid translation | ||
| 1469 | * if hardware supports caching mode. | ||
| 1470 | */ | ||
| 1471 | if (cap_caching_mode(iommu->cap) && i == 0) | ||
| 1472 | continue; | ||
| 1473 | |||
| 1276 | domain = iommu->domains[i]; | 1474 | domain = iommu->domains[i]; |
| 1277 | clear_bit(i, iommu->domain_ids); | 1475 | clear_bit(i, iommu->domain_ids); |
| 1278 | 1476 | ||
| 1279 | spin_lock_irqsave(&domain->iommu_lock, flags); | 1477 | spin_lock_irqsave(&domain->iommu_lock, flags); |
| 1280 | count = --domain->iommu_count; | 1478 | count = --domain->iommu_count; |
| 1281 | spin_unlock_irqrestore(&domain->iommu_lock, flags); | 1479 | spin_unlock_irqrestore(&domain->iommu_lock, flags); |
| 1282 | if (count == 0) { | 1480 | if (count == 0) |
| 1283 | if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) | 1481 | domain_exit(domain); |
| 1284 | vm_domain_exit(domain); | ||
| 1285 | else | ||
| 1286 | domain_exit(domain); | ||
| 1287 | } | ||
| 1288 | } | 1482 | } |
| 1289 | } | 1483 | } |
| 1290 | 1484 | ||
| @@ -1298,21 +1492,14 @@ static void free_dmar_iommu(struct intel_iommu *iommu) | |||
| 1298 | 1492 | ||
| 1299 | g_iommus[iommu->seq_id] = NULL; | 1493 | g_iommus[iommu->seq_id] = NULL; |
| 1300 | 1494 | ||
| 1301 | /* if all iommus are freed, free g_iommus */ | ||
| 1302 | for (i = 0; i < g_num_of_iommus; i++) { | ||
| 1303 | if (g_iommus[i]) | ||
| 1304 | break; | ||
| 1305 | } | ||
| 1306 | |||
| 1307 | if (i == g_num_of_iommus) | ||
| 1308 | kfree(g_iommus); | ||
| 1309 | |||
| 1310 | /* free context mapping */ | 1495 | /* free context mapping */ |
| 1311 | free_context_table(iommu); | 1496 | free_context_table(iommu); |
| 1312 | } | 1497 | } |
| 1313 | 1498 | ||
| 1314 | static struct dmar_domain *alloc_domain(void) | 1499 | static struct dmar_domain *alloc_domain(bool vm) |
| 1315 | { | 1500 | { |
| 1501 | /* domain id for virtual machine, it won't be set in context */ | ||
| 1502 | static atomic_t vm_domid = ATOMIC_INIT(0); | ||
| 1316 | struct dmar_domain *domain; | 1503 | struct dmar_domain *domain; |
| 1317 | 1504 | ||
| 1318 | domain = alloc_domain_mem(); | 1505 | domain = alloc_domain_mem(); |
| @@ -1320,8 +1507,15 @@ static struct dmar_domain *alloc_domain(void) | |||
| 1320 | return NULL; | 1507 | return NULL; |
| 1321 | 1508 | ||
| 1322 | domain->nid = -1; | 1509 | domain->nid = -1; |
| 1510 | domain->iommu_count = 0; | ||
| 1323 | memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp)); | 1511 | memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp)); |
| 1324 | domain->flags = 0; | 1512 | domain->flags = 0; |
| 1513 | spin_lock_init(&domain->iommu_lock); | ||
| 1514 | INIT_LIST_HEAD(&domain->devices); | ||
| 1515 | if (vm) { | ||
| 1516 | domain->id = atomic_inc_return(&vm_domid); | ||
| 1517 | domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE; | ||
| 1518 | } | ||
| 1325 | 1519 | ||
| 1326 | return domain; | 1520 | return domain; |
| 1327 | } | 1521 | } |
| @@ -1345,6 +1539,7 @@ static int iommu_attach_domain(struct dmar_domain *domain, | |||
| 1345 | } | 1539 | } |
| 1346 | 1540 | ||
| 1347 | domain->id = num; | 1541 | domain->id = num; |
| 1542 | domain->iommu_count++; | ||
| 1348 | set_bit(num, iommu->domain_ids); | 1543 | set_bit(num, iommu->domain_ids); |
| 1349 | set_bit(iommu->seq_id, domain->iommu_bmp); | 1544 | set_bit(iommu->seq_id, domain->iommu_bmp); |
| 1350 | iommu->domains[num] = domain; | 1545 | iommu->domains[num] = domain; |
| @@ -1358,22 +1553,16 @@ static void iommu_detach_domain(struct dmar_domain *domain, | |||
| 1358 | { | 1553 | { |
| 1359 | unsigned long flags; | 1554 | unsigned long flags; |
| 1360 | int num, ndomains; | 1555 | int num, ndomains; |
| 1361 | int found = 0; | ||
| 1362 | 1556 | ||
| 1363 | spin_lock_irqsave(&iommu->lock, flags); | 1557 | spin_lock_irqsave(&iommu->lock, flags); |
| 1364 | ndomains = cap_ndoms(iommu->cap); | 1558 | ndomains = cap_ndoms(iommu->cap); |
| 1365 | for_each_set_bit(num, iommu->domain_ids, ndomains) { | 1559 | for_each_set_bit(num, iommu->domain_ids, ndomains) { |
| 1366 | if (iommu->domains[num] == domain) { | 1560 | if (iommu->domains[num] == domain) { |
| 1367 | found = 1; | 1561 | clear_bit(num, iommu->domain_ids); |
| 1562 | iommu->domains[num] = NULL; | ||
| 1368 | break; | 1563 | break; |
| 1369 | } | 1564 | } |
| 1370 | } | 1565 | } |
| 1371 | |||
| 1372 | if (found) { | ||
| 1373 | clear_bit(num, iommu->domain_ids); | ||
| 1374 | clear_bit(iommu->seq_id, domain->iommu_bmp); | ||
| 1375 | iommu->domains[num] = NULL; | ||
| 1376 | } | ||
| 1377 | spin_unlock_irqrestore(&iommu->lock, flags); | 1566 | spin_unlock_irqrestore(&iommu->lock, flags); |
| 1378 | } | 1567 | } |
| 1379 | 1568 | ||
| @@ -1445,8 +1634,6 @@ static int domain_init(struct dmar_domain *domain, int guest_width) | |||
| 1445 | unsigned long sagaw; | 1634 | unsigned long sagaw; |
| 1446 | 1635 | ||
| 1447 | init_iova_domain(&domain->iovad, DMA_32BIT_PFN); | 1636 | init_iova_domain(&domain->iovad, DMA_32BIT_PFN); |
| 1448 | spin_lock_init(&domain->iommu_lock); | ||
| 1449 | |||
| 1450 | domain_reserve_special_ranges(domain); | 1637 | domain_reserve_special_ranges(domain); |
| 1451 | 1638 | ||
| 1452 | /* calculate AGAW */ | 1639 | /* calculate AGAW */ |
| @@ -1465,7 +1652,6 @@ static int domain_init(struct dmar_domain *domain, int guest_width) | |||
| 1465 | return -ENODEV; | 1652 | return -ENODEV; |
| 1466 | } | 1653 | } |
| 1467 | domain->agaw = agaw; | 1654 | domain->agaw = agaw; |
| 1468 | INIT_LIST_HEAD(&domain->devices); | ||
| 1469 | 1655 | ||
| 1470 | if (ecap_coherent(iommu->ecap)) | 1656 | if (ecap_coherent(iommu->ecap)) |
| 1471 | domain->iommu_coherency = 1; | 1657 | domain->iommu_coherency = 1; |
| @@ -1477,8 +1663,11 @@ static int domain_init(struct dmar_domain *domain, int guest_width) | |||
| 1477 | else | 1663 | else |
| 1478 | domain->iommu_snooping = 0; | 1664 | domain->iommu_snooping = 0; |
| 1479 | 1665 | ||
| 1480 | domain->iommu_superpage = fls(cap_super_page_val(iommu->cap)); | 1666 | if (intel_iommu_superpage) |
| 1481 | domain->iommu_count = 1; | 1667 | domain->iommu_superpage = fls(cap_super_page_val(iommu->cap)); |
| 1668 | else | ||
| 1669 | domain->iommu_superpage = 0; | ||
| 1670 | |||
| 1482 | domain->nid = iommu->node; | 1671 | domain->nid = iommu->node; |
| 1483 | 1672 | ||
| 1484 | /* always allocate the top pgd */ | 1673 | /* always allocate the top pgd */ |
| @@ -1493,6 +1682,7 @@ static void domain_exit(struct dmar_domain *domain) | |||
| 1493 | { | 1682 | { |
| 1494 | struct dmar_drhd_unit *drhd; | 1683 | struct dmar_drhd_unit *drhd; |
| 1495 | struct intel_iommu *iommu; | 1684 | struct intel_iommu *iommu; |
| 1685 | struct page *freelist = NULL; | ||
| 1496 | 1686 | ||
| 1497 | /* Domain 0 is reserved, so dont process it */ | 1687 | /* Domain 0 is reserved, so dont process it */ |
| 1498 | if (!domain) | 1688 | if (!domain) |
| @@ -1502,29 +1692,33 @@ static void domain_exit(struct dmar_domain *domain) | |||
| 1502 | if (!intel_iommu_strict) | 1692 | if (!intel_iommu_strict) |
| 1503 | flush_unmaps_timeout(0); | 1693 | flush_unmaps_timeout(0); |
| 1504 | 1694 | ||
| 1695 | /* remove associated devices */ | ||
| 1505 | domain_remove_dev_info(domain); | 1696 | domain_remove_dev_info(domain); |
| 1697 | |||
| 1506 | /* destroy iovas */ | 1698 | /* destroy iovas */ |
| 1507 | put_iova_domain(&domain->iovad); | 1699 | put_iova_domain(&domain->iovad); |
| 1508 | 1700 | ||
| 1509 | /* clear ptes */ | 1701 | freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); |
| 1510 | dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); | ||
| 1511 | |||
| 1512 | /* free page tables */ | ||
| 1513 | dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); | ||
| 1514 | 1702 | ||
| 1703 | /* clear attached or cached domains */ | ||
| 1704 | rcu_read_lock(); | ||
| 1515 | for_each_active_iommu(iommu, drhd) | 1705 | for_each_active_iommu(iommu, drhd) |
| 1516 | if (test_bit(iommu->seq_id, domain->iommu_bmp)) | 1706 | if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE || |
| 1707 | test_bit(iommu->seq_id, domain->iommu_bmp)) | ||
| 1517 | iommu_detach_domain(domain, iommu); | 1708 | iommu_detach_domain(domain, iommu); |
| 1709 | rcu_read_unlock(); | ||
| 1710 | |||
| 1711 | dma_free_pagelist(freelist); | ||
| 1518 | 1712 | ||
| 1519 | free_domain_mem(domain); | 1713 | free_domain_mem(domain); |
| 1520 | } | 1714 | } |
| 1521 | 1715 | ||
| 1522 | static int domain_context_mapping_one(struct dmar_domain *domain, int segment, | 1716 | static int domain_context_mapping_one(struct dmar_domain *domain, |
| 1523 | u8 bus, u8 devfn, int translation) | 1717 | struct intel_iommu *iommu, |
| 1718 | u8 bus, u8 devfn, int translation) | ||
| 1524 | { | 1719 | { |
| 1525 | struct context_entry *context; | 1720 | struct context_entry *context; |
| 1526 | unsigned long flags; | 1721 | unsigned long flags; |
| 1527 | struct intel_iommu *iommu; | ||
| 1528 | struct dma_pte *pgd; | 1722 | struct dma_pte *pgd; |
| 1529 | unsigned long num; | 1723 | unsigned long num; |
| 1530 | unsigned long ndomains; | 1724 | unsigned long ndomains; |
| @@ -1539,10 +1733,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment, | |||
| 1539 | BUG_ON(translation != CONTEXT_TT_PASS_THROUGH && | 1733 | BUG_ON(translation != CONTEXT_TT_PASS_THROUGH && |
| 1540 | translation != CONTEXT_TT_MULTI_LEVEL); | 1734 | translation != CONTEXT_TT_MULTI_LEVEL); |
| 1541 | 1735 | ||
| 1542 | iommu = device_to_iommu(segment, bus, devfn); | ||
| 1543 | if (!iommu) | ||
| 1544 | return -ENODEV; | ||
| 1545 | |||
| 1546 | context = device_to_context_entry(iommu, bus, devfn); | 1736 | context = device_to_context_entry(iommu, bus, devfn); |
| 1547 | if (!context) | 1737 | if (!context) |
| 1548 | return -ENOMEM; | 1738 | return -ENOMEM; |
| @@ -1600,7 +1790,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment, | |||
| 1600 | context_set_domain_id(context, id); | 1790 | context_set_domain_id(context, id); |
| 1601 | 1791 | ||
| 1602 | if (translation != CONTEXT_TT_PASS_THROUGH) { | 1792 | if (translation != CONTEXT_TT_PASS_THROUGH) { |
| 1603 | info = iommu_support_dev_iotlb(domain, segment, bus, devfn); | 1793 | info = iommu_support_dev_iotlb(domain, iommu, bus, devfn); |
| 1604 | translation = info ? CONTEXT_TT_DEV_IOTLB : | 1794 | translation = info ? CONTEXT_TT_DEV_IOTLB : |
| 1605 | CONTEXT_TT_MULTI_LEVEL; | 1795 | CONTEXT_TT_MULTI_LEVEL; |
| 1606 | } | 1796 | } |
| @@ -1650,27 +1840,32 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment, | |||
| 1650 | } | 1840 | } |
| 1651 | 1841 | ||
| 1652 | static int | 1842 | static int |
| 1653 | domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev, | 1843 | domain_context_mapping(struct dmar_domain *domain, struct device *dev, |
| 1654 | int translation) | 1844 | int translation) |
| 1655 | { | 1845 | { |
| 1656 | int ret; | 1846 | int ret; |
| 1657 | struct pci_dev *tmp, *parent; | 1847 | struct pci_dev *pdev, *tmp, *parent; |
| 1848 | struct intel_iommu *iommu; | ||
| 1849 | u8 bus, devfn; | ||
| 1850 | |||
| 1851 | iommu = device_to_iommu(dev, &bus, &devfn); | ||
| 1852 | if (!iommu) | ||
| 1853 | return -ENODEV; | ||
| 1658 | 1854 | ||
| 1659 | ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus), | 1855 | ret = domain_context_mapping_one(domain, iommu, bus, devfn, |
| 1660 | pdev->bus->number, pdev->devfn, | ||
| 1661 | translation); | 1856 | translation); |
| 1662 | if (ret) | 1857 | if (ret || !dev_is_pci(dev)) |
| 1663 | return ret; | 1858 | return ret; |
| 1664 | 1859 | ||
| 1665 | /* dependent device mapping */ | 1860 | /* dependent device mapping */ |
| 1861 | pdev = to_pci_dev(dev); | ||
| 1666 | tmp = pci_find_upstream_pcie_bridge(pdev); | 1862 | tmp = pci_find_upstream_pcie_bridge(pdev); |
| 1667 | if (!tmp) | 1863 | if (!tmp) |
| 1668 | return 0; | 1864 | return 0; |
| 1669 | /* Secondary interface's bus number and devfn 0 */ | 1865 | /* Secondary interface's bus number and devfn 0 */ |
| 1670 | parent = pdev->bus->self; | 1866 | parent = pdev->bus->self; |
| 1671 | while (parent != tmp) { | 1867 | while (parent != tmp) { |
| 1672 | ret = domain_context_mapping_one(domain, | 1868 | ret = domain_context_mapping_one(domain, iommu, |
| 1673 | pci_domain_nr(parent->bus), | ||
| 1674 | parent->bus->number, | 1869 | parent->bus->number, |
| 1675 | parent->devfn, translation); | 1870 | parent->devfn, translation); |
| 1676 | if (ret) | 1871 | if (ret) |
| @@ -1678,33 +1873,33 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev, | |||
| 1678 | parent = parent->bus->self; | 1873 | parent = parent->bus->self; |
| 1679 | } | 1874 | } |
| 1680 | if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */ | 1875 | if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */ |
| 1681 | return domain_context_mapping_one(domain, | 1876 | return domain_context_mapping_one(domain, iommu, |
| 1682 | pci_domain_nr(tmp->subordinate), | ||
| 1683 | tmp->subordinate->number, 0, | 1877 | tmp->subordinate->number, 0, |
| 1684 | translation); | 1878 | translation); |
| 1685 | else /* this is a legacy PCI bridge */ | 1879 | else /* this is a legacy PCI bridge */ |
| 1686 | return domain_context_mapping_one(domain, | 1880 | return domain_context_mapping_one(domain, iommu, |
| 1687 | pci_domain_nr(tmp->bus), | ||
| 1688 | tmp->bus->number, | 1881 | tmp->bus->number, |
| 1689 | tmp->devfn, | 1882 | tmp->devfn, |
| 1690 | translation); | 1883 | translation); |
| 1691 | } | 1884 | } |
| 1692 | 1885 | ||
| 1693 | static int domain_context_mapped(struct pci_dev *pdev) | 1886 | static int domain_context_mapped(struct device *dev) |
| 1694 | { | 1887 | { |
| 1695 | int ret; | 1888 | int ret; |
| 1696 | struct pci_dev *tmp, *parent; | 1889 | struct pci_dev *pdev, *tmp, *parent; |
| 1697 | struct intel_iommu *iommu; | 1890 | struct intel_iommu *iommu; |
| 1891 | u8 bus, devfn; | ||
| 1698 | 1892 | ||
| 1699 | iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number, | 1893 | iommu = device_to_iommu(dev, &bus, &devfn); |
| 1700 | pdev->devfn); | ||
| 1701 | if (!iommu) | 1894 | if (!iommu) |
| 1702 | return -ENODEV; | 1895 | return -ENODEV; |
| 1703 | 1896 | ||
| 1704 | ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn); | 1897 | ret = device_context_mapped(iommu, bus, devfn); |
| 1705 | if (!ret) | 1898 | if (!ret || !dev_is_pci(dev)) |
| 1706 | return ret; | 1899 | return ret; |
| 1900 | |||
| 1707 | /* dependent device mapping */ | 1901 | /* dependent device mapping */ |
| 1902 | pdev = to_pci_dev(dev); | ||
| 1708 | tmp = pci_find_upstream_pcie_bridge(pdev); | 1903 | tmp = pci_find_upstream_pcie_bridge(pdev); |
| 1709 | if (!tmp) | 1904 | if (!tmp) |
| 1710 | return ret; | 1905 | return ret; |
| @@ -1800,7 +1995,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, | |||
| 1800 | if (!pte) { | 1995 | if (!pte) { |
| 1801 | largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res); | 1996 | largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res); |
| 1802 | 1997 | ||
| 1803 | first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl); | 1998 | first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl); |
| 1804 | if (!pte) | 1999 | if (!pte) |
| 1805 | return -ENOMEM; | 2000 | return -ENOMEM; |
| 1806 | /* It is large page*/ | 2001 | /* It is large page*/ |
| @@ -1899,14 +2094,13 @@ static inline void unlink_domain_info(struct device_domain_info *info) | |||
| 1899 | list_del(&info->link); | 2094 | list_del(&info->link); |
| 1900 | list_del(&info->global); | 2095 | list_del(&info->global); |
| 1901 | if (info->dev) | 2096 | if (info->dev) |
| 1902 | info->dev->dev.archdata.iommu = NULL; | 2097 | info->dev->archdata.iommu = NULL; |
| 1903 | } | 2098 | } |
| 1904 | 2099 | ||
| 1905 | static void domain_remove_dev_info(struct dmar_domain *domain) | 2100 | static void domain_remove_dev_info(struct dmar_domain *domain) |
| 1906 | { | 2101 | { |
| 1907 | struct device_domain_info *info; | 2102 | struct device_domain_info *info; |
| 1908 | unsigned long flags; | 2103 | unsigned long flags, flags2; |
| 1909 | struct intel_iommu *iommu; | ||
| 1910 | 2104 | ||
| 1911 | spin_lock_irqsave(&device_domain_lock, flags); | 2105 | spin_lock_irqsave(&device_domain_lock, flags); |
| 1912 | while (!list_empty(&domain->devices)) { | 2106 | while (!list_empty(&domain->devices)) { |
| @@ -1916,10 +2110,23 @@ static void domain_remove_dev_info(struct dmar_domain *domain) | |||
| 1916 | spin_unlock_irqrestore(&device_domain_lock, flags); | 2110 | spin_unlock_irqrestore(&device_domain_lock, flags); |
| 1917 | 2111 | ||
| 1918 | iommu_disable_dev_iotlb(info); | 2112 | iommu_disable_dev_iotlb(info); |
| 1919 | iommu = device_to_iommu(info->segment, info->bus, info->devfn); | 2113 | iommu_detach_dev(info->iommu, info->bus, info->devfn); |
| 1920 | iommu_detach_dev(iommu, info->bus, info->devfn); | ||
| 1921 | free_devinfo_mem(info); | ||
| 1922 | 2114 | ||
| 2115 | if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) { | ||
| 2116 | iommu_detach_dependent_devices(info->iommu, info->dev); | ||
| 2117 | /* clear this iommu in iommu_bmp, update iommu count | ||
| 2118 | * and capabilities | ||
| 2119 | */ | ||
| 2120 | spin_lock_irqsave(&domain->iommu_lock, flags2); | ||
| 2121 | if (test_and_clear_bit(info->iommu->seq_id, | ||
| 2122 | domain->iommu_bmp)) { | ||
| 2123 | domain->iommu_count--; | ||
| 2124 | domain_update_iommu_cap(domain); | ||
| 2125 | } | ||
| 2126 | spin_unlock_irqrestore(&domain->iommu_lock, flags2); | ||
| 2127 | } | ||
| 2128 | |||
| 2129 | free_devinfo_mem(info); | ||
| 1923 | spin_lock_irqsave(&device_domain_lock, flags); | 2130 | spin_lock_irqsave(&device_domain_lock, flags); |
| 1924 | } | 2131 | } |
| 1925 | spin_unlock_irqrestore(&device_domain_lock, flags); | 2132 | spin_unlock_irqrestore(&device_domain_lock, flags); |
| @@ -1927,155 +2134,151 @@ static void domain_remove_dev_info(struct dmar_domain *domain) | |||
| 1927 | 2134 | ||
| 1928 | /* | 2135 | /* |
| 1929 | * find_domain | 2136 | * find_domain |
| 1930 | * Note: we use struct pci_dev->dev.archdata.iommu stores the info | 2137 | * Note: we use struct device->archdata.iommu stores the info |
| 1931 | */ | 2138 | */ |
| 1932 | static struct dmar_domain * | 2139 | static struct dmar_domain *find_domain(struct device *dev) |
| 1933 | find_domain(struct pci_dev *pdev) | ||
| 1934 | { | 2140 | { |
| 1935 | struct device_domain_info *info; | 2141 | struct device_domain_info *info; |
| 1936 | 2142 | ||
| 1937 | /* No lock here, assumes no domain exit in normal case */ | 2143 | /* No lock here, assumes no domain exit in normal case */ |
| 1938 | info = pdev->dev.archdata.iommu; | 2144 | info = dev->archdata.iommu; |
| 1939 | if (info) | 2145 | if (info) |
| 1940 | return info->domain; | 2146 | return info->domain; |
| 1941 | return NULL; | 2147 | return NULL; |
| 1942 | } | 2148 | } |
| 1943 | 2149 | ||
| 2150 | static inline struct device_domain_info * | ||
| 2151 | dmar_search_domain_by_dev_info(int segment, int bus, int devfn) | ||
| 2152 | { | ||
| 2153 | struct device_domain_info *info; | ||
| 2154 | |||
| 2155 | list_for_each_entry(info, &device_domain_list, global) | ||
| 2156 | if (info->iommu->segment == segment && info->bus == bus && | ||
| 2157 | info->devfn == devfn) | ||
| 2158 | return info; | ||
| 2159 | |||
| 2160 | return NULL; | ||
| 2161 | } | ||
| 2162 | |||
| 2163 | static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu, | ||
| 2164 | int bus, int devfn, | ||
| 2165 | struct device *dev, | ||
| 2166 | struct dmar_domain *domain) | ||
| 2167 | { | ||
| 2168 | struct dmar_domain *found = NULL; | ||
| 2169 | struct device_domain_info *info; | ||
| 2170 | unsigned long flags; | ||
| 2171 | |||
| 2172 | info = alloc_devinfo_mem(); | ||
| 2173 | if (!info) | ||
| 2174 | return NULL; | ||
| 2175 | |||
| 2176 | info->bus = bus; | ||
| 2177 | info->devfn = devfn; | ||
| 2178 | info->dev = dev; | ||
| 2179 | info->domain = domain; | ||
| 2180 | info->iommu = iommu; | ||
| 2181 | if (!dev) | ||
| 2182 | domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES; | ||
| 2183 | |||
| 2184 | spin_lock_irqsave(&device_domain_lock, flags); | ||
| 2185 | if (dev) | ||
| 2186 | found = find_domain(dev); | ||
| 2187 | else { | ||
| 2188 | struct device_domain_info *info2; | ||
| 2189 | info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn); | ||
| 2190 | if (info2) | ||
| 2191 | found = info2->domain; | ||
| 2192 | } | ||
| 2193 | if (found) { | ||
| 2194 | spin_unlock_irqrestore(&device_domain_lock, flags); | ||
| 2195 | free_devinfo_mem(info); | ||
| 2196 | /* Caller must free the original domain */ | ||
| 2197 | return found; | ||
| 2198 | } | ||
| 2199 | |||
| 2200 | list_add(&info->link, &domain->devices); | ||
| 2201 | list_add(&info->global, &device_domain_list); | ||
| 2202 | if (dev) | ||
| 2203 | dev->archdata.iommu = info; | ||
| 2204 | spin_unlock_irqrestore(&device_domain_lock, flags); | ||
| 2205 | |||
| 2206 | return domain; | ||
| 2207 | } | ||
| 2208 | |||
| 1944 | /* domain is initialized */ | 2209 | /* domain is initialized */ |
| 1945 | static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) | 2210 | static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw) |
| 1946 | { | 2211 | { |
| 1947 | struct dmar_domain *domain, *found = NULL; | 2212 | struct dmar_domain *domain, *free = NULL; |
| 1948 | struct intel_iommu *iommu; | 2213 | struct intel_iommu *iommu = NULL; |
| 1949 | struct dmar_drhd_unit *drhd; | 2214 | struct device_domain_info *info; |
| 1950 | struct device_domain_info *info, *tmp; | 2215 | struct pci_dev *dev_tmp = NULL; |
| 1951 | struct pci_dev *dev_tmp; | ||
| 1952 | unsigned long flags; | 2216 | unsigned long flags; |
| 1953 | int bus = 0, devfn = 0; | 2217 | u8 bus, devfn, bridge_bus, bridge_devfn; |
| 1954 | int segment; | ||
| 1955 | int ret; | ||
| 1956 | 2218 | ||
| 1957 | domain = find_domain(pdev); | 2219 | domain = find_domain(dev); |
| 1958 | if (domain) | 2220 | if (domain) |
| 1959 | return domain; | 2221 | return domain; |
| 1960 | 2222 | ||
| 1961 | segment = pci_domain_nr(pdev->bus); | 2223 | if (dev_is_pci(dev)) { |
| 2224 | struct pci_dev *pdev = to_pci_dev(dev); | ||
| 2225 | u16 segment; | ||
| 1962 | 2226 | ||
| 1963 | dev_tmp = pci_find_upstream_pcie_bridge(pdev); | 2227 | segment = pci_domain_nr(pdev->bus); |
| 1964 | if (dev_tmp) { | 2228 | dev_tmp = pci_find_upstream_pcie_bridge(pdev); |
| 1965 | if (pci_is_pcie(dev_tmp)) { | 2229 | if (dev_tmp) { |
| 1966 | bus = dev_tmp->subordinate->number; | 2230 | if (pci_is_pcie(dev_tmp)) { |
| 1967 | devfn = 0; | 2231 | bridge_bus = dev_tmp->subordinate->number; |
| 1968 | } else { | 2232 | bridge_devfn = 0; |
| 1969 | bus = dev_tmp->bus->number; | 2233 | } else { |
| 1970 | devfn = dev_tmp->devfn; | 2234 | bridge_bus = dev_tmp->bus->number; |
| 1971 | } | 2235 | bridge_devfn = dev_tmp->devfn; |
| 1972 | spin_lock_irqsave(&device_domain_lock, flags); | ||
| 1973 | list_for_each_entry(info, &device_domain_list, global) { | ||
| 1974 | if (info->segment == segment && | ||
| 1975 | info->bus == bus && info->devfn == devfn) { | ||
| 1976 | found = info->domain; | ||
| 1977 | break; | ||
| 1978 | } | 2236 | } |
| 1979 | } | 2237 | spin_lock_irqsave(&device_domain_lock, flags); |
| 1980 | spin_unlock_irqrestore(&device_domain_lock, flags); | 2238 | info = dmar_search_domain_by_dev_info(segment, bus, devfn); |
| 1981 | /* pcie-pci bridge already has a domain, uses it */ | 2239 | if (info) { |
| 1982 | if (found) { | 2240 | iommu = info->iommu; |
| 1983 | domain = found; | 2241 | domain = info->domain; |
| 1984 | goto found_domain; | 2242 | } |
| 2243 | spin_unlock_irqrestore(&device_domain_lock, flags); | ||
| 2244 | /* pcie-pci bridge already has a domain, uses it */ | ||
| 2245 | if (info) | ||
| 2246 | goto found_domain; | ||
| 1985 | } | 2247 | } |
| 1986 | } | 2248 | } |
| 1987 | 2249 | ||
| 1988 | domain = alloc_domain(); | 2250 | iommu = device_to_iommu(dev, &bus, &devfn); |
| 1989 | if (!domain) | 2251 | if (!iommu) |
| 1990 | goto error; | 2252 | goto error; |
| 1991 | 2253 | ||
| 1992 | /* Allocate new domain for the device */ | 2254 | /* Allocate and initialize new domain for the device */ |
| 1993 | drhd = dmar_find_matched_drhd_unit(pdev); | 2255 | domain = alloc_domain(false); |
| 1994 | if (!drhd) { | 2256 | if (!domain) |
| 1995 | printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n", | 2257 | goto error; |
| 1996 | pci_name(pdev)); | 2258 | if (iommu_attach_domain(domain, iommu)) { |
| 1997 | free_domain_mem(domain); | ||
| 1998 | return NULL; | ||
| 1999 | } | ||
| 2000 | iommu = drhd->iommu; | ||
| 2001 | |||
| 2002 | ret = iommu_attach_domain(domain, iommu); | ||
| 2003 | if (ret) { | ||
| 2004 | free_domain_mem(domain); | 2259 | free_domain_mem(domain); |
| 2260 | domain = NULL; | ||
| 2005 | goto error; | 2261 | goto error; |
| 2006 | } | 2262 | } |
| 2007 | 2263 | free = domain; | |
| 2008 | if (domain_init(domain, gaw)) { | 2264 | if (domain_init(domain, gaw)) |
| 2009 | domain_exit(domain); | ||
| 2010 | goto error; | 2265 | goto error; |
| 2011 | } | ||
| 2012 | 2266 | ||
| 2013 | /* register pcie-to-pci device */ | 2267 | /* register pcie-to-pci device */ |
| 2014 | if (dev_tmp) { | 2268 | if (dev_tmp) { |
| 2015 | info = alloc_devinfo_mem(); | 2269 | domain = dmar_insert_dev_info(iommu, bridge_bus, bridge_devfn, |
| 2016 | if (!info) { | 2270 | NULL, domain); |
| 2017 | domain_exit(domain); | 2271 | if (!domain) |
| 2018 | goto error; | 2272 | goto error; |
| 2019 | } | ||
| 2020 | info->segment = segment; | ||
| 2021 | info->bus = bus; | ||
| 2022 | info->devfn = devfn; | ||
| 2023 | info->dev = NULL; | ||
| 2024 | info->domain = domain; | ||
| 2025 | /* This domain is shared by devices under p2p bridge */ | ||
| 2026 | domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES; | ||
| 2027 | |||
| 2028 | /* pcie-to-pci bridge already has a domain, uses it */ | ||
| 2029 | found = NULL; | ||
| 2030 | spin_lock_irqsave(&device_domain_lock, flags); | ||
| 2031 | list_for_each_entry(tmp, &device_domain_list, global) { | ||
| 2032 | if (tmp->segment == segment && | ||
| 2033 | tmp->bus == bus && tmp->devfn == devfn) { | ||
| 2034 | found = tmp->domain; | ||
| 2035 | break; | ||
| 2036 | } | ||
| 2037 | } | ||
| 2038 | if (found) { | ||
| 2039 | spin_unlock_irqrestore(&device_domain_lock, flags); | ||
| 2040 | free_devinfo_mem(info); | ||
| 2041 | domain_exit(domain); | ||
| 2042 | domain = found; | ||
| 2043 | } else { | ||
| 2044 | list_add(&info->link, &domain->devices); | ||
| 2045 | list_add(&info->global, &device_domain_list); | ||
| 2046 | spin_unlock_irqrestore(&device_domain_lock, flags); | ||
| 2047 | } | ||
| 2048 | } | 2273 | } |
| 2049 | 2274 | ||
| 2050 | found_domain: | 2275 | found_domain: |
| 2051 | info = alloc_devinfo_mem(); | 2276 | domain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain); |
| 2052 | if (!info) | ||
| 2053 | goto error; | ||
| 2054 | info->segment = segment; | ||
| 2055 | info->bus = pdev->bus->number; | ||
| 2056 | info->devfn = pdev->devfn; | ||
| 2057 | info->dev = pdev; | ||
| 2058 | info->domain = domain; | ||
| 2059 | spin_lock_irqsave(&device_domain_lock, flags); | ||
| 2060 | /* somebody is fast */ | ||
| 2061 | found = find_domain(pdev); | ||
| 2062 | if (found != NULL) { | ||
| 2063 | spin_unlock_irqrestore(&device_domain_lock, flags); | ||
| 2064 | if (found != domain) { | ||
| 2065 | domain_exit(domain); | ||
| 2066 | domain = found; | ||
| 2067 | } | ||
| 2068 | free_devinfo_mem(info); | ||
| 2069 | return domain; | ||
| 2070 | } | ||
| 2071 | list_add(&info->link, &domain->devices); | ||
| 2072 | list_add(&info->global, &device_domain_list); | ||
| 2073 | pdev->dev.archdata.iommu = info; | ||
| 2074 | spin_unlock_irqrestore(&device_domain_lock, flags); | ||
| 2075 | return domain; | ||
| 2076 | error: | 2277 | error: |
| 2077 | /* recheck it here, maybe others set it */ | 2278 | if (free != domain) |
| 2078 | return find_domain(pdev); | 2279 | domain_exit(free); |
| 2280 | |||
| 2281 | return domain; | ||
| 2079 | } | 2282 | } |
| 2080 | 2283 | ||
| 2081 | static int iommu_identity_mapping; | 2284 | static int iommu_identity_mapping; |
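The new dmar_insert_dev_info() has to cope with two callers racing to bind the same device: each allocates its info outside the lock, re-checks under device_domain_lock, and the loser frees its allocation and adopts the binding that won (the caller then frees its speculative domain). Below is a compact sketch of that optimistic allocate/re-check/publish shape using a plain mutex and made-up types; it makes no claim to match the kernel's locking primitives or data structures.

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct dev_info {
	const char *dev;
	const char *domain;
	struct dev_info *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct dev_info *dev_list;

/*
 * Bind 'dev' to 'domain'.  The info is allocated optimistically outside the
 * lock; under the lock we re-check whether someone beat us to it, and if so
 * free our copy and return the existing binding instead.
 */
static const char *insert_dev_info(const char *dev, const char *domain)
{
	struct dev_info *info, *tmp;
	const char *found = NULL;

	info = calloc(1, sizeof(*info));
	if (!info)
		return NULL;
	info->dev = dev;
	info->domain = domain;

	pthread_mutex_lock(&lock);
	for (tmp = dev_list; tmp; tmp = tmp->next)
		if (!strcmp(tmp->dev, dev))
			found = tmp->domain;
	if (found) {
		pthread_mutex_unlock(&lock);
		free(info);		/* lost the race: hand back the winner */
		return found;
	}
	info->next = dev_list;
	dev_list = info;		/* won the race: publish our binding */
	pthread_mutex_unlock(&lock);
	return domain;
}

int main(void)
{
	printf("%s\n", insert_dev_info("0000:00:02.0", "domA"));	/* domA */
	printf("%s\n", insert_dev_info("0000:00:02.0", "domB"));	/* still domA */
	return 0;
}
```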
| @@ -2109,14 +2312,14 @@ static int iommu_domain_identity_map(struct dmar_domain *domain, | |||
| 2109 | DMA_PTE_READ|DMA_PTE_WRITE); | 2312 | DMA_PTE_READ|DMA_PTE_WRITE); |
| 2110 | } | 2313 | } |
| 2111 | 2314 | ||
| 2112 | static int iommu_prepare_identity_map(struct pci_dev *pdev, | 2315 | static int iommu_prepare_identity_map(struct device *dev, |
| 2113 | unsigned long long start, | 2316 | unsigned long long start, |
| 2114 | unsigned long long end) | 2317 | unsigned long long end) |
| 2115 | { | 2318 | { |
| 2116 | struct dmar_domain *domain; | 2319 | struct dmar_domain *domain; |
| 2117 | int ret; | 2320 | int ret; |
| 2118 | 2321 | ||
| 2119 | domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH); | 2322 | domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH); |
| 2120 | if (!domain) | 2323 | if (!domain) |
| 2121 | return -ENOMEM; | 2324 | return -ENOMEM; |
| 2122 | 2325 | ||
| @@ -2126,13 +2329,13 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev, | |||
| 2126 | up to start with in si_domain */ | 2329 | up to start with in si_domain */ |
| 2127 | if (domain == si_domain && hw_pass_through) { | 2330 | if (domain == si_domain && hw_pass_through) { |
| 2128 | printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n", | 2331 | printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n", |
| 2129 | pci_name(pdev), start, end); | 2332 | dev_name(dev), start, end); |
| 2130 | return 0; | 2333 | return 0; |
| 2131 | } | 2334 | } |
| 2132 | 2335 | ||
| 2133 | printk(KERN_INFO | 2336 | printk(KERN_INFO |
| 2134 | "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", | 2337 | "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", |
| 2135 | pci_name(pdev), start, end); | 2338 | dev_name(dev), start, end); |
| 2136 | 2339 | ||
| 2137 | if (end < start) { | 2340 | if (end < start) { |
| 2138 | WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n" | 2341 | WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n" |
| @@ -2160,7 +2363,7 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev, | |||
| 2160 | goto error; | 2363 | goto error; |
| 2161 | 2364 | ||
| 2162 | /* context entry init */ | 2365 | /* context entry init */ |
| 2163 | ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL); | 2366 | ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL); |
| 2164 | if (ret) | 2367 | if (ret) |
| 2165 | goto error; | 2368 | goto error; |
| 2166 | 2369 | ||
| @@ -2172,12 +2375,12 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev, | |||
| 2172 | } | 2375 | } |
| 2173 | 2376 | ||
| 2174 | static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr, | 2377 | static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr, |
| 2175 | struct pci_dev *pdev) | 2378 | struct device *dev) |
| 2176 | { | 2379 | { |
| 2177 | if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) | 2380 | if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) |
| 2178 | return 0; | 2381 | return 0; |
| 2179 | return iommu_prepare_identity_map(pdev, rmrr->base_address, | 2382 | return iommu_prepare_identity_map(dev, rmrr->base_address, |
| 2180 | rmrr->end_address); | 2383 | rmrr->end_address); |
| 2181 | } | 2384 | } |
| 2182 | 2385 | ||
| 2183 | #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA | 2386 | #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA |
| @@ -2191,7 +2394,7 @@ static inline void iommu_prepare_isa(void) | |||
| 2191 | return; | 2394 | return; |
| 2192 | 2395 | ||
| 2193 | printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n"); | 2396 | printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n"); |
| 2194 | ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1); | 2397 | ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1); |
| 2195 | 2398 | ||
| 2196 | if (ret) | 2399 | if (ret) |
| 2197 | printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; " | 2400 | printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; " |
| @@ -2213,10 +2416,12 @@ static int __init si_domain_init(int hw) | |||
| 2213 | struct intel_iommu *iommu; | 2416 | struct intel_iommu *iommu; |
| 2214 | int nid, ret = 0; | 2417 | int nid, ret = 0; |
| 2215 | 2418 | ||
| 2216 | si_domain = alloc_domain(); | 2419 | si_domain = alloc_domain(false); |
| 2217 | if (!si_domain) | 2420 | if (!si_domain) |
| 2218 | return -EFAULT; | 2421 | return -EFAULT; |
| 2219 | 2422 | ||
| 2423 | si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY; | ||
| 2424 | |||
| 2220 | for_each_active_iommu(iommu, drhd) { | 2425 | for_each_active_iommu(iommu, drhd) { |
| 2221 | ret = iommu_attach_domain(si_domain, iommu); | 2426 | ret = iommu_attach_domain(si_domain, iommu); |
| 2222 | if (ret) { | 2427 | if (ret) { |
| @@ -2230,7 +2435,6 @@ static int __init si_domain_init(int hw) | |||
| 2230 | return -EFAULT; | 2435 | return -EFAULT; |
| 2231 | } | 2436 | } |
| 2232 | 2437 | ||
| 2233 | si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY; | ||
| 2234 | pr_debug("IOMMU: identity mapping domain is domain %d\n", | 2438 | pr_debug("IOMMU: identity mapping domain is domain %d\n", |
| 2235 | si_domain->id); | 2439 | si_domain->id); |
| 2236 | 2440 | ||
| @@ -2252,16 +2456,14 @@ static int __init si_domain_init(int hw) | |||
| 2252 | return 0; | 2456 | return 0; |
| 2253 | } | 2457 | } |
| 2254 | 2458 | ||
| 2255 | static void domain_remove_one_dev_info(struct dmar_domain *domain, | 2459 | static int identity_mapping(struct device *dev) |
| 2256 | struct pci_dev *pdev); | ||
| 2257 | static int identity_mapping(struct pci_dev *pdev) | ||
| 2258 | { | 2460 | { |
| 2259 | struct device_domain_info *info; | 2461 | struct device_domain_info *info; |
| 2260 | 2462 | ||
| 2261 | if (likely(!iommu_identity_mapping)) | 2463 | if (likely(!iommu_identity_mapping)) |
| 2262 | return 0; | 2464 | return 0; |
| 2263 | 2465 | ||
| 2264 | info = pdev->dev.archdata.iommu; | 2466 | info = dev->archdata.iommu; |
| 2265 | if (info && info != DUMMY_DEVICE_DOMAIN_INFO) | 2467 | if (info && info != DUMMY_DEVICE_DOMAIN_INFO) |
| 2266 | return (info->domain == si_domain); | 2468 | return (info->domain == si_domain); |
| 2267 | 2469 | ||
| @@ -2269,111 +2471,112 @@ static int identity_mapping(struct pci_dev *pdev) | |||
| 2269 | } | 2471 | } |
| 2270 | 2472 | ||
| 2271 | static int domain_add_dev_info(struct dmar_domain *domain, | 2473 | static int domain_add_dev_info(struct dmar_domain *domain, |
| 2272 | struct pci_dev *pdev, | 2474 | struct device *dev, int translation) |
| 2273 | int translation) | ||
| 2274 | { | 2475 | { |
| 2275 | struct device_domain_info *info; | 2476 | struct dmar_domain *ndomain; |
| 2276 | unsigned long flags; | 2477 | struct intel_iommu *iommu; |
| 2478 | u8 bus, devfn; | ||
| 2277 | int ret; | 2479 | int ret; |
| 2278 | 2480 | ||
| 2279 | info = alloc_devinfo_mem(); | 2481 | iommu = device_to_iommu(dev, &bus, &devfn); |
| 2280 | if (!info) | 2482 | if (!iommu) |
| 2281 | return -ENOMEM; | 2483 | return -ENODEV; |
| 2282 | |||
| 2283 | info->segment = pci_domain_nr(pdev->bus); | ||
| 2284 | info->bus = pdev->bus->number; | ||
| 2285 | info->devfn = pdev->devfn; | ||
| 2286 | info->dev = pdev; | ||
| 2287 | info->domain = domain; | ||
| 2288 | 2484 | ||
| 2289 | spin_lock_irqsave(&device_domain_lock, flags); | 2485 | ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain); |
| 2290 | list_add(&info->link, &domain->devices); | 2486 | if (ndomain != domain) |
| 2291 | list_add(&info->global, &device_domain_list); | 2487 | return -EBUSY; |
| 2292 | pdev->dev.archdata.iommu = info; | ||
| 2293 | spin_unlock_irqrestore(&device_domain_lock, flags); | ||
| 2294 | 2488 | ||
| 2295 | ret = domain_context_mapping(domain, pdev, translation); | 2489 | ret = domain_context_mapping(domain, dev, translation); |
| 2296 | if (ret) { | 2490 | if (ret) { |
| 2297 | spin_lock_irqsave(&device_domain_lock, flags); | 2491 | domain_remove_one_dev_info(domain, dev); |
| 2298 | unlink_domain_info(info); | ||
| 2299 | spin_unlock_irqrestore(&device_domain_lock, flags); | ||
| 2300 | free_devinfo_mem(info); | ||
| 2301 | return ret; | 2492 | return ret; |
| 2302 | } | 2493 | } |
| 2303 | 2494 | ||
| 2304 | return 0; | 2495 | return 0; |
| 2305 | } | 2496 | } |
| 2306 | 2497 | ||
| 2307 | static bool device_has_rmrr(struct pci_dev *dev) | 2498 | static bool device_has_rmrr(struct device *dev) |
| 2308 | { | 2499 | { |
| 2309 | struct dmar_rmrr_unit *rmrr; | 2500 | struct dmar_rmrr_unit *rmrr; |
| 2501 | struct device *tmp; | ||
| 2310 | int i; | 2502 | int i; |
| 2311 | 2503 | ||
| 2504 | rcu_read_lock(); | ||
| 2312 | for_each_rmrr_units(rmrr) { | 2505 | for_each_rmrr_units(rmrr) { |
| 2313 | for (i = 0; i < rmrr->devices_cnt; i++) { | 2506 | /* |
| 2314 | /* | 2507 | * Return TRUE if this RMRR contains the device that |
| 2315 | * Return TRUE if this RMRR contains the device that | 2508 | * is passed in. |
| 2316 | * is passed in. | 2509 | */ |
| 2317 | */ | 2510 | for_each_active_dev_scope(rmrr->devices, |
| 2318 | if (rmrr->devices[i] == dev) | 2511 | rmrr->devices_cnt, i, tmp) |
| 2512 | if (tmp == dev) { | ||
| 2513 | rcu_read_unlock(); | ||
| 2319 | return true; | 2514 | return true; |
| 2320 | } | 2515 | } |
| 2321 | } | 2516 | } |
| 2517 | rcu_read_unlock(); | ||
| 2322 | return false; | 2518 | return false; |
| 2323 | } | 2519 | } |
| 2324 | 2520 | ||
| 2325 | static int iommu_should_identity_map(struct pci_dev *pdev, int startup) | 2521 | static int iommu_should_identity_map(struct device *dev, int startup) |
| 2326 | { | 2522 | { |
| 2327 | 2523 | ||
| 2328 | /* | 2524 | if (dev_is_pci(dev)) { |
| 2329 | * We want to prevent any device associated with an RMRR from | 2525 | struct pci_dev *pdev = to_pci_dev(dev); |
| 2330 | * getting placed into the SI Domain. This is done because | ||
| 2331 | * problems exist when devices are moved in and out of domains | ||
| 2332 | * and their respective RMRR info is lost. We exempt USB devices | ||
| 2333 | * from this process due to their usage of RMRRs that are known | ||
| 2334 | * to not be needed after BIOS hand-off to OS. | ||
| 2335 | */ | ||
| 2336 | if (device_has_rmrr(pdev) && | ||
| 2337 | (pdev->class >> 8) != PCI_CLASS_SERIAL_USB) | ||
| 2338 | return 0; | ||
| 2339 | 2526 | ||
| 2340 | if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev)) | 2527 | /* |
| 2341 | return 1; | 2528 | * We want to prevent any device associated with an RMRR from |
| 2529 | * getting placed into the SI Domain. This is done because | ||
| 2530 | * problems exist when devices are moved in and out of domains | ||
| 2531 | * and their respective RMRR info is lost. We exempt USB devices | ||
| 2532 | * from this process due to their usage of RMRRs that are known | ||
| 2533 | * to not be needed after BIOS hand-off to OS. | ||
| 2534 | */ | ||
| 2535 | if (device_has_rmrr(dev) && | ||
| 2536 | (pdev->class >> 8) != PCI_CLASS_SERIAL_USB) | ||
| 2537 | return 0; | ||
| 2342 | 2538 | ||
| 2343 | if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev)) | 2539 | if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev)) |
| 2344 | return 1; | 2540 | return 1; |
| 2345 | 2541 | ||
| 2346 | if (!(iommu_identity_mapping & IDENTMAP_ALL)) | 2542 | if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev)) |
| 2347 | return 0; | 2543 | return 1; |
| 2348 | 2544 | ||
| 2349 | /* | 2545 | if (!(iommu_identity_mapping & IDENTMAP_ALL)) |
| 2350 | * We want to start off with all devices in the 1:1 domain, and | ||
| 2351 | * take them out later if we find they can't access all of memory. | ||
| 2352 | * | ||
| 2353 | * However, we can't do this for PCI devices behind bridges, | ||
| 2354 | * because all PCI devices behind the same bridge will end up | ||
| 2355 | * with the same source-id on their transactions. | ||
| 2356 | * | ||
| 2357 | * Practically speaking, we can't change things around for these | ||
| 2358 | * devices at run-time, because we can't be sure there'll be no | ||
| 2359 | * DMA transactions in flight for any of their siblings. | ||
| 2360 | * | ||
| 2361 | * So PCI devices (unless they're on the root bus) as well as | ||
| 2362 | * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of | ||
| 2363 | * the 1:1 domain, just in _case_ one of their siblings turns out | ||
| 2364 | * not to be able to map all of memory. | ||
| 2365 | */ | ||
| 2366 | if (!pci_is_pcie(pdev)) { | ||
| 2367 | if (!pci_is_root_bus(pdev->bus)) | ||
| 2368 | return 0; | 2546 | return 0; |
| 2369 | if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI) | 2547 | |
| 2548 | /* | ||
| 2549 | * We want to start off with all devices in the 1:1 domain, and | ||
| 2550 | * take them out later if we find they can't access all of memory. | ||
| 2551 | * | ||
| 2552 | * However, we can't do this for PCI devices behind bridges, | ||
| 2553 | * because all PCI devices behind the same bridge will end up | ||
| 2554 | * with the same source-id on their transactions. | ||
| 2555 | * | ||
| 2556 | * Practically speaking, we can't change things around for these | ||
| 2557 | * devices at run-time, because we can't be sure there'll be no | ||
| 2558 | * DMA transactions in flight for any of their siblings. | ||
| 2559 | * | ||
| 2560 | * So PCI devices (unless they're on the root bus) as well as | ||
| 2561 | * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of | ||
| 2562 | * the 1:1 domain, just in _case_ one of their siblings turns out | ||
| 2563 | * not to be able to map all of memory. | ||
| 2564 | */ | ||
| 2565 | if (!pci_is_pcie(pdev)) { | ||
| 2566 | if (!pci_is_root_bus(pdev->bus)) | ||
| 2567 | return 0; | ||
| 2568 | if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI) | ||
| 2569 | return 0; | ||
| 2570 | } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE) | ||
| 2370 | return 0; | 2571 | return 0; |
| 2371 | } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE) | 2572 | } else { |
| 2372 | return 0; | 2573 | if (device_has_rmrr(dev)) |
| 2574 | return 0; | ||
| 2575 | } | ||
| 2373 | 2576 | ||
| 2374 | /* | 2577 | /* |
| 2375 | * At boot time, we don't yet know if devices will be 64-bit capable. | 2578 | * At boot time, we don't yet know if devices will be 64-bit capable. |
| 2376 | * Assume that they will -- if they turn out not to be, then we can | 2579 | * Assume that they will -- if they turn out not to be, then we can |
| 2377 | * take them out of the 1:1 domain later. | 2580 | * take them out of the 1:1 domain later. |
| 2378 | */ | 2581 | */ |
| 2379 | if (!startup) { | 2582 | if (!startup) { |
| @@ -2381,42 +2584,77 @@ static int iommu_should_identity_map(struct pci_dev *pdev, int startup) | |||
| 2381 | * If the device's dma_mask is less than the system's memory | 2584 | * If the device's dma_mask is less than the system's memory |
| 2382 | * size then this is not a candidate for identity mapping. | 2585 | * size then this is not a candidate for identity mapping. |
| 2383 | */ | 2586 | */ |
| 2384 | u64 dma_mask = pdev->dma_mask; | 2587 | u64 dma_mask = *dev->dma_mask; |
| 2385 | 2588 | ||
| 2386 | if (pdev->dev.coherent_dma_mask && | 2589 | if (dev->coherent_dma_mask && |
| 2387 | pdev->dev.coherent_dma_mask < dma_mask) | 2590 | dev->coherent_dma_mask < dma_mask) |
| 2388 | dma_mask = pdev->dev.coherent_dma_mask; | 2591 | dma_mask = dev->coherent_dma_mask; |
| 2389 | 2592 | ||
| 2390 | return dma_mask >= dma_get_required_mask(&pdev->dev); | 2593 | return dma_mask >= dma_get_required_mask(dev); |
| 2391 | } | 2594 | } |
| 2392 | 2595 | ||
| 2393 | return 1; | 2596 | return 1; |
| 2394 | } | 2597 | } |
| 2395 | 2598 | ||
| 2599 | static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw) | ||
| 2600 | { | ||
| 2601 | int ret; | ||
| 2602 | |||
| 2603 | if (!iommu_should_identity_map(dev, 1)) | ||
| 2604 | return 0; | ||
| 2605 | |||
| 2606 | ret = domain_add_dev_info(si_domain, dev, | ||
| 2607 | hw ? CONTEXT_TT_PASS_THROUGH : | ||
| 2608 | CONTEXT_TT_MULTI_LEVEL); | ||
| 2609 | if (!ret) | ||
| 2610 | pr_info("IOMMU: %s identity mapping for device %s\n", | ||
| 2611 | hw ? "hardware" : "software", dev_name(dev)); | ||
| 2612 | else if (ret == -ENODEV) | ||
| 2613 | /* device not associated with an iommu */ | ||
| 2614 | ret = 0; | ||
| 2615 | |||
| 2616 | return ret; | ||
| 2617 | } | ||
| 2618 | |||
| 2619 | |||
| 2396 | static int __init iommu_prepare_static_identity_mapping(int hw) | 2620 | static int __init iommu_prepare_static_identity_mapping(int hw) |
| 2397 | { | 2621 | { |
| 2398 | struct pci_dev *pdev = NULL; | 2622 | struct pci_dev *pdev = NULL; |
| 2399 | int ret; | 2623 | struct dmar_drhd_unit *drhd; |
| 2624 | struct intel_iommu *iommu; | ||
| 2625 | struct device *dev; | ||
| 2626 | int i; | ||
| 2627 | int ret = 0; | ||
| 2400 | 2628 | ||
| 2401 | ret = si_domain_init(hw); | 2629 | ret = si_domain_init(hw); |
| 2402 | if (ret) | 2630 | if (ret) |
| 2403 | return -EFAULT; | 2631 | return -EFAULT; |
| 2404 | 2632 | ||
| 2405 | for_each_pci_dev(pdev) { | 2633 | for_each_pci_dev(pdev) { |
| 2406 | if (iommu_should_identity_map(pdev, 1)) { | 2634 | ret = dev_prepare_static_identity_mapping(&pdev->dev, hw); |
| 2407 | ret = domain_add_dev_info(si_domain, pdev, | 2635 | if (ret) |
| 2408 | hw ? CONTEXT_TT_PASS_THROUGH : | 2636 | return ret; |
| 2409 | CONTEXT_TT_MULTI_LEVEL); | 2637 | } |
| 2410 | if (ret) { | 2638 | |
| 2411 | /* device not associated with an iommu */ | 2639 | for_each_active_iommu(iommu, drhd) |
| 2412 | if (ret == -ENODEV) | 2640 | for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) { |
| 2413 | continue; | 2641 | struct acpi_device_physical_node *pn; |
| 2414 | return ret; | 2642 | struct acpi_device *adev; |
| 2643 | |||
| 2644 | if (dev->bus != &acpi_bus_type) | ||
| 2645 | continue; | ||
| 2646 | |||
| 2647 | adev = to_acpi_device(dev); | ||
| 2648 | mutex_lock(&adev->physical_node_lock); | ||
| 2649 | list_for_each_entry(pn, &adev->physical_node_list, node) { | ||
| 2650 | ret = dev_prepare_static_identity_mapping(pn->dev, hw); | ||
| 2651 | if (ret) | ||
| 2652 | break; | ||
| 2415 | } | 2653 | } |
| 2416 | pr_info("IOMMU: %s identity mapping for device %s\n", | 2654 | mutex_unlock(&adev->physical_node_lock); |
| 2417 | hw ? "hardware" : "software", pci_name(pdev)); | 2655 | if (ret) |
| 2656 | return ret; | ||
| 2418 | } | 2657 | } |
| 2419 | } | ||
| 2420 | 2658 | ||
| 2421 | return 0; | 2659 | return 0; |
| 2422 | } | 2660 | } |
| @@ -2425,7 +2663,7 @@ static int __init init_dmars(void) | |||
| 2425 | { | 2663 | { |
| 2426 | struct dmar_drhd_unit *drhd; | 2664 | struct dmar_drhd_unit *drhd; |
| 2427 | struct dmar_rmrr_unit *rmrr; | 2665 | struct dmar_rmrr_unit *rmrr; |
| 2428 | struct pci_dev *pdev; | 2666 | struct device *dev; |
| 2429 | struct intel_iommu *iommu; | 2667 | struct intel_iommu *iommu; |
| 2430 | int i, ret; | 2668 | int i, ret; |
| 2431 | 2669 | ||
| @@ -2461,7 +2699,7 @@ static int __init init_dmars(void) | |||
| 2461 | sizeof(struct deferred_flush_tables), GFP_KERNEL); | 2699 | sizeof(struct deferred_flush_tables), GFP_KERNEL); |
| 2462 | if (!deferred_flush) { | 2700 | if (!deferred_flush) { |
| 2463 | ret = -ENOMEM; | 2701 | ret = -ENOMEM; |
| 2464 | goto error; | 2702 | goto free_g_iommus; |
| 2465 | } | 2703 | } |
| 2466 | 2704 | ||
| 2467 | for_each_active_iommu(iommu, drhd) { | 2705 | for_each_active_iommu(iommu, drhd) { |
| @@ -2469,7 +2707,7 @@ static int __init init_dmars(void) | |||
| 2469 | 2707 | ||
| 2470 | ret = iommu_init_domains(iommu); | 2708 | ret = iommu_init_domains(iommu); |
| 2471 | if (ret) | 2709 | if (ret) |
| 2472 | goto error; | 2710 | goto free_iommu; |
| 2473 | 2711 | ||
| 2474 | /* | 2712 | /* |
| 2475 | * TBD: | 2713 | * TBD: |
| @@ -2479,7 +2717,7 @@ static int __init init_dmars(void) | |||
| 2479 | ret = iommu_alloc_root_entry(iommu); | 2717 | ret = iommu_alloc_root_entry(iommu); |
| 2480 | if (ret) { | 2718 | if (ret) { |
| 2481 | printk(KERN_ERR "IOMMU: allocate root entry failed\n"); | 2719 | printk(KERN_ERR "IOMMU: allocate root entry failed\n"); |
| 2482 | goto error; | 2720 | goto free_iommu; |
| 2483 | } | 2721 | } |
| 2484 | if (!ecap_pass_through(iommu->ecap)) | 2722 | if (!ecap_pass_through(iommu->ecap)) |
| 2485 | hw_pass_through = 0; | 2723 | hw_pass_through = 0; |
| @@ -2548,7 +2786,7 @@ static int __init init_dmars(void) | |||
| 2548 | ret = iommu_prepare_static_identity_mapping(hw_pass_through); | 2786 | ret = iommu_prepare_static_identity_mapping(hw_pass_through); |
| 2549 | if (ret) { | 2787 | if (ret) { |
| 2550 | printk(KERN_CRIT "Failed to setup IOMMU pass-through\n"); | 2788 | printk(KERN_CRIT "Failed to setup IOMMU pass-through\n"); |
| 2551 | goto error; | 2789 | goto free_iommu; |
| 2552 | } | 2790 | } |
| 2553 | } | 2791 | } |
| 2554 | /* | 2792 | /* |
| @@ -2567,15 +2805,10 @@ static int __init init_dmars(void) | |||
| 2567 | */ | 2805 | */ |
| 2568 | printk(KERN_INFO "IOMMU: Setting RMRR:\n"); | 2806 | printk(KERN_INFO "IOMMU: Setting RMRR:\n"); |
| 2569 | for_each_rmrr_units(rmrr) { | 2807 | for_each_rmrr_units(rmrr) { |
| 2570 | for (i = 0; i < rmrr->devices_cnt; i++) { | 2808 | /* some BIOSes list non-existent devices in the DMAR table. */ |
| 2571 | pdev = rmrr->devices[i]; | 2809 | for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt, |
| 2572 | /* | 2810 | i, dev) { |
| 2573 | * some BIOS lists non-exist devices in DMAR | 2811 | ret = iommu_prepare_rmrr_dev(rmrr, dev); |
| 2574 | * table. | ||
| 2575 | */ | ||
| 2576 | if (!pdev) | ||
| 2577 | continue; | ||
| 2578 | ret = iommu_prepare_rmrr_dev(rmrr, pdev); | ||
| 2579 | if (ret) | 2812 | if (ret) |
| 2580 | printk(KERN_ERR | 2813 | printk(KERN_ERR |
| 2581 | "IOMMU: mapping reserved region failed\n"); | 2814 | "IOMMU: mapping reserved region failed\n"); |
| @@ -2606,7 +2839,7 @@ static int __init init_dmars(void) | |||
| 2606 | 2839 | ||
| 2607 | ret = dmar_set_interrupt(iommu); | 2840 | ret = dmar_set_interrupt(iommu); |
| 2608 | if (ret) | 2841 | if (ret) |
| 2609 | goto error; | 2842 | goto free_iommu; |
| 2610 | 2843 | ||
| 2611 | iommu_set_root_entry(iommu); | 2844 | iommu_set_root_entry(iommu); |
| 2612 | 2845 | ||
| @@ -2615,17 +2848,20 @@ static int __init init_dmars(void) | |||
| 2615 | 2848 | ||
| 2616 | ret = iommu_enable_translation(iommu); | 2849 | ret = iommu_enable_translation(iommu); |
| 2617 | if (ret) | 2850 | if (ret) |
| 2618 | goto error; | 2851 | goto free_iommu; |
| 2619 | 2852 | ||
| 2620 | iommu_disable_protect_mem_regions(iommu); | 2853 | iommu_disable_protect_mem_regions(iommu); |
| 2621 | } | 2854 | } |
| 2622 | 2855 | ||
| 2623 | return 0; | 2856 | return 0; |
| 2624 | error: | 2857 | |
| 2858 | free_iommu: | ||
| 2625 | for_each_active_iommu(iommu, drhd) | 2859 | for_each_active_iommu(iommu, drhd) |
| 2626 | free_dmar_iommu(iommu); | 2860 | free_dmar_iommu(iommu); |
| 2627 | kfree(deferred_flush); | 2861 | kfree(deferred_flush); |
| 2862 | free_g_iommus: | ||
| 2628 | kfree(g_iommus); | 2863 | kfree(g_iommus); |
| 2864 | error: | ||
| 2629 | return ret; | 2865 | return ret; |
| 2630 | } | 2866 | } |
| 2631 | 2867 | ||
| @@ -2634,7 +2870,6 @@ static struct iova *intel_alloc_iova(struct device *dev, | |||
| 2634 | struct dmar_domain *domain, | 2870 | struct dmar_domain *domain, |
| 2635 | unsigned long nrpages, uint64_t dma_mask) | 2871 | unsigned long nrpages, uint64_t dma_mask) |
| 2636 | { | 2872 | { |
| 2637 | struct pci_dev *pdev = to_pci_dev(dev); | ||
| 2638 | struct iova *iova = NULL; | 2873 | struct iova *iova = NULL; |
| 2639 | 2874 | ||
| 2640 | /* Restrict dma_mask to the width that the iommu can handle */ | 2875 | /* Restrict dma_mask to the width that the iommu can handle */ |
| @@ -2654,34 +2889,31 @@ static struct iova *intel_alloc_iova(struct device *dev, | |||
| 2654 | iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1); | 2889 | iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1); |
| 2655 | if (unlikely(!iova)) { | 2890 | if (unlikely(!iova)) { |
| 2656 | printk(KERN_ERR "Allocating %ld-page iova for %s failed", | 2891 | printk(KERN_ERR "Allocating %ld-page iova for %s failed", |
| 2657 | nrpages, pci_name(pdev)); | 2892 | nrpages, dev_name(dev)); |
| 2658 | return NULL; | 2893 | return NULL; |
| 2659 | } | 2894 | } |
| 2660 | 2895 | ||
| 2661 | return iova; | 2896 | return iova; |
| 2662 | } | 2897 | } |
| 2663 | 2898 | ||
| 2664 | static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev) | 2899 | static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev) |
| 2665 | { | 2900 | { |
| 2666 | struct dmar_domain *domain; | 2901 | struct dmar_domain *domain; |
| 2667 | int ret; | 2902 | int ret; |
| 2668 | 2903 | ||
| 2669 | domain = get_domain_for_dev(pdev, | 2904 | domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH); |
| 2670 | DEFAULT_DOMAIN_ADDRESS_WIDTH); | ||
| 2671 | if (!domain) { | 2905 | if (!domain) { |
| 2672 | printk(KERN_ERR | 2906 | printk(KERN_ERR "Allocating domain for %s failed", |
| 2673 | "Allocating domain for %s failed", pci_name(pdev)); | 2907 | dev_name(dev)); |
| 2674 | return NULL; | 2908 | return NULL; |
| 2675 | } | 2909 | } |
| 2676 | 2910 | ||
| 2677 | /* make sure context mapping is ok */ | 2911 | /* make sure context mapping is ok */ |
| 2678 | if (unlikely(!domain_context_mapped(pdev))) { | 2912 | if (unlikely(!domain_context_mapped(dev))) { |
| 2679 | ret = domain_context_mapping(domain, pdev, | 2913 | ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL); |
| 2680 | CONTEXT_TT_MULTI_LEVEL); | ||
| 2681 | if (ret) { | 2914 | if (ret) { |
| 2682 | printk(KERN_ERR | 2915 | printk(KERN_ERR "Domain context map for %s failed", |
| 2683 | "Domain context map for %s failed", | 2916 | dev_name(dev)); |
| 2684 | pci_name(pdev)); | ||
| 2685 | return NULL; | 2917 | return NULL; |
| 2686 | } | 2918 | } |
| 2687 | } | 2919 | } |
| @@ -2689,51 +2921,46 @@ static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev) | |||
| 2689 | return domain; | 2921 | return domain; |
| 2690 | } | 2922 | } |
| 2691 | 2923 | ||
| 2692 | static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev) | 2924 | static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev) |
| 2693 | { | 2925 | { |
| 2694 | struct device_domain_info *info; | 2926 | struct device_domain_info *info; |
| 2695 | 2927 | ||
| 2696 | /* No lock here, assumes no domain exit in normal case */ | 2928 | /* No lock here, assumes no domain exit in normal case */ |
| 2697 | info = dev->dev.archdata.iommu; | 2929 | info = dev->archdata.iommu; |
| 2698 | if (likely(info)) | 2930 | if (likely(info)) |
| 2699 | return info->domain; | 2931 | return info->domain; |
| 2700 | 2932 | ||
| 2701 | return __get_valid_domain_for_dev(dev); | 2933 | return __get_valid_domain_for_dev(dev); |
| 2702 | } | 2934 | } |
| 2703 | 2935 | ||
| 2704 | static int iommu_dummy(struct pci_dev *pdev) | 2936 | static int iommu_dummy(struct device *dev) |
| 2705 | { | 2937 | { |
| 2706 | return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO; | 2938 | return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO; |
| 2707 | } | 2939 | } |
| 2708 | 2940 | ||
| 2709 | /* Check if the pdev needs to go through non-identity map and unmap process.*/ | 2941 | /* Check if the dev needs to go through the non-identity map and unmap process. */ |
| 2710 | static int iommu_no_mapping(struct device *dev) | 2942 | static int iommu_no_mapping(struct device *dev) |
| 2711 | { | 2943 | { |
| 2712 | struct pci_dev *pdev; | ||
| 2713 | int found; | 2944 | int found; |
| 2714 | 2945 | ||
| 2715 | if (unlikely(!dev_is_pci(dev))) | 2946 | if (iommu_dummy(dev)) |
| 2716 | return 1; | ||
| 2717 | |||
| 2718 | pdev = to_pci_dev(dev); | ||
| 2719 | if (iommu_dummy(pdev)) | ||
| 2720 | return 1; | 2947 | return 1; |
| 2721 | 2948 | ||
| 2722 | if (!iommu_identity_mapping) | 2949 | if (!iommu_identity_mapping) |
| 2723 | return 0; | 2950 | return 0; |
| 2724 | 2951 | ||
| 2725 | found = identity_mapping(pdev); | 2952 | found = identity_mapping(dev); |
| 2726 | if (found) { | 2953 | if (found) { |
| 2727 | if (iommu_should_identity_map(pdev, 0)) | 2954 | if (iommu_should_identity_map(dev, 0)) |
| 2728 | return 1; | 2955 | return 1; |
| 2729 | else { | 2956 | else { |
| 2730 | /* | 2957 | /* |
| 2731 | * 32 bit DMA is removed from si_domain and fall back | 2958 | * 32 bit DMA is removed from si_domain and fall back |
| 2732 | * to non-identity mapping. | 2959 | * to non-identity mapping. |
| 2733 | */ | 2960 | */ |
| 2734 | domain_remove_one_dev_info(si_domain, pdev); | 2961 | domain_remove_one_dev_info(si_domain, dev); |
| 2735 | printk(KERN_INFO "32bit %s uses non-identity mapping\n", | 2962 | printk(KERN_INFO "32bit %s uses non-identity mapping\n", |
| 2736 | pci_name(pdev)); | 2963 | dev_name(dev)); |
| 2737 | return 0; | 2964 | return 0; |
| 2738 | } | 2965 | } |
| 2739 | } else { | 2966 | } else { |
| @@ -2741,15 +2968,15 @@ static int iommu_no_mapping(struct device *dev) | |||
| 2741 | * In case of a detached 64 bit DMA device from vm, the device | 2968 | * In case of a detached 64 bit DMA device from vm, the device |
| 2742 | * is put into si_domain for identity mapping. | 2969 | * is put into si_domain for identity mapping. |
| 2743 | */ | 2970 | */ |
| 2744 | if (iommu_should_identity_map(pdev, 0)) { | 2971 | if (iommu_should_identity_map(dev, 0)) { |
| 2745 | int ret; | 2972 | int ret; |
| 2746 | ret = domain_add_dev_info(si_domain, pdev, | 2973 | ret = domain_add_dev_info(si_domain, dev, |
| 2747 | hw_pass_through ? | 2974 | hw_pass_through ? |
| 2748 | CONTEXT_TT_PASS_THROUGH : | 2975 | CONTEXT_TT_PASS_THROUGH : |
| 2749 | CONTEXT_TT_MULTI_LEVEL); | 2976 | CONTEXT_TT_MULTI_LEVEL); |
| 2750 | if (!ret) { | 2977 | if (!ret) { |
| 2751 | printk(KERN_INFO "64bit %s uses identity mapping\n", | 2978 | printk(KERN_INFO "64bit %s uses identity mapping\n", |
| 2752 | pci_name(pdev)); | 2979 | dev_name(dev)); |
| 2753 | return 1; | 2980 | return 1; |
| 2754 | } | 2981 | } |
| 2755 | } | 2982 | } |
| @@ -2758,10 +2985,9 @@ static int iommu_no_mapping(struct device *dev) | |||
| 2758 | return 0; | 2985 | return 0; |
| 2759 | } | 2986 | } |
| 2760 | 2987 | ||
| 2761 | static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, | 2988 | static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr, |
| 2762 | size_t size, int dir, u64 dma_mask) | 2989 | size_t size, int dir, u64 dma_mask) |
| 2763 | { | 2990 | { |
| 2764 | struct pci_dev *pdev = to_pci_dev(hwdev); | ||
| 2765 | struct dmar_domain *domain; | 2991 | struct dmar_domain *domain; |
| 2766 | phys_addr_t start_paddr; | 2992 | phys_addr_t start_paddr; |
| 2767 | struct iova *iova; | 2993 | struct iova *iova; |
| @@ -2772,17 +2998,17 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, | |||
| 2772 | 2998 | ||
| 2773 | BUG_ON(dir == DMA_NONE); | 2999 | BUG_ON(dir == DMA_NONE); |
| 2774 | 3000 | ||
| 2775 | if (iommu_no_mapping(hwdev)) | 3001 | if (iommu_no_mapping(dev)) |
| 2776 | return paddr; | 3002 | return paddr; |
| 2777 | 3003 | ||
| 2778 | domain = get_valid_domain_for_dev(pdev); | 3004 | domain = get_valid_domain_for_dev(dev); |
| 2779 | if (!domain) | 3005 | if (!domain) |
| 2780 | return 0; | 3006 | return 0; |
| 2781 | 3007 | ||
| 2782 | iommu = domain_get_iommu(domain); | 3008 | iommu = domain_get_iommu(domain); |
| 2783 | size = aligned_nrpages(paddr, size); | 3009 | size = aligned_nrpages(paddr, size); |
| 2784 | 3010 | ||
| 2785 | iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask); | 3011 | iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask); |
| 2786 | if (!iova) | 3012 | if (!iova) |
| 2787 | goto error; | 3013 | goto error; |
| 2788 | 3014 | ||
| @@ -2808,7 +3034,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, | |||
| 2808 | 3034 | ||
| 2809 | /* it's a non-present to present mapping. Only flush if caching mode */ | 3035 | /* it's a non-present to present mapping. Only flush if caching mode */ |
| 2810 | if (cap_caching_mode(iommu->cap)) | 3036 | if (cap_caching_mode(iommu->cap)) |
| 2811 | iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1); | 3037 | iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1); |
| 2812 | else | 3038 | else |
| 2813 | iommu_flush_write_buffer(iommu); | 3039 | iommu_flush_write_buffer(iommu); |
| 2814 | 3040 | ||
| @@ -2820,7 +3046,7 @@ error: | |||
| 2820 | if (iova) | 3046 | if (iova) |
| 2821 | __free_iova(&domain->iovad, iova); | 3047 | __free_iova(&domain->iovad, iova); |
| 2822 | printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n", | 3048 | printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n", |
| 2823 | pci_name(pdev), size, (unsigned long long)paddr, dir); | 3049 | dev_name(dev), size, (unsigned long long)paddr, dir); |
| 2824 | return 0; | 3050 | return 0; |
| 2825 | } | 3051 | } |
| 2826 | 3052 | ||
| @@ -2830,7 +3056,7 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page, | |||
| 2830 | struct dma_attrs *attrs) | 3056 | struct dma_attrs *attrs) |
| 2831 | { | 3057 | { |
| 2832 | return __intel_map_single(dev, page_to_phys(page) + offset, size, | 3058 | return __intel_map_single(dev, page_to_phys(page) + offset, size, |
| 2833 | dir, to_pci_dev(dev)->dma_mask); | 3059 | dir, *dev->dma_mask); |
| 2834 | } | 3060 | } |
| 2835 | 3061 | ||
| 2836 | static void flush_unmaps(void) | 3062 | static void flush_unmaps(void) |
| @@ -2860,13 +3086,16 @@ static void flush_unmaps(void) | |||
| 2860 | /* On real hardware multiple invalidations are expensive */ | 3086 | /* On real hardware multiple invalidations are expensive */ |
| 2861 | if (cap_caching_mode(iommu->cap)) | 3087 | if (cap_caching_mode(iommu->cap)) |
| 2862 | iommu_flush_iotlb_psi(iommu, domain->id, | 3088 | iommu_flush_iotlb_psi(iommu, domain->id, |
| 2863 | iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0); | 3089 | iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, |
| 3090 | !deferred_flush[i].freelist[j], 0); | ||
| 2864 | else { | 3091 | else { |
| 2865 | mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1)); | 3092 | mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1)); |
| 2866 | iommu_flush_dev_iotlb(deferred_flush[i].domain[j], | 3093 | iommu_flush_dev_iotlb(deferred_flush[i].domain[j], |
| 2867 | (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask); | 3094 | (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask); |
| 2868 | } | 3095 | } |
| 2869 | __free_iova(&deferred_flush[i].domain[j]->iovad, iova); | 3096 | __free_iova(&deferred_flush[i].domain[j]->iovad, iova); |
| 3097 | if (deferred_flush[i].freelist[j]) | ||
| 3098 | dma_free_pagelist(deferred_flush[i].freelist[j]); | ||
| 2870 | } | 3099 | } |
| 2871 | deferred_flush[i].next = 0; | 3100 | deferred_flush[i].next = 0; |
| 2872 | } | 3101 | } |
| @@ -2883,7 +3112,7 @@ static void flush_unmaps_timeout(unsigned long data) | |||
| 2883 | spin_unlock_irqrestore(&async_umap_flush_lock, flags); | 3112 | spin_unlock_irqrestore(&async_umap_flush_lock, flags); |
| 2884 | } | 3113 | } |
| 2885 | 3114 | ||
| 2886 | static void add_unmap(struct dmar_domain *dom, struct iova *iova) | 3115 | static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist) |
| 2887 | { | 3116 | { |
| 2888 | unsigned long flags; | 3117 | unsigned long flags; |
| 2889 | int next, iommu_id; | 3118 | int next, iommu_id; |
| @@ -2899,6 +3128,7 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova) | |||
| 2899 | next = deferred_flush[iommu_id].next; | 3128 | next = deferred_flush[iommu_id].next; |
| 2900 | deferred_flush[iommu_id].domain[next] = dom; | 3129 | deferred_flush[iommu_id].domain[next] = dom; |
| 2901 | deferred_flush[iommu_id].iova[next] = iova; | 3130 | deferred_flush[iommu_id].iova[next] = iova; |
| 3131 | deferred_flush[iommu_id].freelist[next] = freelist; | ||
| 2902 | deferred_flush[iommu_id].next++; | 3132 | deferred_flush[iommu_id].next++; |
| 2903 | 3133 | ||
| 2904 | if (!timer_on) { | 3134 | if (!timer_on) { |
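add_unmap() above now queues the iova together with the freelist of page-table pages and defers the expensive IOTLB invalidation to a later flush_unmaps() pass, triggered by the timer or when the per-IOMMU queue fills. A rough user-space analogue of that batch-and-flush-later pattern, with made-up names and batch size, could be:

/*
 * Rough user-space analogue of the batch-and-flush-later pattern used by
 * add_unmap()/flush_unmaps() above -- not the driver's implementation;
 * names and the batch size are made up.
 */
#include <stdio.h>

#define BATCH_SIZE 8

struct pending_unmap {
	unsigned long start_pfn;
	unsigned long nr_pages;
};

static struct pending_unmap pending[BATCH_SIZE];
static int npending;

/* stands in for one expensive IOTLB invalidation plus freeing page tables */
static void flush_batch(void)
{
	for (int i = 0; i < npending; i++)
		printf("flush [%lx +%lu]\n",
		       pending[i].start_pfn, pending[i].nr_pages);
	npending = 0;
}

/* analogue of add_unmap(): queue now, pay the flush cost later */
static void queue_unmap(unsigned long start_pfn, unsigned long nr_pages)
{
	if (npending == BATCH_SIZE)
		flush_batch();
	pending[npending].start_pfn = start_pfn;
	pending[npending].nr_pages  = nr_pages;
	npending++;
}

int main(void)
{
	for (unsigned long pfn = 0; pfn < 20; pfn += 2)
		queue_unmap(pfn, 2);
	flush_batch();	/* analogue of the timer-driven flush_unmaps() */
	return 0;
}

The point of the pattern, as the later comment in intel_unmap_page() notes, is to amortize the invalidation cost over many unmaps instead of paying it on every one.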
| @@ -2913,16 +3143,16 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, | |||
| 2913 | size_t size, enum dma_data_direction dir, | 3143 | size_t size, enum dma_data_direction dir, |
| 2914 | struct dma_attrs *attrs) | 3144 | struct dma_attrs *attrs) |
| 2915 | { | 3145 | { |
| 2916 | struct pci_dev *pdev = to_pci_dev(dev); | ||
| 2917 | struct dmar_domain *domain; | 3146 | struct dmar_domain *domain; |
| 2918 | unsigned long start_pfn, last_pfn; | 3147 | unsigned long start_pfn, last_pfn; |
| 2919 | struct iova *iova; | 3148 | struct iova *iova; |
| 2920 | struct intel_iommu *iommu; | 3149 | struct intel_iommu *iommu; |
| 3150 | struct page *freelist; | ||
| 2921 | 3151 | ||
| 2922 | if (iommu_no_mapping(dev)) | 3152 | if (iommu_no_mapping(dev)) |
| 2923 | return; | 3153 | return; |
| 2924 | 3154 | ||
| 2925 | domain = find_domain(pdev); | 3155 | domain = find_domain(dev); |
| 2926 | BUG_ON(!domain); | 3156 | BUG_ON(!domain); |
| 2927 | 3157 | ||
| 2928 | iommu = domain_get_iommu(domain); | 3158 | iommu = domain_get_iommu(domain); |
| @@ -2936,21 +3166,18 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, | |||
| 2936 | last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1; | 3166 | last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1; |
| 2937 | 3167 | ||
| 2938 | pr_debug("Device %s unmapping: pfn %lx-%lx\n", | 3168 | pr_debug("Device %s unmapping: pfn %lx-%lx\n", |
| 2939 | pci_name(pdev), start_pfn, last_pfn); | 3169 | dev_name(dev), start_pfn, last_pfn); |
| 2940 | 3170 | ||
| 2941 | /* clear the whole page */ | 3171 | freelist = domain_unmap(domain, start_pfn, last_pfn); |
| 2942 | dma_pte_clear_range(domain, start_pfn, last_pfn); | ||
| 2943 | |||
| 2944 | /* free page tables */ | ||
| 2945 | dma_pte_free_pagetable(domain, start_pfn, last_pfn); | ||
| 2946 | 3172 | ||
| 2947 | if (intel_iommu_strict) { | 3173 | if (intel_iommu_strict) { |
| 2948 | iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, | 3174 | iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, |
| 2949 | last_pfn - start_pfn + 1, 0); | 3175 | last_pfn - start_pfn + 1, !freelist, 0); |
| 2950 | /* free iova */ | 3176 | /* free iova */ |
| 2951 | __free_iova(&domain->iovad, iova); | 3177 | __free_iova(&domain->iovad, iova); |
| 3178 | dma_free_pagelist(freelist); | ||
| 2952 | } else { | 3179 | } else { |
| 2953 | add_unmap(domain, iova); | 3180 | add_unmap(domain, iova, freelist); |
| 2954 | /* | 3181 | /* |
| 2955 | * queue up the release of the unmap to save the 1/6th of the | 3182 | * queue up the release of the unmap to save the 1/6th of the |
| 2956 | * cpu used up by the iotlb flush operation... | 3183 | * cpu used up by the iotlb flush operation... |
| @@ -2958,7 +3185,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, | |||
| 2958 | } | 3185 | } |
| 2959 | } | 3186 | } |
| 2960 | 3187 | ||
| 2961 | static void *intel_alloc_coherent(struct device *hwdev, size_t size, | 3188 | static void *intel_alloc_coherent(struct device *dev, size_t size, |
| 2962 | dma_addr_t *dma_handle, gfp_t flags, | 3189 | dma_addr_t *dma_handle, gfp_t flags, |
| 2963 | struct dma_attrs *attrs) | 3190 | struct dma_attrs *attrs) |
| 2964 | { | 3191 | { |
| @@ -2968,10 +3195,10 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size, | |||
| 2968 | size = PAGE_ALIGN(size); | 3195 | size = PAGE_ALIGN(size); |
| 2969 | order = get_order(size); | 3196 | order = get_order(size); |
| 2970 | 3197 | ||
| 2971 | if (!iommu_no_mapping(hwdev)) | 3198 | if (!iommu_no_mapping(dev)) |
| 2972 | flags &= ~(GFP_DMA | GFP_DMA32); | 3199 | flags &= ~(GFP_DMA | GFP_DMA32); |
| 2973 | else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) { | 3200 | else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) { |
| 2974 | if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32)) | 3201 | if (dev->coherent_dma_mask < DMA_BIT_MASK(32)) |
| 2975 | flags |= GFP_DMA; | 3202 | flags |= GFP_DMA; |
| 2976 | else | 3203 | else |
| 2977 | flags |= GFP_DMA32; | 3204 | flags |= GFP_DMA32; |
| @@ -2982,16 +3209,16 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size, | |||
| 2982 | return NULL; | 3209 | return NULL; |
| 2983 | memset(vaddr, 0, size); | 3210 | memset(vaddr, 0, size); |
| 2984 | 3211 | ||
| 2985 | *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size, | 3212 | *dma_handle = __intel_map_single(dev, virt_to_bus(vaddr), size, |
| 2986 | DMA_BIDIRECTIONAL, | 3213 | DMA_BIDIRECTIONAL, |
| 2987 | hwdev->coherent_dma_mask); | 3214 | dev->coherent_dma_mask); |
| 2988 | if (*dma_handle) | 3215 | if (*dma_handle) |
| 2989 | return vaddr; | 3216 | return vaddr; |
| 2990 | free_pages((unsigned long)vaddr, order); | 3217 | free_pages((unsigned long)vaddr, order); |
| 2991 | return NULL; | 3218 | return NULL; |
| 2992 | } | 3219 | } |
| 2993 | 3220 | ||
| 2994 | static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, | 3221 | static void intel_free_coherent(struct device *dev, size_t size, void *vaddr, |
| 2995 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 3222 | dma_addr_t dma_handle, struct dma_attrs *attrs) |
| 2996 | { | 3223 | { |
| 2997 | int order; | 3224 | int order; |
| @@ -2999,24 +3226,24 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, | |||
| 2999 | size = PAGE_ALIGN(size); | 3226 | size = PAGE_ALIGN(size); |
| 3000 | order = get_order(size); | 3227 | order = get_order(size); |
| 3001 | 3228 | ||
| 3002 | intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL); | 3229 | intel_unmap_page(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL); |
| 3003 | free_pages((unsigned long)vaddr, order); | 3230 | free_pages((unsigned long)vaddr, order); |
| 3004 | } | 3231 | } |
| 3005 | 3232 | ||
| 3006 | static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, | 3233 | static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist, |
| 3007 | int nelems, enum dma_data_direction dir, | 3234 | int nelems, enum dma_data_direction dir, |
| 3008 | struct dma_attrs *attrs) | 3235 | struct dma_attrs *attrs) |
| 3009 | { | 3236 | { |
| 3010 | struct pci_dev *pdev = to_pci_dev(hwdev); | ||
| 3011 | struct dmar_domain *domain; | 3237 | struct dmar_domain *domain; |
| 3012 | unsigned long start_pfn, last_pfn; | 3238 | unsigned long start_pfn, last_pfn; |
| 3013 | struct iova *iova; | 3239 | struct iova *iova; |
| 3014 | struct intel_iommu *iommu; | 3240 | struct intel_iommu *iommu; |
| 3241 | struct page *freelist; | ||
| 3015 | 3242 | ||
| 3016 | if (iommu_no_mapping(hwdev)) | 3243 | if (iommu_no_mapping(dev)) |
| 3017 | return; | 3244 | return; |
| 3018 | 3245 | ||
| 3019 | domain = find_domain(pdev); | 3246 | domain = find_domain(dev); |
| 3020 | BUG_ON(!domain); | 3247 | BUG_ON(!domain); |
| 3021 | 3248 | ||
| 3022 | iommu = domain_get_iommu(domain); | 3249 | iommu = domain_get_iommu(domain); |
| @@ -3029,19 +3256,16 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, | |||
| 3029 | start_pfn = mm_to_dma_pfn(iova->pfn_lo); | 3256 | start_pfn = mm_to_dma_pfn(iova->pfn_lo); |
| 3030 | last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1; | 3257 | last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1; |
| 3031 | 3258 | ||
| 3032 | /* clear the whole page */ | 3259 | freelist = domain_unmap(domain, start_pfn, last_pfn); |
| 3033 | dma_pte_clear_range(domain, start_pfn, last_pfn); | ||
| 3034 | |||
| 3035 | /* free page tables */ | ||
| 3036 | dma_pte_free_pagetable(domain, start_pfn, last_pfn); | ||
| 3037 | 3260 | ||
| 3038 | if (intel_iommu_strict) { | 3261 | if (intel_iommu_strict) { |
| 3039 | iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, | 3262 | iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, |
| 3040 | last_pfn - start_pfn + 1, 0); | 3263 | last_pfn - start_pfn + 1, !freelist, 0); |
| 3041 | /* free iova */ | 3264 | /* free iova */ |
| 3042 | __free_iova(&domain->iovad, iova); | 3265 | __free_iova(&domain->iovad, iova); |
| 3266 | dma_free_pagelist(freelist); | ||
| 3043 | } else { | 3267 | } else { |
| 3044 | add_unmap(domain, iova); | 3268 | add_unmap(domain, iova, freelist); |
| 3045 | /* | 3269 | /* |
| 3046 | * queue up the release of the unmap to save the 1/6th of the | 3270 | * queue up the release of the unmap to save the 1/6th of the |
| 3047 | * cpu used up by the iotlb flush operation... | 3271 | * cpu used up by the iotlb flush operation... |
| @@ -3063,11 +3287,10 @@ static int intel_nontranslate_map_sg(struct device *hddev, | |||
| 3063 | return nelems; | 3287 | return nelems; |
| 3064 | } | 3288 | } |
| 3065 | 3289 | ||
| 3066 | static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, | 3290 | static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems, |
| 3067 | enum dma_data_direction dir, struct dma_attrs *attrs) | 3291 | enum dma_data_direction dir, struct dma_attrs *attrs) |
| 3068 | { | 3292 | { |
| 3069 | int i; | 3293 | int i; |
| 3070 | struct pci_dev *pdev = to_pci_dev(hwdev); | ||
| 3071 | struct dmar_domain *domain; | 3294 | struct dmar_domain *domain; |
| 3072 | size_t size = 0; | 3295 | size_t size = 0; |
| 3073 | int prot = 0; | 3296 | int prot = 0; |
| @@ -3078,10 +3301,10 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne | |||
| 3078 | struct intel_iommu *iommu; | 3301 | struct intel_iommu *iommu; |
| 3079 | 3302 | ||
| 3080 | BUG_ON(dir == DMA_NONE); | 3303 | BUG_ON(dir == DMA_NONE); |
| 3081 | if (iommu_no_mapping(hwdev)) | 3304 | if (iommu_no_mapping(dev)) |
| 3082 | return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir); | 3305 | return intel_nontranslate_map_sg(dev, sglist, nelems, dir); |
| 3083 | 3306 | ||
| 3084 | domain = get_valid_domain_for_dev(pdev); | 3307 | domain = get_valid_domain_for_dev(dev); |
| 3085 | if (!domain) | 3308 | if (!domain) |
| 3086 | return 0; | 3309 | return 0; |
| 3087 | 3310 | ||
| @@ -3090,8 +3313,8 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne | |||
| 3090 | for_each_sg(sglist, sg, nelems, i) | 3313 | for_each_sg(sglist, sg, nelems, i) |
| 3091 | size += aligned_nrpages(sg->offset, sg->length); | 3314 | size += aligned_nrpages(sg->offset, sg->length); |
| 3092 | 3315 | ||
| 3093 | iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), | 3316 | iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), |
| 3094 | pdev->dma_mask); | 3317 | *dev->dma_mask); |
| 3095 | if (!iova) { | 3318 | if (!iova) { |
| 3096 | sglist->dma_length = 0; | 3319 | sglist->dma_length = 0; |
| 3097 | return 0; | 3320 | return 0; |
| @@ -3124,7 +3347,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne | |||
| 3124 | 3347 | ||
| 3125 | /* it's a non-present to present mapping. Only flush if caching mode */ | 3348 | /* it's a non-present to present mapping. Only flush if caching mode */ |
| 3126 | if (cap_caching_mode(iommu->cap)) | 3349 | if (cap_caching_mode(iommu->cap)) |
| 3127 | iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1); | 3350 | iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1); |
| 3128 | else | 3351 | else |
| 3129 | iommu_flush_write_buffer(iommu); | 3352 | iommu_flush_write_buffer(iommu); |
| 3130 | 3353 | ||
| @@ -3259,29 +3482,28 @@ DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quir | |||
| 3259 | static void __init init_no_remapping_devices(void) | 3482 | static void __init init_no_remapping_devices(void) |
| 3260 | { | 3483 | { |
| 3261 | struct dmar_drhd_unit *drhd; | 3484 | struct dmar_drhd_unit *drhd; |
| 3485 | struct device *dev; | ||
| 3486 | int i; | ||
| 3262 | 3487 | ||
| 3263 | for_each_drhd_unit(drhd) { | 3488 | for_each_drhd_unit(drhd) { |
| 3264 | if (!drhd->include_all) { | 3489 | if (!drhd->include_all) { |
| 3265 | int i; | 3490 | for_each_active_dev_scope(drhd->devices, |
| 3266 | for (i = 0; i < drhd->devices_cnt; i++) | 3491 | drhd->devices_cnt, i, dev) |
| 3267 | if (drhd->devices[i] != NULL) | 3492 | break; |
| 3268 | break; | 3493 | /* ignore DMAR unit if no devices exist */ |
| 3269 | /* ignore DMAR unit if no pci devices exist */ | ||
| 3270 | if (i == drhd->devices_cnt) | 3494 | if (i == drhd->devices_cnt) |
| 3271 | drhd->ignored = 1; | 3495 | drhd->ignored = 1; |
| 3272 | } | 3496 | } |
| 3273 | } | 3497 | } |
| 3274 | 3498 | ||
| 3275 | for_each_active_drhd_unit(drhd) { | 3499 | for_each_active_drhd_unit(drhd) { |
| 3276 | int i; | ||
| 3277 | if (drhd->include_all) | 3500 | if (drhd->include_all) |
| 3278 | continue; | 3501 | continue; |
| 3279 | 3502 | ||
| 3280 | for (i = 0; i < drhd->devices_cnt; i++) | 3503 | for_each_active_dev_scope(drhd->devices, |
| 3281 | if (drhd->devices[i] && | 3504 | drhd->devices_cnt, i, dev) |
| 3282 | !IS_GFX_DEVICE(drhd->devices[i])) | 3505 | if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev))) |
| 3283 | break; | 3506 | break; |
| 3284 | |||
| 3285 | if (i < drhd->devices_cnt) | 3507 | if (i < drhd->devices_cnt) |
| 3286 | continue; | 3508 | continue; |
| 3287 | 3509 | ||
| @@ -3291,11 +3513,9 @@ static void __init init_no_remapping_devices(void) | |||
| 3291 | intel_iommu_gfx_mapped = 1; | 3513 | intel_iommu_gfx_mapped = 1; |
| 3292 | } else { | 3514 | } else { |
| 3293 | drhd->ignored = 1; | 3515 | drhd->ignored = 1; |
| 3294 | for (i = 0; i < drhd->devices_cnt; i++) { | 3516 | for_each_active_dev_scope(drhd->devices, |
| 3295 | if (!drhd->devices[i]) | 3517 | drhd->devices_cnt, i, dev) |
| 3296 | continue; | 3518 | dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; |
| 3297 | drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; | ||
| 3298 | } | ||
| 3299 | } | 3519 | } |
| 3300 | } | 3520 | } |
| 3301 | } | 3521 | } |
| @@ -3438,13 +3658,6 @@ static void __init init_iommu_pm_ops(void) | |||
| 3438 | static inline void init_iommu_pm_ops(void) {} | 3658 | static inline void init_iommu_pm_ops(void) {} |
| 3439 | #endif /* CONFIG_PM */ | 3659 | #endif /* CONFIG_PM */ |
| 3440 | 3660 | ||
| 3441 | LIST_HEAD(dmar_rmrr_units); | ||
| 3442 | |||
| 3443 | static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr) | ||
| 3444 | { | ||
| 3445 | list_add(&rmrr->list, &dmar_rmrr_units); | ||
| 3446 | } | ||
| 3447 | |||
| 3448 | 3661 | ||
| 3449 | int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header) | 3662 | int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header) |
| 3450 | { | 3663 | { |
| @@ -3459,25 +3672,19 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header) | |||
| 3459 | rmrr = (struct acpi_dmar_reserved_memory *)header; | 3672 | rmrr = (struct acpi_dmar_reserved_memory *)header; |
| 3460 | rmrru->base_address = rmrr->base_address; | 3673 | rmrru->base_address = rmrr->base_address; |
| 3461 | rmrru->end_address = rmrr->end_address; | 3674 | rmrru->end_address = rmrr->end_address; |
| 3675 | rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1), | ||
| 3676 | ((void *)rmrr) + rmrr->header.length, | ||
| 3677 | &rmrru->devices_cnt); | ||
| 3678 | if (rmrru->devices_cnt && rmrru->devices == NULL) { | ||
| 3679 | kfree(rmrru); | ||
| 3680 | return -ENOMEM; | ||
| 3681 | } | ||
| 3462 | 3682 | ||
| 3463 | dmar_register_rmrr_unit(rmrru); | 3683 | list_add(&rmrru->list, &dmar_rmrr_units); |
| 3464 | return 0; | ||
| 3465 | } | ||
| 3466 | |||
| 3467 | static int __init | ||
| 3468 | rmrr_parse_dev(struct dmar_rmrr_unit *rmrru) | ||
| 3469 | { | ||
| 3470 | struct acpi_dmar_reserved_memory *rmrr; | ||
| 3471 | 3684 | ||
| 3472 | rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr; | 3685 | return 0; |
| 3473 | return dmar_parse_dev_scope((void *)(rmrr + 1), | ||
| 3474 | ((void *)rmrr) + rmrr->header.length, | ||
| 3475 | &rmrru->devices_cnt, &rmrru->devices, | ||
| 3476 | rmrr->segment); | ||
| 3477 | } | 3686 | } |
| 3478 | 3687 | ||
| 3479 | static LIST_HEAD(dmar_atsr_units); | ||
| 3480 | |||
| 3481 | int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr) | 3688 | int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr) |
| 3482 | { | 3689 | { |
| 3483 | struct acpi_dmar_atsr *atsr; | 3690 | struct acpi_dmar_atsr *atsr; |
| @@ -3490,26 +3697,21 @@ int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr) | |||
| 3490 | 3697 | ||
| 3491 | atsru->hdr = hdr; | 3698 | atsru->hdr = hdr; |
| 3492 | atsru->include_all = atsr->flags & 0x1; | 3699 | atsru->include_all = atsr->flags & 0x1; |
| 3700 | if (!atsru->include_all) { | ||
| 3701 | atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1), | ||
| 3702 | (void *)atsr + atsr->header.length, | ||
| 3703 | &atsru->devices_cnt); | ||
| 3704 | if (atsru->devices_cnt && atsru->devices == NULL) { | ||
| 3705 | kfree(atsru); | ||
| 3706 | return -ENOMEM; | ||
| 3707 | } | ||
| 3708 | } | ||
| 3493 | 3709 | ||
| 3494 | list_add(&atsru->list, &dmar_atsr_units); | 3710 | list_add_rcu(&atsru->list, &dmar_atsr_units); |
| 3495 | 3711 | ||
| 3496 | return 0; | 3712 | return 0; |
| 3497 | } | 3713 | } |
| 3498 | 3714 | ||
| 3499 | static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru) | ||
| 3500 | { | ||
| 3501 | struct acpi_dmar_atsr *atsr; | ||
| 3502 | |||
| 3503 | if (atsru->include_all) | ||
| 3504 | return 0; | ||
| 3505 | |||
| 3506 | atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header); | ||
| 3507 | return dmar_parse_dev_scope((void *)(atsr + 1), | ||
| 3508 | (void *)atsr + atsr->header.length, | ||
| 3509 | &atsru->devices_cnt, &atsru->devices, | ||
| 3510 | atsr->segment); | ||
| 3511 | } | ||
| 3512 | |||
| 3513 | static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru) | 3715 | static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru) |
| 3514 | { | 3716 | { |
| 3515 | dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt); | 3717 | dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt); |
| @@ -3535,62 +3737,97 @@ static void intel_iommu_free_dmars(void) | |||
| 3535 | 3737 | ||
| 3536 | int dmar_find_matched_atsr_unit(struct pci_dev *dev) | 3738 | int dmar_find_matched_atsr_unit(struct pci_dev *dev) |
| 3537 | { | 3739 | { |
| 3538 | int i; | 3740 | int i, ret = 1; |
| 3539 | struct pci_bus *bus; | 3741 | struct pci_bus *bus; |
| 3742 | struct pci_dev *bridge = NULL; | ||
| 3743 | struct device *tmp; | ||
| 3540 | struct acpi_dmar_atsr *atsr; | 3744 | struct acpi_dmar_atsr *atsr; |
| 3541 | struct dmar_atsr_unit *atsru; | 3745 | struct dmar_atsr_unit *atsru; |
| 3542 | 3746 | ||
| 3543 | dev = pci_physfn(dev); | 3747 | dev = pci_physfn(dev); |
| 3544 | |||
| 3545 | list_for_each_entry(atsru, &dmar_atsr_units, list) { | ||
| 3546 | atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header); | ||
| 3547 | if (atsr->segment == pci_domain_nr(dev->bus)) | ||
| 3548 | goto found; | ||
| 3549 | } | ||
| 3550 | |||
| 3551 | return 0; | ||
| 3552 | |||
| 3553 | found: | ||
| 3554 | for (bus = dev->bus; bus; bus = bus->parent) { | 3748 | for (bus = dev->bus; bus; bus = bus->parent) { |
| 3555 | struct pci_dev *bridge = bus->self; | 3749 | bridge = bus->self; |
| 3556 | |||
| 3557 | if (!bridge || !pci_is_pcie(bridge) || | 3750 | if (!bridge || !pci_is_pcie(bridge) || |
| 3558 | pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) | 3751 | pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) |
| 3559 | return 0; | 3752 | return 0; |
| 3560 | 3753 | if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) | |
| 3561 | if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) { | ||
| 3562 | for (i = 0; i < atsru->devices_cnt; i++) | ||
| 3563 | if (atsru->devices[i] == bridge) | ||
| 3564 | return 1; | ||
| 3565 | break; | 3754 | break; |
| 3566 | } | ||
| 3567 | } | 3755 | } |
| 3756 | if (!bridge) | ||
| 3757 | return 0; | ||
| 3568 | 3758 | ||
| 3569 | if (atsru->include_all) | 3759 | rcu_read_lock(); |
| 3570 | return 1; | 3760 | list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) { |
| 3761 | atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header); | ||
| 3762 | if (atsr->segment != pci_domain_nr(dev->bus)) | ||
| 3763 | continue; | ||
| 3571 | 3764 | ||
| 3572 | return 0; | 3765 | for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp) |
| 3766 | if (tmp == &bridge->dev) | ||
| 3767 | goto out; | ||
| 3768 | |||
| 3769 | if (atsru->include_all) | ||
| 3770 | goto out; | ||
| 3771 | } | ||
| 3772 | ret = 0; | ||
| 3773 | out: | ||
| 3774 | rcu_read_unlock(); | ||
| 3775 | |||
| 3776 | return ret; | ||
| 3573 | } | 3777 | } |
| 3574 | 3778 | ||
| 3575 | int __init dmar_parse_rmrr_atsr_dev(void) | 3779 | int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) |
| 3576 | { | 3780 | { |
| 3577 | struct dmar_rmrr_unit *rmrr; | ||
| 3578 | struct dmar_atsr_unit *atsr; | ||
| 3579 | int ret = 0; | 3781 | int ret = 0; |
| 3782 | struct dmar_rmrr_unit *rmrru; | ||
| 3783 | struct dmar_atsr_unit *atsru; | ||
| 3784 | struct acpi_dmar_atsr *atsr; | ||
| 3785 | struct acpi_dmar_reserved_memory *rmrr; | ||
| 3580 | 3786 | ||
| 3581 | list_for_each_entry(rmrr, &dmar_rmrr_units, list) { | 3787 | if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING) |
| 3582 | ret = rmrr_parse_dev(rmrr); | 3788 | return 0; |
| 3583 | if (ret) | 3789 | |
| 3584 | return ret; | 3790 | list_for_each_entry(rmrru, &dmar_rmrr_units, list) { |
| 3791 | rmrr = container_of(rmrru->hdr, | ||
| 3792 | struct acpi_dmar_reserved_memory, header); | ||
| 3793 | if (info->event == BUS_NOTIFY_ADD_DEVICE) { | ||
| 3794 | ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1), | ||
| 3795 | ((void *)rmrr) + rmrr->header.length, | ||
| 3796 | rmrr->segment, rmrru->devices, | ||
| 3797 | rmrru->devices_cnt); | ||
| 3798 | if (ret > 0) | ||
| 3799 | break; | ||
| 3800 | else if (ret < 0) | ||
| 3801 | return ret; | ||
| 3802 | } else if (info->event == BUS_NOTIFY_DEL_DEVICE) { | ||
| 3803 | if (dmar_remove_dev_scope(info, rmrr->segment, | ||
| 3804 | rmrru->devices, rmrru->devices_cnt)) | ||
| 3805 | break; | ||
| 3806 | } | ||
| 3585 | } | 3807 | } |
| 3586 | 3808 | ||
| 3587 | list_for_each_entry(atsr, &dmar_atsr_units, list) { | 3809 | list_for_each_entry(atsru, &dmar_atsr_units, list) { |
| 3588 | ret = atsr_parse_dev(atsr); | 3810 | if (atsru->include_all) |
| 3589 | if (ret) | 3811 | continue; |
| 3590 | return ret; | 3812 | |
| 3813 | atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header); | ||
| 3814 | if (info->event == BUS_NOTIFY_ADD_DEVICE) { | ||
| 3815 | ret = dmar_insert_dev_scope(info, (void *)(atsr + 1), | ||
| 3816 | (void *)atsr + atsr->header.length, | ||
| 3817 | atsr->segment, atsru->devices, | ||
| 3818 | atsru->devices_cnt); | ||
| 3819 | if (ret > 0) | ||
| 3820 | break; | ||
| 3821 | else if (ret < 0) | ||
| 3822 | return ret; | ||
| 3823 | } else if (info->event == BUS_NOTIFY_DEL_DEVICE) { | ||
| 3824 | if (dmar_remove_dev_scope(info, atsr->segment, | ||
| 3825 | atsru->devices, atsru->devices_cnt)) | ||
| 3826 | break; | ||
| 3827 | } | ||
| 3591 | } | 3828 | } |
| 3592 | 3829 | ||
| 3593 | return ret; | 3830 | return 0; |
| 3594 | } | 3831 | } |
| 3595 | 3832 | ||
| 3596 | /* | 3833 | /* |
| @@ -3603,24 +3840,26 @@ static int device_notifier(struct notifier_block *nb, | |||
| 3603 | unsigned long action, void *data) | 3840 | unsigned long action, void *data) |
| 3604 | { | 3841 | { |
| 3605 | struct device *dev = data; | 3842 | struct device *dev = data; |
| 3606 | struct pci_dev *pdev = to_pci_dev(dev); | ||
| 3607 | struct dmar_domain *domain; | 3843 | struct dmar_domain *domain; |
| 3608 | 3844 | ||
| 3609 | if (iommu_no_mapping(dev)) | 3845 | if (iommu_dummy(dev)) |
| 3610 | return 0; | 3846 | return 0; |
| 3611 | 3847 | ||
| 3612 | domain = find_domain(pdev); | 3848 | if (action != BUS_NOTIFY_UNBOUND_DRIVER && |
| 3613 | if (!domain) | 3849 | action != BUS_NOTIFY_DEL_DEVICE) |
| 3614 | return 0; | 3850 | return 0; |
| 3615 | 3851 | ||
| 3616 | if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) { | 3852 | domain = find_domain(dev); |
| 3617 | domain_remove_one_dev_info(domain, pdev); | 3853 | if (!domain) |
| 3854 | return 0; | ||
| 3618 | 3855 | ||
| 3619 | if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) && | 3856 | down_read(&dmar_global_lock); |
| 3620 | !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) && | 3857 | domain_remove_one_dev_info(domain, dev); |
| 3621 | list_empty(&domain->devices)) | 3858 | if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) && |
| 3622 | domain_exit(domain); | 3859 | !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) && |
| 3623 | } | 3860 | list_empty(&domain->devices)) |
| 3861 | domain_exit(domain); | ||
| 3862 | up_read(&dmar_global_lock); | ||
| 3624 | 3863 | ||
| 3625 | return 0; | 3864 | return 0; |
| 3626 | } | 3865 | } |
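The two notifier paths above (dmar_iommu_notify_scope_dev() for RMRR/ATSR scope updates, device_notifier() for domain teardown) both hang off the generic bus notifier machinery. A minimal sketch of that pattern, using hypothetical my_dmar_* names rather than the driver's real handlers:

#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pci.h>

/* Hypothetical handler: dispatch on the same bus events used above. */
static int my_dmar_device_notifier(struct notifier_block *nb,
                                   unsigned long action, void *data)
{
        struct device *dev = data;

        switch (action) {
        case BUS_NOTIFY_ADD_DEVICE:
                /* e.g. add the device to the matching RMRR/ATSR scopes */
                dev_info(dev, "device added\n");
                break;
        case BUS_NOTIFY_DEL_DEVICE:
        case BUS_NOTIFY_UNBOUND_DRIVER:
                /* e.g. detach the device and tear down an empty domain */
                dev_info(dev, "device going away\n");
                break;
        }

        return NOTIFY_DONE;     /* == 0, as the handlers above return */
}

static struct notifier_block my_dmar_device_nb = {
        .notifier_call = my_dmar_device_notifier,
};

/* registered once at init time:
 * bus_register_notifier(&pci_bus_type, &my_dmar_device_nb);
 */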
| @@ -3629,6 +3868,75 @@ static struct notifier_block device_nb = { | |||
| 3629 | .notifier_call = device_notifier, | 3868 | .notifier_call = device_notifier, |
| 3630 | }; | 3869 | }; |
| 3631 | 3870 | ||
| 3871 | static int intel_iommu_memory_notifier(struct notifier_block *nb, | ||
| 3872 | unsigned long val, void *v) | ||
| 3873 | { | ||
| 3874 | struct memory_notify *mhp = v; | ||
| 3875 | unsigned long long start, end; | ||
| 3876 | unsigned long start_vpfn, last_vpfn; | ||
| 3877 | |||
| 3878 | switch (val) { | ||
| 3879 | case MEM_GOING_ONLINE: | ||
| 3880 | start = mhp->start_pfn << PAGE_SHIFT; | ||
| 3881 | end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1; | ||
| 3882 | if (iommu_domain_identity_map(si_domain, start, end)) { | ||
| 3883 | pr_warn("dmar: failed to build identity map for [%llx-%llx]\n", | ||
| 3884 | start, end); | ||
| 3885 | return NOTIFY_BAD; | ||
| 3886 | } | ||
| 3887 | break; | ||
| 3888 | |||
| 3889 | case MEM_OFFLINE: | ||
| 3890 | case MEM_CANCEL_ONLINE: | ||
| 3891 | start_vpfn = mm_to_dma_pfn(mhp->start_pfn); | ||
| 3892 | last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1); | ||
| 3893 | while (start_vpfn <= last_vpfn) { | ||
| 3894 | struct iova *iova; | ||
| 3895 | struct dmar_drhd_unit *drhd; | ||
| 3896 | struct intel_iommu *iommu; | ||
| 3897 | struct page *freelist; | ||
| 3898 | |||
| 3899 | iova = find_iova(&si_domain->iovad, start_vpfn); | ||
| 3900 | if (iova == NULL) { | ||
| 3901 | pr_debug("dmar: failed get IOVA for PFN %lx\n", | ||
| 3902 | start_vpfn); | ||
| 3903 | break; | ||
| 3904 | } | ||
| 3905 | |||
| 3906 | iova = split_and_remove_iova(&si_domain->iovad, iova, | ||
| 3907 | start_vpfn, last_vpfn); | ||
| 3908 | if (iova == NULL) { | ||
| 3909 | pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n", | ||
| 3910 | start_vpfn, last_vpfn); | ||
| 3911 | return NOTIFY_BAD; | ||
| 3912 | } | ||
| 3913 | |||
| 3914 | freelist = domain_unmap(si_domain, iova->pfn_lo, | ||
| 3915 | iova->pfn_hi); | ||
| 3916 | |||
| 3917 | rcu_read_lock(); | ||
| 3918 | for_each_active_iommu(iommu, drhd) | ||
| 3919 | iommu_flush_iotlb_psi(iommu, si_domain->id, | ||
| 3920 | iova->pfn_lo, | ||
| 3921 | iova->pfn_hi - iova->pfn_lo + 1, | ||
| 3922 | !freelist, 0); | ||
| 3923 | rcu_read_unlock(); | ||
| 3924 | dma_free_pagelist(freelist); | ||
| 3925 | |||
| 3926 | start_vpfn = iova->pfn_hi + 1; | ||
| 3927 | free_iova_mem(iova); | ||
| 3928 | } | ||
| 3929 | break; | ||
| 3930 | } | ||
| 3931 | |||
| 3932 | return NOTIFY_OK; | ||
| 3933 | } | ||
| 3934 | |||
| 3935 | static struct notifier_block intel_iommu_memory_nb = { | ||
| 3936 | .notifier_call = intel_iommu_memory_notifier, | ||
| 3937 | .priority = 0 | ||
| 3938 | }; | ||
| 3939 | |||
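The memory notifier follows the memory-hotplug callback contract: the callback gets a struct memory_notify describing the PFN range being added or removed, and it can veto MEM_GOING_ONLINE by returning NOTIFY_BAD. A stripped-down sketch of the same shape, with hypothetical names and the identity-map and IOVA teardown work stubbed out:

#include <linux/kernel.h>
#include <linux/memory.h>
#include <linux/notifier.h>

/* Hypothetical skeleton mirroring intel_iommu_memory_notifier above. */
static int my_memory_notifier(struct notifier_block *nb,
                              unsigned long val, void *v)
{
        struct memory_notify *mhp = v;
        unsigned long long start = (unsigned long long)mhp->start_pfn << PAGE_SHIFT;
        unsigned long long end = ((unsigned long long)(mhp->start_pfn +
                                  mhp->nr_pages) << PAGE_SHIFT) - 1;

        switch (val) {
        case MEM_GOING_ONLINE:
                /* map the new RAM 1:1 before it is used for DMA;
                 * returning NOTIFY_BAD here vetoes the hot-add */
                pr_info("would identity-map [%llx-%llx]\n", start, end);
                break;
        case MEM_OFFLINE:
        case MEM_CANCEL_ONLINE:
                /* drop the mappings and IOVAs covering the range */
                pr_info("would unmap [%llx-%llx]\n", start, end);
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block my_memory_nb = {
        .notifier_call = my_memory_notifier,
        .priority = 0,
};

/* register_memory_notifier(&my_memory_nb); during driver init */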
| 3632 | int __init intel_iommu_init(void) | 3940 | int __init intel_iommu_init(void) |
| 3633 | { | 3941 | { |
| 3634 | int ret = -ENODEV; | 3942 | int ret = -ENODEV; |
| @@ -3638,6 +3946,13 @@ int __init intel_iommu_init(void) | |||
| 3638 | /* VT-d is required for a TXT/tboot launch, so enforce that */ | 3946 | /* VT-d is required for a TXT/tboot launch, so enforce that */ |
| 3639 | force_on = tboot_force_iommu(); | 3947 | force_on = tboot_force_iommu(); |
| 3640 | 3948 | ||
| 3949 | if (iommu_init_mempool()) { | ||
| 3950 | if (force_on) | ||
| 3951 | panic("tboot: Failed to initialize iommu memory\n"); | ||
| 3952 | return -ENOMEM; | ||
| 3953 | } | ||
| 3954 | |||
| 3955 | down_write(&dmar_global_lock); | ||
| 3641 | if (dmar_table_init()) { | 3956 | if (dmar_table_init()) { |
| 3642 | if (force_on) | 3957 | if (force_on) |
| 3643 | panic("tboot: Failed to initialize DMAR table\n"); | 3958 | panic("tboot: Failed to initialize DMAR table\n"); |
| @@ -3660,12 +3975,6 @@ int __init intel_iommu_init(void) | |||
| 3660 | if (no_iommu || dmar_disabled) | 3975 | if (no_iommu || dmar_disabled) |
| 3661 | goto out_free_dmar; | 3976 | goto out_free_dmar; |
| 3662 | 3977 | ||
| 3663 | if (iommu_init_mempool()) { | ||
| 3664 | if (force_on) | ||
| 3665 | panic("tboot: Failed to initialize iommu memory\n"); | ||
| 3666 | goto out_free_dmar; | ||
| 3667 | } | ||
| 3668 | |||
| 3669 | if (list_empty(&dmar_rmrr_units)) | 3978 | if (list_empty(&dmar_rmrr_units)) |
| 3670 | printk(KERN_INFO "DMAR: No RMRR found\n"); | 3979 | printk(KERN_INFO "DMAR: No RMRR found\n"); |
| 3671 | 3980 | ||
| @@ -3675,7 +3984,7 @@ int __init intel_iommu_init(void) | |||
| 3675 | if (dmar_init_reserved_ranges()) { | 3984 | if (dmar_init_reserved_ranges()) { |
| 3676 | if (force_on) | 3985 | if (force_on) |
| 3677 | panic("tboot: Failed to reserve iommu ranges\n"); | 3986 | panic("tboot: Failed to reserve iommu ranges\n"); |
| 3678 | goto out_free_mempool; | 3987 | goto out_free_reserved_range; |
| 3679 | } | 3988 | } |
| 3680 | 3989 | ||
| 3681 | init_no_remapping_devices(); | 3990 | init_no_remapping_devices(); |
| @@ -3687,6 +3996,7 @@ int __init intel_iommu_init(void) | |||
| 3687 | printk(KERN_ERR "IOMMU: dmar init failed\n"); | 3996 | printk(KERN_ERR "IOMMU: dmar init failed\n"); |
| 3688 | goto out_free_reserved_range; | 3997 | goto out_free_reserved_range; |
| 3689 | } | 3998 | } |
| 3999 | up_write(&dmar_global_lock); | ||
| 3690 | printk(KERN_INFO | 4000 | printk(KERN_INFO |
| 3691 | "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n"); | 4001 | "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n"); |
| 3692 | 4002 | ||
| @@ -3699,8 +4009,9 @@ int __init intel_iommu_init(void) | |||
| 3699 | init_iommu_pm_ops(); | 4009 | init_iommu_pm_ops(); |
| 3700 | 4010 | ||
| 3701 | bus_set_iommu(&pci_bus_type, &intel_iommu_ops); | 4011 | bus_set_iommu(&pci_bus_type, &intel_iommu_ops); |
| 3702 | |||
| 3703 | bus_register_notifier(&pci_bus_type, &device_nb); | 4012 | bus_register_notifier(&pci_bus_type, &device_nb); |
| 4013 | if (si_domain && !hw_pass_through) | ||
| 4014 | register_memory_notifier(&intel_iommu_memory_nb); | ||
| 3704 | 4015 | ||
| 3705 | intel_iommu_enabled = 1; | 4016 | intel_iommu_enabled = 1; |
| 3706 | 4017 | ||
| @@ -3708,21 +4019,23 @@ int __init intel_iommu_init(void) | |||
| 3708 | 4019 | ||
| 3709 | out_free_reserved_range: | 4020 | out_free_reserved_range: |
| 3710 | put_iova_domain(&reserved_iova_list); | 4021 | put_iova_domain(&reserved_iova_list); |
| 3711 | out_free_mempool: | ||
| 3712 | iommu_exit_mempool(); | ||
| 3713 | out_free_dmar: | 4022 | out_free_dmar: |
| 3714 | intel_iommu_free_dmars(); | 4023 | intel_iommu_free_dmars(); |
| 4024 | up_write(&dmar_global_lock); | ||
| 4025 | iommu_exit_mempool(); | ||
| 3715 | return ret; | 4026 | return ret; |
| 3716 | } | 4027 | } |
| 3717 | 4028 | ||
| 3718 | static void iommu_detach_dependent_devices(struct intel_iommu *iommu, | 4029 | static void iommu_detach_dependent_devices(struct intel_iommu *iommu, |
| 3719 | struct pci_dev *pdev) | 4030 | struct device *dev) |
| 3720 | { | 4031 | { |
| 3721 | struct pci_dev *tmp, *parent; | 4032 | struct pci_dev *tmp, *parent, *pdev; |
| 3722 | 4033 | ||
| 3723 | if (!iommu || !pdev) | 4034 | if (!iommu || !dev || !dev_is_pci(dev)) |
| 3724 | return; | 4035 | return; |
| 3725 | 4036 | ||
| 4037 | pdev = to_pci_dev(dev); | ||
| 4038 | |||
| 3726 | /* dependent device detach */ | 4039 | /* dependent device detach */ |
| 3727 | tmp = pci_find_upstream_pcie_bridge(pdev); | 4040 | tmp = pci_find_upstream_pcie_bridge(pdev); |
| 3728 | /* Secondary interface's bus number and devfn 0 */ | 4041 | /* Secondary interface's bus number and devfn 0 */ |
| @@ -3743,29 +4056,28 @@ static void iommu_detach_dependent_devices(struct intel_iommu *iommu, | |||
| 3743 | } | 4056 | } |
| 3744 | 4057 | ||
| 3745 | static void domain_remove_one_dev_info(struct dmar_domain *domain, | 4058 | static void domain_remove_one_dev_info(struct dmar_domain *domain, |
| 3746 | struct pci_dev *pdev) | 4059 | struct device *dev) |
| 3747 | { | 4060 | { |
| 3748 | struct device_domain_info *info, *tmp; | 4061 | struct device_domain_info *info, *tmp; |
| 3749 | struct intel_iommu *iommu; | 4062 | struct intel_iommu *iommu; |
| 3750 | unsigned long flags; | 4063 | unsigned long flags; |
| 3751 | int found = 0; | 4064 | int found = 0; |
| 4065 | u8 bus, devfn; | ||
| 3752 | 4066 | ||
| 3753 | iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number, | 4067 | iommu = device_to_iommu(dev, &bus, &devfn); |
| 3754 | pdev->devfn); | ||
| 3755 | if (!iommu) | 4068 | if (!iommu) |
| 3756 | return; | 4069 | return; |
| 3757 | 4070 | ||
| 3758 | spin_lock_irqsave(&device_domain_lock, flags); | 4071 | spin_lock_irqsave(&device_domain_lock, flags); |
| 3759 | list_for_each_entry_safe(info, tmp, &domain->devices, link) { | 4072 | list_for_each_entry_safe(info, tmp, &domain->devices, link) { |
| 3760 | if (info->segment == pci_domain_nr(pdev->bus) && | 4073 | if (info->iommu == iommu && info->bus == bus && |
| 3761 | info->bus == pdev->bus->number && | 4074 | info->devfn == devfn) { |
| 3762 | info->devfn == pdev->devfn) { | ||
| 3763 | unlink_domain_info(info); | 4075 | unlink_domain_info(info); |
| 3764 | spin_unlock_irqrestore(&device_domain_lock, flags); | 4076 | spin_unlock_irqrestore(&device_domain_lock, flags); |
| 3765 | 4077 | ||
| 3766 | iommu_disable_dev_iotlb(info); | 4078 | iommu_disable_dev_iotlb(info); |
| 3767 | iommu_detach_dev(iommu, info->bus, info->devfn); | 4079 | iommu_detach_dev(iommu, info->bus, info->devfn); |
| 3768 | iommu_detach_dependent_devices(iommu, pdev); | 4080 | iommu_detach_dependent_devices(iommu, dev); |
| 3769 | free_devinfo_mem(info); | 4081 | free_devinfo_mem(info); |
| 3770 | 4082 | ||
| 3771 | spin_lock_irqsave(&device_domain_lock, flags); | 4083 | spin_lock_irqsave(&device_domain_lock, flags); |
| @@ -3780,8 +4092,7 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain, | |||
| 3780 | * owned by this domain, clear this iommu in iommu_bmp | 4092 | * owned by this domain, clear this iommu in iommu_bmp |
| 3781 | * update iommu count and coherency | 4093 | * update iommu count and coherency |
| 3782 | */ | 4094 | */ |
| 3783 | if (iommu == device_to_iommu(info->segment, info->bus, | 4095 | if (info->iommu == iommu) |
| 3784 | info->devfn)) | ||
| 3785 | found = 1; | 4096 | found = 1; |
| 3786 | } | 4097 | } |
| 3787 | 4098 | ||
| @@ -3805,67 +4116,11 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain, | |||
| 3805 | } | 4116 | } |
| 3806 | } | 4117 | } |
| 3807 | 4118 | ||
| 3808 | static void vm_domain_remove_all_dev_info(struct dmar_domain *domain) | ||
| 3809 | { | ||
| 3810 | struct device_domain_info *info; | ||
| 3811 | struct intel_iommu *iommu; | ||
| 3812 | unsigned long flags1, flags2; | ||
| 3813 | |||
| 3814 | spin_lock_irqsave(&device_domain_lock, flags1); | ||
| 3815 | while (!list_empty(&domain->devices)) { | ||
| 3816 | info = list_entry(domain->devices.next, | ||
| 3817 | struct device_domain_info, link); | ||
| 3818 | unlink_domain_info(info); | ||
| 3819 | spin_unlock_irqrestore(&device_domain_lock, flags1); | ||
| 3820 | |||
| 3821 | iommu_disable_dev_iotlb(info); | ||
| 3822 | iommu = device_to_iommu(info->segment, info->bus, info->devfn); | ||
| 3823 | iommu_detach_dev(iommu, info->bus, info->devfn); | ||
| 3824 | iommu_detach_dependent_devices(iommu, info->dev); | ||
| 3825 | |||
| 3826 | /* clear this iommu in iommu_bmp, update iommu count | ||
| 3827 | * and capabilities | ||
| 3828 | */ | ||
| 3829 | spin_lock_irqsave(&domain->iommu_lock, flags2); | ||
| 3830 | if (test_and_clear_bit(iommu->seq_id, | ||
| 3831 | domain->iommu_bmp)) { | ||
| 3832 | domain->iommu_count--; | ||
| 3833 | domain_update_iommu_cap(domain); | ||
| 3834 | } | ||
| 3835 | spin_unlock_irqrestore(&domain->iommu_lock, flags2); | ||
| 3836 | |||
| 3837 | free_devinfo_mem(info); | ||
| 3838 | spin_lock_irqsave(&device_domain_lock, flags1); | ||
| 3839 | } | ||
| 3840 | spin_unlock_irqrestore(&device_domain_lock, flags1); | ||
| 3841 | } | ||
| 3842 | |||
| 3843 | /* domain id for virtual machine, it won't be set in context */ | ||
| 3844 | static atomic_t vm_domid = ATOMIC_INIT(0); | ||
| 3845 | |||
| 3846 | static struct dmar_domain *iommu_alloc_vm_domain(void) | ||
| 3847 | { | ||
| 3848 | struct dmar_domain *domain; | ||
| 3849 | |||
| 3850 | domain = alloc_domain_mem(); | ||
| 3851 | if (!domain) | ||
| 3852 | return NULL; | ||
| 3853 | |||
| 3854 | domain->id = atomic_inc_return(&vm_domid); | ||
| 3855 | domain->nid = -1; | ||
| 3856 | memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp)); | ||
| 3857 | domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE; | ||
| 3858 | |||
| 3859 | return domain; | ||
| 3860 | } | ||
| 3861 | |||
| 3862 | static int md_domain_init(struct dmar_domain *domain, int guest_width) | 4119 | static int md_domain_init(struct dmar_domain *domain, int guest_width) |
| 3863 | { | 4120 | { |
| 3864 | int adjust_width; | 4121 | int adjust_width; |
| 3865 | 4122 | ||
| 3866 | init_iova_domain(&domain->iovad, DMA_32BIT_PFN); | 4123 | init_iova_domain(&domain->iovad, DMA_32BIT_PFN); |
| 3867 | spin_lock_init(&domain->iommu_lock); | ||
| 3868 | |||
| 3869 | domain_reserve_special_ranges(domain); | 4124 | domain_reserve_special_ranges(domain); |
| 3870 | 4125 | ||
| 3871 | /* calculate AGAW */ | 4126 | /* calculate AGAW */ |
| @@ -3873,9 +4128,6 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width) | |||
| 3873 | adjust_width = guestwidth_to_adjustwidth(guest_width); | 4128 | adjust_width = guestwidth_to_adjustwidth(guest_width); |
| 3874 | domain->agaw = width_to_agaw(adjust_width); | 4129 | domain->agaw = width_to_agaw(adjust_width); |
| 3875 | 4130 | ||
| 3876 | INIT_LIST_HEAD(&domain->devices); | ||
| 3877 | |||
| 3878 | domain->iommu_count = 0; | ||
| 3879 | domain->iommu_coherency = 0; | 4131 | domain->iommu_coherency = 0; |
| 3880 | domain->iommu_snooping = 0; | 4132 | domain->iommu_snooping = 0; |
| 3881 | domain->iommu_superpage = 0; | 4133 | domain->iommu_superpage = 0; |
| @@ -3890,53 +4142,11 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width) | |||
| 3890 | return 0; | 4142 | return 0; |
| 3891 | } | 4143 | } |
| 3892 | 4144 | ||
| 3893 | static void iommu_free_vm_domain(struct dmar_domain *domain) | ||
| 3894 | { | ||
| 3895 | unsigned long flags; | ||
| 3896 | struct dmar_drhd_unit *drhd; | ||
| 3897 | struct intel_iommu *iommu; | ||
| 3898 | unsigned long i; | ||
| 3899 | unsigned long ndomains; | ||
| 3900 | |||
| 3901 | for_each_active_iommu(iommu, drhd) { | ||
| 3902 | ndomains = cap_ndoms(iommu->cap); | ||
| 3903 | for_each_set_bit(i, iommu->domain_ids, ndomains) { | ||
| 3904 | if (iommu->domains[i] == domain) { | ||
| 3905 | spin_lock_irqsave(&iommu->lock, flags); | ||
| 3906 | clear_bit(i, iommu->domain_ids); | ||
| 3907 | iommu->domains[i] = NULL; | ||
| 3908 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
| 3909 | break; | ||
| 3910 | } | ||
| 3911 | } | ||
| 3912 | } | ||
| 3913 | } | ||
| 3914 | |||
| 3915 | static void vm_domain_exit(struct dmar_domain *domain) | ||
| 3916 | { | ||
| 3917 | /* Domain 0 is reserved, so dont process it */ | ||
| 3918 | if (!domain) | ||
| 3919 | return; | ||
| 3920 | |||
| 3921 | vm_domain_remove_all_dev_info(domain); | ||
| 3922 | /* destroy iovas */ | ||
| 3923 | put_iova_domain(&domain->iovad); | ||
| 3924 | |||
| 3925 | /* clear ptes */ | ||
| 3926 | dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); | ||
| 3927 | |||
| 3928 | /* free page tables */ | ||
| 3929 | dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); | ||
| 3930 | |||
| 3931 | iommu_free_vm_domain(domain); | ||
| 3932 | free_domain_mem(domain); | ||
| 3933 | } | ||
| 3934 | |||
| 3935 | static int intel_iommu_domain_init(struct iommu_domain *domain) | 4145 | static int intel_iommu_domain_init(struct iommu_domain *domain) |
| 3936 | { | 4146 | { |
| 3937 | struct dmar_domain *dmar_domain; | 4147 | struct dmar_domain *dmar_domain; |
| 3938 | 4148 | ||
| 3939 | dmar_domain = iommu_alloc_vm_domain(); | 4149 | dmar_domain = alloc_domain(true); |
| 3940 | if (!dmar_domain) { | 4150 | if (!dmar_domain) { |
| 3941 | printk(KERN_ERR | 4151 | printk(KERN_ERR |
| 3942 | "intel_iommu_domain_init: dmar_domain == NULL\n"); | 4152 | "intel_iommu_domain_init: dmar_domain == NULL\n"); |
| @@ -3945,7 +4155,7 @@ static int intel_iommu_domain_init(struct iommu_domain *domain) | |||
| 3945 | if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { | 4155 | if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { |
| 3946 | printk(KERN_ERR | 4156 | printk(KERN_ERR |
| 3947 | "intel_iommu_domain_init() failed\n"); | 4157 | "intel_iommu_domain_init() failed\n"); |
| 3948 | vm_domain_exit(dmar_domain); | 4158 | domain_exit(dmar_domain); |
| 3949 | return -ENOMEM; | 4159 | return -ENOMEM; |
| 3950 | } | 4160 | } |
| 3951 | domain_update_iommu_cap(dmar_domain); | 4161 | domain_update_iommu_cap(dmar_domain); |
| @@ -3963,33 +4173,32 @@ static void intel_iommu_domain_destroy(struct iommu_domain *domain) | |||
| 3963 | struct dmar_domain *dmar_domain = domain->priv; | 4173 | struct dmar_domain *dmar_domain = domain->priv; |
| 3964 | 4174 | ||
| 3965 | domain->priv = NULL; | 4175 | domain->priv = NULL; |
| 3966 | vm_domain_exit(dmar_domain); | 4176 | domain_exit(dmar_domain); |
| 3967 | } | 4177 | } |
| 3968 | 4178 | ||
| 3969 | static int intel_iommu_attach_device(struct iommu_domain *domain, | 4179 | static int intel_iommu_attach_device(struct iommu_domain *domain, |
| 3970 | struct device *dev) | 4180 | struct device *dev) |
| 3971 | { | 4181 | { |
| 3972 | struct dmar_domain *dmar_domain = domain->priv; | 4182 | struct dmar_domain *dmar_domain = domain->priv; |
| 3973 | struct pci_dev *pdev = to_pci_dev(dev); | ||
| 3974 | struct intel_iommu *iommu; | 4183 | struct intel_iommu *iommu; |
| 3975 | int addr_width; | 4184 | int addr_width; |
| 4185 | u8 bus, devfn; | ||
| 3976 | 4186 | ||
| 3977 | /* normally pdev is not mapped */ | 4187 | /* normally dev is not mapped */ |
| 3978 | if (unlikely(domain_context_mapped(pdev))) { | 4188 | if (unlikely(domain_context_mapped(dev))) { |
| 3979 | struct dmar_domain *old_domain; | 4189 | struct dmar_domain *old_domain; |
| 3980 | 4190 | ||
| 3981 | old_domain = find_domain(pdev); | 4191 | old_domain = find_domain(dev); |
| 3982 | if (old_domain) { | 4192 | if (old_domain) { |
| 3983 | if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE || | 4193 | if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE || |
| 3984 | dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) | 4194 | dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) |
| 3985 | domain_remove_one_dev_info(old_domain, pdev); | 4195 | domain_remove_one_dev_info(old_domain, dev); |
| 3986 | else | 4196 | else |
| 3987 | domain_remove_dev_info(old_domain); | 4197 | domain_remove_dev_info(old_domain); |
| 3988 | } | 4198 | } |
| 3989 | } | 4199 | } |
| 3990 | 4200 | ||
| 3991 | iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number, | 4201 | iommu = device_to_iommu(dev, &bus, &devfn); |
| 3992 | pdev->devfn); | ||
| 3993 | if (!iommu) | 4202 | if (!iommu) |
| 3994 | return -ENODEV; | 4203 | return -ENODEV; |
| 3995 | 4204 | ||
| @@ -4021,16 +4230,15 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, | |||
| 4021 | dmar_domain->agaw--; | 4230 | dmar_domain->agaw--; |
| 4022 | } | 4231 | } |
| 4023 | 4232 | ||
| 4024 | return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL); | 4233 | return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL); |
| 4025 | } | 4234 | } |
| 4026 | 4235 | ||
| 4027 | static void intel_iommu_detach_device(struct iommu_domain *domain, | 4236 | static void intel_iommu_detach_device(struct iommu_domain *domain, |
| 4028 | struct device *dev) | 4237 | struct device *dev) |
| 4029 | { | 4238 | { |
| 4030 | struct dmar_domain *dmar_domain = domain->priv; | 4239 | struct dmar_domain *dmar_domain = domain->priv; |
| 4031 | struct pci_dev *pdev = to_pci_dev(dev); | ||
| 4032 | 4240 | ||
| 4033 | domain_remove_one_dev_info(dmar_domain, pdev); | 4241 | domain_remove_one_dev_info(dmar_domain, dev); |
| 4034 | } | 4242 | } |
| 4035 | 4243 | ||
| 4036 | static int intel_iommu_map(struct iommu_domain *domain, | 4244 | static int intel_iommu_map(struct iommu_domain *domain, |
| @@ -4072,18 +4280,51 @@ static int intel_iommu_map(struct iommu_domain *domain, | |||
| 4072 | } | 4280 | } |
| 4073 | 4281 | ||
| 4074 | static size_t intel_iommu_unmap(struct iommu_domain *domain, | 4282 | static size_t intel_iommu_unmap(struct iommu_domain *domain, |
| 4075 | unsigned long iova, size_t size) | 4283 | unsigned long iova, size_t size) |
| 4076 | { | 4284 | { |
| 4077 | struct dmar_domain *dmar_domain = domain->priv; | 4285 | struct dmar_domain *dmar_domain = domain->priv; |
| 4078 | int order; | 4286 | struct page *freelist = NULL; |
| 4287 | struct intel_iommu *iommu; | ||
| 4288 | unsigned long start_pfn, last_pfn; | ||
| 4289 | unsigned int npages; | ||
| 4290 | int iommu_id, num, ndomains, level = 0; | ||
| 4291 | |||
| 4292 | /* Cope with horrid API which requires us to unmap more than the | ||
| 4293 | size argument if it happens to be a large-page mapping. */ | ||
| 4294 | if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level)) | ||
| 4295 | BUG(); | ||
| 4296 | |||
| 4297 | if (size < VTD_PAGE_SIZE << level_to_offset_bits(level)) | ||
| 4298 | size = VTD_PAGE_SIZE << level_to_offset_bits(level); | ||
| 4299 | |||
| 4300 | start_pfn = iova >> VTD_PAGE_SHIFT; | ||
| 4301 | last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT; | ||
| 4302 | |||
| 4303 | freelist = domain_unmap(dmar_domain, start_pfn, last_pfn); | ||
| 4304 | |||
| 4305 | npages = last_pfn - start_pfn + 1; | ||
| 4306 | |||
| 4307 | for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) { | ||
| 4308 | iommu = g_iommus[iommu_id]; | ||
| 4309 | |||
| 4310 | /* | ||
| 4311 | * find bit position of dmar_domain | ||
| 4312 | */ | ||
| 4313 | ndomains = cap_ndoms(iommu->cap); | ||
| 4314 | for_each_set_bit(num, iommu->domain_ids, ndomains) { | ||
| 4315 | if (iommu->domains[num] == dmar_domain) | ||
| 4316 | iommu_flush_iotlb_psi(iommu, num, start_pfn, | ||
| 4317 | npages, !freelist, 0); | ||
| 4318 | } | ||
| 4319 | |||
| 4320 | } | ||
| 4079 | 4321 | ||
| 4080 | order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT, | 4322 | dma_free_pagelist(freelist); |
| 4081 | (iova + size - 1) >> VTD_PAGE_SHIFT); | ||
| 4082 | 4323 | ||
| 4083 | if (dmar_domain->max_addr == iova + size) | 4324 | if (dmar_domain->max_addr == iova + size) |
| 4084 | dmar_domain->max_addr = iova; | 4325 | dmar_domain->max_addr = iova; |
| 4085 | 4326 | ||
| 4086 | return PAGE_SIZE << order; | 4327 | return size; |
| 4087 | } | 4328 | } |
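The size fix-up at the top of intel_iommu_unmap() is the only subtle arithmetic here: if the IOVA is covered by a large-page PTE, the whole superpage must be unmapped even when the caller asked for less. A standalone sketch of that rounding, assuming VT-d's 4KiB base page and 9 address bits per page-table level (so level 2 means a 2MiB page and level 3 a 1GiB page):

#include <stdio.h>

#define VTD_PAGE_SHIFT  12
#define VTD_PAGE_SIZE   (1UL << VTD_PAGE_SHIFT)

/* assumption: 9 bits of page offset per level above the base level */
static unsigned long level_offset_bits(int level)
{
        return (unsigned long)(level - 1) * 9;
}

/* widen a requested unmap to cover the superpage found at 'level' */
static unsigned long adjusted_unmap_size(unsigned long size, int level)
{
        unsigned long min_size = VTD_PAGE_SIZE << level_offset_bits(level);

        return size < min_size ? min_size : size;
}

int main(void)
{
        /* a 4KiB request inside a level-2 (2MiB) mapping gets widened */
        printf("%lu\n", adjusted_unmap_size(4096, 1));  /* 4096 */
        printf("%lu\n", adjusted_unmap_size(4096, 2));  /* 2097152 */
        return 0;
}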
| 4088 | 4329 | ||
| 4089 | static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, | 4330 | static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, |
| @@ -4091,9 +4332,10 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, | |||
| 4091 | { | 4332 | { |
| 4092 | struct dmar_domain *dmar_domain = domain->priv; | 4333 | struct dmar_domain *dmar_domain = domain->priv; |
| 4093 | struct dma_pte *pte; | 4334 | struct dma_pte *pte; |
| 4335 | int level = 0; | ||
| 4094 | u64 phys = 0; | 4336 | u64 phys = 0; |
| 4095 | 4337 | ||
| 4096 | pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0); | 4338 | pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level); |
| 4097 | if (pte) | 4339 | if (pte) |
| 4098 | phys = dma_pte_addr(pte); | 4340 | phys = dma_pte_addr(pte); |
| 4099 | 4341 | ||
| @@ -4121,9 +4363,9 @@ static int intel_iommu_add_device(struct device *dev) | |||
| 4121 | struct pci_dev *bridge, *dma_pdev = NULL; | 4363 | struct pci_dev *bridge, *dma_pdev = NULL; |
| 4122 | struct iommu_group *group; | 4364 | struct iommu_group *group; |
| 4123 | int ret; | 4365 | int ret; |
| 4366 | u8 bus, devfn; | ||
| 4124 | 4367 | ||
| 4125 | if (!device_to_iommu(pci_domain_nr(pdev->bus), | 4368 | if (!device_to_iommu(dev, &bus, &devfn)) |
| 4126 | pdev->bus->number, pdev->devfn)) | ||
| 4127 | return -ENODEV; | 4369 | return -ENODEV; |
| 4128 | 4370 | ||
| 4129 | bridge = pci_find_upstream_pcie_bridge(pdev); | 4371 | bridge = pci_find_upstream_pcie_bridge(pdev); |
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index ef5f65dbafe9..9b174893f0f5 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c | |||
| @@ -38,6 +38,17 @@ static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; | |||
| 38 | static struct hpet_scope ir_hpet[MAX_HPET_TBS]; | 38 | static struct hpet_scope ir_hpet[MAX_HPET_TBS]; |
| 39 | static int ir_ioapic_num, ir_hpet_num; | 39 | static int ir_ioapic_num, ir_hpet_num; |
| 40 | 40 | ||
| 41 | /* | ||
| 42 | * Lock ordering: | ||
| 43 | * ->dmar_global_lock | ||
| 44 | * ->irq_2_ir_lock | ||
| 45 | * ->qi->q_lock | ||
| 46 | * ->iommu->register_lock | ||
| 47 | * Note: | ||
| 48 | * intel_irq_remap_ops.{supported,prepare,enable,disable,reenable} are called | ||
| 49 | * in a single-threaded environment with interrupts disabled, so no need to take | ||
| 50 | * the dmar_global_lock. | ||
| 51 | */ | ||
| 41 | static DEFINE_RAW_SPINLOCK(irq_2_ir_lock); | 52 | static DEFINE_RAW_SPINLOCK(irq_2_ir_lock); |
| 42 | 53 | ||
| 43 | static int __init parse_ioapics_under_ir(void); | 54 | static int __init parse_ioapics_under_ir(void); |
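The ordering comment above fixes the nesting discipline for the new global lock: dmar_global_lock, a sleeping rwsem, is always taken outside irq_2_ir_lock, a raw spinlock, which in turn nests outside the queued-invalidation and register locks. A toy sketch of that nesting with placeholder locks, not the driver's real code paths:

#include <linux/rwsem.h>
#include <linux/spinlock.h>

static DECLARE_RWSEM(example_global_lock);      /* stands in for dmar_global_lock */
static DEFINE_RAW_SPINLOCK(example_ir_lock);    /* stands in for irq_2_ir_lock */

static void example_locked_update(void)
{
        unsigned long flags;

        /* outer lock: sleeping rwsem, taken first and released last */
        down_read(&example_global_lock);

        /* inner lock: raw spinlock, held with interrupts off */
        raw_spin_lock_irqsave(&example_ir_lock, flags);
        /* ... touch the interrupt-remapping tables here ... */
        raw_spin_unlock_irqrestore(&example_ir_lock, flags);

        up_read(&example_global_lock);
}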
| @@ -307,12 +318,14 @@ static int set_ioapic_sid(struct irte *irte, int apic) | |||
| 307 | if (!irte) | 318 | if (!irte) |
| 308 | return -1; | 319 | return -1; |
| 309 | 320 | ||
| 321 | down_read(&dmar_global_lock); | ||
| 310 | for (i = 0; i < MAX_IO_APICS; i++) { | 322 | for (i = 0; i < MAX_IO_APICS; i++) { |
| 311 | if (ir_ioapic[i].id == apic) { | 323 | if (ir_ioapic[i].id == apic) { |
| 312 | sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn; | 324 | sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn; |
| 313 | break; | 325 | break; |
| 314 | } | 326 | } |
| 315 | } | 327 | } |
| 328 | up_read(&dmar_global_lock); | ||
| 316 | 329 | ||
| 317 | if (sid == 0) { | 330 | if (sid == 0) { |
| 318 | pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic); | 331 | pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic); |
| @@ -332,12 +345,14 @@ static int set_hpet_sid(struct irte *irte, u8 id) | |||
| 332 | if (!irte) | 345 | if (!irte) |
| 333 | return -1; | 346 | return -1; |
| 334 | 347 | ||
| 348 | down_read(&dmar_global_lock); | ||
| 335 | for (i = 0; i < MAX_HPET_TBS; i++) { | 349 | for (i = 0; i < MAX_HPET_TBS; i++) { |
| 336 | if (ir_hpet[i].id == id) { | 350 | if (ir_hpet[i].id == id) { |
| 337 | sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn; | 351 | sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn; |
| 338 | break; | 352 | break; |
| 339 | } | 353 | } |
| 340 | } | 354 | } |
| 355 | up_read(&dmar_global_lock); | ||
| 341 | 356 | ||
| 342 | if (sid == 0) { | 357 | if (sid == 0) { |
| 343 | pr_warning("Failed to set source-id of HPET block (%d)\n", id); | 358 | pr_warning("Failed to set source-id of HPET block (%d)\n", id); |
| @@ -794,10 +809,16 @@ static int __init parse_ioapics_under_ir(void) | |||
| 794 | 809 | ||
| 795 | static int __init ir_dev_scope_init(void) | 810 | static int __init ir_dev_scope_init(void) |
| 796 | { | 811 | { |
| 812 | int ret; | ||
| 813 | |||
| 797 | if (!irq_remapping_enabled) | 814 | if (!irq_remapping_enabled) |
| 798 | return 0; | 815 | return 0; |
| 799 | 816 | ||
| 800 | return dmar_dev_scope_init(); | 817 | down_write(&dmar_global_lock); |
| 818 | ret = dmar_dev_scope_init(); | ||
| 819 | up_write(&dmar_global_lock); | ||
| 820 | |||
| 821 | return ret; | ||
| 801 | } | 822 | } |
| 802 | rootfs_initcall(ir_dev_scope_init); | 823 | rootfs_initcall(ir_dev_scope_init); |
| 803 | 824 | ||
| @@ -878,23 +899,27 @@ static int intel_setup_ioapic_entry(int irq, | |||
| 878 | struct io_apic_irq_attr *attr) | 899 | struct io_apic_irq_attr *attr) |
| 879 | { | 900 | { |
| 880 | int ioapic_id = mpc_ioapic_id(attr->ioapic); | 901 | int ioapic_id = mpc_ioapic_id(attr->ioapic); |
| 881 | struct intel_iommu *iommu = map_ioapic_to_ir(ioapic_id); | 902 | struct intel_iommu *iommu; |
| 882 | struct IR_IO_APIC_route_entry *entry; | 903 | struct IR_IO_APIC_route_entry *entry; |
| 883 | struct irte irte; | 904 | struct irte irte; |
| 884 | int index; | 905 | int index; |
| 885 | 906 | ||
| 907 | down_read(&dmar_global_lock); | ||
| 908 | iommu = map_ioapic_to_ir(ioapic_id); | ||
| 886 | if (!iommu) { | 909 | if (!iommu) { |
| 887 | pr_warn("No mapping iommu for ioapic %d\n", ioapic_id); | 910 | pr_warn("No mapping iommu for ioapic %d\n", ioapic_id); |
| 888 | return -ENODEV; | 911 | index = -ENODEV; |
| 889 | } | 912 | } else { |
| 890 | 913 | index = alloc_irte(iommu, irq, 1); | |
| 891 | entry = (struct IR_IO_APIC_route_entry *)route_entry; | 914 | if (index < 0) { |
| 892 | 915 | pr_warn("Failed to allocate IRTE for ioapic %d\n", | |
| 893 | index = alloc_irte(iommu, irq, 1); | 916 | ioapic_id); |
| 894 | if (index < 0) { | 917 | index = -ENOMEM; |
| 895 | pr_warn("Failed to allocate IRTE for ioapic %d\n", ioapic_id); | 918 | } |
| 896 | return -ENOMEM; | ||
| 897 | } | 919 | } |
| 920 | up_read(&dmar_global_lock); | ||
| 921 | if (index < 0) | ||
| 922 | return index; | ||
| 898 | 923 | ||
| 899 | prepare_irte(&irte, vector, destination); | 924 | prepare_irte(&irte, vector, destination); |
| 900 | 925 | ||
| @@ -913,6 +938,7 @@ static int intel_setup_ioapic_entry(int irq, | |||
| 913 | irte.avail, irte.vector, irte.dest_id, | 938 | irte.avail, irte.vector, irte.dest_id, |
| 914 | irte.sid, irte.sq, irte.svt); | 939 | irte.sid, irte.sq, irte.svt); |
| 915 | 940 | ||
| 941 | entry = (struct IR_IO_APIC_route_entry *)route_entry; | ||
| 916 | memset(entry, 0, sizeof(*entry)); | 942 | memset(entry, 0, sizeof(*entry)); |
| 917 | 943 | ||
| 918 | entry->index2 = (index >> 15) & 0x1; | 944 | entry->index2 = (index >> 15) & 0x1; |
| @@ -1043,20 +1069,23 @@ static int intel_msi_alloc_irq(struct pci_dev *dev, int irq, int nvec) | |||
| 1043 | struct intel_iommu *iommu; | 1069 | struct intel_iommu *iommu; |
| 1044 | int index; | 1070 | int index; |
| 1045 | 1071 | ||
| 1072 | down_read(&dmar_global_lock); | ||
| 1046 | iommu = map_dev_to_ir(dev); | 1073 | iommu = map_dev_to_ir(dev); |
| 1047 | if (!iommu) { | 1074 | if (!iommu) { |
| 1048 | printk(KERN_ERR | 1075 | printk(KERN_ERR |
| 1049 | "Unable to map PCI %s to iommu\n", pci_name(dev)); | 1076 | "Unable to map PCI %s to iommu\n", pci_name(dev)); |
| 1050 | return -ENOENT; | 1077 | index = -ENOENT; |
| 1078 | } else { | ||
| 1079 | index = alloc_irte(iommu, irq, nvec); | ||
| 1080 | if (index < 0) { | ||
| 1081 | printk(KERN_ERR | ||
| 1082 | "Unable to allocate %d IRTE for PCI %s\n", | ||
| 1083 | nvec, pci_name(dev)); | ||
| 1084 | index = -ENOSPC; | ||
| 1085 | } | ||
| 1051 | } | 1086 | } |
| 1087 | up_read(&dmar_global_lock); | ||
| 1052 | 1088 | ||
| 1053 | index = alloc_irte(iommu, irq, nvec); | ||
| 1054 | if (index < 0) { | ||
| 1055 | printk(KERN_ERR | ||
| 1056 | "Unable to allocate %d IRTE for PCI %s\n", nvec, | ||
| 1057 | pci_name(dev)); | ||
| 1058 | return -ENOSPC; | ||
| 1059 | } | ||
| 1060 | return index; | 1089 | return index; |
| 1061 | } | 1090 | } |
| 1062 | 1091 | ||
| @@ -1064,33 +1093,40 @@ static int intel_msi_setup_irq(struct pci_dev *pdev, unsigned int irq, | |||
| 1064 | int index, int sub_handle) | 1093 | int index, int sub_handle) |
| 1065 | { | 1094 | { |
| 1066 | struct intel_iommu *iommu; | 1095 | struct intel_iommu *iommu; |
| 1096 | int ret = -ENOENT; | ||
| 1067 | 1097 | ||
| 1098 | down_read(&dmar_global_lock); | ||
| 1068 | iommu = map_dev_to_ir(pdev); | 1099 | iommu = map_dev_to_ir(pdev); |
| 1069 | if (!iommu) | 1100 | if (iommu) { |
| 1070 | return -ENOENT; | 1101 | /* |
| 1071 | /* | 1102 | * setup the mapping between the irq and the IRTE |
| 1072 | * setup the mapping between the irq and the IRTE | 1103 | * base index, the sub_handle pointing to the |
| 1073 | * base index, the sub_handle pointing to the | 1104 | * appropriate interrupt remap table entry. |
| 1074 | * appropriate interrupt remap table entry. | 1105 | */ |
| 1075 | */ | 1106 | set_irte_irq(irq, iommu, index, sub_handle); |
| 1076 | set_irte_irq(irq, iommu, index, sub_handle); | 1107 | ret = 0; |
| 1108 | } | ||
| 1109 | up_read(&dmar_global_lock); | ||
| 1077 | 1110 | ||
| 1078 | return 0; | 1111 | return ret; |
| 1079 | } | 1112 | } |
| 1080 | 1113 | ||
| 1081 | static int intel_setup_hpet_msi(unsigned int irq, unsigned int id) | 1114 | static int intel_setup_hpet_msi(unsigned int irq, unsigned int id) |
| 1082 | { | 1115 | { |
| 1083 | struct intel_iommu *iommu = map_hpet_to_ir(id); | 1116 | int ret = -1; |
| 1117 | struct intel_iommu *iommu; | ||
| 1084 | int index; | 1118 | int index; |
| 1085 | 1119 | ||
| 1086 | if (!iommu) | 1120 | down_read(&dmar_global_lock); |
| 1087 | return -1; | 1121 | iommu = map_hpet_to_ir(id); |
| 1088 | 1122 | if (iommu) { | |
| 1089 | index = alloc_irte(iommu, irq, 1); | 1123 | index = alloc_irte(iommu, irq, 1); |
| 1090 | if (index < 0) | 1124 | if (index >= 0) |
| 1091 | return -1; | 1125 | ret = 0; |
| 1126 | } | ||
| 1127 | up_read(&dmar_global_lock); | ||
| 1092 | 1128 | ||
| 1093 | return 0; | 1129 | return ret; |
| 1094 | } | 1130 | } |
| 1095 | 1131 | ||
| 1096 | struct irq_remap_ops intel_irq_remap_ops = { | 1132 | struct irq_remap_ops intel_irq_remap_ops = { |
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index 67da6cff74e8..f6b17e6af2fb 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c | |||
| @@ -342,19 +342,30 @@ __is_range_overlap(struct rb_node *node, | |||
| 342 | return 0; | 342 | return 0; |
| 343 | } | 343 | } |
| 344 | 344 | ||
| 345 | static inline struct iova * | ||
| 346 | alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi) | ||
| 347 | { | ||
| 348 | struct iova *iova; | ||
| 349 | |||
| 350 | iova = alloc_iova_mem(); | ||
| 351 | if (iova) { | ||
| 352 | iova->pfn_lo = pfn_lo; | ||
| 353 | iova->pfn_hi = pfn_hi; | ||
| 354 | } | ||
| 355 | |||
| 356 | return iova; | ||
| 357 | } | ||
| 358 | |||
| 345 | static struct iova * | 359 | static struct iova * |
| 346 | __insert_new_range(struct iova_domain *iovad, | 360 | __insert_new_range(struct iova_domain *iovad, |
| 347 | unsigned long pfn_lo, unsigned long pfn_hi) | 361 | unsigned long pfn_lo, unsigned long pfn_hi) |
| 348 | { | 362 | { |
| 349 | struct iova *iova; | 363 | struct iova *iova; |
| 350 | 364 | ||
| 351 | iova = alloc_iova_mem(); | 365 | iova = alloc_and_init_iova(pfn_lo, pfn_hi); |
| 352 | if (!iova) | 366 | if (iova) |
| 353 | return iova; | 367 | iova_insert_rbtree(&iovad->rbroot, iova); |
| 354 | 368 | ||
| 355 | iova->pfn_hi = pfn_hi; | ||
| 356 | iova->pfn_lo = pfn_lo; | ||
| 357 | iova_insert_rbtree(&iovad->rbroot, iova); | ||
| 358 | return iova; | 369 | return iova; |
| 359 | } | 370 | } |
| 360 | 371 | ||
| @@ -433,3 +444,44 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to) | |||
| 433 | } | 444 | } |
| 434 | spin_unlock_irqrestore(&from->iova_rbtree_lock, flags); | 445 | spin_unlock_irqrestore(&from->iova_rbtree_lock, flags); |
| 435 | } | 446 | } |
| 447 | |||
| 448 | struct iova * | ||
| 449 | split_and_remove_iova(struct iova_domain *iovad, struct iova *iova, | ||
| 450 | unsigned long pfn_lo, unsigned long pfn_hi) | ||
| 451 | { | ||
| 452 | unsigned long flags; | ||
| 453 | struct iova *prev = NULL, *next = NULL; | ||
| 454 | |||
| 455 | spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); | ||
| 456 | if (iova->pfn_lo < pfn_lo) { | ||
| 457 | prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1); | ||
| 458 | if (prev == NULL) | ||
| 459 | goto error; | ||
| 460 | } | ||
| 461 | if (iova->pfn_hi > pfn_hi) { | ||
| 462 | next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi); | ||
| 463 | if (next == NULL) | ||
| 464 | goto error; | ||
| 465 | } | ||
| 466 | |||
| 467 | __cached_rbnode_delete_update(iovad, iova); | ||
| 468 | rb_erase(&iova->node, &iovad->rbroot); | ||
| 469 | |||
| 470 | if (prev) { | ||
| 471 | iova_insert_rbtree(&iovad->rbroot, prev); | ||
| 472 | iova->pfn_lo = pfn_lo; | ||
| 473 | } | ||
| 474 | if (next) { | ||
| 475 | iova_insert_rbtree(&iovad->rbroot, next); | ||
| 476 | iova->pfn_hi = pfn_hi; | ||
| 477 | } | ||
| 478 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); | ||
| 479 | |||
| 480 | return iova; | ||
| 481 | |||
| 482 | error: | ||
| 483 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); | ||
| 484 | if (prev) | ||
| 485 | free_iova_mem(prev); | ||
| 486 | return NULL; | ||
| 487 | } | ||
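split_and_remove_iova() is what lets the memory-hotplug notifier earlier in this series punch a hole out of an existing allocation: look up the covering IOVA, carve out the requested PFN range, and get back a node describing exactly what was removed. A usage sketch, assuming the helper is exported through linux/iova.h as in this series and leaving out the IOTLB flushing the real caller does:

#include <linux/iova.h>

/* Sketch: carve [lo, hi] out of an initialised iova_domain, as the
 * memory-hotplug notifier in intel-iommu.c does for each covering IOVA. */
static void example_punch_hole(struct iova_domain *iovad,
                               unsigned long lo, unsigned long hi)
{
        struct iova *iova;

        iova = find_iova(iovad, lo);            /* covering allocation, if any */
        if (!iova)
                return;

        /* split off anything outside [lo, hi]; the returned node then
         * describes exactly the piece removed from the rbtree */
        iova = split_and_remove_iova(iovad, iova, lo, hi);
        if (!iova)
                return;                         /* allocation failed, tree unchanged */

        /* ... flush IOTLBs for iova->pfn_lo..iova->pfn_hi here ... */

        free_iova_mem(iova);
}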
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index bcd78a720630..7fcbfc498fa9 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c | |||
| @@ -23,6 +23,9 @@ | |||
| 23 | #include <linux/spinlock.h> | 23 | #include <linux/spinlock.h> |
| 24 | #include <linux/io.h> | 24 | #include <linux/io.h> |
| 25 | #include <linux/pm_runtime.h> | 25 | #include <linux/pm_runtime.h> |
| 26 | #include <linux/of.h> | ||
| 27 | #include <linux/of_iommu.h> | ||
| 28 | #include <linux/of_irq.h> | ||
| 26 | 29 | ||
| 27 | #include <asm/cacheflush.h> | 30 | #include <asm/cacheflush.h> |
| 28 | 31 | ||
| @@ -146,13 +149,10 @@ static int iommu_enable(struct omap_iommu *obj) | |||
| 146 | struct platform_device *pdev = to_platform_device(obj->dev); | 149 | struct platform_device *pdev = to_platform_device(obj->dev); |
| 147 | struct iommu_platform_data *pdata = pdev->dev.platform_data; | 150 | struct iommu_platform_data *pdata = pdev->dev.platform_data; |
| 148 | 151 | ||
| 149 | if (!pdata) | ||
| 150 | return -EINVAL; | ||
| 151 | |||
| 152 | if (!arch_iommu) | 152 | if (!arch_iommu) |
| 153 | return -ENODEV; | 153 | return -ENODEV; |
| 154 | 154 | ||
| 155 | if (pdata->deassert_reset) { | 155 | if (pdata && pdata->deassert_reset) { |
| 156 | err = pdata->deassert_reset(pdev, pdata->reset_name); | 156 | err = pdata->deassert_reset(pdev, pdata->reset_name); |
| 157 | if (err) { | 157 | if (err) { |
| 158 | dev_err(obj->dev, "deassert_reset failed: %d\n", err); | 158 | dev_err(obj->dev, "deassert_reset failed: %d\n", err); |
| @@ -172,14 +172,11 @@ static void iommu_disable(struct omap_iommu *obj) | |||
| 172 | struct platform_device *pdev = to_platform_device(obj->dev); | 172 | struct platform_device *pdev = to_platform_device(obj->dev); |
| 173 | struct iommu_platform_data *pdata = pdev->dev.platform_data; | 173 | struct iommu_platform_data *pdata = pdev->dev.platform_data; |
| 174 | 174 | ||
| 175 | if (!pdata) | ||
| 176 | return; | ||
| 177 | |||
| 178 | arch_iommu->disable(obj); | 175 | arch_iommu->disable(obj); |
| 179 | 176 | ||
| 180 | pm_runtime_put_sync(obj->dev); | 177 | pm_runtime_put_sync(obj->dev); |
| 181 | 178 | ||
| 182 | if (pdata->assert_reset) | 179 | if (pdata && pdata->assert_reset) |
| 183 | pdata->assert_reset(pdev, pdata->reset_name); | 180 | pdata->assert_reset(pdev, pdata->reset_name); |
| 184 | } | 181 | } |
| 185 | 182 | ||
| @@ -523,7 +520,8 @@ static void flush_iopte_range(u32 *first, u32 *last) | |||
| 523 | static void iopte_free(u32 *iopte) | 520 | static void iopte_free(u32 *iopte) |
| 524 | { | 521 | { |
| 525 | /* Note: freed iopte's must be clean ready for re-use */ | 522 | /* Note: freed iopte's must be clean ready for re-use */ |
| 526 | kmem_cache_free(iopte_cachep, iopte); | 523 | if (iopte) |
| 524 | kmem_cache_free(iopte_cachep, iopte); | ||
| 527 | } | 525 | } |
| 528 | 526 | ||
| 529 | static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da) | 527 | static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da) |
| @@ -863,7 +861,7 @@ static int device_match_by_alias(struct device *dev, void *data) | |||
| 863 | **/ | 861 | **/ |
| 864 | static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd) | 862 | static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd) |
| 865 | { | 863 | { |
| 866 | int err = -ENOMEM; | 864 | int err; |
| 867 | struct device *dev; | 865 | struct device *dev; |
| 868 | struct omap_iommu *obj; | 866 | struct omap_iommu *obj; |
| 869 | 867 | ||
| @@ -871,7 +869,7 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd) | |||
| 871 | (void *)name, | 869 | (void *)name, |
| 872 | device_match_by_alias); | 870 | device_match_by_alias); |
| 873 | if (!dev) | 871 | if (!dev) |
| 874 | return NULL; | 872 | return ERR_PTR(-ENODEV); |
| 875 | 873 | ||
| 876 | obj = to_iommu(dev); | 874 | obj = to_iommu(dev); |
| 877 | 875 | ||
| @@ -890,8 +888,10 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd) | |||
| 890 | goto err_enable; | 888 | goto err_enable; |
| 891 | flush_iotlb_all(obj); | 889 | flush_iotlb_all(obj); |
| 892 | 890 | ||
| 893 | if (!try_module_get(obj->owner)) | 891 | if (!try_module_get(obj->owner)) { |
| 892 | err = -ENODEV; | ||
| 894 | goto err_module; | 893 | goto err_module; |
| 894 | } | ||
| 895 | 895 | ||
| 896 | spin_unlock(&obj->iommu_lock); | 896 | spin_unlock(&obj->iommu_lock); |
| 897 | 897 | ||
| @@ -940,17 +940,41 @@ static int omap_iommu_probe(struct platform_device *pdev) | |||
| 940 | struct omap_iommu *obj; | 940 | struct omap_iommu *obj; |
| 941 | struct resource *res; | 941 | struct resource *res; |
| 942 | struct iommu_platform_data *pdata = pdev->dev.platform_data; | 942 | struct iommu_platform_data *pdata = pdev->dev.platform_data; |
| 943 | struct device_node *of = pdev->dev.of_node; | ||
| 943 | 944 | ||
| 944 | obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL); | 945 | obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL); |
| 945 | if (!obj) | 946 | if (!obj) |
| 946 | return -ENOMEM; | 947 | return -ENOMEM; |
| 947 | 948 | ||
| 948 | obj->nr_tlb_entries = pdata->nr_tlb_entries; | 949 | if (of) { |
| 949 | obj->name = pdata->name; | 950 | obj->name = dev_name(&pdev->dev); |
| 951 | obj->nr_tlb_entries = 32; | ||
| 952 | err = of_property_read_u32(of, "ti,#tlb-entries", | ||
| 953 | &obj->nr_tlb_entries); | ||
| 954 | if (err && err != -EINVAL) | ||
| 955 | return err; | ||
| 956 | if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8) | ||
| 957 | return -EINVAL; | ||
| 958 | /* | ||
| 959 | * da_start and da_end are needed for omap-iovmm, so hardcode | ||
| 960 | * these values as used by OMAP3 ISP - the only user of | ||
| 961 | * omap-iovmm | ||
| 962 | */ | ||
| 963 | obj->da_start = 0; | ||
| 964 | obj->da_end = 0xfffff000; | ||
| 965 | if (of_find_property(of, "ti,iommu-bus-err-back", NULL)) | ||
| 966 | obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN; | ||
| 967 | } else { | ||
| 968 | obj->nr_tlb_entries = pdata->nr_tlb_entries; | ||
| 969 | obj->name = pdata->name; | ||
| 970 | obj->da_start = pdata->da_start; | ||
| 971 | obj->da_end = pdata->da_end; | ||
| 972 | } | ||
| 973 | if (obj->da_end <= obj->da_start) | ||
| 974 | return -EINVAL; | ||
| 975 | |||
| 950 | obj->dev = &pdev->dev; | 976 | obj->dev = &pdev->dev; |
| 951 | obj->ctx = (void *)obj + sizeof(*obj); | 977 | obj->ctx = (void *)obj + sizeof(*obj); |
| 952 | obj->da_start = pdata->da_start; | ||
| 953 | obj->da_end = pdata->da_end; | ||
| 954 | 978 | ||
| 955 | spin_lock_init(&obj->iommu_lock); | 979 | spin_lock_init(&obj->iommu_lock); |
| 956 | mutex_init(&obj->mmap_lock); | 980 | mutex_init(&obj->mmap_lock); |
| @@ -958,33 +982,18 @@ static int omap_iommu_probe(struct platform_device *pdev) | |||
| 958 | INIT_LIST_HEAD(&obj->mmap); | 982 | INIT_LIST_HEAD(&obj->mmap); |
| 959 | 983 | ||
| 960 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 984 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 961 | if (!res) { | 985 | obj->regbase = devm_ioremap_resource(obj->dev, res); |
| 962 | err = -ENODEV; | 986 | if (IS_ERR(obj->regbase)) |
| 963 | goto err_mem; | 987 | return PTR_ERR(obj->regbase); |
| 964 | } | ||
| 965 | |||
| 966 | res = request_mem_region(res->start, resource_size(res), | ||
| 967 | dev_name(&pdev->dev)); | ||
| 968 | if (!res) { | ||
| 969 | err = -EIO; | ||
| 970 | goto err_mem; | ||
| 971 | } | ||
| 972 | |||
| 973 | obj->regbase = ioremap(res->start, resource_size(res)); | ||
| 974 | if (!obj->regbase) { | ||
| 975 | err = -ENOMEM; | ||
| 976 | goto err_ioremap; | ||
| 977 | } | ||
| 978 | 988 | ||
| 979 | irq = platform_get_irq(pdev, 0); | 989 | irq = platform_get_irq(pdev, 0); |
| 980 | if (irq < 0) { | 990 | if (irq < 0) |
| 981 | err = -ENODEV; | 991 | return -ENODEV; |
| 982 | goto err_irq; | 992 | |
| 983 | } | 993 | err = devm_request_irq(obj->dev, irq, iommu_fault_handler, IRQF_SHARED, |
| 984 | err = request_irq(irq, iommu_fault_handler, IRQF_SHARED, | 994 | dev_name(obj->dev), obj); |
| 985 | dev_name(&pdev->dev), obj); | ||
| 986 | if (err < 0) | 995 | if (err < 0) |
| 987 | goto err_irq; | 996 | return err; |
| 988 | platform_set_drvdata(pdev, obj); | 997 | platform_set_drvdata(pdev, obj); |
| 989 | 998 | ||
| 990 | pm_runtime_irq_safe(obj->dev); | 999 | pm_runtime_irq_safe(obj->dev); |
| @@ -992,42 +1001,34 @@ static int omap_iommu_probe(struct platform_device *pdev) | |||
| 992 | 1001 | ||
| 993 | dev_info(&pdev->dev, "%s registered\n", obj->name); | 1002 | dev_info(&pdev->dev, "%s registered\n", obj->name); |
| 994 | return 0; | 1003 | return 0; |
| 995 | |||
| 996 | err_irq: | ||
| 997 | iounmap(obj->regbase); | ||
| 998 | err_ioremap: | ||
| 999 | release_mem_region(res->start, resource_size(res)); | ||
| 1000 | err_mem: | ||
| 1001 | kfree(obj); | ||
| 1002 | return err; | ||
| 1003 | } | 1004 | } |
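The probe rewrite above is the standard devm_* conversion: every resource is bound to the struct device, so the err_mem/err_ioremap/err_irq unwind labels and the matching frees in omap_iommu_remove() simply disappear. A generic sketch of a probe written this way, with hypothetical my_mmu_* names:

#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>

struct my_mmu {
        void __iomem *regbase;
        int irq;
};

static irqreturn_t my_mmu_fault_handler(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int my_mmu_probe(struct platform_device *pdev)
{
        struct my_mmu *obj;
        struct resource *res;
        int err;

        /* freed automatically when the device goes away */
        obj = devm_kzalloc(&pdev->dev, sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return -ENOMEM;

        /* request_mem_region() + ioremap() + error handling in one call;
         * a NULL 'res' is caught inside and turned into an ERR_PTR */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        obj->regbase = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(obj->regbase))
                return PTR_ERR(obj->regbase);

        obj->irq = platform_get_irq(pdev, 0);
        if (obj->irq < 0)
                return obj->irq;

        /* released automatically too, so remove() needs no free_irq() */
        err = devm_request_irq(&pdev->dev, obj->irq, my_mmu_fault_handler,
                               IRQF_SHARED, dev_name(&pdev->dev), obj);
        if (err)
                return err;

        platform_set_drvdata(pdev, obj);
        return 0;
}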
| 1004 | 1005 | ||
| 1005 | static int omap_iommu_remove(struct platform_device *pdev) | 1006 | static int omap_iommu_remove(struct platform_device *pdev) |
| 1006 | { | 1007 | { |
| 1007 | int irq; | ||
| 1008 | struct resource *res; | ||
| 1009 | struct omap_iommu *obj = platform_get_drvdata(pdev); | 1008 | struct omap_iommu *obj = platform_get_drvdata(pdev); |
| 1010 | 1009 | ||
| 1011 | iopgtable_clear_entry_all(obj); | 1010 | iopgtable_clear_entry_all(obj); |
| 1012 | 1011 | ||
| 1013 | irq = platform_get_irq(pdev, 0); | ||
| 1014 | free_irq(irq, obj); | ||
| 1015 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 1016 | release_mem_region(res->start, resource_size(res)); | ||
| 1017 | iounmap(obj->regbase); | ||
| 1018 | |||
| 1019 | pm_runtime_disable(obj->dev); | 1012 | pm_runtime_disable(obj->dev); |
| 1020 | 1013 | ||
| 1021 | dev_info(&pdev->dev, "%s removed\n", obj->name); | 1014 | dev_info(&pdev->dev, "%s removed\n", obj->name); |
| 1022 | kfree(obj); | ||
| 1023 | return 0; | 1015 | return 0; |
| 1024 | } | 1016 | } |
| 1025 | 1017 | ||
| 1018 | static struct of_device_id omap_iommu_of_match[] = { | ||
| 1019 | { .compatible = "ti,omap2-iommu" }, | ||
| 1020 | { .compatible = "ti,omap4-iommu" }, | ||
| 1021 | { .compatible = "ti,dra7-iommu" }, | ||
| 1022 | {}, | ||
| 1023 | }; | ||
| 1024 | MODULE_DEVICE_TABLE(of, omap_iommu_of_match); | ||
| 1025 | |||
| 1026 | static struct platform_driver omap_iommu_driver = { | 1026 | static struct platform_driver omap_iommu_driver = { |
| 1027 | .probe = omap_iommu_probe, | 1027 | .probe = omap_iommu_probe, |
| 1028 | .remove = omap_iommu_remove, | 1028 | .remove = omap_iommu_remove, |
| 1029 | .driver = { | 1029 | .driver = { |
| 1030 | .name = "omap-iommu", | 1030 | .name = "omap-iommu", |
| 1031 | .of_match_table = of_match_ptr(omap_iommu_of_match), | ||
| 1031 | }, | 1032 | }, |
| 1032 | }; | 1033 | }; |
| 1033 | 1034 | ||
| @@ -1253,6 +1254,49 @@ static int omap_iommu_domain_has_cap(struct iommu_domain *domain, | |||
| 1253 | return 0; | 1254 | return 0; |
| 1254 | } | 1255 | } |
| 1255 | 1256 | ||
| 1257 | static int omap_iommu_add_device(struct device *dev) | ||
| 1258 | { | ||
| 1259 | struct omap_iommu_arch_data *arch_data; | ||
| 1260 | struct device_node *np; | ||
| 1261 | |||
| 1262 | /* | ||
| 1263 | * Allocate the archdata iommu structure for DT-based devices. | ||
| 1264 | * | ||
| 1265 | * TODO: Simplify this when removing non-DT support completely from the | ||
| 1266 | * IOMMU users. | ||
| 1267 | */ | ||
| 1268 | if (!dev->of_node) | ||
| 1269 | return 0; | ||
| 1270 | |||
| 1271 | np = of_parse_phandle(dev->of_node, "iommus", 0); | ||
| 1272 | if (!np) | ||
| 1273 | return 0; | ||
| 1274 | |||
| 1275 | arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL); | ||
| 1276 | if (!arch_data) { | ||
| 1277 | of_node_put(np); | ||
| 1278 | return -ENOMEM; | ||
| 1279 | } | ||
| 1280 | |||
| 1281 | arch_data->name = kstrdup(dev_name(dev), GFP_KERNEL); | ||
| 1282 | dev->archdata.iommu = arch_data; | ||
| 1283 | |||
| 1284 | of_node_put(np); | ||
| 1285 | |||
| 1286 | return 0; | ||
| 1287 | } | ||
| 1288 | |||
| 1289 | static void omap_iommu_remove_device(struct device *dev) | ||
| 1290 | { | ||
| 1291 | struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; | ||
| 1292 | |||
| 1293 | if (!dev->of_node || !arch_data) | ||
| 1294 | return; | ||
| 1295 | |||
| 1296 | kfree(arch_data->name); | ||
| 1297 | kfree(arch_data); | ||
| 1298 | } | ||
| 1299 | |||
| 1256 | static struct iommu_ops omap_iommu_ops = { | 1300 | static struct iommu_ops omap_iommu_ops = { |
| 1257 | .domain_init = omap_iommu_domain_init, | 1301 | .domain_init = omap_iommu_domain_init, |
| 1258 | .domain_destroy = omap_iommu_domain_destroy, | 1302 | .domain_destroy = omap_iommu_domain_destroy, |
| @@ -1262,6 +1306,8 @@ static struct iommu_ops omap_iommu_ops = { | |||
| 1262 | .unmap = omap_iommu_unmap, | 1306 | .unmap = omap_iommu_unmap, |
| 1263 | .iova_to_phys = omap_iommu_iova_to_phys, | 1307 | .iova_to_phys = omap_iommu_iova_to_phys, |
| 1264 | .domain_has_cap = omap_iommu_domain_has_cap, | 1308 | .domain_has_cap = omap_iommu_domain_has_cap, |
| 1309 | .add_device = omap_iommu_add_device, | ||
| 1310 | .remove_device = omap_iommu_remove_device, | ||
| 1265 | .pgsize_bitmap = OMAP_IOMMU_PGSIZES, | 1311 | .pgsize_bitmap = OMAP_IOMMU_PGSIZES, |
| 1266 | }; | 1312 | }; |
| 1267 | 1313 | ||
diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h index 120084206602..ea920c3e94ff 100644 --- a/drivers/iommu/omap-iommu.h +++ b/drivers/iommu/omap-iommu.h | |||
| @@ -52,6 +52,8 @@ struct omap_iommu { | |||
| 52 | void *ctx; /* iommu context: registers saved area */ | 52 | void *ctx; /* iommu context: registers saved area */ |
| 53 | u32 da_start; | 53 | u32 da_start; |
| 54 | u32 da_end; | 54 | u32 da_end; |
| 55 | |||
| 56 | int has_bus_err_back; | ||
| 55 | }; | 57 | }; |
| 56 | 58 | ||
| 57 | struct cr_regs { | 59 | struct cr_regs { |
| @@ -130,6 +132,7 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev) | |||
| 130 | #define MMU_READ_CAM 0x68 | 132 | #define MMU_READ_CAM 0x68 |
| 131 | #define MMU_READ_RAM 0x6c | 133 | #define MMU_READ_RAM 0x6c |
| 132 | #define MMU_EMU_FAULT_AD 0x70 | 134 | #define MMU_EMU_FAULT_AD 0x70 |
| 135 | #define MMU_GP_REG 0x88 | ||
| 133 | 136 | ||
| 134 | #define MMU_REG_SIZE 256 | 137 | #define MMU_REG_SIZE 256 |
| 135 | 138 | ||
| @@ -163,6 +166,8 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev) | |||
| 163 | #define MMU_RAM_MIXED_MASK (1 << MMU_RAM_MIXED_SHIFT) | 166 | #define MMU_RAM_MIXED_MASK (1 << MMU_RAM_MIXED_SHIFT) |
| 164 | #define MMU_RAM_MIXED MMU_RAM_MIXED_MASK | 167 | #define MMU_RAM_MIXED MMU_RAM_MIXED_MASK |
| 165 | 168 | ||
| 169 | #define MMU_GP_REG_BUS_ERR_BACK_EN 0x1 | ||
| 170 | |||
| 166 | /* | 171 | /* |
| 167 | * utilities for super page(16MB, 1MB, 64KB and 4KB) | 172 | * utilities for super page(16MB, 1MB, 64KB and 4KB) |
| 168 | */ | 173 | */ |
diff --git a/drivers/iommu/omap-iommu2.c b/drivers/iommu/omap-iommu2.c index d745094a69dd..5e1ea3b0bf16 100644 --- a/drivers/iommu/omap-iommu2.c +++ b/drivers/iommu/omap-iommu2.c | |||
| @@ -98,6 +98,9 @@ static int omap2_iommu_enable(struct omap_iommu *obj) | |||
| 98 | 98 | ||
| 99 | iommu_write_reg(obj, pa, MMU_TTB); | 99 | iommu_write_reg(obj, pa, MMU_TTB); |
| 100 | 100 | ||
| 101 | if (obj->has_bus_err_back) | ||
| 102 | iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG); | ||
| 103 | |||
| 101 | __iommu_set_twl(obj, true); | 104 | __iommu_set_twl(obj, true); |
| 102 | 105 | ||
| 103 | return 0; | 106 | return 0; |
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h index f3372441e3a5..c8adad9c6b6a 100644 --- a/include/acpi/actbl2.h +++ b/include/acpi/actbl2.h | |||
| @@ -424,7 +424,8 @@ enum acpi_dmar_type { | |||
| 424 | ACPI_DMAR_TYPE_RESERVED_MEMORY = 1, | 424 | ACPI_DMAR_TYPE_RESERVED_MEMORY = 1, |
| 425 | ACPI_DMAR_TYPE_ATSR = 2, | 425 | ACPI_DMAR_TYPE_ATSR = 2, |
| 426 | ACPI_DMAR_HARDWARE_AFFINITY = 3, | 426 | ACPI_DMAR_HARDWARE_AFFINITY = 3, |
| 427 | ACPI_DMAR_TYPE_RESERVED = 4 /* 4 and greater are reserved */ | 427 | ACPI_DMAR_TYPE_ANDD = 4, |
| 428 | ACPI_DMAR_TYPE_RESERVED = 5 /* 5 and greater are reserved */ | ||
| 428 | }; | 429 | }; |
| 429 | 430 | ||
| 430 | /* DMAR Device Scope structure */ | 431 | /* DMAR Device Scope structure */ |
| @@ -445,7 +446,8 @@ enum acpi_dmar_scope_type { | |||
| 445 | ACPI_DMAR_SCOPE_TYPE_BRIDGE = 2, | 446 | ACPI_DMAR_SCOPE_TYPE_BRIDGE = 2, |
| 446 | ACPI_DMAR_SCOPE_TYPE_IOAPIC = 3, | 447 | ACPI_DMAR_SCOPE_TYPE_IOAPIC = 3, |
| 447 | ACPI_DMAR_SCOPE_TYPE_HPET = 4, | 448 | ACPI_DMAR_SCOPE_TYPE_HPET = 4, |
| 448 | ACPI_DMAR_SCOPE_TYPE_RESERVED = 5 /* 5 and greater are reserved */ | 449 | ACPI_DMAR_SCOPE_TYPE_ACPI = 5, |
| 450 | ACPI_DMAR_SCOPE_TYPE_RESERVED = 6 /* 6 and greater are reserved */ | ||
| 449 | }; | 451 | }; |
| 450 | 452 | ||
| 451 | struct acpi_dmar_pci_path { | 453 | struct acpi_dmar_pci_path { |
| @@ -507,6 +509,15 @@ struct acpi_dmar_rhsa { | |||
| 507 | u32 proximity_domain; | 509 | u32 proximity_domain; |
| 508 | }; | 510 | }; |
| 509 | 511 | ||
| 512 | /* 4: ACPI Namespace Device Declaration Structure */ | ||
| 513 | |||
| 514 | struct acpi_dmar_andd { | ||
| 515 | struct acpi_dmar_header header; | ||
| 516 | u8 reserved[3]; | ||
| 517 | u8 device_number; | ||
| 518 | u8 object_name[]; | ||
| 519 | }; | ||
| 520 | |||
| 510 | /******************************************************************************* | 521 | /******************************************************************************* |
| 511 | * | 522 | * |
| 512 | * HPET - High Precision Event Timer table | 523 | * HPET - High Precision Event Timer table |
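The new ANDD entry slots into the existing DMAR layout: every remapping structure starts with a struct acpi_dmar_header whose type and length fields drive the parser, so older parsers skip unknown types by length. A hedged sketch of such a walk (example_walk_dmar is illustrative, not the kernel's actual parser):

#include <acpi/actbl2.h>

/* Illustrative walk over the remapping structures that follow the DMAR
 * table header; 'start' points at the first subtable, 'end' just past it. */
static void example_walk_dmar(void *start, void *end)
{
        struct acpi_dmar_header *hdr = start;

        while ((void *)hdr + sizeof(*hdr) <= end && hdr->length) {
                switch (hdr->type) {
                case ACPI_DMAR_TYPE_HARDWARE_UNIT:      /* DRHD */
                case ACPI_DMAR_TYPE_RESERVED_MEMORY:    /* RMRR */
                case ACPI_DMAR_TYPE_ATSR:
                case ACPI_DMAR_HARDWARE_AFFINITY:       /* RHSA */
                        break;
                case ACPI_DMAR_TYPE_ANDD:
                        /* newly added: ((struct acpi_dmar_andd *)hdr)
                         * carries device_number and the ACPI object_name
                         * of an enumerated namespace device */
                        break;
                default:
                        /* reserved/unknown types are skipped by length */
                        break;
                }
                hdr = (void *)hdr + hdr->length;
        }
}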
diff --git a/include/linux/dmar.h b/include/linux/dmar.h index eccb0c0c6cf6..23c8db129560 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h | |||
| @@ -25,6 +25,8 @@ | |||
| 25 | #include <linux/types.h> | 25 | #include <linux/types.h> |
| 26 | #include <linux/msi.h> | 26 | #include <linux/msi.h> |
| 27 | #include <linux/irqreturn.h> | 27 | #include <linux/irqreturn.h> |
| 28 | #include <linux/rwsem.h> | ||
| 29 | #include <linux/rcupdate.h> | ||
| 28 | 30 | ||
| 29 | struct acpi_dmar_header; | 31 | struct acpi_dmar_header; |
| 30 | 32 | ||
| @@ -34,13 +36,19 @@ struct acpi_dmar_header; | |||
| 34 | 36 | ||
| 35 | struct intel_iommu; | 37 | struct intel_iommu; |
| 36 | 38 | ||
| 39 | struct dmar_dev_scope { | ||
| 40 | struct device __rcu *dev; | ||
| 41 | u8 bus; | ||
| 42 | u8 devfn; | ||
| 43 | }; | ||
| 44 | |||
| 37 | #ifdef CONFIG_DMAR_TABLE | 45 | #ifdef CONFIG_DMAR_TABLE |
| 38 | extern struct acpi_table_header *dmar_tbl; | 46 | extern struct acpi_table_header *dmar_tbl; |
| 39 | struct dmar_drhd_unit { | 47 | struct dmar_drhd_unit { |
| 40 | struct list_head list; /* list of drhd units */ | 48 | struct list_head list; /* list of drhd units */ |
| 41 | struct acpi_dmar_header *hdr; /* ACPI header */ | 49 | struct acpi_dmar_header *hdr; /* ACPI header */ |
| 42 | u64 reg_base_addr; /* register base address*/ | 50 | u64 reg_base_addr; /* register base address*/ |
| 43 | struct pci_dev **devices; /* target device array */ | 51 | struct dmar_dev_scope *devices;/* target device array */ |
| 44 | int devices_cnt; /* target device count */ | 52 | int devices_cnt; /* target device count */ |
| 45 | u16 segment; /* PCI domain */ | 53 | u16 segment; /* PCI domain */ |
| 46 | u8 ignored:1; /* ignore drhd */ | 54 | u8 ignored:1; /* ignore drhd */ |
| @@ -48,33 +56,66 @@ struct dmar_drhd_unit { | |||
| 48 | struct intel_iommu *iommu; | 56 | struct intel_iommu *iommu; |
| 49 | }; | 57 | }; |
| 50 | 58 | ||
| 59 | struct dmar_pci_notify_info { | ||
| 60 | struct pci_dev *dev; | ||
| 61 | unsigned long event; | ||
| 62 | int bus; | ||
| 63 | u16 seg; | ||
| 64 | u16 level; | ||
| 65 | struct acpi_dmar_pci_path path[]; | ||
| 66 | } __attribute__((packed)); | ||
| 67 | |||
| 68 | extern struct rw_semaphore dmar_global_lock; | ||
| 51 | extern struct list_head dmar_drhd_units; | 69 | extern struct list_head dmar_drhd_units; |
| 52 | 70 | ||
| 53 | #define for_each_drhd_unit(drhd) \ | 71 | #define for_each_drhd_unit(drhd) \ |
| 54 | list_for_each_entry(drhd, &dmar_drhd_units, list) | 72 | list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) |
| 55 | 73 | ||
| 56 | #define for_each_active_drhd_unit(drhd) \ | 74 | #define for_each_active_drhd_unit(drhd) \ |
| 57 | list_for_each_entry(drhd, &dmar_drhd_units, list) \ | 75 | list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \ |
| 58 | if (drhd->ignored) {} else | 76 | if (drhd->ignored) {} else |
| 59 | 77 | ||
| 60 | #define for_each_active_iommu(i, drhd) \ | 78 | #define for_each_active_iommu(i, drhd) \ |
| 61 | list_for_each_entry(drhd, &dmar_drhd_units, list) \ | 79 | list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \ |
| 62 | if (i=drhd->iommu, drhd->ignored) {} else | 80 | if (i=drhd->iommu, drhd->ignored) {} else |
| 63 | 81 | ||
| 64 | #define for_each_iommu(i, drhd) \ | 82 | #define for_each_iommu(i, drhd) \ |
| 65 | list_for_each_entry(drhd, &dmar_drhd_units, list) \ | 83 | list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \ |
| 66 | if (i=drhd->iommu, 0) {} else | 84 | if (i=drhd->iommu, 0) {} else |
| 67 | 85 | ||
| 86 | static inline bool dmar_rcu_check(void) | ||
| 87 | { | ||
| 88 | return rwsem_is_locked(&dmar_global_lock) || | ||
| 89 | system_state == SYSTEM_BOOTING; | ||
| 90 | } | ||
| 91 | |||
| 92 | #define dmar_rcu_dereference(p) rcu_dereference_check((p), dmar_rcu_check()) | ||
| 93 | |||
| 94 | #define for_each_dev_scope(a, c, p, d) \ | ||
| 95 | for ((p) = 0; ((d) = (p) < (c) ? dmar_rcu_dereference((a)[(p)].dev) : \ | ||
| 96 | NULL, (p) < (c)); (p)++) | ||
| 97 | |||
| 98 | #define for_each_active_dev_scope(a, c, p, d) \ | ||
| 99 | for_each_dev_scope((a), (c), (p), (d)) if (!(d)) { continue; } else | ||
| 100 | |||
| 68 | extern int dmar_table_init(void); | 101 | extern int dmar_table_init(void); |
| 69 | extern int dmar_dev_scope_init(void); | 102 | extern int dmar_dev_scope_init(void); |
| 70 | extern int dmar_parse_dev_scope(void *start, void *end, int *cnt, | 103 | extern int dmar_parse_dev_scope(void *start, void *end, int *cnt, |
| 71 | struct pci_dev ***devices, u16 segment); | 104 | struct dmar_dev_scope **devices, u16 segment); |
| 72 | extern void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt); | 105 | extern void *dmar_alloc_dev_scope(void *start, void *end, int *cnt); |
| 73 | 106 | extern void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt); | |
| 107 | extern int dmar_insert_dev_scope(struct dmar_pci_notify_info *info, | ||
| 108 | void *start, void*end, u16 segment, | ||
| 109 | struct dmar_dev_scope *devices, | ||
| 110 | int devices_cnt); | ||
| 111 | extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, | ||
| 112 | u16 segment, struct dmar_dev_scope *devices, | ||
| 113 | int count); | ||
| 74 | /* Intel IOMMU detection */ | 114 | /* Intel IOMMU detection */ |
| 75 | extern int detect_intel_iommu(void); | 115 | extern int detect_intel_iommu(void); |
| 76 | extern int enable_drhd_fault_handling(void); | 116 | extern int enable_drhd_fault_handling(void); |
| 77 | #else | 117 | #else |
| 118 | struct dmar_pci_notify_info; | ||
| 78 | static inline int detect_intel_iommu(void) | 119 | static inline int detect_intel_iommu(void) |
| 79 | { | 120 | { |
| 80 | return -ENODEV; | 121 | return -ENODEV; |
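Since the device-scope arrays are now RCU-protected, consumers walk them through the new iterators instead of dereferencing struct pci_dev pointers directly. A minimal sketch of a lookup over one such array — the function name is illustrative; an RCU read-side critical section satisfies the check in dmar_rcu_dereference(), and holding dmar_global_lock works as well:

        /* Sketch: check whether 'target' appears in an RCU-protected
         * device-scope array. */
        static bool example_dev_in_scope(struct dmar_dev_scope *devices,
                                         int cnt, struct device *target)
        {
                struct device *dev;
                bool found = false;
                int i;

                rcu_read_lock();
                for_each_active_dev_scope(devices, cnt, i, dev)
                        if (dev == target) {
                                found = true;
                                break;
                        }
                rcu_read_unlock();

                return found;
        }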
| @@ -138,30 +179,9 @@ extern int arch_setup_dmar_msi(unsigned int irq); | |||
| 138 | 179 | ||
| 139 | #ifdef CONFIG_INTEL_IOMMU | 180 | #ifdef CONFIG_INTEL_IOMMU |
| 140 | extern int iommu_detected, no_iommu; | 181 | extern int iommu_detected, no_iommu; |
| 141 | extern struct list_head dmar_rmrr_units; | ||
| 142 | struct dmar_rmrr_unit { | ||
| 143 | struct list_head list; /* list of rmrr units */ | ||
| 144 | struct acpi_dmar_header *hdr; /* ACPI header */ | ||
| 145 | u64 base_address; /* reserved base address*/ | ||
| 146 | u64 end_address; /* reserved end address */ | ||
| 147 | struct pci_dev **devices; /* target devices */ | ||
| 148 | int devices_cnt; /* target device count */ | ||
| 149 | }; | ||
| 150 | |||
| 151 | #define for_each_rmrr_units(rmrr) \ | ||
| 152 | list_for_each_entry(rmrr, &dmar_rmrr_units, list) | ||
| 153 | |||
| 154 | struct dmar_atsr_unit { | ||
| 155 | struct list_head list; /* list of ATSR units */ | ||
| 156 | struct acpi_dmar_header *hdr; /* ACPI header */ | ||
| 157 | struct pci_dev **devices; /* target devices */ | ||
| 158 | int devices_cnt; /* target device count */ | ||
| 159 | u8 include_all:1; /* include all ports */ | ||
| 160 | }; | ||
| 161 | |||
| 162 | int dmar_parse_rmrr_atsr_dev(void); | ||
| 163 | extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header); | 182 | extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header); |
| 164 | extern int dmar_parse_one_atsr(struct acpi_dmar_header *header); | 183 | extern int dmar_parse_one_atsr(struct acpi_dmar_header *header); |
| 184 | extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info); | ||
| 165 | extern int intel_iommu_init(void); | 185 | extern int intel_iommu_init(void); |
| 166 | #else /* !CONFIG_INTEL_IOMMU: */ | 186 | #else /* !CONFIG_INTEL_IOMMU: */ |
| 167 | static inline int intel_iommu_init(void) { return -ENODEV; } | 187 | static inline int intel_iommu_init(void) { return -ENODEV; } |
| @@ -173,7 +193,7 @@ static inline int dmar_parse_one_atsr(struct acpi_dmar_header *header) | |||
| 173 | { | 193 | { |
| 174 | return 0; | 194 | return 0; |
| 175 | } | 195 | } |
| 176 | static inline int dmar_parse_rmrr_atsr_dev(void) | 196 | static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) |
| 177 | { | 197 | { |
| 178 | return 0; | 198 | return 0; |
| 179 | } | 199 | } |
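The RMRR/ATSR unit definitions move out of this header into the Intel IOMMU driver, which now reacts to PCI hotplug through dmar_iommu_notify_scope_dev(). A minimal sketch of how such a callback might apply the new insert/remove helpers to one unit's scope array — struct example_unit and the start/end derivation are illustrative assumptions, not part of this patch:

        /* Sketch: updating one unit's device-scope array on PCI hotplug.
         * Deriving the scope-list start from the sub-table header is
         * simplified; the real offset depends on the sub-table type. */
        #include <linux/device.h>
        #include <linux/dmar.h>

        struct example_unit {
                struct acpi_dmar_header *hdr;   /* owning ACPI sub-table */
                struct dmar_dev_scope *devices; /* RCU-protected scope array */
                int devices_cnt;
        };

        static int example_notify_scope_dev(struct example_unit *u,
                                            struct dmar_pci_notify_info *info)
        {
                void *start = (void *)(u->hdr + 1);
                void *end = (void *)u->hdr + u->hdr->length;

                if (info->event == BUS_NOTIFY_ADD_DEVICE)
                        return dmar_insert_dev_scope(info, start, end,
                                                     info->seg, u->devices,
                                                     u->devices_cnt);
                if (info->event == BUS_NOTIFY_DEL_DEVICE)
                        dmar_remove_dev_scope(info, info->seg,
                                              u->devices, u->devices_cnt);
                return 0;
        }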
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 2c4bed593b32..0a2da5188217 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h | |||
| @@ -319,6 +319,7 @@ struct intel_iommu { | |||
| 319 | int agaw; /* agaw of this iommu */ | 319 | int agaw; /* agaw of this iommu */ |
| 320 | int msagaw; /* max sagaw of this iommu */ | 320 | int msagaw; /* max sagaw of this iommu */ |
| 321 | unsigned int irq; | 321 | unsigned int irq; |
| 322 | u16 segment; /* PCI segment# */ | ||
| 322 | unsigned char name[13]; /* Device Name */ | 323 | unsigned char name[13]; /* Device Name */ |
| 323 | 324 | ||
| 324 | #ifdef CONFIG_INTEL_IOMMU | 325 | #ifdef CONFIG_INTEL_IOMMU |
diff --git a/include/linux/iova.h b/include/linux/iova.h index 76a0759e88ec..3277f4711349 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h | |||
| @@ -47,5 +47,7 @@ void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to); | |||
| 47 | void init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit); | 47 | void init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit); |
| 48 | struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); | 48 | struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); |
| 49 | void put_iova_domain(struct iova_domain *iovad); | 49 | void put_iova_domain(struct iova_domain *iovad); |
| 50 | struct iova *split_and_remove_iova(struct iova_domain *iovad, | ||
| 51 | struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi); | ||
| 50 | 52 | ||
| 51 | #endif | 53 | #endif |
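split_and_remove_iova() lets a caller carve a sub-range out of an already reserved IOVA. A minimal sketch of that pattern — the wrapper name is illustrative; both helpers take the domain lock internally, and the returned iova is detached from the domain's rbtree, so its lifetime becomes the caller's responsibility:

        /* Sketch: carve [pfn_lo, pfn_hi] out of an existing reservation.
         * The returned iova covers the carved-out range and is no longer
         * tracked by the domain. */
        #include <linux/iova.h>

        static struct iova *example_carve_range(struct iova_domain *iovad,
                                                unsigned long pfn_lo,
                                                unsigned long pfn_hi)
        {
                struct iova *iova = find_iova(iovad, pfn_lo);

                if (!iova)
                        return NULL;

                return split_and_remove_iova(iovad, iova, pfn_lo, pfn_hi);
        }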
