author    Joerg Roedel <joerg.roedel@amd.com>  2009-05-04 09:06:20 -0400
committer Joerg Roedel <joerg.roedel@amd.com>  2009-05-28 12:08:50 -0400
commit    3bd221724adb9d642270df0e78b0105fb61e4a1c
tree      d5c1cf8dc7e42f3da895b61403779f257511d39d
parent    41fb454ebe6024f5c1e3b3cbc0abc0da762e7b51
amd-iommu: introduce for_each_iommu* macros
This patch introduces the for_each_iommu and for_each_iommu_safe macros
to simplify the developer's life when having to iterate over all AMD
IOMMUs in the system.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
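To illustrate the intended usage, here is a minimal sketch drawn from the
hunks below (not a complete function; struct amd_iommu, amd_iommu_list,
iommu_poll_events and free_iommu_one are the existing kernel definitions
the patch builds on):

	struct amd_iommu *iommu, *next;

	/* Plain walk over every IOMMU found in the system. */
	for_each_iommu(iommu)
		iommu_poll_events(iommu);

	/*
	 * Safe walk: the loop body may unlink and free the current
	 * entry, as free_iommu_all() does.
	 */
	for_each_iommu_safe(iommu, next) {
		list_del(&iommu->list);
		free_iommu_one(iommu);
		kfree(iommu);
	}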
Diffstat (limited to 'arch')
 arch/x86/include/asm/amd_iommu_types.h | 8 ++++++++
 arch/x86/kernel/amd_iommu.c            | 8 ++++----
 arch/x86/kernel/amd_iommu_init.c       | 8 ++++----
 3 files changed, 16 insertions(+), 8 deletions(-)
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 95c8cd9d22b5..cf5ef172cfca 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -196,6 +196,14 @@
 	   domain for an IOMMU */
 
 /*
+ * Make iterating over all IOMMUs easier
+ */
+#define for_each_iommu(iommu) \
+	list_for_each_entry((iommu), &amd_iommu_list, list)
+#define for_each_iommu_safe(iommu, next) \
+	list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list)
+
+/*
  * This structure contains generic data for IOMMU protection domains
  * independent of their use.
  */
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index a97db99dad52..d9e9dc141a1e 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -213,7 +213,7 @@ irqreturn_t amd_iommu_int_handler(int irq, void *data)
 {
 	struct amd_iommu *iommu;
 
-	list_for_each_entry(iommu, &amd_iommu_list, list)
+	for_each_iommu(iommu)
 		iommu_poll_events(iommu);
 
 	return IRQ_HANDLED;
@@ -440,7 +440,7 @@ static void iommu_flush_domain(u16 domid)
 	__iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
 				      domid, 1, 1);
 
-	list_for_each_entry(iommu, &amd_iommu_list, list) {
+	for_each_iommu(iommu) {
 		spin_lock_irqsave(&iommu->lock, flags);
 		__iommu_queue_command(iommu, &cmd);
 		__iommu_completion_wait(iommu);
@@ -1672,7 +1672,7 @@ int __init amd_iommu_init_dma_ops(void)
 	 * found in the system. Devices not assigned to any other
 	 * protection domain will be assigned to the default one.
 	 */
-	list_for_each_entry(iommu, &amd_iommu_list, list) {
+	for_each_iommu(iommu) {
 		iommu->default_dom = dma_ops_domain_alloc(iommu, order);
 		if (iommu->default_dom == NULL)
 			return -ENOMEM;
@@ -1710,7 +1710,7 @@ int __init amd_iommu_init_dma_ops(void)
 
 free_domains:
 
-	list_for_each_entry(iommu, &amd_iommu_list, list) {
+	for_each_iommu(iommu) {
 		if (iommu->default_dom)
 			dma_ops_domain_free(iommu->default_dom);
 	}
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 8c0be0902dac..675a4b642f70 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -679,7 +679,7 @@ static void __init free_iommu_all(void)
 {
 	struct amd_iommu *iommu, *next;
 
-	list_for_each_entry_safe(iommu, next, &amd_iommu_list, list) {
+	for_each_iommu_safe(iommu, next) {
 		list_del(&iommu->list);
 		free_iommu_one(iommu);
 		kfree(iommu);
@@ -779,7 +779,7 @@ static int __init iommu_setup_msix(struct amd_iommu *iommu)
 	struct msix_entry entries[32]; /* only 32 supported by AMD IOMMU */
 	int nvec = 0, i;
 
-	list_for_each_entry(curr, &amd_iommu_list, list) {
+	for_each_iommu(curr) {
 		if (curr->dev == iommu->dev) {
 			entries[nvec].entry = curr->evt_msi_num;
 			entries[nvec].vector = 0;
@@ -818,7 +818,7 @@ static int __init iommu_setup_msi(struct amd_iommu *iommu)
 	int r;
 	struct amd_iommu *curr;
 
-	list_for_each_entry(curr, &amd_iommu_list, list) {
+	for_each_iommu(curr) {
 		if (curr->dev == iommu->dev)
 			curr->int_enabled = true;
 	}
@@ -971,7 +971,7 @@ static void __init enable_iommus(void)
 {
 	struct amd_iommu *iommu;
 
-	list_for_each_entry(iommu, &amd_iommu_list, list) {
+	for_each_iommu(iommu) {
 		iommu_set_exclusion_range(iommu);
 		iommu_init_msi(iommu);
 		iommu_enable_event_logging(iommu);