author     Joerg Roedel <joerg.roedel@amd.com>  2009-05-04 09:06:20 -0400
committer  Joerg Roedel <joerg.roedel@amd.com>  2009-05-28 12:08:50 -0400
commit     3bd221724adb9d642270df0e78b0105fb61e4a1c (patch)
tree       d5c1cf8dc7e42f3da895b61403779f257511d39d /arch/x86/kernel/amd_iommu_init.c
parent     41fb454ebe6024f5c1e3b3cbc0abc0da762e7b51 (diff)
amd-iommu: introduce for_each_iommu* macros
This patch introduces the for_each_iommu and for_each_iommu_safe macros
to simplify a developer's life when iterating over all AMD IOMMUs in the
system.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
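
The macro definitions themselves are added outside this file, so they do not
appear in the diff below (which is limited to arch/x86/kernel/amd_iommu_init.c).
As a rough sketch only, inferred from the way the hunks replace
list_for_each_entry(..., &amd_iommu_list, list) call sites, the new helpers
presumably wrap the existing list iterators roughly like this:

    /* Sketch, not part of this diff: wrappers over the global
     * amd_iommu_list, inferred from the call sites converted below. */
    #define for_each_iommu(iommu) \
        list_for_each_entry((iommu), &amd_iommu_list, list)

    #define for_each_iommu_safe(iommu, next) \
        list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list)
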
Diffstat (limited to 'arch/x86/kernel/amd_iommu_init.c')
-rw-r--r--  arch/x86/kernel/amd_iommu_init.c | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 8c0be0902dac..675a4b642f70 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -679,7 +679,7 @@ static void __init free_iommu_all(void)
 {
 	struct amd_iommu *iommu, *next;
 
-	list_for_each_entry_safe(iommu, next, &amd_iommu_list, list) {
+	for_each_iommu_safe(iommu, next) {
 		list_del(&iommu->list);
 		free_iommu_one(iommu);
 		kfree(iommu);
@@ -779,7 +779,7 @@ static int __init iommu_setup_msix(struct amd_iommu *iommu)
 	struct msix_entry entries[32]; /* only 32 supported by AMD IOMMU */
 	int nvec = 0, i;
 
-	list_for_each_entry(curr, &amd_iommu_list, list) {
+	for_each_iommu(curr) {
 		if (curr->dev == iommu->dev) {
 			entries[nvec].entry = curr->evt_msi_num;
 			entries[nvec].vector = 0;
@@ -818,7 +818,7 @@ static int __init iommu_setup_msi(struct amd_iommu *iommu)
 	int r;
 	struct amd_iommu *curr;
 
-	list_for_each_entry(curr, &amd_iommu_list, list) {
+	for_each_iommu(curr) {
 		if (curr->dev == iommu->dev)
 			curr->int_enabled = true;
 	}
@@ -971,7 +971,7 @@ static void __init enable_iommus(void)
 {
 	struct amd_iommu *iommu;
 
-	list_for_each_entry(iommu, &amd_iommu_list, list) {
+	for_each_iommu(iommu) {
 		iommu_set_exclusion_range(iommu);
 		iommu_init_msi(iommu);
 		iommu_enable_event_logging(iommu);