author	Nishanth Aravamudan <nacc@linux.vnet.ibm.com>	2013-03-07 07:33:03 -0500
committer	Michael Ellerman <michael@ellerman.id.au>	2013-04-17 23:04:00 -0400
commit	61435690a9c781b4c9e617aa86bd20c146c9a998 (patch)
tree	40325e72e48f108aabb05c24fafc9e76e3145b98 /arch/powerpc/platforms/pseries/iommu.c
parent	fb1b55d654a7038ca6337fbf55839a308c9bc1a7 (diff)
powerpc/pseries: close DDW race between functions of adapter
Given a PCI device with multiple functions in a DDW capable slot, the
following situation can be encountered: When the first function sets a
64-bit DMA mask, enable_ddw() will be called and we can fail to properly
configure DDW (the most common reason being that the new DMA window's
size is not large enough to map all of an LPAR's memory). With the
recent changes to DDW, we remove the base window in order to determine
if the new window is of sufficient size to cover an LPAR's memory. We
correctly replace the base window if we find that not to be the case.
However, once we go through and re-configure 32-bit DMA via the IOMMU,
the next function of the adapter will go through the same process. And
since DDW is a characteristic of the slot itself, we are most likely
going to fail again. But to determine that we are going to fail for the
second function, we again remove the base window, which is now in use
by the first function/driver and might already have I/O in flight.

To close this race window, keep a list of all the struct device_nodes
that have failed to configure DDW. If the current device_node is in
that list, just fail out immediately and fall back to 32-bit DMA
without doing any DDW manipulation.

Signed-off-by: Nishanth Aravamudan <nacc@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
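For illustration, here is a minimal user-space C sketch of the pattern the fix relies on: a mutex-protected list of device nodes that already failed DDW setup, consulted before touching the DMA window again. Everything in it (the failed_node struct, try_enable_ddw(), configure_ddw(), the example node name) is a hypothetical stand-in for exposition, not the kernel's API; the actual change is in the diff below.

/*
 * Minimal user-space sketch (not the kernel code) of the approach:
 * remember device nodes that already failed DDW setup in a list guarded
 * by a mutex, and bail out early for any later function of the same
 * device instead of removing the in-use base window again.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct failed_node {
	char *name;                     /* stands in for pdn->full_name */
	struct failed_node *next;
};

static struct failed_node *failed_list;
static pthread_mutex_t init_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Pretend DDW configuration always fails, as in the problem scenario. */
static bool configure_ddw(const char *name)
{
	printf("probing DDW for %s (this touches the base window)\n", name);
	return false;
}

static bool try_enable_ddw(const char *name)
{
	struct failed_node *f;
	bool ok = false;

	pthread_mutex_lock(&init_mutex);

	/* A previous function of the same device already failed: skip. */
	for (f = failed_list; f; f = f->next) {
		if (!strcmp(f->name, name)) {
			printf("skipping %s, DDW already failed here\n", name);
			goto out_unlock;
		}
	}

	ok = configure_ddw(name);
	if (!ok) {
		/* Record the failure so later functions fail fast. */
		f = calloc(1, sizeof(*f));
		if (f) {
			f->name = strdup(name);
			f->next = failed_list;
			failed_list = f;
		}
	}

out_unlock:
	pthread_mutex_unlock(&init_mutex);
	return ok;
}

int main(void)
{
	/* Two functions of one adapter share the same slot/device node. */
	try_enable_ddw("pci@800/ethernet");  /* first function: probes, fails */
	try_enable_ddw("pci@800/ethernet");  /* second function: skips the probe */
	return 0;
}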
Diffstat (limited to 'arch/powerpc/platforms/pseries/iommu.c')
-rw-r--r--	arch/powerpc/platforms/pseries/iommu.c	26
1 file changed, 26 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 1b2a174e7c59..86ae364900d6 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -924,6 +924,13 @@ static void restore_default_window(struct pci_dev *dev,
 	__restore_default_window(pci_dev_to_eeh_dev(dev), ddw_restore_token);
 }
 
+struct failed_ddw_pdn {
+	struct device_node *pdn;
+	struct list_head list;
+};
+
+static LIST_HEAD(failed_ddw_pdn_list);
+
 /*
  * If the PE supports dynamic dma windows, and there is space for a table
  * that can map all pages in a linear offset, then setup such a table,
@@ -951,6 +958,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
 	struct dynamic_dma_window_prop *ddwprop;
 	const void *dma_window = NULL;
 	unsigned long liobn, offset, size;
+	struct failed_ddw_pdn *fpdn;
 
 	mutex_lock(&direct_window_init_mutex);
 
@@ -959,6 +967,18 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
 		goto out_unlock;
 
 	/*
+	 * If we already went through this for a previous function of
+	 * the same device and failed, we don't want to muck with the
+	 * DMA window again, as it will race with in-flight operations
+	 * and can lead to EEHs. The above mutex protects access to the
+	 * list.
+	 */
+	list_for_each_entry(fpdn, &failed_ddw_pdn_list, list) {
+		if (!strcmp(fpdn->pdn->full_name, pdn->full_name))
+			goto out_unlock;
+	}
+
+	/*
 	 * the ibm,ddw-applicable property holds the tokens for:
 	 * ibm,query-pe-dma-window
 	 * ibm,create-pe-dma-window
@@ -1114,6 +1134,12 @@ out_restore_window:
 	if (ddw_restore_token)
 		restore_default_window(dev, ddw_restore_token);
 
+	fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL);
+	if (!fpdn)
+		goto out_unlock;
+	fpdn->pdn = pdn;
+	list_add(&fpdn->list, &failed_ddw_pdn_list);
+
 out_unlock:
 	mutex_unlock(&direct_window_init_mutex);
 	return dma_addr;