author		Sebastian Ott <sebott@linux.vnet.ibm.com>	2015-10-26 06:19:13 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2015-11-09 03:10:49 -0500
commit		66728eeea6d80060e4b9df55c7845c838ff2799f (patch)
tree		a04d86f83e773731272cf299d85069f4a66bc732
parent		4d5a6b72959601d6c12e7e1ef3aa4132f0a62523 (diff)

s390/pci_dma: handle dma table failures

We use lazy allocation for translation table entries but don't handle
allocation (and other) failures during translation table updates. Handle
these failures and undo translation table updates when it's meaningful.

Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Reviewed-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
-rw-r--r--	arch/s390/include/asm/pci_dma.h	4
-rw-r--r--	arch/s390/pci/pci_dma.c		38
-rw-r--r--	drivers/iommu/s390-iommu.c	23
3 files changed, 48 insertions(+), 17 deletions(-)
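
The patch turns every table update into a map-then-undo pattern: each page
first walks (and lazily allocates) the translation tables down to its entry,
and if any walk fails, the entries already written are rolled back. Below is
a minimal, self-contained userspace sketch of that pattern; walk_entry() and
update_entry() are hypothetical stand-ins for dma_walk_cpu_trans() and
dma_update_cpu_trans(), and a flat array stands in for the multi-level table:

#include <stddef.h>
#include <stdlib.h>

#define NPAGES 64

static unsigned long table[NPAGES];

/* Hypothetical stand-in for dma_walk_cpu_trans(): returns the entry for a
 * page index, or NULL when a (lazily allocated) table level is unavailable. */
static unsigned long *walk_entry(size_t idx)
{
	return idx < NPAGES ? &table[idx] : NULL;
}

/* Hypothetical stand-in for dma_update_cpu_trans(): a pure entry write
 * that cannot fail. */
static void update_entry(unsigned long *entry, unsigned long val)
{
	*entry = val;
}

static int map_pages(size_t start, size_t nr_pages, unsigned long val)
{
	unsigned long *entry;
	size_t i;
	int rc = 0;

	for (i = 0; i < nr_pages; i++) {
		entry = walk_entry(start + i);
		if (!entry) {
			rc = -1;	/* -ENOMEM in the kernel code */
			goto undo;
		}
		update_entry(entry, val);
	}
	return 0;
undo:
	/* i counts the entries already written, so this rolls back exactly
	 * those, stopping early if a walk fails again. */
	while (i-- > 0) {
		entry = walk_entry(start + i);
		if (!entry)
			break;
		update_entry(entry, 0);	/* ZPCI_PTE_INVALID equivalent */
	}
	return rc;
}

int main(void)
{
	/* This mapping runs past NPAGES, fails partway through, and rolls
	 * back the entries written before the failure. */
	int rc = map_pages(NPAGES - 4, 8, 0x1UL);

	return rc ? EXIT_FAILURE : EXIT_SUCCESS;
}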
diff --git a/arch/s390/include/asm/pci_dma.h b/arch/s390/include/asm/pci_dma.h
index 7a7abf1a5537..1aac41e83ea1 100644
--- a/arch/s390/include/asm/pci_dma.h
+++ b/arch/s390/include/asm/pci_dma.h
@@ -195,5 +195,7 @@ void zpci_dma_exit_device(struct zpci_dev *);
 void dma_free_seg_table(unsigned long);
 unsigned long *dma_alloc_cpu_table(void);
 void dma_cleanup_tables(unsigned long *);
-void dma_update_cpu_trans(unsigned long *, void *, dma_addr_t, int);
+unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr);
+void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags);
+
 #endif
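
The header change splits the interface: dma_walk_cpu_trans() is now exported
and returns NULL when a lazily allocated table level cannot be obtained, while
dma_update_cpu_trans() shrinks to a pure entry update that cannot fail.
Callers, including the IOMMU driver below, can thus detect allocation failures
themselves instead of relying on the old WARN_ON_ONCE-and-return behavior.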
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index e4a3a31fd59a..f137949c9abf 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -95,7 +95,7 @@ static unsigned long *dma_get_page_table_origin(unsigned long *entry)
 	return pto;
 }
 
-static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
+unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
 {
 	unsigned long *sto, *pto;
 	unsigned int rtx, sx, px;
@@ -114,17 +114,8 @@ static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr
 	return &pto[px];
 }
 
-void dma_update_cpu_trans(unsigned long *dma_table, void *page_addr,
-			  dma_addr_t dma_addr, int flags)
+void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags)
 {
-	unsigned long *entry;
-
-	entry = dma_walk_cpu_trans(dma_table, dma_addr);
-	if (!entry) {
-		WARN_ON_ONCE(1);
-		return;
-	}
-
 	if (flags & ZPCI_PTE_INVALID) {
 		invalidate_pt_entry(entry);
 	} else {
@@ -145,18 +136,25 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
 	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
 	dma_addr_t start_dma_addr = dma_addr;
 	unsigned long irq_flags;
+	unsigned long *entry;
 	int i, rc = 0;
 
 	if (!nr_pages)
 		return -EINVAL;
 
 	spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
-	if (!zdev->dma_table)
+	if (!zdev->dma_table) {
+		rc = -EINVAL;
 		goto no_refresh;
+	}
 
 	for (i = 0; i < nr_pages; i++) {
-		dma_update_cpu_trans(zdev->dma_table, page_addr, dma_addr,
-				     flags);
+		entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
+		if (!entry) {
+			rc = -ENOMEM;
+			goto undo_cpu_trans;
+		}
+		dma_update_cpu_trans(entry, page_addr, flags);
 		page_addr += PAGE_SIZE;
 		dma_addr += PAGE_SIZE;
 	}
@@ -175,6 +173,18 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
 
 	rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
 				nr_pages * PAGE_SIZE);
+undo_cpu_trans:
+	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
+		flags = ZPCI_PTE_INVALID;
+		while (i-- > 0) {
+			page_addr -= PAGE_SIZE;
+			dma_addr -= PAGE_SIZE;
+			entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
+			if (!entry)
+				break;
+			dma_update_cpu_trans(entry, page_addr, flags);
+		}
+	}
 
 no_refresh:
 	spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
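
On failure (a NULL entry from dma_walk_cpu_trans() or a bad rc from
zpci_refresh_trans()), i holds the number of pages already updated, so the
while (i-- > 0) loop invalidates exactly those entries. The ZPCI_PTE_VALID
check restricts the rollback to map operations, so a failed unmap is not
"undone" by re-validating entries. The same pattern is mirrored in the IOMMU
driver below.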
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index cbe198cb3699..471ee36b9c6e 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -216,6 +216,7 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain,
 	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
 	dma_addr_t start_dma_addr = dma_addr;
 	unsigned long irq_flags, nr_pages, i;
+	unsigned long *entry;
 	int rc = 0;
 
 	if (dma_addr < s390_domain->domain.geometry.aperture_start ||
@@ -228,8 +229,12 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain,
 
 	spin_lock_irqsave(&s390_domain->dma_table_lock, irq_flags);
 	for (i = 0; i < nr_pages; i++) {
-		dma_update_cpu_trans(s390_domain->dma_table, page_addr,
-				     dma_addr, flags);
+		entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr);
+		if (!entry) {
+			rc = -ENOMEM;
+			goto undo_cpu_trans;
+		}
+		dma_update_cpu_trans(entry, page_addr, flags);
 		page_addr += PAGE_SIZE;
 		dma_addr += PAGE_SIZE;
 	}
@@ -242,6 +247,20 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain,
 			break;
 	}
 	spin_unlock(&s390_domain->list_lock);
+
+undo_cpu_trans:
+	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
+		flags = ZPCI_PTE_INVALID;
+		while (i-- > 0) {
+			page_addr -= PAGE_SIZE;
+			dma_addr -= PAGE_SIZE;
+			entry = dma_walk_cpu_trans(s390_domain->dma_table,
+						   dma_addr);
+			if (!entry)
+				break;
+			dma_update_cpu_trans(entry, page_addr, flags);
+		}
+	}
 	spin_unlock_irqrestore(&s390_domain->dma_table_lock, irq_flags);
 
 	return rc;