path: root/arch/sparc64/kernel/pci_sun4v.c
author	David S. Miller <davem@sunset.davemloft.net>	2007-07-28 01:39:14 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2007-07-30 03:27:34 -0400
commit	ad7ad57c6127042c411353dddb723765964815db (patch)
tree	600484291d9cfa68d54dc9b230f5bd115f495213 /arch/sparc64/kernel/pci_sun4v.c
parent	c7f439b99efbea74c70a5531f92566db5a6731f2 (diff)
[SPARC64]: Fix conflicts in SBUS/PCI/EBUS/ISA DMA handling.
Fully unify all of the DMA ops so that subordinate bus types to the DMA operation providers (such as ebus, isa, of_device) can work transparently.

Basically, we just make sure that for every system device we create, the dev->archdata 'iommu' and 'stc' fields are filled in.

Then we have two platform variants of the DMA ops, one for SUN4U which actually programs the real hardware, and one for SUN4V which makes hypervisor calls.

This also fixes the crashes in parport_pc on sparc64, reported by Meelis Roos.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc64/kernel/pci_sun4v.c')
-rw-r--r--	arch/sparc64/kernel/pci_sun4v.c	168
1 file changed, 91 insertions, 77 deletions
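Note: with this conversion, a driver behind any of the subordinate buses reaches the hypervisor-backed implementation below through the generic DMA API, since dma_ops is pointed at sun4v_dma_ops and each device's archdata carries the IOMMU state. The sketch that follows is illustrative only and not part of the patch; the function and variable names (example_dma_to_device, my_dev, buf, len) are hypothetical, and the single-argument dma_mapping_error() form of this kernel era is assumed.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Illustrative driver snippet: map a buffer the device will read
 * (DMA_TO_DEVICE); on SUN4V this dispatches to dma_4v_map_single()
 * via the dma_ops installed by this patch. */
static int example_dma_to_device(struct device *my_dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(my_dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(handle))	/* single-argument form of this era */
		return -ENOMEM;

	/* ... program the device with 'handle' and run the transfer ... */

	dma_unmap_single(my_dev, handle, len, DMA_TO_DEVICE);
	return 0;
}

On SUN4U the same call sites dispatch to the other platform variant of the ops, which program the real IOMMU hardware; that is the point of the unification described in the commit message.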
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index 639cf06ca372..466f4aa8fc82 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -33,30 +33,30 @@ static unsigned long vpci_minor = 1;
 #define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))
 
 struct iommu_batch {
-	struct pci_dev	*pdev;		/* Device mapping is for.	*/
+	struct device	*dev;		/* Device mapping is for.	*/
 	unsigned long	prot;		/* IOMMU page protections	*/
 	unsigned long	entry;		/* Index into IOTSB.		*/
 	u64		*pglist;	/* List of physical pages	*/
 	unsigned long	npages;		/* Number of pages in list.	*/
 };
 
-static DEFINE_PER_CPU(struct iommu_batch, pci_iommu_batch);
+static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
 
 /* Interrupts must be disabled.  */
-static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long prot, unsigned long entry)
+static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
 {
-	struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
+	struct iommu_batch *p = &__get_cpu_var(iommu_batch);
 
-	p->pdev		= pdev;
+	p->dev		= dev;
 	p->prot		= prot;
 	p->entry	= entry;
 	p->npages	= 0;
 }
 
 /* Interrupts must be disabled.  */
-static long pci_iommu_batch_flush(struct iommu_batch *p)
+static long iommu_batch_flush(struct iommu_batch *p)
 {
-	struct pci_pbm_info *pbm = p->pdev->dev.archdata.host_controller;
+	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
 	unsigned long devhandle = pbm->devhandle;
 	unsigned long prot = p->prot;
 	unsigned long entry = p->entry;
@@ -70,7 +70,7 @@ static long pci_iommu_batch_flush(struct iommu_batch *p)
 				  npages, prot, __pa(pglist));
 	if (unlikely(num < 0)) {
 		if (printk_ratelimit())
-			printk("pci_iommu_batch_flush: IOMMU map of "
+			printk("iommu_batch_flush: IOMMU map of "
 			       "[%08lx:%08lx:%lx:%lx:%lx] failed with "
 			       "status %ld\n",
 			       devhandle, HV_PCI_TSBID(0, entry),
@@ -90,30 +90,30 @@ static long pci_iommu_batch_flush(struct iommu_batch *p)
 }
 
 /* Interrupts must be disabled.  */
-static inline long pci_iommu_batch_add(u64 phys_page)
+static inline long iommu_batch_add(u64 phys_page)
 {
-	struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
+	struct iommu_batch *p = &__get_cpu_var(iommu_batch);
 
 	BUG_ON(p->npages >= PGLIST_NENTS);
 
 	p->pglist[p->npages++] = phys_page;
 	if (p->npages == PGLIST_NENTS)
-		return pci_iommu_batch_flush(p);
+		return iommu_batch_flush(p);
 
 	return 0;
 }
 
 /* Interrupts must be disabled.  */
-static inline long pci_iommu_batch_end(void)
+static inline long iommu_batch_end(void)
 {
-	struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
+	struct iommu_batch *p = &__get_cpu_var(iommu_batch);
 
 	BUG_ON(p->npages >= PGLIST_NENTS);
 
-	return pci_iommu_batch_flush(p);
+	return iommu_batch_flush(p);
 }
 
-static long pci_arena_alloc(struct iommu_arena *arena, unsigned long npages)
+static long arena_alloc(struct iommu_arena *arena, unsigned long npages)
 {
 	unsigned long n, i, start, end, limit;
 	int pass;
@@ -152,7 +152,8 @@ again:
 	return n;
 }
 
-static void pci_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
+static void arena_free(struct iommu_arena *arena, unsigned long base,
+		       unsigned long npages)
 {
 	unsigned long i;
 
@@ -160,7 +161,8 @@ static void pci_arena_free(struct iommu_arena *arena, unsigned long base, unsign
 		__clear_bit(i, arena->map);
 }
 
-static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp)
+static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
+				   dma_addr_t *dma_addrp, gfp_t gfp)
 {
 	struct iommu *iommu;
 	unsigned long flags, order, first_page, npages, n;
@@ -180,10 +182,10 @@ static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr
 
 	memset((char *)first_page, 0, PAGE_SIZE << order);
 
-	iommu = pdev->dev.archdata.iommu;
+	iommu = dev->archdata.iommu;
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	entry = pci_arena_alloc(&iommu->arena, npages);
+	entry = arena_alloc(&iommu->arena, npages);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	if (unlikely(entry < 0L))
@@ -196,18 +198,18 @@ static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr
 
 	local_irq_save(flags);
 
-	pci_iommu_batch_start(pdev,
-			      (HV_PCI_MAP_ATTR_READ |
-			       HV_PCI_MAP_ATTR_WRITE),
-			      entry);
+	iommu_batch_start(dev,
+			  (HV_PCI_MAP_ATTR_READ |
+			   HV_PCI_MAP_ATTR_WRITE),
+			  entry);
 
 	for (n = 0; n < npages; n++) {
-		long err = pci_iommu_batch_add(first_page + (n * PAGE_SIZE));
+		long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
 		if (unlikely(err < 0L))
 			goto iommu_map_fail;
 	}
 
-	if (unlikely(pci_iommu_batch_end() < 0L))
+	if (unlikely(iommu_batch_end() < 0L))
 		goto iommu_map_fail;
 
 	local_irq_restore(flags);
@@ -217,7 +219,7 @@ static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr
 iommu_map_fail:
 	/* Interrupts are disabled.  */
 	spin_lock(&iommu->lock);
-	pci_arena_free(&iommu->arena, entry, npages);
+	arena_free(&iommu->arena, entry, npages);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
 arena_alloc_fail:
@@ -225,7 +227,8 @@ arena_alloc_fail:
 	return NULL;
 }
 
-static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
+static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
+				 dma_addr_t dvma)
 {
 	struct pci_pbm_info *pbm;
 	struct iommu *iommu;
@@ -233,14 +236,14 @@ static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu,
 	u32 devhandle;
 
 	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
-	iommu = pdev->dev.archdata.iommu;
-	pbm = pdev->dev.archdata.host_controller;
+	iommu = dev->archdata.iommu;
+	pbm = dev->archdata.host_controller;
 	devhandle = pbm->devhandle;
 	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
 
 	spin_lock_irqsave(&iommu->lock, flags);
 
-	pci_arena_free(&iommu->arena, entry, npages);
+	arena_free(&iommu->arena, entry, npages);
 
 	do {
 		unsigned long num;
@@ -258,7 +261,8 @@ static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu,
 	free_pages((unsigned long)cpu, order);
 }
 
-static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
+static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
+				    enum dma_data_direction direction)
 {
 	struct iommu *iommu;
 	unsigned long flags, npages, oaddr;
@@ -267,9 +271,9 @@ static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz,
 	unsigned long prot;
 	long entry;
 
-	iommu = pdev->dev.archdata.iommu;
+	iommu = dev->archdata.iommu;
 
-	if (unlikely(direction == PCI_DMA_NONE))
+	if (unlikely(direction == DMA_NONE))
 		goto bad;
 
 	oaddr = (unsigned long)ptr;
@@ -277,7 +281,7 @@ static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz,
 	npages >>= IO_PAGE_SHIFT;
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	entry = pci_arena_alloc(&iommu->arena, npages);
+	entry = arena_alloc(&iommu->arena, npages);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	if (unlikely(entry < 0L))
@@ -288,19 +292,19 @@ static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz,
 	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
 	base_paddr = __pa(oaddr & IO_PAGE_MASK);
 	prot = HV_PCI_MAP_ATTR_READ;
-	if (direction != PCI_DMA_TODEVICE)
+	if (direction != DMA_TO_DEVICE)
 		prot |= HV_PCI_MAP_ATTR_WRITE;
 
 	local_irq_save(flags);
 
-	pci_iommu_batch_start(pdev, prot, entry);
+	iommu_batch_start(dev, prot, entry);
 
 	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
-		long err = pci_iommu_batch_add(base_paddr);
+		long err = iommu_batch_add(base_paddr);
 		if (unlikely(err < 0L))
 			goto iommu_map_fail;
 	}
-	if (unlikely(pci_iommu_batch_end() < 0L))
+	if (unlikely(iommu_batch_end() < 0L))
 		goto iommu_map_fail;
 
 	local_irq_restore(flags);
@@ -310,18 +314,19 @@ static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz,
 bad:
 	if (printk_ratelimit())
 		WARN_ON(1);
-	return PCI_DMA_ERROR_CODE;
+	return DMA_ERROR_CODE;
 
 iommu_map_fail:
 	/* Interrupts are disabled.  */
 	spin_lock(&iommu->lock);
-	pci_arena_free(&iommu->arena, entry, npages);
+	arena_free(&iommu->arena, entry, npages);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
-	return PCI_DMA_ERROR_CODE;
+	return DMA_ERROR_CODE;
 }
 
-static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
+static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
+				size_t sz, enum dma_data_direction direction)
 {
 	struct pci_pbm_info *pbm;
 	struct iommu *iommu;
@@ -329,14 +334,14 @@ static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_
 	long entry;
 	u32 devhandle;
 
-	if (unlikely(direction == PCI_DMA_NONE)) {
+	if (unlikely(direction == DMA_NONE)) {
 		if (printk_ratelimit())
 			WARN_ON(1);
 		return;
 	}
 
-	iommu = pdev->dev.archdata.iommu;
-	pbm = pdev->dev.archdata.host_controller;
+	iommu = dev->archdata.iommu;
+	pbm = dev->archdata.host_controller;
 	devhandle = pbm->devhandle;
 
 	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
@@ -346,7 +351,7 @@ static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_
 	spin_lock_irqsave(&iommu->lock, flags);
 
 	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
-	pci_arena_free(&iommu->arena, entry, npages);
+	arena_free(&iommu->arena, entry, npages);
 
 	do {
 		unsigned long num;
@@ -363,7 +368,7 @@ static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_
 #define SG_ENT_PHYS_ADDRESS(SG)	\
 	(__pa(page_address((SG)->page)) + (SG)->offset)
 
-static inline long fill_sg(long entry, struct pci_dev *pdev,
+static inline long fill_sg(long entry, struct device *dev,
 			   struct scatterlist *sg,
 			   int nused, int nelems, unsigned long prot)
 {
@@ -374,7 +379,7 @@ static inline long fill_sg(long entry, struct pci_dev *pdev,
 
 	local_irq_save(flags);
 
-	pci_iommu_batch_start(pdev, prot, entry);
+	iommu_batch_start(dev, prot, entry);
 
 	for (i = 0; i < nused; i++) {
 		unsigned long pteval = ~0UL;
@@ -415,7 +420,7 @@ static inline long fill_sg(long entry, struct pci_dev *pdev,
 			while (len > 0) {
 				long err;
 
-				err = pci_iommu_batch_add(pteval);
+				err = iommu_batch_add(pteval);
 				if (unlikely(err < 0L))
 					goto iommu_map_failed;
 
@@ -446,7 +451,7 @@ static inline long fill_sg(long entry, struct pci_dev *pdev,
 		dma_sg++;
 	}
 
-	if (unlikely(pci_iommu_batch_end() < 0L))
+	if (unlikely(iommu_batch_end() < 0L))
 		goto iommu_map_failed;
 
 	local_irq_restore(flags);
@@ -457,7 +462,8 @@ iommu_map_failed:
 	return -1L;
 }
 
-static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
+static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
+			 int nelems, enum dma_data_direction direction)
 {
 	struct iommu *iommu;
 	unsigned long flags, npages, prot;
@@ -469,18 +475,19 @@ static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int n
 	/* Fast path single entry scatterlists.  */
 	if (nelems == 1) {
 		sglist->dma_address =
-			pci_4v_map_single(pdev,
-					  (page_address(sglist->page) + sglist->offset),
+			dma_4v_map_single(dev,
+					  (page_address(sglist->page) +
+					   sglist->offset),
 					  sglist->length, direction);
-		if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
+		if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
 			return 0;
 		sglist->dma_length = sglist->length;
 		return 1;
 	}
 
-	iommu = pdev->dev.archdata.iommu;
+	iommu = dev->archdata.iommu;
 
-	if (unlikely(direction == PCI_DMA_NONE))
+	if (unlikely(direction == DMA_NONE))
 		goto bad;
 
 	/* Step 1: Prepare scatter list. */
@@ -488,7 +495,7 @@ static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int n
 
 	/* Step 2: Allocate a cluster and context, if necessary. */
 	spin_lock_irqsave(&iommu->lock, flags);
-	entry = pci_arena_alloc(&iommu->arena, npages);
+	entry = arena_alloc(&iommu->arena, npages);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	if (unlikely(entry < 0L))
@@ -510,10 +517,10 @@ static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int n
 
 	/* Step 4: Create the mappings. */
 	prot = HV_PCI_MAP_ATTR_READ;
-	if (direction != PCI_DMA_TODEVICE)
+	if (direction != DMA_TO_DEVICE)
 		prot |= HV_PCI_MAP_ATTR_WRITE;
 
-	err = fill_sg(entry, pdev, sglist, used, nelems, prot);
+	err = fill_sg(entry, dev, sglist, used, nelems, prot);
 	if (unlikely(err < 0L))
 		goto iommu_map_failed;
 
@@ -526,13 +533,14 @@ bad:
 
 iommu_map_failed:
 	spin_lock_irqsave(&iommu->lock, flags);
-	pci_arena_free(&iommu->arena, entry, npages);
+	arena_free(&iommu->arena, entry, npages);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	return 0;
 }
 
-static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
+static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
+			    int nelems, enum dma_data_direction direction)
 {
 	struct pci_pbm_info *pbm;
 	struct iommu *iommu;
@@ -540,13 +548,13 @@ static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in
 	long entry;
 	u32 devhandle, bus_addr;
 
-	if (unlikely(direction == PCI_DMA_NONE)) {
+	if (unlikely(direction == DMA_NONE)) {
 		if (printk_ratelimit())
 			WARN_ON(1);
 	}
 
-	iommu = pdev->dev.archdata.iommu;
-	pbm = pdev->dev.archdata.host_controller;
+	iommu = dev->archdata.iommu;
+	pbm = dev->archdata.host_controller;
 	devhandle = pbm->devhandle;
 
 	bus_addr = sglist->dma_address & IO_PAGE_MASK;
@@ -562,7 +570,7 @@ static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in
 
 	spin_lock_irqsave(&iommu->lock, flags);
 
-	pci_arena_free(&iommu->arena, entry, npages);
+	arena_free(&iommu->arena, entry, npages);
 
 	do {
 		unsigned long num;
@@ -576,25 +584,29 @@ static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
-static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
+static void dma_4v_sync_single_for_cpu(struct device *dev,
+				       dma_addr_t bus_addr, size_t sz,
+				       enum dma_data_direction direction)
 {
 	/* Nothing to do... */
 }
 
-static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
+static void dma_4v_sync_sg_for_cpu(struct device *dev,
+				   struct scatterlist *sglist, int nelems,
+				   enum dma_data_direction direction)
 {
 	/* Nothing to do... */
 }
 
-const struct pci_iommu_ops pci_sun4v_iommu_ops = {
-	.alloc_consistent		= pci_4v_alloc_consistent,
-	.free_consistent		= pci_4v_free_consistent,
-	.map_single			= pci_4v_map_single,
-	.unmap_single			= pci_4v_unmap_single,
-	.map_sg				= pci_4v_map_sg,
-	.unmap_sg			= pci_4v_unmap_sg,
-	.dma_sync_single_for_cpu	= pci_4v_dma_sync_single_for_cpu,
-	.dma_sync_sg_for_cpu		= pci_4v_dma_sync_sg_for_cpu,
+const struct dma_ops sun4v_dma_ops = {
+	.alloc_coherent			= dma_4v_alloc_coherent,
+	.free_coherent			= dma_4v_free_coherent,
+	.map_single			= dma_4v_map_single,
+	.unmap_single			= dma_4v_unmap_single,
+	.map_sg				= dma_4v_map_sg,
+	.unmap_sg			= dma_4v_unmap_sg,
+	.sync_single_for_cpu		= dma_4v_sync_single_for_cpu,
+	.sync_sg_for_cpu		= dma_4v_sync_sg_for_cpu,
 };
 
 static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm)
@@ -1186,6 +1198,8 @@ void __init sun4v_pci_init(struct device_node *dp, char *model_name)
 		}
 		printk("SUN4V_PCI: Registered hvapi major[%lu] minor[%lu]\n",
 		       vpci_major, vpci_minor);
+
+		dma_ops = &sun4v_dma_ops;
 	}
 
 	prop = of_find_property(dp, "reg", NULL);
@@ -1206,7 +1220,7 @@ void __init sun4v_pci_init(struct device_node *dp, char *model_name)
 		if (!page)
 			goto fatal_memory_error;
 
-		per_cpu(pci_iommu_batch, i).pglist = (u64 *) page;
+		per_cpu(iommu_batch, i).pglist = (u64 *) page;
 	}
 
 	p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);