author     David S. Miller <davem@sunset.davemloft.net>   2006-02-20 01:21:32 -0500
committer  David S. Miller <davem@sunset.davemloft.net>   2006-03-20 04:13:41 -0500
commit     6a32fd4d0d42258004631dc0ac90665382a2e5dc
tree       f2a73240a7cfadfb3097121279b6e5a7651812e5 /arch
parent     04d74758eb7dce6dfb7d2101315827c267ffefc4
[SPARC64]: Remove PGLIST_NENTS PCI IOMMU mapping limitation on SUN4V.
Use a batching queue system for IOMMU mapping setup, with a page-sized batch.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch')
-rw-r--r--  arch/sparc64/kernel/pci_sun4v.c      233
-rw-r--r--  arch/sparc64/kernel/pci_sun4v.h       10
-rw-r--r--  arch/sparc64/kernel/pci_sun4v_asm.S   11
3 files changed, 171 insertions, 83 deletions
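
In outline, the batching flow this patch introduces looks like the sketch below. It is illustrative only, assembled from the helpers and call sites in the diff that follows; variable names such as base_paddr, npages and flags are borrowed from the map_single path, and the iommu_map_fail label stands in for the caller's unwind code.

        /* Sketch of the new batching flow (illustrative, not verbatim kernel code).
         * Interrupts are disabled, the per-cpu batch is seeded with the IOMMU
         * protection bits and the starting IOTSB entry, and physical pages are
         * fed in one at a time.  pci_iommu_batch_add() flushes to the
         * pci_iommu_map hypervisor call whenever the page-sized pglist fills
         * up (PGLIST_NENTS entries); pci_iommu_batch_end() flushes whatever
         * is left over.
         */
        local_irq_save(flags);

        pci_iommu_batch_start(pdev, prot, entry);

        for (n = 0; n < npages; n++) {
                long err = pci_iommu_batch_add(base_paddr + (n * IO_PAGE_SIZE));

                if (unlikely(err < 0L))
                        goto iommu_map_fail;    /* a mid-stream flush failed */
        }

        if (unlikely(pci_iommu_batch_end() < 0L))
                goto iommu_map_fail;            /* flush of the final partial batch failed */

        local_irq_restore(flags);

This removes the old PGLIST_NENTS cap on a single mapping request, since an arbitrarily large mapping is now handed to the hypervisor one page-sized batch at a time.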
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index 902d07c714fb..4e9d3c451af2 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -26,11 +26,86 @@
 
 #define PGLIST_NENTS    (PAGE_SIZE / sizeof(u64))
 
-struct sun4v_pglist {
-        u64 *pglist;
+struct pci_iommu_batch {
+        struct pci_dev *pdev;           /* Device mapping is for. */
+        unsigned long prot;             /* IOMMU page protections */
+        unsigned long entry;            /* Index into IOTSB. */
+        u64 *pglist;                    /* List of physical pages */
+        unsigned long npages;           /* Number of pages in list. */
 };
 
-static DEFINE_PER_CPU(struct sun4v_pglist, iommu_pglists);
+static DEFINE_PER_CPU(struct pci_iommu_batch, pci_iommu_batch);
+
+/* Interrupts must be disabled. */
+static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long prot, unsigned long entry)
+{
+        struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
+
+        p->pdev = pdev;
+        p->prot = prot;
+        p->entry = entry;
+        p->npages = 0;
+}
+
+/* Interrupts must be disabled. */
+static long pci_iommu_batch_flush(struct pci_iommu_batch *p)
+{
+        struct pcidev_cookie *pcp = p->pdev->sysdata;
+        unsigned long devhandle = pcp->pbm->devhandle;
+        unsigned long prot = p->prot;
+        unsigned long entry = p->entry;
+        u64 *pglist = p->pglist;
+        unsigned long npages = p->npages;
+
+        do {
+                long num;
+
+                num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
+                                          npages, prot, __pa(pglist));
+                if (unlikely(num < 0)) {
+                        if (printk_ratelimit())
+                                printk("pci_iommu_batch_flush: IOMMU map of "
+                                       "[%08lx:%08lx:%lx:%lx:%lx] failed with "
+                                       "status %ld\n",
+                                       devhandle, HV_PCI_TSBID(0, entry),
+                                       npages, prot, __pa(pglist), num);
+                        return -1;
+                }
+
+                entry += num;
+                npages -= num;
+                pglist += num;
+        } while (npages != 0);
+
+        p->entry = entry;
+        p->npages = 0;
+
+        return 0;
+}
+
+/* Interrupts must be disabled. */
+static inline long pci_iommu_batch_add(u64 phys_page)
+{
+        struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
+
+        BUG_ON(p->npages >= PGLIST_NENTS);
+
+        p->pglist[p->npages++] = phys_page;
+        if (p->npages == PGLIST_NENTS)
+                return pci_iommu_batch_flush(p);
+
+        return 0;
+}
+
+/* Interrupts must be disabled. */
+static inline long pci_iommu_batch_end(void)
+{
+        struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
+
+        BUG_ON(p->npages >= PGLIST_NENTS);
+
+        return pci_iommu_batch_flush(p);
+}
 
 static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages)
 {
@@ -86,65 +161,64 @@ static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr
         unsigned long flags, order, first_page, npages, n;
         void *ret;
         long entry;
-        u64 *pglist;
-        u32 devhandle;
-        int cpu;
 
         size = IO_PAGE_ALIGN(size);
         order = get_order(size);
-        if (order >= MAX_ORDER)
+        if (unlikely(order >= MAX_ORDER))
                 return NULL;
 
         npages = size >> IO_PAGE_SHIFT;
-        if (npages > PGLIST_NENTS)
-                return NULL;
 
         first_page = __get_free_pages(GFP_ATOMIC, order);
-        if (first_page == 0UL)
+        if (unlikely(first_page == 0UL))
                 return NULL;
 
         memset((char *)first_page, 0, PAGE_SIZE << order);
 
         pcp = pdev->sysdata;
-        devhandle = pcp->pbm->devhandle;
         iommu = pcp->pbm->iommu;
 
         spin_lock_irqsave(&iommu->lock, flags);
         entry = pci_arena_alloc(&iommu->arena, npages);
         spin_unlock_irqrestore(&iommu->lock, flags);
 
-        if (unlikely(entry < 0L)) {
-                free_pages(first_page, order);
-                return NULL;
-        }
+        if (unlikely(entry < 0L))
+                goto arena_alloc_fail;
 
         *dma_addrp = (iommu->page_table_map_base +
                       (entry << IO_PAGE_SHIFT));
         ret = (void *) first_page;
         first_page = __pa(first_page);
 
-        cpu = get_cpu();
+        local_irq_save(flags);
 
-        pglist = __get_cpu_var(iommu_pglists).pglist;
-        for (n = 0; n < npages; n++)
-                pglist[n] = first_page + (n * PAGE_SIZE);
+        pci_iommu_batch_start(pdev,
+                              (HV_PCI_MAP_ATTR_READ |
+                               HV_PCI_MAP_ATTR_WRITE),
+                              entry);
 
-        do {
-                unsigned long num;
+        for (n = 0; n < npages; n++) {
+                long err = pci_iommu_batch_add(first_page + (n * PAGE_SIZE));
+                if (unlikely(err < 0L))
+                        goto iommu_map_fail;
+        }
 
-                num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
-                                          npages,
-                                          (HV_PCI_MAP_ATTR_READ |
-                                           HV_PCI_MAP_ATTR_WRITE),
-                                          __pa(pglist));
-                entry += num;
-                npages -= num;
-                pglist += num;
-        } while (npages != 0);
+        if (unlikely(pci_iommu_batch_end() < 0L))
+                goto iommu_map_fail;
 
-        put_cpu();
+        local_irq_restore(flags);
 
         return ret;
+
+iommu_map_fail:
+        /* Interrupts are disabled. */
+        spin_lock(&iommu->lock);
+        pci_arena_free(&iommu->arena, entry, npages);
+        spin_unlock_irqrestore(&iommu->lock, flags);
+
+arena_alloc_fail:
+        free_pages(first_page, order);
+        return NULL;
 }
 
 static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
@@ -186,15 +260,12 @@ static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz,
         struct pci_iommu *iommu;
         unsigned long flags, npages, oaddr;
         unsigned long i, base_paddr;
-        u32 devhandle, bus_addr, ret;
+        u32 bus_addr, ret;
         unsigned long prot;
         long entry;
-        u64 *pglist;
-        int cpu;
 
         pcp = pdev->sysdata;
         iommu = pcp->pbm->iommu;
-        devhandle = pcp->pbm->devhandle;
 
         if (unlikely(direction == PCI_DMA_NONE))
                 goto bad;
@@ -202,8 +273,6 @@ static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz,
         oaddr = (unsigned long)ptr;
         npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
         npages >>= IO_PAGE_SHIFT;
-        if (unlikely(npages > PGLIST_NENTS))
-                goto bad;
 
         spin_lock_irqsave(&iommu->lock, flags);
         entry = pci_arena_alloc(&iommu->arena, npages);
@@ -220,24 +289,19 @@ static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz,
         if (direction != PCI_DMA_TODEVICE)
                 prot |= HV_PCI_MAP_ATTR_WRITE;
 
-        cpu = get_cpu();
-
-        pglist = __get_cpu_var(iommu_pglists).pglist;
-        for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE)
-                pglist[i] = base_paddr;
+        local_irq_save(flags);
 
-        do {
-                unsigned long num;
+        pci_iommu_batch_start(pdev, prot, entry);
 
-                num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
-                                          npages, prot,
-                                          __pa(pglist));
-                entry += num;
-                npages -= num;
-                pglist += num;
-        } while (npages != 0);
+        for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
+                long err = pci_iommu_batch_add(base_paddr);
+                if (unlikely(err < 0L))
+                        goto iommu_map_fail;
+        }
+        if (unlikely(pci_iommu_batch_end() < 0L))
+                goto iommu_map_fail;
 
-        put_cpu();
+        local_irq_restore(flags);
 
         return ret;
 
@@ -245,6 +309,14 @@ bad:
         if (printk_ratelimit())
                 WARN_ON(1);
         return PCI_DMA_ERROR_CODE;
+
+iommu_map_fail:
+        /* Interrupts are disabled. */
+        spin_lock(&iommu->lock);
+        pci_arena_free(&iommu->arena, entry, npages);
+        spin_unlock_irqrestore(&iommu->lock, flags);
+
+        return PCI_DMA_ERROR_CODE;
 }
 
 static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
@@ -289,18 +361,19 @@ static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_
 #define SG_ENT_PHYS_ADDRESS(SG) \
         (__pa(page_address((SG)->page)) + (SG)->offset)
 
-static inline void fill_sg(long entry, u32 devhandle,
+static inline long fill_sg(long entry, struct pci_dev *pdev,
                            struct scatterlist *sg,
                            int nused, int nelems, unsigned long prot)
 {
         struct scatterlist *dma_sg = sg;
         struct scatterlist *sg_end = sg + nelems;
-        int i, cpu, pglist_ent;
-        u64 *pglist;
+        unsigned long flags;
+        int i;
+
+        local_irq_save(flags);
+
+        pci_iommu_batch_start(pdev, prot, entry);
 
-        cpu = get_cpu();
-        pglist = __get_cpu_var(iommu_pglists).pglist;
-        pglist_ent = 0;
         for (i = 0; i < nused; i++) {
                 unsigned long pteval = ~0UL;
                 u32 dma_npages;
@@ -338,7 +411,12 @@ static inline void fill_sg(long entry, u32 devhandle,
 
                         pteval = (pteval & IOPTE_PAGE);
                         while (len > 0) {
-                                pglist[pglist_ent++] = pteval;
+                                long err;
+
+                                err = pci_iommu_batch_add(pteval);
+                                if (unlikely(err < 0L))
+                                        goto iommu_map_failed;
+
                                 pteval += IO_PAGE_SIZE;
                                 len -= (IO_PAGE_SIZE - offset);
                                 offset = 0;
@@ -366,18 +444,15 @@ static inline void fill_sg(long entry, u32 devhandle,
                 dma_sg++;
         }
 
-        BUG_ON(pglist_ent == 0);
+        if (unlikely(pci_iommu_batch_end() < 0L))
+                goto iommu_map_failed;
 
-        do {
-                unsigned long num;
-
-                num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
-                                            pglist_ent);
-                entry += num;
-                pglist_ent -= num;
-        } while (pglist_ent != 0);
+        local_irq_restore(flags);
+        return 0;
 
-        put_cpu();
+iommu_map_failed:
+        local_irq_restore(flags);
+        return -1L;
 }
 
 static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
@@ -385,9 +460,9 @@ static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int n
         struct pcidev_cookie *pcp;
         struct pci_iommu *iommu;
         unsigned long flags, npages, prot;
-        u32 devhandle, dma_base;
+        u32 dma_base;
         struct scatterlist *sgtmp;
-        long entry;
+        long entry, err;
         int used;
 
         /* Fast path single entry scatterlists. */
@@ -404,7 +479,6 @@ static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int n
 
         pcp = pdev->sysdata;
         iommu = pcp->pbm->iommu;
-        devhandle = pcp->pbm->devhandle;
 
         if (unlikely(direction == PCI_DMA_NONE))
                 goto bad;
@@ -441,7 +515,9 @@ static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int n
         if (direction != PCI_DMA_TODEVICE)
                 prot |= HV_PCI_MAP_ATTR_WRITE;
 
-        fill_sg(entry, devhandle, sglist, used, nelems, prot);
+        err = fill_sg(entry, pdev, sglist, used, nelems, prot);
+        if (unlikely(err < 0L))
+                goto iommu_map_failed;
 
         return used;
 
@@ -449,6 +525,13 @@ bad:
         if (printk_ratelimit())
                 WARN_ON(1);
         return 0;
+
+iommu_map_failed:
+        spin_lock_irqsave(&iommu->lock, flags);
+        pci_arena_free(&iommu->arena, entry, npages);
+        spin_unlock_irqrestore(&iommu->lock, flags);
+
+        return 0;
 }
 
 static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
@@ -1011,13 +1094,13 @@ void sun4v_pci_init(int node, char *model_name)
                 }
         }
 
-        for (i = 0; i < NR_CPUS; i++) {
+        for_each_cpu(i) {
                 unsigned long page = get_zeroed_page(GFP_ATOMIC);
 
                 if (!page)
                         goto fatal_memory_error;
 
-                per_cpu(iommu_pglists, i).pglist = (u64 *) page;
+                per_cpu(pci_iommu_batch, i).pglist = (u64 *) page;
         }
 
         p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
diff --git a/arch/sparc64/kernel/pci_sun4v.h b/arch/sparc64/kernel/pci_sun4v.h
index 88f199e11a71..884d25f6158d 100644
--- a/arch/sparc64/kernel/pci_sun4v.h
+++ b/arch/sparc64/kernel/pci_sun4v.h
@@ -6,11 +6,11 @@
 #ifndef _PCI_SUN4V_H
 #define _PCI_SUN4V_H
 
-extern unsigned long pci_sun4v_iommu_map(unsigned long devhandle,
-                                         unsigned long tsbid,
-                                         unsigned long num_ttes,
-                                         unsigned long io_attributes,
-                                         unsigned long io_page_list_pa);
+extern long pci_sun4v_iommu_map(unsigned long devhandle,
+                                unsigned long tsbid,
+                                unsigned long num_ttes,
+                                unsigned long io_attributes,
+                                unsigned long io_page_list_pa);
 extern unsigned long pci_sun4v_iommu_demap(unsigned long devhandle,
                                            unsigned long tsbid,
                                            unsigned long num_ttes);
diff --git a/arch/sparc64/kernel/pci_sun4v_asm.S b/arch/sparc64/kernel/pci_sun4v_asm.S
index 424db6526648..6604fdbf746c 100644
--- a/arch/sparc64/kernel/pci_sun4v_asm.S
+++ b/arch/sparc64/kernel/pci_sun4v_asm.S
@@ -11,14 +11,19 @@
  * %o3: io_attributes
  * %o4: io_page_list phys address
  *
- * returns %o0: num ttes mapped
+ * returns %o0: -status if status was non-zero, else
+ *         %o0: num pages mapped
  */
         .globl  pci_sun4v_iommu_map
 pci_sun4v_iommu_map:
+        mov     %o5, %g1
         mov     HV_FAST_PCI_IOMMU_MAP, %o5
         ta      HV_FAST_TRAP
-        retl
-         mov    %o1, %o0
+        brnz,pn %o0, 1f
+         sub    %g0, %o0, %o0
+        mov     %o1, %o0
+1:      retl
+         nop
 
 /* %o0: devhandle
  * %o1: tsbid