author		David S. Miller <davem@sunset.davemloft.net>	2006-02-10 03:08:26 -0500
committer	David S. Miller <davem@sunset.davemloft.net>	2006-03-20 04:12:15 -0500
commit		18397944642cbca7fcd4a109b43ed5b4652e95b9
tree		6848d3e67b789b30064f488bd3273ed5ebee4f99
parent		164c220fa3947abbada65329d168f421b461a2a7
[SPARC64]: First cut at SUN4V PCI IOMMU handling.
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	arch/sparc64/kernel/pci_iommu.c     |   6
-rw-r--r--	arch/sparc64/kernel/pci_sun4v.c     | 525
-rw-r--r--	arch/sparc64/kernel/pci_sun4v.h     |   4
-rw-r--r--	arch/sparc64/kernel/pci_sun4v_asm.S |  46
4 files changed, 550 insertions(+), 31 deletions(-)
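The patch fills in the sun4v PCI IOMMU methods by driving the hypervisor's IOMMU services: DVMA space is handed out by a small bitmap arena allocator, physical page addresses are staged in a per-cpu page list, and the map hypercall is issued in a retry loop because pci_sun4v_iommu_map() may map fewer TTEs per call than requested. Below is a minimal stand-alone sketch of that retry loop; fake_iommu_map() is a hypothetical stand-in for the real hypervisor call, everything else mirrors the loop used verbatim at every mapping site in the patch.

#include <stdio.h>

typedef unsigned long u64;

/* Hypothetical stand-in for pci_sun4v_iommu_map(): maps at most 16
 * entries per call to demonstrate partial progress.
 */
static unsigned long fake_iommu_map(long entry, unsigned long npages,
				    const u64 *pglist)
{
	unsigned long n = npages > 16 ? 16 : npages;

	printf("mapped %lu pages at entry %ld (first pa 0x%lx)\n",
	       n, entry, pglist[0]);
	return n;
}

int main(void)
{
	u64 pglist[40];			/* per-cpu page list in the patch */
	unsigned long npages = 40;
	u64 *pg = pglist;
	long entry = 100;		/* TSB slot from the arena allocator */
	unsigned long i;

	for (i = 0; i < npages; i++)
		pglist[i] = 0x10000000UL + i * 8192;	/* physical pages */

	/* The retry loop: advance past whatever the call managed to map. */
	do {
		unsigned long num = fake_iommu_map(entry, npages, pg);

		entry += num;
		npages -= num;
		pg += num;
	} while (npages != 0);

	return 0;
}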
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c
index 8e52232f6f31..c9320eac45d3 100644
--- a/arch/sparc64/kernel/pci_iommu.c
+++ b/arch/sparc64/kernel/pci_iommu.c
@@ -562,9 +562,9 @@ static int pci_4u_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int n
 	/* Fast path single entry scatterlists. */
 	if (nelems == 1) {
 		sglist->dma_address =
-			pci_map_single(pdev,
+			pci_4u_map_single(pdev,
				       (page_address(sglist->page) + sglist->offset),
				       sglist->length, direction);
 		if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
 			return 0;
 		sglist->dma_length = sglist->length;
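The pci_iommu.c hunk is a layering fix: the sun4u scatterlist fast path previously called the generic pci_map_single(), which dispatches through the per-platform IOMMU ops table (this patch adds pci_sun4v_iommu_ops as a second implementation below), so the 4U code must call its sibling pci_4u_map_single() directly. The following is a hedged reconstruction of the dispatch shape, not the exact sparc64 code of this era:

#include <stddef.h>

struct pci_dev;				/* opaque here */
typedef unsigned int dma_addr_t;	/* 32-bit DVMA addresses */

struct pci_iommu_ops {
	dma_addr_t (*map_single)(struct pci_dev *, void *, size_t, int);
	/* ... map_sg, unmap_single, sync hooks, etc. ... */
};

/* Set at boot to the 4U or 4V implementation. */
static struct pci_iommu_ops *pci_iommu_ops;

static dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr,
				 size_t sz, int direction)
{
	/* Dispatch through the installed ops table; an ops method that
	 * called this would just bounce back through the table.
	 */
	return pci_iommu_ops->map_single(pdev, ptr, sz, direction);
}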
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index abd9bfb245cb..3f0e3c09f4d3 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -9,6 +9,7 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
+#include <linux/percpu.h>
 
 #include <asm/pbm.h>
 #include <asm/iommu.h>
@@ -23,39 +24,481 @@
 
 #include "pci_sun4v.h"
 
+#define PGLIST_NENTS	2048
+
+struct sun4v_pglist {
+	u64	pglist[PGLIST_NENTS];
+};
+
+static DEFINE_PER_CPU(struct sun4v_pglist, iommu_pglists);
+
+static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages)
+{
+	unsigned long n, i, start, end, limit;
+	int pass;
+
+	limit = arena->limit;
+	start = arena->hint;
+	pass = 0;
+
+again:
+	n = find_next_zero_bit(arena->map, limit, start);
+	end = n + npages;
+	if (unlikely(end >= limit)) {
+		if (likely(pass < 1)) {
+			limit = start;
+			start = 0;
+			pass++;
+			goto again;
+		} else {
+			/* Scanned the whole thing, give up. */
+			return -1;
+		}
+	}
+
+	for (i = n; i < end; i++) {
+		if (test_bit(i, arena->map)) {
+			start = i + 1;
+			goto again;
+		}
+	}
+
+	for (i = n; i < end; i++)
+		__set_bit(i, arena->map);
+
+	arena->hint = end;
+
+	return n;
+}
+
+static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
+{
+	unsigned long i;
+
+	for (i = base; i < (base + npages); i++)
+		__clear_bit(i, arena->map);
+}
+
 static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
 {
-	return NULL;
+	struct pcidev_cookie *pcp;
+	struct pci_iommu *iommu;
+	unsigned long devhandle, flags, order, first_page, npages, n;
+	void *ret;
+	long entry;
+	u64 *pglist;
+	int cpu;
+
+	size = IO_PAGE_ALIGN(size);
+	order = get_order(size);
+	if (order >= MAX_ORDER)
+		return NULL;
+
+	npages = size >> IO_PAGE_SHIFT;
+	if (npages > PGLIST_NENTS)
+		return NULL;
+
+	first_page = __get_free_pages(GFP_ATOMIC, order);
+	if (first_page == 0UL)
+		return NULL;
+	memset((char *)first_page, 0, PAGE_SIZE << order);
+
+	pcp = pdev->sysdata;
+	devhandle = pcp->pbm->devhandle;
+	iommu = pcp->pbm->iommu;
+
+	spin_lock_irqsave(&iommu->lock, flags);
+	entry = pci_arena_alloc(&iommu->arena, npages);
+	spin_unlock_irqrestore(&iommu->lock, flags);
+
+	if (unlikely(entry < 0L)) {
+		free_pages(first_page, order);
+		return NULL;
+	}
+
+	*dma_addrp = (iommu->page_table_map_base +
+		      (entry << IO_PAGE_SHIFT));
+	ret = (void *) first_page;
+	first_page = __pa(first_page);
+
+	cpu = get_cpu();
+
+	pglist = &__get_cpu_var(iommu_pglists).pglist[0];
+	for (n = 0; n < npages; n++)
+		pglist[n] = first_page + (n * PAGE_SIZE);
+
+	do {
+		unsigned long num;
+
+		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
+					  npages,
+					  (HV_PCI_MAP_ATTR_READ |
+					   HV_PCI_MAP_ATTR_WRITE),
+					  __pa(pglist));
+		entry += num;
+		npages -= num;
+		pglist += num;
+	} while (npages != 0);
+
+	put_cpu();
+
+	return ret;
 }
 
 static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
 {
+	struct pcidev_cookie *pcp;
+	struct pci_iommu *iommu;
+	unsigned long flags, order, npages, entry, devhandle;
+
+	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
+	pcp = pdev->sysdata;
+	iommu = pcp->pbm->iommu;
+	devhandle = pcp->pbm->devhandle;
+	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+
+	spin_lock_irqsave(&iommu->lock, flags);
+
+	pci_arena_free(&iommu->arena, entry, npages);
+
+	do {
+		unsigned long num;
+
+		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
+					    npages);
+		entry += num;
+		npages -= num;
+	} while (npages != 0);
+
+	spin_unlock_irqrestore(&iommu->lock, flags);
+
+	order = get_order(size);
+	if (order < 10)
+		free_pages((unsigned long)cpu, order);
 }
 
 static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
 {
-	return 0;
+	struct pcidev_cookie *pcp;
+	struct pci_iommu *iommu;
+	unsigned long flags, npages, oaddr;
+	unsigned long i, base_paddr, devhandle;
+	u32 bus_addr, ret;
+	unsigned long prot;
+	long entry;
+	u64 *pglist;
+	int cpu;
+
+	pcp = pdev->sysdata;
+	iommu = pcp->pbm->iommu;
+	devhandle = pcp->pbm->devhandle;
+
+	if (unlikely(direction == PCI_DMA_NONE))
+		goto bad;
+
+	oaddr = (unsigned long)ptr;
+	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
+	npages >>= IO_PAGE_SHIFT;
+	if (unlikely(npages > PGLIST_NENTS))
+		goto bad;
+
+	spin_lock_irqsave(&iommu->lock, flags);
+	entry = pci_arena_alloc(&iommu->arena, npages);
+	spin_unlock_irqrestore(&iommu->lock, flags);
+
+	if (unlikely(entry < 0L))
+		goto bad;
+
+	bus_addr = (iommu->page_table_map_base +
+		    (entry << IO_PAGE_SHIFT));
+	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
+	base_paddr = __pa(oaddr & IO_PAGE_MASK);
+	prot = HV_PCI_MAP_ATTR_READ;
+	if (direction != PCI_DMA_TODEVICE)
+		prot |= HV_PCI_MAP_ATTR_WRITE;
+
+	cpu = get_cpu();
+
+	pglist = &__get_cpu_var(iommu_pglists).pglist[0];
+	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE)
+		pglist[i] = base_paddr;
+
+	do {
+		unsigned long num;
+
+		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
+					  npages, prot,
+					  __pa(pglist));
+		entry += num;
+		npages -= num;
+		pglist += num;
+	} while (npages != 0);
+
+	put_cpu();
+
+	return ret;
+
+bad:
+	if (printk_ratelimit())
+		WARN_ON(1);
+	return PCI_DMA_ERROR_CODE;
 }
 
 static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
 {
+	struct pcidev_cookie *pcp;
+	struct pci_iommu *iommu;
+	unsigned long flags, npages, devhandle;
+	long entry;
+
+	if (unlikely(direction == PCI_DMA_NONE)) {
+		if (printk_ratelimit())
+			WARN_ON(1);
+		return;
+	}
+
+	pcp = pdev->sysdata;
+	iommu = pcp->pbm->iommu;
+	devhandle = pcp->pbm->devhandle;
+
+	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
+	npages >>= IO_PAGE_SHIFT;
+	bus_addr &= IO_PAGE_MASK;
+
+	spin_lock_irqsave(&iommu->lock, flags);
+
+	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
+	pci_arena_free(&iommu->arena, entry, npages);
+
+	do {
+		unsigned long num;
+
+		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
+					    npages);
+		entry += num;
+		npages -= num;
+	} while (npages != 0);
+
+	spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+#define SG_ENT_PHYS_ADDRESS(SG)	\
+	(__pa(page_address((SG)->page)) + (SG)->offset)
+
+static inline void fill_sg(long entry, unsigned long devhandle,
+			   struct scatterlist *sg,
+			   int nused, int nelems, unsigned long prot)
+{
+	struct scatterlist *dma_sg = sg;
+	struct scatterlist *sg_end = sg + nelems;
+	int i, cpu, pglist_ent;
+	u64 *pglist;
+
+	cpu = get_cpu();
+	pglist = &__get_cpu_var(iommu_pglists).pglist[0];
+	pglist_ent = 0;
+	for (i = 0; i < nused; i++) {
+		unsigned long pteval = ~0UL;
+		u32 dma_npages;
+
+		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
+			      dma_sg->dma_length +
+			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
+		do {
+			unsigned long offset;
+			signed int len;
+
+			/* If we are here, we know we have at least one
+			 * more page to map.  So walk forward until we
+			 * hit a page crossing, and begin creating new
+			 * mappings from that spot.
+			 */
+			for (;;) {
+				unsigned long tmp;
+
+				tmp = SG_ENT_PHYS_ADDRESS(sg);
+				len = sg->length;
+				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
+					pteval = tmp & IO_PAGE_MASK;
+					offset = tmp & (IO_PAGE_SIZE - 1UL);
+					break;
+				}
+				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
+					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
+					offset = 0UL;
+					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
+					break;
+				}
+				sg++;
+			}
+
+			pteval = (pteval & IOPTE_PAGE);
+			while (len > 0) {
+				pglist[pglist_ent++] = pteval;
+				pteval += IO_PAGE_SIZE;
+				len -= (IO_PAGE_SIZE - offset);
+				offset = 0;
+				dma_npages--;
+			}
+
+			pteval = (pteval & IOPTE_PAGE) + len;
+			sg++;
+
+			/* Skip over any tail mappings we've fully mapped,
+			 * adjusting pteval along the way.  Stop when we
+			 * detect a page crossing event.
+			 */
+			while (sg < sg_end &&
+			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
+			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
+			       ((pteval ^
+				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
+				pteval += sg->length;
+				sg++;
+			}
+			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
+				pteval = ~0UL;
+		} while (dma_npages != 0);
+		dma_sg++;
+	}
+
+	BUG_ON(pglist_ent == 0);
+
+	do {
+		unsigned long num;
+
+		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
+					    pglist_ent);
+		entry += num;
+		pglist_ent -= num;
+	} while (pglist_ent != 0);
+
+	put_cpu();
 }
 
 static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
 {
-	return nelems;
+	struct pcidev_cookie *pcp;
+	struct pci_iommu *iommu;
+	unsigned long flags, npages, prot, devhandle;
+	u32 dma_base;
+	struct scatterlist *sgtmp;
+	long entry;
+	int used;
+
+	/* Fast path single entry scatterlists. */
+	if (nelems == 1) {
+		sglist->dma_address =
+			pci_4v_map_single(pdev,
+					  (page_address(sglist->page) + sglist->offset),
+					  sglist->length, direction);
+		if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
+			return 0;
+		sglist->dma_length = sglist->length;
+		return 1;
+	}
+
+	pcp = pdev->sysdata;
+	iommu = pcp->pbm->iommu;
+	devhandle = pcp->pbm->devhandle;
+
+	if (unlikely(direction == PCI_DMA_NONE))
+		goto bad;
+
+	/* Step 1: Prepare scatter list. */
+	npages = prepare_sg(sglist, nelems);
+	if (unlikely(npages > PGLIST_NENTS))
+		goto bad;
+
+	/* Step 2: Allocate a cluster and context, if necessary. */
+	spin_lock_irqsave(&iommu->lock, flags);
+	entry = pci_arena_alloc(&iommu->arena, npages);
+	spin_unlock_irqrestore(&iommu->lock, flags);
+
+	if (unlikely(entry < 0L))
+		goto bad;
+
+	dma_base = iommu->page_table_map_base +
+		(entry << IO_PAGE_SHIFT);
+
+	/* Step 3: Normalize DMA addresses. */
+	used = nelems;
+
+	sgtmp = sglist;
+	while (used && sgtmp->dma_length) {
+		sgtmp->dma_address += dma_base;
+		sgtmp++;
+		used--;
+	}
+	used = nelems - used;
+
+	/* Step 4: Create the mappings. */
+	prot = HV_PCI_MAP_ATTR_READ;
+	if (direction != PCI_DMA_TODEVICE)
+		prot |= HV_PCI_MAP_ATTR_WRITE;
+
+	fill_sg(entry, devhandle, sglist, used, nelems, prot);
+
+	return used;
+
+bad:
+	if (printk_ratelimit())
+		WARN_ON(1);
+	return 0;
 }
 
 static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
 {
+	struct pcidev_cookie *pcp;
+	struct pci_iommu *iommu;
+	unsigned long flags, i, npages, devhandle;
+	long entry;
+	u32 bus_addr;
+
+	if (unlikely(direction == PCI_DMA_NONE)) {
+		if (printk_ratelimit())
+			WARN_ON(1);
+	}
+
+	pcp = pdev->sysdata;
+	iommu = pcp->pbm->iommu;
+	devhandle = pcp->pbm->devhandle;
+
+	bus_addr = sglist->dma_address & IO_PAGE_MASK;
+
+	for (i = 1; i < nelems; i++)
+		if (sglist[i].dma_length == 0)
+			break;
+	i--;
+	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
+		  bus_addr) >> IO_PAGE_SHIFT;
+
+	entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+
+	spin_lock_irqsave(&iommu->lock, flags);
+
+	pci_arena_free(&iommu->arena, entry, npages);
+
+	do {
+		unsigned long num;
+
+		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
+					    npages);
+		entry += num;
+		npages -= num;
+	} while (npages != 0);
+
+	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
 static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
 {
+	/* Nothing to do... */
 }
 
 static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
 {
+	/* Nothing to do... */
 }
 
 struct pci_iommu_ops pci_sun4v_iommu_ops = {
@@ -264,9 +707,83 @@ static void pbm_register_toplevel_resources(struct pci_controller_info *p,
 				       &pbm->mem_space);
 }
 
+static void probe_existing_entries(struct pci_pbm_info *pbm,
+				   struct pci_iommu *iommu)
+{
+	struct pci_iommu_arena *arena = &iommu->arena;
+	unsigned long i, devhandle;
+
+	devhandle = pbm->devhandle;
+	for (i = 0; i < arena->limit; i++) {
+		unsigned long ret, io_attrs, ra;
+
+		ret = pci_sun4v_iommu_getmap(devhandle,
+					     HV_PCI_TSBID(0, i),
+					     &io_attrs, &ra);
+		if (ret == HV_EOK)
+			__set_bit(i, arena->map);
+	}
+}
+
 static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
 {
-	/* XXX Implement me! XXX */
+	struct pci_iommu *iommu = pbm->iommu;
+	unsigned long num_tsb_entries, sz;
+	u32 vdma[2], dma_mask, dma_offset;
+	int err, tsbsize;
+
+	err = prom_getproperty(pbm->prom_node, "virtual-dma",
+			       (char *)&vdma[0], sizeof(vdma));
+	if (err == 0 || err == -1) {
+		/* No property, use default values. */
+		vdma[0] = 0x80000000;
+		vdma[1] = 0x80000000;
+	}
+
+	dma_mask = vdma[0];
+	switch (vdma[1]) {
+	case 0x20000000:
+		dma_mask |= 0x1fffffff;
+		tsbsize = 64;
+		break;
+
+	case 0x40000000:
+		dma_mask |= 0x3fffffff;
+		tsbsize = 128;
+		break;
+
+	case 0x80000000:
+		dma_mask |= 0x7fffffff;
+		tsbsize = 128;
+		break;
+
+	default:
+		prom_printf("PCI-SUN4V: strange virtual-dma size.\n");
+		prom_halt();
+	};
+
+	num_tsb_entries = tsbsize / sizeof(iopte_t);
+
+	dma_offset = vdma[0];
+
+	/* Setup initial software IOMMU state. */
+	spin_lock_init(&iommu->lock);
+	iommu->ctx_lowest_free = 1;
+	iommu->page_table_map_base = dma_offset;
+	iommu->dma_addr_mask = dma_mask;
+
+	/* Allocate and initialize the free area map. */
+	sz = num_tsb_entries / 8;
+	sz = (sz + 7UL) & ~7UL;
+	iommu->arena.map = kmalloc(sz, GFP_KERNEL);
+	if (!iommu->arena.map) {
+		prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
+		prom_halt();
+	}
+	memset(iommu->arena.map, 0, sz);
+	iommu->arena.limit = num_tsb_entries;
+
+	probe_existing_entries(pbm, iommu);
 }
 
 static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node)
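pci_sun4v_iommu_init() sizes the DVMA window from the OBP "virtual-dma" property: vdma[0] is the window base and vdma[1] its size, and for the three supported sizes the switch statement computes dma_mask as base | (size - 1). A worked example of the default case, written as plain user-space C for illustration only:

#include <stdio.h>

int main(void)
{
	/* Defaults used above when the property is absent. */
	unsigned int vdma[2] = { 0x80000000u, 0x80000000u };
	unsigned int dma_mask = vdma[0] | (vdma[1] - 1);

	printf("DVMA window 0x%08x-0x%08x, dma_addr_mask 0x%08x\n",
	       vdma[0], vdma[0] + vdma[1] - 1, dma_mask);
	/* -> DVMA window 0x80000000-0xffffffff, dma_addr_mask 0xffffffff,
	 *    matching the case 0x80000000: dma_mask |= 0x7fffffff branch. */
	return 0;
}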
diff --git a/arch/sparc64/kernel/pci_sun4v.h b/arch/sparc64/kernel/pci_sun4v.h
index 5c7ed2ca1505..00322ed0cf8a 100644
--- a/arch/sparc64/kernel/pci_sun4v.h
+++ b/arch/sparc64/kernel/pci_sun4v.h
@@ -16,6 +16,10 @@ extern unsigned long pci_sun4v_iommu_map(unsigned long devhandle,
 extern unsigned long pci_sun4v_iommu_demap(unsigned long devhandle,
 					   unsigned long tsbid,
 					   unsigned long num_ttes);
+extern unsigned long pci_sun4v_iommu_getmap(unsigned long devhandle,
+					    unsigned long tsbid,
+					    unsigned long *io_attributes,
+					    unsigned long *real_address);
 extern unsigned long pci_sun4v_config_get(unsigned long devhandle,
 					  unsigned long pci_device,
 					  unsigned long config_offset,
diff --git a/arch/sparc64/kernel/pci_sun4v_asm.S b/arch/sparc64/kernel/pci_sun4v_asm.S
index 2f1147146abe..4a12341dd5d3 100644
--- a/arch/sparc64/kernel/pci_sun4v_asm.S
+++ b/arch/sparc64/kernel/pci_sun4v_asm.S
@@ -12,9 +12,7 @@
  */
 	.globl	pci_sun4v_devino_to_sysino
 pci_sun4v_devino_to_sysino:
-	mov	%o1, %o2
-	mov	%o0, %o1
-	mov	HV_FAST_INTR_DEVINO2SYSINO, %o0
+	mov	HV_FAST_INTR_DEVINO2SYSINO, %o5
 	ta	HV_FAST_TRAP
 	retl
 	 mov	%o1, %o0
@@ -29,12 +27,7 @@ pci_sun4v_devino_to_sysino:
  */
 	.globl	pci_sun4v_iommu_map
 pci_sun4v_iommu_map:
-	mov	%o4, %o5
-	mov	%o3, %o4
-	mov	%o2, %o3
-	mov	%o1, %o2
-	mov	%o0, %o1
-	mov	HV_FAST_PCI_IOMMU_MAP, %o0
+	mov	HV_FAST_PCI_IOMMU_MAP, %o5
 	ta	HV_FAST_TRAP
 	retl
 	 mov	%o1, %o0
@@ -47,15 +40,29 @@ pci_sun4v_iommu_map:
  */
 	.globl	pci_sun4v_iommu_demap
pci_sun4v_iommu_demap:
-	mov	%o2, %o3
-	mov	%o1, %o2
-	mov	%o0, %o1
-	mov	HV_FAST_PCI_IOMMU_DEMAP, %o0
+	mov	HV_FAST_PCI_IOMMU_DEMAP, %o5
 	ta	HV_FAST_TRAP
 	retl
 	 mov	%o1, %o0
 
 /* %o0: devhandle
+ * %o1: tsbid
+ * %o2: &io_attributes
+ * %o3: &real_address
+ *
+ * returns %o0: status
+ */
+	.globl	pci_sun4v_iommu_getmap
+pci_sun4v_iommu_getmap:
+	mov	%o2, %o4
+	mov	HV_FAST_PCI_IOMMU_GETMAP, %o5
+	ta	HV_FAST_TRAP
+	stx	%o1, [%o4]
+	stx	%o2, [%o3]
+	retl
+	 mov	%o0, %o0
+
+/* %o0: devhandle
  * %o1: pci_device
  * %o2: pci_config_offset
  * %o3: size
@@ -67,11 +74,7 @@ pci_sun4v_iommu_demap:
  */
 	.globl	pci_sun4v_config_get
 pci_sun4v_config_get:
-	mov	%o3, %o4
-	mov	%o2, %o3
-	mov	%o1, %o2
-	mov	%o0, %o1
-	mov	HV_FAST_PCI_CONFIG_GET, %o0
+	mov	HV_FAST_PCI_CONFIG_GET, %o5
 	ta	HV_FAST_TRAP
 	brnz,a,pn %o1, 1f
 	 mov	-1, %o2
@@ -91,14 +94,9 @@ pci_sun4v_config_get:
  */
 	.globl	pci_sun4v_config_put
 pci_sun4v_config_put:
-	mov	%o3, %o4
-	mov	%o2, %o3
-	mov	%o1, %o2
-	mov	%o0, %o1
-	mov	HV_FAST_PCI_CONFIG_PUT, %o0
+	mov	HV_FAST_PCI_CONFIG_PUT, %o5
 	ta	HV_FAST_TRAP
 	brnz,a,pn %o1, 1f
 	 mov	-1, %o1
 1:	retl
 	 mov	%o1, %o0
-
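The assembly rewrite follows from the sun4v fast-trap convention: the SPARC C calling convention already places the first six arguments in registers %o0-%o5, so for these stubs of five or fewer arguments everything is in place and only the hypervisor API number needs loading into %o5 before `ta HV_FAST_TRAP`; on return, %o0 carries the status and %o1 the first return value. The old stubs instead put the API number in %o0 and had to shift every argument up one register, which is exactly the mov-chain noise this patch deletes. As annotation only (argument names as declared in pci_sun4v.h; the register comments are the addition):

/* pci_sun4v_iommu_map(), annotated with the register each argument
 * already occupies on entry to the stub above:
 */
unsigned long pci_sun4v_iommu_map(unsigned long devhandle,	/* %o0 */
				  unsigned long tsbid,		/* %o1 */
				  unsigned long num_ttes,	/* %o2 */
				  unsigned long io_attributes,	/* %o3 */
				  unsigned long io_page_list);	/* %o4 */

/* The stub loads HV_FAST_PCI_IOMMU_MAP into %o5, traps, and hands back
 * %o1, the number of TTEs actually mapped, as the C return value.
 */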