Diffstat (limited to 'arch/sparc64/kernel/pci_sun4v.c')
-rw-r--r--	arch/sparc64/kernel/pci_sun4v.c	1147
1 files changed, 1147 insertions, 0 deletions
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
new file mode 100644
index 000000000000..9372d4f376d5
--- /dev/null
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -0,0 +1,1147 @@
/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>

#include <asm/pbm.h>
#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include <asm/pstate.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

struct pci_iommu_batch {
	struct pci_dev	*pdev;		/* Device mapping is for. */
	unsigned long	prot;		/* IOMMU page protections */
	unsigned long	entry;		/* Index into IOTSB. */
	u64		*pglist;	/* List of physical pages */
	unsigned long	npages;		/* Number of pages in list. */
};

static DEFINE_PER_CPU(struct pci_iommu_batch, pci_iommu_batch);

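/* Mappings are established through a small per-cpu batching layer:
 * physical pages are accumulated in pglist and handed to the hypervisor
 * in a single pci_sun4v_iommu_map() call once the list fills up or the
 * caller finishes.  The calling convention used throughout this file is,
 * in sketch form:
 *
 *	local_irq_save(flags);
 *	pci_iommu_batch_start(pdev, prot, entry);
 *	for each physical page:
 *		pci_iommu_batch_add(phys_page);	 (may flush when full)
 *	pci_iommu_batch_end();			 (flushes the remainder)
 *	local_irq_restore(flags);
 */
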
/* Interrupts must be disabled. */
static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long prot, unsigned long entry)
{
	struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);

	p->pdev = pdev;
	p->prot = prot;
	p->entry = entry;
	p->npages = 0;
}

/* Interrupts must be disabled. */
static long pci_iommu_batch_flush(struct pci_iommu_batch *p)
{
	struct pcidev_cookie *pcp = p->pdev->sysdata;
	unsigned long devhandle = pcp->pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	u64 *pglist = p->pglist;
	unsigned long npages = p->npages;

	while (npages != 0) {
		long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot, __pa(pglist));
		if (unlikely(num < 0)) {
			if (printk_ratelimit())
				printk("pci_iommu_batch_flush: IOMMU map of "
				       "[%08lx:%08lx:%lx:%lx:%lx] failed with "
				       "status %ld\n",
				       devhandle, HV_PCI_TSBID(0, entry),
				       npages, prot, __pa(pglist), num);
			return -1;
		}

		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}

/* Interrupts must be disabled. */
static inline long pci_iommu_batch_add(u64 phys_page)
{
	struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return pci_iommu_batch_flush(p);

	return 0;
}

/* Interrupts must be disabled. */
static inline long pci_iommu_batch_end(void)
{
	struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return pci_iommu_batch_flush(p);
}

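/* IOTSB free-slot accounting is a plain bitmap with a rotating
 * allocation hint.  A request scans forward from the hint; if it runs
 * off the end, the search wraps once and rescans from slot 0 before
 * failing.  For example, with the hint at slot 100, a request tries
 * [100, limit) first and then [0, 100).
 */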
static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages)
{
	unsigned long n, i, start, end, limit;
	int pass;

	limit = arena->limit;
	start = arena->hint;
	pass = 0;

again:
	n = find_next_zero_bit(arena->map, limit, start);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			limit = start;
			start = 0;
			pass++;
			goto again;
		} else {
			/* Scanned the whole thing, give up. */
			return -1;
		}
	}

	for (i = n; i < end; i++) {
		if (test_bit(i, arena->map)) {
			start = i + 1;
			goto again;
		}
	}

	for (i = n; i < end; i++)
		__set_bit(i, arena->map);

	arena->hint = end;

	return n;
}

static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
{
	unsigned long i;

	for (i = base; i < (base + npages); i++)
		__clear_bit(i, arena->map);
}

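/* A successful arena allocation translates directly into a DMA address:
 * dvma = page_table_map_base + (entry << IO_PAGE_SHIFT).  As a worked
 * example, with the default 0x80000000 base and 8K IO pages set up in
 * pci_sun4v_iommu_init() below, entry 3 corresponds to DVMA 0x80006000.
 */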
static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, order, first_page, npages, n;
	void *ret;
	long entry;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	first_page = __get_free_pages(GFP_ATOMIC, order);
	if (unlikely(first_page == 0UL))
		return NULL;

	memset((char *)first_page, 0, PAGE_SIZE << order);

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto arena_alloc_fail;

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	pci_iommu_batch_start(pdev,
			      (HV_PCI_MAP_ATTR_READ |
			       HV_PCI_MAP_ATTR_WRITE),
			      entry);

	for (n = 0; n < npages; n++) {
		long err = pci_iommu_batch_add(first_page + (n * PAGE_SIZE));
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(pci_iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	/* Interrupts are disabled. */
	spin_lock(&iommu->lock);
	pci_arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	/* first_page was converted to a physical address above, so
	 * free through the still-virtual "ret" pointer here.
	 */
	free_pages((unsigned long) ret, order);
	return NULL;

arena_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}

static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	local_irq_save(flags);

	pci_iommu_batch_start(pdev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = pci_iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(pci_iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return PCI_DMA_ERROR_CODE;

iommu_map_fail:
	/* Interrupts are disabled. */
	spin_lock(&iommu->lock);
	pci_arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return PCI_DMA_ERROR_CODE;
}

static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

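/* fill_sg() walks a prepared scatterlist and emits one IOMMU entry per
 * IO page, coalescing runs of physically contiguous scatterlist entries
 * that fall within a single DMA segment.  "nused" is the number of DMA
 * segments actually produced, "nelems" the original scatterlist length.
 */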
#define SG_ENT_PHYS_ADDRESS(SG)	\
	(__pa(page_address((SG)->page)) + (SG)->offset)

static inline long fill_sg(long entry, struct pci_dev *pdev,
			   struct scatterlist *sg,
			   int nused, int nelems, unsigned long prot)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg + nelems;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	pci_iommu_batch_start(pdev, prot, entry);

	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg++;
			}

			pteval = (pteval & IOPTE_PAGE);
			while (len > 0) {
				long err;

				err = pci_iommu_batch_add(pteval);
				if (unlikely(err < 0L))
					goto iommu_map_failed;

				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg++;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (sg < sg_end &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg++;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg++;
	}

	if (unlikely(pci_iommu_batch_end() < 0L))
		goto iommu_map_failed;

	local_irq_restore(flags);
	return 0;

iommu_map_failed:
	local_irq_restore(flags);
	return -1L;
}

static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages, prot;
	u32 dma_base;
	struct scatterlist *sgtmp;
	long entry, err;
	int used;

	/* Fast path single entry scatterlists. */
	if (nelems == 1) {
		sglist->dma_address =
			pci_4v_map_single(pdev,
					  (page_address(sglist->page) + sglist->offset),
					  sglist->length, direction);
		if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
			return 0;
		sglist->dma_length = sglist->length;
		return 1;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	/* Step 1: Prepare scatter list. */
	npages = prepare_sg(sglist, nelems);

	/* Step 2: Allocate a cluster and context, if necessary. */
	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	dma_base = iommu->page_table_map_base +
		(entry << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses. */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nelems - used;

	/* Step 4: Create the mappings. */
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	err = fill_sg(entry, pdev, sglist, used, nelems, prot);
	if (unlikely(err < 0L))
		goto iommu_map_failed;

	return used;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return 0;

iommu_map_failed:
	spin_lock_irqsave(&iommu->lock, flags);
	pci_arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, i, npages;
	long entry;
	u32 devhandle, bus_addr;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	for (i = 1; i < nelems; i++)
		if (sglist[i].dma_length == 0)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
		  bus_addr) >> IO_PAGE_SHIFT;

	entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	/* Nothing to do... */
}

static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	/* Nothing to do... */
}

struct pci_iommu_ops pci_sun4v_iommu_ops = {
	.alloc_consistent = pci_4v_alloc_consistent,
	.free_consistent = pci_4v_free_consistent,
	.map_single = pci_4v_map_single,
	.unmap_single = pci_4v_unmap_single,
	.map_sg = pci_4v_map_sg,
	.unmap_sg = pci_4v_unmap_sg,
	.dma_sync_single_for_cpu = pci_4v_dma_sync_single_for_cpu,
	.dma_sync_sg_for_cpu = pci_4v_dma_sync_sg_for_cpu,
};

/* SUN4V PCI configuration space accessors. */

static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func)
{
	if (bus == pbm->pci_first_busno) {
		if (device == 0 && func == 0)
			return 0;
		return 1;
	}

	if (bus < pbm->pci_first_busno ||
	    bus > pbm->pci_last_busno)
		return 1;
	return 0;
}

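/* Config reads outside the PBM's bus range return all ones, which is
 * what generic PCI probing expects from a non-existent device; writes
 * to out-of-range devices are silently dropped.
 */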
static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				  int where, int size, u32 *value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	u32 devhandle = pbm->devhandle;
	unsigned int bus = bus_dev->number;
	unsigned int device = PCI_SLOT(devfn);
	unsigned int func = PCI_FUNC(devfn);
	unsigned long ret;

	if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
		ret = ~0UL;
	} else {
		ret = pci_sun4v_config_get(devhandle,
					   HV_PCI_DEVICE_BUILD(bus, device, func),
					   where, size);
#if 0
		printk("rcfg: [%x:%x:%x:%d]=[%lx]\n",
		       devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
		       where, size, ret);
#endif
	}
	switch (size) {
	case 1:
		*value = ret & 0xff;
		break;
	case 2:
		*value = ret & 0xffff;
		break;
	case 4:
		*value = ret & 0xffffffff;
		break;
	};

	return PCIBIOS_SUCCESSFUL;
}

653 | |||
654 | static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, | ||
655 | int where, int size, u32 value) | ||
656 | { | ||
657 | struct pci_pbm_info *pbm = bus_dev->sysdata; | ||
658 | u32 devhandle = pbm->devhandle; | ||
659 | unsigned int bus = bus_dev->number; | ||
660 | unsigned int device = PCI_SLOT(devfn); | ||
661 | unsigned int func = PCI_FUNC(devfn); | ||
662 | unsigned long ret; | ||
663 | |||
664 | if (pci_sun4v_out_of_range(pbm, bus, device, func)) { | ||
665 | /* Do nothing. */ | ||
666 | } else { | ||
667 | ret = pci_sun4v_config_put(devhandle, | ||
668 | HV_PCI_DEVICE_BUILD(bus, device, func), | ||
669 | where, size, value); | ||
670 | #if 0 | ||
671 | printk("wcfg: [%x:%x:%x:%d] v[%x] == [%lx]\n", | ||
672 | devhandle, HV_PCI_DEVICE_BUILD(bus, device, func), | ||
673 | where, size, value, ret); | ||
674 | #endif | ||
675 | } | ||
676 | return PCIBIOS_SUCCESSFUL; | ||
677 | } | ||
678 | |||
679 | static struct pci_ops pci_sun4v_ops = { | ||
680 | .read = pci_sun4v_read_pci_cfg, | ||
681 | .write = pci_sun4v_write_pci_cfg, | ||
682 | }; | ||
683 | |||
684 | |||
static void pbm_scan_bus(struct pci_controller_info *p,
			 struct pci_pbm_info *pbm)
{
	struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL);

	if (!cookie) {
		prom_printf("%s: Critical allocation failure.\n", pbm->name);
		prom_halt();
	}

	/* All we care about is the PBM. */
	memset(cookie, 0, sizeof(*cookie));
	cookie->pbm = pbm;

	pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, p->pci_ops, pbm);
#if 0
	pci_fixup_host_bridge_self(pbm->pci_bus);
	pbm->pci_bus->self->sysdata = cookie;
#endif
	pci_fill_in_pbm_cookies(pbm->pci_bus, pbm,
				pbm->prom_node);
	pci_record_assignments(pbm, pbm->pci_bus);
	pci_assign_unassigned(pbm, pbm->pci_bus);
	pci_fixup_irq(pbm, pbm->pci_bus);
	pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
	pci_setup_busmastering(pbm, pbm->pci_bus);
}

static void pci_sun4v_scan_bus(struct pci_controller_info *p)
{
	if (p->pbm_A.prom_node) {
		p->pbm_A.is_66mhz_capable =
			prom_getbool(p->pbm_A.prom_node, "66mhz-capable");

		pbm_scan_bus(p, &p->pbm_A);
	}
	if (p->pbm_B.prom_node) {
		p->pbm_B.is_66mhz_capable =
			prom_getbool(p->pbm_B.prom_node, "66mhz-capable");

		pbm_scan_bus(p, &p->pbm_B);
	}

	/* XXX register error interrupt handlers XXX */
}

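/* The interrupt priority level passed to sun4v_build_irq() is chosen
 * from the device's PCI base class: storage at PIL 5, network at 6,
 * display at 9, and multimedia/memory/bridge/serial at 10, with 5 as
 * the catch-all default.
 */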
static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm,
					struct pci_dev *pdev,
					unsigned int devino)
{
	u32 devhandle = pbm->devhandle;
	int pil;

	pil = 5;
	if (pdev) {
		switch ((pdev->class >> 16) & 0xff) {
		case PCI_BASE_CLASS_STORAGE:
			pil = 5;
			break;

		case PCI_BASE_CLASS_NETWORK:
			pil = 6;
			break;

		case PCI_BASE_CLASS_DISPLAY:
			pil = 9;
			break;

		case PCI_BASE_CLASS_MULTIMEDIA:
		case PCI_BASE_CLASS_MEMORY:
		case PCI_BASE_CLASS_BRIDGE:
		case PCI_BASE_CLASS_SERIAL:
			pil = 10;
			break;

		default:
			pil = 5;
			break;
		};
	}
	BUG_ON(PIL_RESERVED(pil));

	return sun4v_build_irq(devhandle, devino, pil, IBF_PCI);
}

static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource)
{
	struct pcidev_cookie *pcp = pdev->sysdata;
	struct pci_pbm_info *pbm = pcp->pbm;
	struct resource *res, *root;
	u32 reg;
	int where, size, is_64bit;

	res = &pdev->resource[resource];
	if (resource < 6) {
		where = PCI_BASE_ADDRESS_0 + (resource * 4);
	} else if (resource == PCI_ROM_RESOURCE) {
		where = pdev->rom_base_reg;
	} else {
		/* Somebody might have asked for allocation of a
		 * non-standard resource.
		 */
		return;
	}

	/* XXX 64-bit MEM handling is not 100% correct... XXX */
	is_64bit = 0;
	if (res->flags & IORESOURCE_IO)
		root = &pbm->io_space;
	else {
		root = &pbm->mem_space;
		if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
		    == PCI_BASE_ADDRESS_MEM_TYPE_64)
			is_64bit = 1;
	}

	size = res->end - res->start;
	pci_read_config_dword(pdev, where, &reg);
	reg = ((reg & size) |
	       (((u32)(res->start - root->start)) & ~size));
	if (resource == PCI_ROM_RESOURCE) {
		reg |= PCI_ROM_ADDRESS_ENABLE;
		res->flags |= IORESOURCE_ROM_ENABLE;
	}
	pci_write_config_dword(pdev, where, reg);

	/* This knows that the upper 32-bits of the address
	 * must be zero.  Our PCI common layer enforces this.
	 */
	if (is_64bit)
		pci_write_config_dword(pdev, where + 4, 0);
}

static void pci_sun4v_resource_adjust(struct pci_dev *pdev,
				      struct resource *res,
				      struct resource *root)
{
	res->start += root->start;
	res->end += root->start;
}

/* Use ranges property to determine where PCI MEM, I/O, and Config
 * space are for this PCI bus module.
 */
static void pci_sun4v_determine_mem_io_space(struct pci_pbm_info *pbm)
{
	int i, saw_mem, saw_io;

	saw_mem = saw_io = 0;
	for (i = 0; i < pbm->num_pbm_ranges; i++) {
		struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i];
		unsigned long a;
		int type;

		type = (pr->child_phys_hi >> 24) & 0x3;
		a = (((unsigned long)pr->parent_phys_hi << 32UL) |
		     ((unsigned long)pr->parent_phys_lo << 0UL));

		switch (type) {
		case 1:
			/* 16-bit IO space, 16MB */
			pbm->io_space.start = a;
			pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
			pbm->io_space.flags = IORESOURCE_IO;
			saw_io = 1;
			break;

		case 2:
			/* 32-bit MEM space, 2GB */
			pbm->mem_space.start = a;
			pbm->mem_space.end = a + (0x80000000UL - 1UL);
			pbm->mem_space.flags = IORESOURCE_MEM;
			saw_mem = 1;
			break;

		case 3:
			/* XXX 64-bit MEM handling XXX */

		default:
			break;
		};
	}

	if (!saw_io || !saw_mem) {
		prom_printf("%s: Fatal error, missing %s PBM range.\n",
			    pbm->name,
			    (!saw_io ? "IO" : "MEM"));
		prom_halt();
	}

	printk("%s: PCI IO[%lx] MEM[%lx]\n",
	       pbm->name,
	       pbm->io_space.start,
	       pbm->mem_space.start);
}

static void pbm_register_toplevel_resources(struct pci_controller_info *p,
					    struct pci_pbm_info *pbm)
{
	pbm->io_space.name = pbm->mem_space.name = pbm->name;

	request_resource(&ioport_resource, &pbm->io_space);
	request_resource(&iomem_resource, &pbm->mem_space);
	pci_register_legacy_regions(&pbm->io_space,
				    &pbm->mem_space);
}

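/* The firmware may have left live IOMMU translations installed (e.g.
 * for the boot console device); mark those IOTSB slots busy in the
 * arena so they are never handed out or demapped.
 */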
static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
					    struct pci_iommu *iommu)
{
	struct pci_iommu_arena *arena = &iommu->arena;
	unsigned long i, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK) {
			cnt++;
			__set_bit(i, arena->map);
		}
	}

	return cnt;
}

static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	struct pci_iommu *iommu = pbm->iommu;
	unsigned long num_tsb_entries, sz;
	u32 vdma[2], dma_mask, dma_offset;
	int err, tsbsize;

	err = prom_getproperty(pbm->prom_node, "virtual-dma",
			       (char *)&vdma[0], sizeof(vdma));
	if (err == 0 || err == -1) {
		/* No property, use default values. */
		vdma[0] = 0x80000000;
		vdma[1] = 0x80000000;
	}

	dma_mask = vdma[0];
	switch (vdma[1]) {
	case 0x20000000:
		dma_mask |= 0x1fffffff;
		tsbsize = 64;
		break;

	case 0x40000000:
		dma_mask |= 0x3fffffff;
		tsbsize = 128;
		break;

	case 0x80000000:
		dma_mask |= 0x7fffffff;
		tsbsize = 256;
		break;

	default:
		prom_printf("PCI-SUN4V: strange virtual-dma size.\n");
		prom_halt();
	};
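	/* Worked example with the defaults: vdma = { 0x80000000, 0x80000000 }
	 * gives dma_mask = 0xffffffff and tsbsize = 256, i.e. a 2MB TSB of
	 * 256K eight-byte entries covering 2GB of DVMA space at 8K per page.
	 */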

	tsbsize *= (8 * 1024);

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map. */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kmalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
		prom_halt();
	}
	memset(iommu->arena.map, 0, sz);
	iommu->arena.limit = num_tsb_entries;

	sz = probe_existing_entries(pbm, iommu);

	printk("%s: TSB entries [%lu], existing mappings [%lu]\n",
	       pbm->name, num_tsb_entries, sz);
}

static void pci_sun4v_get_bus_range(struct pci_pbm_info *pbm)
{
	unsigned int busrange[2];
	int prom_node = pbm->prom_node;
	int err;

	err = prom_getproperty(prom_node, "bus-range",
			       (char *)&busrange[0],
			       sizeof(busrange));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no bus-range.\n", pbm->name);
		prom_halt();
	}

	pbm->pci_first_busno = busrange[0];
	pbm->pci_last_busno = busrange[1];
}

static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node, u32 devhandle)
{
	struct pci_pbm_info *pbm;
	int err, i;

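	/* Bit 6 of the devhandle distinguishes the two PBMs of a
	 * controller pair: set means PBM B, clear means PBM A.
	 */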
	if (devhandle & 0x40)
		pbm = &p->pbm_B;
	else
		pbm = &p->pbm_A;

	pbm->parent = p;
	pbm->prom_node = prom_node;
	pbm->pci_first_slot = 1;

	pbm->devhandle = devhandle;

	sprintf(pbm->name, "SUN4V-PCI%d PBM%c",
		p->index, (pbm == &p->pbm_A ? 'A' : 'B'));

	printk("%s: devhandle[%x] prom_node[%x:%x]\n",
	       pbm->name, pbm->devhandle,
	       pbm->prom_node, prom_getchild(pbm->prom_node));

	prom_getstring(prom_node, "name",
		       pbm->prom_name, sizeof(pbm->prom_name));

	err = prom_getproperty(prom_node, "ranges",
			       (char *) pbm->pbm_ranges,
			       sizeof(pbm->pbm_ranges));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no ranges property.\n",
			    pbm->name);
		prom_halt();
	}

	pbm->num_pbm_ranges =
		(err / sizeof(struct linux_prom_pci_ranges));

	/* Mask out the upper bits of the ranges, leaving the real
	 * physical address.
	 */
	for (i = 0; i < pbm->num_pbm_ranges; i++)
		pbm->pbm_ranges[i].parent_phys_hi &= 0x0fffffff;

	pci_sun4v_determine_mem_io_space(pbm);
	pbm_register_toplevel_resources(p, pbm);

	err = prom_getproperty(prom_node, "interrupt-map",
			       (char *)pbm->pbm_intmap,
			       sizeof(pbm->pbm_intmap));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no interrupt-map property.\n",
			    pbm->name);
		prom_halt();
	}

	pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap));
	err = prom_getproperty(prom_node, "interrupt-map-mask",
			       (char *)&pbm->pbm_intmask,
			       sizeof(pbm->pbm_intmask));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no interrupt-map-mask.\n",
			    pbm->name);
		prom_halt();
	}

	pci_sun4v_get_bus_range(pbm);
	pci_sun4v_iommu_init(pbm);
}

void sun4v_pci_init(int node, char *model_name)
{
	struct pci_controller_info *p;
	struct pci_iommu *iommu;
	struct linux_prom64_registers regs;
	u32 devhandle;
	int i;

	prom_getproperty(node, "reg", (char *)&regs, sizeof(regs));
	devhandle = (regs.phys_addr >> 32UL) & 0x0fffffff;

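	/* Sibling PBMs differ only in bit 6 of the devhandle and share
	 * one pci_controller_info.  Look for a half-filled controller
	 * first; only allocate a fresh one if no sibling is waiting.
	 */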
	for (p = pci_controller_root; p; p = p->next) {
		struct pci_pbm_info *pbm;

		if (p->pbm_A.prom_node && p->pbm_B.prom_node)
			continue;

		pbm = (p->pbm_A.prom_node ?
		       &p->pbm_A :
		       &p->pbm_B);

		if (pbm->devhandle == (devhandle ^ 0x40)) {
			pci_sun4v_pbm_init(p, node, devhandle);
			return;
		}
	}

	for_each_cpu(i) {
		unsigned long page = get_zeroed_page(GFP_ATOMIC);

		if (!page)
			goto fatal_memory_error;

		per_cpu(pci_iommu_batch, i).pglist = (u64 *) page;
	}

	p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
	if (!p)
		goto fatal_memory_error;

	memset(p, 0, sizeof(*p));

	iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	memset(iommu, 0, sizeof(*iommu));
	p->pbm_A.iommu = iommu;

	iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	memset(iommu, 0, sizeof(*iommu));
	p->pbm_B.iommu = iommu;

	p->next = pci_controller_root;
	pci_controller_root = p;

	p->index = pci_num_controllers++;
	p->pbms_same_domain = 0;

	p->scan_bus = pci_sun4v_scan_bus;
	p->irq_build = pci_sun4v_irq_build;
	p->base_address_update = pci_sun4v_base_address_update;
	p->resource_adjust = pci_sun4v_resource_adjust;
	p->pci_ops = &pci_sun4v_ops;

	/* Like PSYCHO and SCHIZO we have a 2GB aligned area
	 * for memory space.
	 */
	pci_memspace_mask = 0x7fffffffUL;

	pci_sun4v_pbm_init(p, node, devhandle);
	return;

fatal_memory_error:
	prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
	prom_halt();
}