Diffstat (limited to 'arch/sparc64/kernel/pci_iommu.c')
-rw-r--r--  arch/sparc64/kernel/pci_iommu.c | 104
1 file changed, 43 insertions(+), 61 deletions(-)
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c
index 2e7f1427088a..66712772f494 100644
--- a/arch/sparc64/kernel/pci_iommu.c
+++ b/arch/sparc64/kernel/pci_iommu.c
@@ -1,7 +1,6 @@
-/* $Id: pci_iommu.c,v 1.17 2001/12/17 07:05:09 davem Exp $
- * pci_iommu.c: UltraSparc PCI controller IOM/STC support.
+/* pci_iommu.c: UltraSparc PCI controller IOM/STC support.
  *
- * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
  * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
  */
 
@@ -36,7 +35,7 @@
 			       "i" (ASI_PHYS_BYPASS_EC_E))
 
 /* Must be invoked under the IOMMU lock. */
-static void __iommu_flushall(struct pci_iommu *iommu)
+static void __iommu_flushall(struct iommu *iommu)
 {
 	unsigned long tag;
 	int entry;
@@ -64,7 +63,7 @@ static void __iommu_flushall(struct pci_iommu *iommu)
 #define IOPTE_IS_DUMMY(iommu, iopte)	\
 	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
 
-static void inline iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte)
+static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
 {
 	unsigned long val = iopte_val(*iopte);
 
@@ -75,9 +74,9 @@ static void inline iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte)
 }
 
 /* Based largely upon the ppc64 iommu allocator. */
-static long pci_arena_alloc(struct pci_iommu *iommu, unsigned long npages)
+static long pci_arena_alloc(struct iommu *iommu, unsigned long npages)
 {
-	struct pci_iommu_arena *arena = &iommu->arena;
+	struct iommu_arena *arena = &iommu->arena;
 	unsigned long n, i, start, end, limit;
 	int pass;
 
@@ -116,7 +115,7 @@ again:
 	return n;
 }
 
-static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
+static void pci_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
 {
 	unsigned long i;
 
@@ -124,7 +123,7 @@ static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, un
 		__clear_bit(i, arena->map);
 }
 
-void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask)
+void pci_iommu_table_init(struct iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask)
 {
 	unsigned long i, tsbbase, order, sz, num_tsb_entries;
 
@@ -170,7 +169,7 @@ void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset,
 		iopte_make_dummy(iommu, &iommu->page_table[i]);
 }
 
-static inline iopte_t *alloc_npages(struct pci_iommu *iommu, unsigned long npages)
+static inline iopte_t *alloc_npages(struct iommu *iommu, unsigned long npages)
 {
 	long entry;
 
@@ -181,12 +180,12 @@ static inline iopte_t *alloc_npages(struct pci_iommu *iommu, unsigned long npage
 	return iommu->page_table + entry;
 }
 
-static inline void free_npages(struct pci_iommu *iommu, dma_addr_t base, unsigned long npages)
+static inline void free_npages(struct iommu *iommu, dma_addr_t base, unsigned long npages)
 {
 	pci_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
 }
 
-static int iommu_alloc_ctx(struct pci_iommu *iommu)
+static int iommu_alloc_ctx(struct iommu *iommu)
 {
 	int lowest = iommu->ctx_lowest_free;
 	int sz = IOMMU_NUM_CTXS - lowest;
@@ -205,7 +204,7 @@ static int iommu_alloc_ctx(struct pci_iommu *iommu)
 	return n;
 }
 
-static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx)
+static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
 {
 	if (likely(ctx)) {
 		__clear_bit(ctx, iommu->ctx_bitmap);
@@ -220,8 +219,7 @@ static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx)
  */
 static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp)
 {
-	struct pcidev_cookie *pcp;
-	struct pci_iommu *iommu;
+	struct iommu *iommu;
 	iopte_t *iopte;
 	unsigned long flags, order, first_page;
 	void *ret;
@@ -237,8 +235,7 @@ static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr
 		return NULL;
 	memset((char *)first_page, 0, PAGE_SIZE << order);
 
-	pcp = pdev->sysdata;
-	iommu = pcp->pbm->iommu;
+	iommu = pdev->dev.archdata.iommu;
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
@@ -268,14 +265,12 @@ static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr
 /* Free and unmap a consistent DMA translation. */
 static void pci_4u_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
 {
-	struct pcidev_cookie *pcp;
-	struct pci_iommu *iommu;
+	struct iommu *iommu;
 	iopte_t *iopte;
 	unsigned long flags, order, npages;
 
 	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
-	pcp = pdev->sysdata;
-	iommu = pcp->pbm->iommu;
+	iommu = pdev->dev.archdata.iommu;
 	iopte = iommu->page_table +
 		((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
 
@@ -295,18 +290,16 @@ static void pci_4u_free_consistent(struct pci_dev *pdev, size_t size, void *cpu,
  */
 static dma_addr_t pci_4u_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
 {
-	struct pcidev_cookie *pcp;
-	struct pci_iommu *iommu;
-	struct pci_strbuf *strbuf;
+	struct iommu *iommu;
+	struct strbuf *strbuf;
 	iopte_t *base;
 	unsigned long flags, npages, oaddr;
 	unsigned long i, base_paddr, ctx;
 	u32 bus_addr, ret;
 	unsigned long iopte_protection;
 
-	pcp = pdev->sysdata;
-	iommu = pcp->pbm->iommu;
-	strbuf = &pcp->pbm->stc;
+	iommu = pdev->dev.archdata.iommu;
+	strbuf = pdev->dev.archdata.stc;
 
 	if (unlikely(direction == PCI_DMA_NONE))
 		goto bad_no_ctx;
@@ -349,7 +342,7 @@ bad_no_ctx:
 	return PCI_DMA_ERROR_CODE;
 }
 
-static void pci_strbuf_flush(struct pci_strbuf *strbuf, struct pci_iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, int direction)
+static void pci_strbuf_flush(struct strbuf *strbuf, struct iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, int direction)
 {
 	int limit;
 
@@ -416,9 +409,8 @@ do_flush_sync:
 /* Unmap a single streaming mode DMA translation. */
 static void pci_4u_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
 {
-	struct pcidev_cookie *pcp;
-	struct pci_iommu *iommu;
-	struct pci_strbuf *strbuf;
+	struct iommu *iommu;
+	struct strbuf *strbuf;
 	iopte_t *base;
 	unsigned long flags, npages, ctx, i;
 
@@ -428,9 +420,8 @@ static void pci_4u_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_
 		return;
 	}
 
-	pcp = pdev->sysdata;
-	iommu = pcp->pbm->iommu;
-	strbuf = &pcp->pbm->stc;
+	iommu = pdev->dev.archdata.iommu;
+	strbuf = pdev->dev.archdata.stc;
 
 	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
 	npages >>= IO_PAGE_SHIFT;
@@ -549,9 +540,8 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
  */
 static int pci_4u_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
 {
-	struct pcidev_cookie *pcp;
-	struct pci_iommu *iommu;
-	struct pci_strbuf *strbuf;
+	struct iommu *iommu;
+	struct strbuf *strbuf;
 	unsigned long flags, ctx, npages, iopte_protection;
 	iopte_t *base;
 	u32 dma_base;
@@ -570,9 +560,8 @@ static int pci_4u_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int n
 		return 1;
 	}
 
-	pcp = pdev->sysdata;
-	iommu = pcp->pbm->iommu;
-	strbuf = &pcp->pbm->stc;
+	iommu = pdev->dev.archdata.iommu;
+	strbuf = pdev->dev.archdata.stc;
 
 	if (unlikely(direction == PCI_DMA_NONE))
 		goto bad_no_ctx;
@@ -636,9 +625,8 @@ bad_no_ctx:
 /* Unmap a set of streaming mode DMA translations. */
 static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
 {
-	struct pcidev_cookie *pcp;
-	struct pci_iommu *iommu;
-	struct pci_strbuf *strbuf;
+	struct iommu *iommu;
+	struct strbuf *strbuf;
 	iopte_t *base;
 	unsigned long flags, ctx, i, npages;
 	u32 bus_addr;
@@ -648,9 +636,8 @@ static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in
 		WARN_ON(1);
 	}
 
-	pcp = pdev->sysdata;
-	iommu = pcp->pbm->iommu;
-	strbuf = &pcp->pbm->stc;
+	iommu = pdev->dev.archdata.iommu;
+	strbuf = pdev->dev.archdata.stc;
 
 	bus_addr = sglist->dma_address & IO_PAGE_MASK;
 
@@ -696,14 +683,12 @@ static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in
  */
 static void pci_4u_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
 {
-	struct pcidev_cookie *pcp;
-	struct pci_iommu *iommu;
-	struct pci_strbuf *strbuf;
+	struct iommu *iommu;
+	struct strbuf *strbuf;
 	unsigned long flags, ctx, npages;
 
-	pcp = pdev->sysdata;
-	iommu = pcp->pbm->iommu;
-	strbuf = &pcp->pbm->stc;
+	iommu = pdev->dev.archdata.iommu;
+	strbuf = pdev->dev.archdata.stc;
 
 	if (!strbuf->strbuf_enabled)
 		return;
@@ -736,15 +721,13 @@ static void pci_4u_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_
  */
 static void pci_4u_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
 {
-	struct pcidev_cookie *pcp;
-	struct pci_iommu *iommu;
-	struct pci_strbuf *strbuf;
+	struct iommu *iommu;
+	struct strbuf *strbuf;
 	unsigned long flags, ctx, npages, i;
 	u32 bus_addr;
 
-	pcp = pdev->sysdata;
-	iommu = pcp->pbm->iommu;
-	strbuf = &pcp->pbm->stc;
+	iommu = pdev->dev.archdata.iommu;
+	strbuf = pdev->dev.archdata.stc;
 
 	if (!strbuf->strbuf_enabled)
 		return;
@@ -775,7 +758,7 @@ static void pci_4u_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
-struct pci_iommu_ops pci_sun4u_iommu_ops = {
+const struct pci_iommu_ops pci_sun4u_iommu_ops = {
 	.alloc_consistent = pci_4u_alloc_consistent,
 	.free_consistent = pci_4u_free_consistent,
 	.map_single = pci_4u_map_single,
@@ -809,13 +792,12 @@ static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
 
 int pci_dma_supported(struct pci_dev *pdev, u64 device_mask)
 {
-	struct pcidev_cookie *pcp = pdev->sysdata;
 	u64 dma_addr_mask;
 
 	if (pdev == NULL) {
 		dma_addr_mask = 0xffffffff;
 	} else {
-		struct pci_iommu *iommu = pcp->pbm->iommu;
+		struct iommu *iommu = pdev->dev.archdata.iommu;
 
 		dma_addr_mask = iommu->dma_addr_mask;
 