author     David S. Miller <davem@sunset.davemloft.net>    2007-04-27 00:08:21 -0400
committer  David S. Miller <davem@sunset.davemloft.net>    2007-04-27 00:08:21 -0400
commit     16ce82d846f2e6b652a064f91c5019cfe8682be4 (patch)
tree       9100d2baface8ec8d5b7911e00e093fd177cb9e4 /arch/sparc64/kernel
parent     ee5ac9ddf2ea13be2418ac7d0ce5a930e78af013 (diff)
[SPARC64]: Convert PCI over to generic struct iommu/strbuf.
Signed-off-by: David S. Miller <davem@davemloft.net>
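
The conversion is mechanical: the PCI-private types struct pci_iommu, struct pci_strbuf and (for sun4v) struct pci_iommu_batch become the generic struct iommu, struct strbuf and struct iommu_batch, while the member accesses and the pdev->dev.archdata hookup seen in the hunks below stay the same, and allocations switch from kzalloc(sizeof(struct pci_iommu), ...) to kzalloc(sizeof(struct iommu), ...). A minimal sketch of the pattern, in the style of the lines the patch touches (illustrative only, not an excerpt of the patch):

        /* before: bus-specific wrapper types */
        struct pci_iommu *iommu = pdev->dev.archdata.iommu;
        struct pci_strbuf *strbuf = &pbm->stc;

        /* after: generic sparc64 types, same member accesses at every call site */
        struct iommu *iommu = pdev->dev.archdata.iommu;
        struct strbuf *strbuf = &pbm->stc;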
Diffstat (limited to 'arch/sparc64/kernel')
-rw-r--r--  arch/sparc64/kernel/pci_iommu.c  | 53
-rw-r--r--  arch/sparc64/kernel/pci_psycho.c | 10
-rw-r--r--  arch/sparc64/kernel/pci_sabre.c  | 11
-rw-r--r--  arch/sparc64/kernel/pci_schizo.c | 12
-rw-r--r--  arch/sparc64/kernel/pci_sun4v.c  | 34
5 files changed, 59 insertions(+), 61 deletions(-)
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c
index 355ed0ba937a..66712772f494 100644
--- a/arch/sparc64/kernel/pci_iommu.c
+++ b/arch/sparc64/kernel/pci_iommu.c
@@ -1,7 +1,6 @@
-/* $Id: pci_iommu.c,v 1.17 2001/12/17 07:05:09 davem Exp $
- * pci_iommu.c: UltraSparc PCI controller IOM/STC support.
+/* pci_iommu.c: UltraSparc PCI controller IOM/STC support.
  *
- * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
  * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
  */
 
@@ -36,7 +35,7 @@
                              "i" (ASI_PHYS_BYPASS_EC_E))
 
 /* Must be invoked under the IOMMU lock. */
-static void __iommu_flushall(struct pci_iommu *iommu)
+static void __iommu_flushall(struct iommu *iommu)
 {
         unsigned long tag;
         int entry;
@@ -64,7 +63,7 @@ static void __iommu_flushall(struct pci_iommu *iommu)
 #define IOPTE_IS_DUMMY(iommu, iopte) \
         ((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
 
-static inline void iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte)
+static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
 {
         unsigned long val = iopte_val(*iopte);
 
@@ -75,7 +74,7 @@ static inline void iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte)
 }
 
 /* Based largely upon the ppc64 iommu allocator. */
-static long pci_arena_alloc(struct pci_iommu *iommu, unsigned long npages)
+static long pci_arena_alloc(struct iommu *iommu, unsigned long npages)
 {
         struct iommu_arena *arena = &iommu->arena;
         unsigned long n, i, start, end, limit;
@@ -124,7 +123,7 @@ static void pci_arena_free(struct iommu_arena *arena, unsigned long base, unsign
                 __clear_bit(i, arena->map);
 }
 
-void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask)
+void pci_iommu_table_init(struct iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask)
 {
         unsigned long i, tsbbase, order, sz, num_tsb_entries;
 
@@ -170,7 +169,7 @@ void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset,
                 iopte_make_dummy(iommu, &iommu->page_table[i]);
 }
 
-static inline iopte_t *alloc_npages(struct pci_iommu *iommu, unsigned long npages)
+static inline iopte_t *alloc_npages(struct iommu *iommu, unsigned long npages)
 {
         long entry;
 
@@ -181,12 +180,12 @@ static inline iopte_t *alloc_npages(struct pci_iommu *iommu, unsigned long npage
         return iommu->page_table + entry;
 }
 
-static inline void free_npages(struct pci_iommu *iommu, dma_addr_t base, unsigned long npages)
+static inline void free_npages(struct iommu *iommu, dma_addr_t base, unsigned long npages)
 {
         pci_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
 }
 
-static int iommu_alloc_ctx(struct pci_iommu *iommu)
+static int iommu_alloc_ctx(struct iommu *iommu)
 {
         int lowest = iommu->ctx_lowest_free;
         int sz = IOMMU_NUM_CTXS - lowest;
@@ -205,7 +204,7 @@ static int iommu_alloc_ctx(struct pci_iommu *iommu)
         return n;
 }
 
-static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx)
+static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
 {
         if (likely(ctx)) {
                 __clear_bit(ctx, iommu->ctx_bitmap);
@@ -220,7 +219,7 @@ static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx)
  */
 static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp)
 {
-        struct pci_iommu *iommu;
+        struct iommu *iommu;
         iopte_t *iopte;
         unsigned long flags, order, first_page;
         void *ret;
@@ -266,7 +265,7 @@ static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr
 /* Free and unmap a consistent DMA translation. */
 static void pci_4u_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
 {
-        struct pci_iommu *iommu;
+        struct iommu *iommu;
         iopte_t *iopte;
         unsigned long flags, order, npages;
 
@@ -291,8 +290,8 @@ static void pci_4u_free_consistent(struct pci_dev *pdev, size_t size, void *cpu,
  */
 static dma_addr_t pci_4u_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
 {
-        struct pci_iommu *iommu;
-        struct pci_strbuf *strbuf;
+        struct iommu *iommu;
+        struct strbuf *strbuf;
         iopte_t *base;
         unsigned long flags, npages, oaddr;
         unsigned long i, base_paddr, ctx;
@@ -343,7 +342,7 @@ bad_no_ctx:
         return PCI_DMA_ERROR_CODE;
 }
 
-static void pci_strbuf_flush(struct pci_strbuf *strbuf, struct pci_iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, int direction)
+static void pci_strbuf_flush(struct strbuf *strbuf, struct iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, int direction)
 {
         int limit;
 
@@ -410,8 +409,8 @@ do_flush_sync:
 /* Unmap a single streaming mode DMA translation. */
 static void pci_4u_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
 {
-        struct pci_iommu *iommu;
-        struct pci_strbuf *strbuf;
+        struct iommu *iommu;
+        struct strbuf *strbuf;
         iopte_t *base;
         unsigned long flags, npages, ctx, i;
 
@@ -541,8 +540,8 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
  */
 static int pci_4u_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
 {
-        struct pci_iommu *iommu;
-        struct pci_strbuf *strbuf;
+        struct iommu *iommu;
+        struct strbuf *strbuf;
         unsigned long flags, ctx, npages, iopte_protection;
         iopte_t *base;
         u32 dma_base;
@@ -626,8 +625,8 @@ bad_no_ctx:
 /* Unmap a set of streaming mode DMA translations. */
 static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
 {
-        struct pci_iommu *iommu;
-        struct pci_strbuf *strbuf;
+        struct iommu *iommu;
+        struct strbuf *strbuf;
         iopte_t *base;
         unsigned long flags, ctx, i, npages;
         u32 bus_addr;
@@ -684,8 +683,8 @@ static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in
  */
 static void pci_4u_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
 {
-        struct pci_iommu *iommu;
-        struct pci_strbuf *strbuf;
+        struct iommu *iommu;
+        struct strbuf *strbuf;
         unsigned long flags, ctx, npages;
 
         iommu = pdev->dev.archdata.iommu;
@@ -722,8 +721,8 @@ static void pci_4u_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_
  */
 static void pci_4u_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
 {
-        struct pci_iommu *iommu;
-        struct pci_strbuf *strbuf;
+        struct iommu *iommu;
+        struct strbuf *strbuf;
         unsigned long flags, ctx, npages, i;
         u32 bus_addr;
 
@@ -798,7 +797,7 @@ int pci_dma_supported(struct pci_dev *pdev, u64 device_mask)
         if (pdev == NULL) {
                 dma_addr_mask = 0xffffffff;
         } else {
-                struct pci_iommu *iommu = pdev->dev.archdata.iommu;
+                struct iommu *iommu = pdev->dev.archdata.iommu;
 
                 dma_addr_mask = iommu->dma_addr_mask;
 
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c
index 3725910c8b2b..253d40ec2245 100644
--- a/arch/sparc64/kernel/pci_psycho.c
+++ b/arch/sparc64/kernel/pci_psycho.c
@@ -269,7 +269,7 @@ static void __psycho_check_one_stc(struct pci_controller_info *p,
                                    struct pci_pbm_info *pbm,
                                    int is_pbm_a)
 {
-        struct pci_strbuf *strbuf = &pbm->stc;
+        struct strbuf *strbuf = &pbm->stc;
         unsigned long regbase = p->pbm_A.controller_regs;
         unsigned long err_base, tag_base, line_base;
         u64 control;
@@ -418,7 +418,7 @@ static void psycho_check_iommu_error(struct pci_controller_info *p,
                                      unsigned long afar,
                                      enum psycho_error_type type)
 {
-        struct pci_iommu *iommu = p->pbm_A.iommu;
+        struct iommu *iommu = p->pbm_A.iommu;
         unsigned long iommu_tag[16];
         unsigned long iommu_data[16];
         unsigned long flags;
@@ -941,7 +941,7 @@ static void psycho_scan_bus(struct pci_controller_info *p)
 
 static void psycho_iommu_init(struct pci_controller_info *p)
 {
-        struct pci_iommu *iommu = p->pbm_A.iommu;
+        struct iommu *iommu = p->pbm_A.iommu;
         unsigned long i;
         u64 control;
 
@@ -1131,7 +1131,7 @@ void psycho_init(struct device_node *dp, char *model_name)
 {
         struct linux_prom64_registers *pr_regs;
         struct pci_controller_info *p;
-        struct pci_iommu *iommu;
+        struct iommu *iommu;
         struct property *prop;
         u32 upa_portid;
         int is_pbm_a;
@@ -1154,7 +1154,7 @@ void psycho_init(struct device_node *dp, char *model_name)
                 prom_printf("PSYCHO: Fatal memory allocation error.\n");
                 prom_halt();
         }
-        iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
+        iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
         if (!iommu) {
                 prom_printf("PSYCHO: Fatal memory allocation error.\n");
                 prom_halt();
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c
index 9a2ce0637c80..397862fbd9e1 100644
--- a/arch/sparc64/kernel/pci_sabre.c
+++ b/arch/sparc64/kernel/pci_sabre.c
@@ -1,7 +1,6 @@
-/* $Id: pci_sabre.c,v 1.42 2002/01/23 11:27:32 davem Exp $
- * pci_sabre.c: Sabre specific PCI controller support.
+/* pci_sabre.c: Sabre specific PCI controller support.
  *
- * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@caipfs.rutgers.edu)
+ * Copyright (C) 1997, 1998, 1999, 2007 David S. Miller (davem@davemloft.net)
  * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be)
  * Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com)
  */
@@ -499,7 +498,7 @@ static void sabre_check_iommu_error(struct pci_controller_info *p,
                                     unsigned long afsr,
                                     unsigned long afar)
 {
-        struct pci_iommu *iommu = p->pbm_A.iommu;
+        struct iommu *iommu = p->pbm_A.iommu;
         unsigned long iommu_tag[16];
         unsigned long iommu_data[16];
         unsigned long flags;
@@ -948,7 +947,7 @@ static void sabre_iommu_init(struct pci_controller_info *p,
                              int tsbsize, unsigned long dvma_offset,
                              u32 dma_mask)
 {
-        struct pci_iommu *iommu = p->pbm_A.iommu;
+        struct iommu *iommu = p->pbm_A.iommu;
         unsigned long i;
         u64 control;
 
@@ -1017,7 +1016,7 @@ void sabre_init(struct device_node *dp, char *model_name)
 {
         const struct linux_prom64_registers *pr_regs;
         struct pci_controller_info *p;
-        struct pci_iommu *iommu;
+        struct iommu *iommu;
         int tsbsize;
         const u32 *busrange;
         const u32 *vdma;
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c
index 47a5aa94dcae..91a7385e5d32 100644
--- a/arch/sparc64/kernel/pci_schizo.c
+++ b/arch/sparc64/kernel/pci_schizo.c
@@ -279,7 +279,7 @@ struct pci_pbm_info *pbm_for_ino(struct pci_controller_info *p, u32 ino)
 static void __schizo_check_stc_error_pbm(struct pci_pbm_info *pbm,
                                          enum schizo_error_type type)
 {
-        struct pci_strbuf *strbuf = &pbm->stc;
+        struct strbuf *strbuf = &pbm->stc;
         unsigned long regbase = pbm->pbm_regs;
         unsigned long err_base, tag_base, line_base;
         u64 control;
@@ -387,7 +387,7 @@ static void __schizo_check_stc_error_pbm(struct pci_pbm_info *pbm,
 static void schizo_check_iommu_error_pbm(struct pci_pbm_info *pbm,
                                          enum schizo_error_type type)
 {
-        struct pci_iommu *iommu = pbm->iommu;
+        struct iommu *iommu = pbm->iommu;
         unsigned long iommu_tag[16];
         unsigned long iommu_data[16];
         unsigned long flags;
@@ -1308,7 +1308,7 @@ static void schizo_pbm_strbuf_init(struct pci_pbm_info *pbm)
 
 static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
 {
-        struct pci_iommu *iommu = pbm->iommu;
+        struct iommu *iommu = pbm->iommu;
         unsigned long i, tagbase, database;
         struct property *prop;
         u32 vdma[2], dma_mask;
@@ -1580,7 +1580,7 @@ static inline int portid_compare(u32 x, u32 y, int chip_type)
 static void __schizo_init(struct device_node *dp, char *model_name, int chip_type)
 {
         struct pci_controller_info *p;
-        struct pci_iommu *iommu;
+        struct iommu *iommu;
         u32 portid;
 
         portid = of_getintprop_default(dp, "portid", 0xff);
@@ -1605,13 +1605,13 @@ static void __schizo_init(struct device_node *dp, char *model_name, int chip_typ
         if (!p)
                 goto memfail;
 
-        iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
+        iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
         if (!iommu)
                 goto memfail;
 
         p->pbm_A.iommu = iommu;
 
-        iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
+        iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
         if (!iommu)
                 goto memfail;
 
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index 0e99808f2121..94295c219329 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -29,7 +29,7 @@
 
 #define PGLIST_NENTS (PAGE_SIZE / sizeof(u64))
 
-struct pci_iommu_batch {
+struct iommu_batch {
         struct pci_dev *pdev;           /* Device mapping is for. */
         unsigned long prot;             /* IOMMU page protections */
         unsigned long entry;            /* Index into IOTSB. */
@@ -37,12 +37,12 @@ struct pci_iommu_batch {
         unsigned long npages;           /* Number of pages in list. */
 };
 
-static DEFINE_PER_CPU(struct pci_iommu_batch, pci_iommu_batch);
+static DEFINE_PER_CPU(struct iommu_batch, pci_iommu_batch);
 
 /* Interrupts must be disabled. */
 static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long prot, unsigned long entry)
 {
-        struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
+        struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
 
         p->pdev = pdev;
         p->prot = prot;
@@ -51,7 +51,7 @@ static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long pro
 }
 
 /* Interrupts must be disabled. */
-static long pci_iommu_batch_flush(struct pci_iommu_batch *p)
+static long pci_iommu_batch_flush(struct iommu_batch *p)
 {
         struct pci_pbm_info *pbm = p->pdev->dev.archdata.host_controller;
         unsigned long devhandle = pbm->devhandle;
@@ -89,7 +89,7 @@ static long pci_iommu_batch_flush(struct pci_iommu_batch *p)
 /* Interrupts must be disabled. */
 static inline long pci_iommu_batch_add(u64 phys_page)
 {
-        struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
+        struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
 
         BUG_ON(p->npages >= PGLIST_NENTS);
 
@@ -103,7 +103,7 @@ static inline long pci_iommu_batch_add(u64 phys_page)
 /* Interrupts must be disabled. */
 static inline long pci_iommu_batch_end(void)
 {
-        struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
+        struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
 
         BUG_ON(p->npages >= PGLIST_NENTS);
 
@@ -159,7 +159,7 @@ static void pci_arena_free(struct iommu_arena *arena, unsigned long base, unsign
 
 static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp)
 {
-        struct pci_iommu *iommu;
+        struct iommu *iommu;
         unsigned long flags, order, first_page, npages, n;
         void *ret;
         long entry;
@@ -225,7 +225,7 @@ arena_alloc_fail:
 static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
 {
         struct pci_pbm_info *pbm;
-        struct pci_iommu *iommu;
+        struct iommu *iommu;
         unsigned long flags, order, npages, entry;
         u32 devhandle;
 
@@ -257,7 +257,7 @@ static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu,
 
 static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
 {
-        struct pci_iommu *iommu;
+        struct iommu *iommu;
         unsigned long flags, npages, oaddr;
         unsigned long i, base_paddr;
         u32 bus_addr, ret;
@@ -321,7 +321,7 @@ iommu_map_fail:
 static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
 {
         struct pci_pbm_info *pbm;
-        struct pci_iommu *iommu;
+        struct iommu *iommu;
         unsigned long flags, npages;
         long entry;
         u32 devhandle;
@@ -456,7 +456,7 @@ iommu_map_failed:
 
 static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
 {
-        struct pci_iommu *iommu;
+        struct iommu *iommu;
         unsigned long flags, npages, prot;
         u32 dma_base;
         struct scatterlist *sgtmp;
@@ -532,7 +532,7 @@ iommu_map_failed:
 static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
 {
         struct pci_pbm_info *pbm;
-        struct pci_iommu *iommu;
+        struct iommu *iommu;
         unsigned long flags, i, npages;
         long entry;
         u32 devhandle, bus_addr;
@@ -705,7 +705,7 @@ static void pci_sun4v_scan_bus(struct pci_controller_info *p)
 }
 
 static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
-                                            struct pci_iommu *iommu)
+                                            struct iommu *iommu)
 {
         struct iommu_arena *arena = &iommu->arena;
         unsigned long i, cnt = 0;
@@ -734,7 +734,7 @@ static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
 
 static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
 {
-        struct pci_iommu *iommu = pbm->iommu;
+        struct iommu *iommu = pbm->iommu;
         struct property *prop;
         unsigned long num_tsb_entries, sz;
         u32 vdma[2], dma_mask, dma_offset;
@@ -1279,7 +1279,7 @@ static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node
 void sun4v_pci_init(struct device_node *dp, char *model_name)
 {
         struct pci_controller_info *p;
-        struct pci_iommu *iommu;
+        struct iommu *iommu;
         struct property *prop;
         struct linux_prom64_registers *regs;
         u32 devhandle;
@@ -1319,13 +1319,13 @@ void sun4v_pci_init(struct device_node *dp, char *model_name)
         if (!p)
                 goto fatal_memory_error;
 
-        iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
+        iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
         if (!iommu)
                 goto fatal_memory_error;
 
         p->pbm_A.iommu = iommu;
 
-        iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
+        iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
         if (!iommu)
                 goto fatal_memory_error;
 