diff options
author | David S. Miller <davem@sunset.davemloft.net> | 2006-02-14 00:50:27 -0500 |
---|---|---|
committer | David S. Miller <davem@sunset.davemloft.net> | 2006-03-20 04:12:48 -0500 |
commit | 7c8f486ae7fe90d7bb99a70a42d71c9a40688ec2 (patch) | |
tree | 530873917fefde54ab7a5a86f9027ebe70bdd966 /arch | |
parent | 87bdc367ca1a7e16c29a6bff6b1e8fe179e27f90 (diff) |
[SPARC64]: Fix IOMMU mapping on sun4v.
We should dynamically allocate the per-cpu pglist, not use
an in-kernel-image datum, since __pa() does not work on
such addresses.
Also, consistently use "u32" for devhandle.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/sparc64/kernel/pci_sun4v.c | 76 |
1 file changed, 46 insertions(+), 30 deletions(-)
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c index 17080a61ad6c..ac311d3dbc5c 100644 --- a/arch/sparc64/kernel/pci_sun4v.c +++ b/arch/sparc64/kernel/pci_sun4v.c | |||
@@ -24,10 +24,10 @@ | |||
24 | 24 | ||
25 | #include "pci_sun4v.h" | 25 | #include "pci_sun4v.h" |
26 | 26 | ||
27 | #define PGLIST_NENTS 2048 | 27 | #define PGLIST_NENTS (PAGE_SIZE / sizeof(u64)) |
28 | 28 | ||
29 | struct sun4v_pglist { | 29 | struct sun4v_pglist { |
30 | u64 pglist[PGLIST_NENTS]; | 30 | u64 *pglist; |
31 | }; | 31 | }; |
32 | 32 | ||
33 | static DEFINE_PER_CPU(struct sun4v_pglist, iommu_pglists); | 33 | static DEFINE_PER_CPU(struct sun4v_pglist, iommu_pglists); |
@@ -83,10 +83,11 @@ static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr | |||
83 | { | 83 | { |
84 | struct pcidev_cookie *pcp; | 84 | struct pcidev_cookie *pcp; |
85 | struct pci_iommu *iommu; | 85 | struct pci_iommu *iommu; |
86 | unsigned long devhandle, flags, order, first_page, npages, n; | 86 | unsigned long flags, order, first_page, npages, n; |
87 | void *ret; | 87 | void *ret; |
88 | long entry; | 88 | long entry; |
89 | u64 *pglist; | 89 | u64 *pglist; |
90 | u32 devhandle; | ||
90 | int cpu; | 91 | int cpu; |
91 | 92 | ||
92 | size = IO_PAGE_ALIGN(size); | 93 | size = IO_PAGE_ALIGN(size); |
@@ -123,7 +124,7 @@ static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr | |||
123 | 124 | ||
124 | cpu = get_cpu(); | 125 | cpu = get_cpu(); |
125 | 126 | ||
126 | pglist = &__get_cpu_var(iommu_pglists).pglist[0]; | 127 | pglist = __get_cpu_var(iommu_pglists).pglist; |
127 | for (n = 0; n < npages; n++) | 128 | for (n = 0; n < npages; n++) |
128 | pglist[n] = first_page + (n * PAGE_SIZE); | 129 | pglist[n] = first_page + (n * PAGE_SIZE); |
129 | 130 | ||
@@ -149,7 +150,8 @@ static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, | |||
149 | { | 150 | { |
150 | struct pcidev_cookie *pcp; | 151 | struct pcidev_cookie *pcp; |
151 | struct pci_iommu *iommu; | 152 | struct pci_iommu *iommu; |
152 | unsigned long flags, order, npages, entry, devhandle; | 153 | unsigned long flags, order, npages, entry; |
154 | u32 devhandle; | ||
153 | 155 | ||
154 | npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; | 156 | npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; |
155 | pcp = pdev->sysdata; | 157 | pcp = pdev->sysdata; |
@@ -182,8 +184,8 @@ static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, | |||
182 | struct pcidev_cookie *pcp; | 184 | struct pcidev_cookie *pcp; |
183 | struct pci_iommu *iommu; | 185 | struct pci_iommu *iommu; |
184 | unsigned long flags, npages, oaddr; | 186 | unsigned long flags, npages, oaddr; |
185 | unsigned long i, base_paddr, devhandle; | 187 | unsigned long i, base_paddr; |
186 | u32 bus_addr, ret; | 188 | u32 devhandle, bus_addr, ret; |
187 | unsigned long prot; | 189 | unsigned long prot; |
188 | long entry; | 190 | long entry; |
189 | u64 *pglist; | 191 | u64 *pglist; |
@@ -219,7 +221,7 @@ static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, | |||
219 | 221 | ||
220 | cpu = get_cpu(); | 222 | cpu = get_cpu(); |
221 | 223 | ||
222 | pglist = &__get_cpu_var(iommu_pglists).pglist[0]; | 224 | pglist = __get_cpu_var(iommu_pglists).pglist; |
223 | for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) | 225 | for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) |
224 | pglist[i] = base_paddr; | 226 | pglist[i] = base_paddr; |
225 | 227 | ||
@@ -248,8 +250,9 @@ static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_ | |||
248 | { | 250 | { |
249 | struct pcidev_cookie *pcp; | 251 | struct pcidev_cookie *pcp; |
250 | struct pci_iommu *iommu; | 252 | struct pci_iommu *iommu; |
251 | unsigned long flags, npages, devhandle; | 253 | unsigned long flags, npages; |
252 | long entry; | 254 | long entry; |
255 | u32 devhandle; | ||
253 | 256 | ||
254 | if (unlikely(direction == PCI_DMA_NONE)) { | 257 | if (unlikely(direction == PCI_DMA_NONE)) { |
255 | if (printk_ratelimit()) | 258 | if (printk_ratelimit()) |
@@ -285,7 +288,7 @@ static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_ | |||
285 | #define SG_ENT_PHYS_ADDRESS(SG) \ | 288 | #define SG_ENT_PHYS_ADDRESS(SG) \ |
286 | (__pa(page_address((SG)->page)) + (SG)->offset) | 289 | (__pa(page_address((SG)->page)) + (SG)->offset) |
287 | 290 | ||
288 | static inline void fill_sg(long entry, unsigned long devhandle, | 291 | static inline void fill_sg(long entry, u32 devhandle, |
289 | struct scatterlist *sg, | 292 | struct scatterlist *sg, |
290 | int nused, int nelems, unsigned long prot) | 293 | int nused, int nelems, unsigned long prot) |
291 | { | 294 | { |
@@ -295,7 +298,7 @@ static inline void fill_sg(long entry, unsigned long devhandle, | |||
295 | u64 *pglist; | 298 | u64 *pglist; |
296 | 299 | ||
297 | cpu = get_cpu(); | 300 | cpu = get_cpu(); |
298 | pglist = &__get_cpu_var(iommu_pglists).pglist[0]; | 301 | pglist = __get_cpu_var(iommu_pglists).pglist; |
299 | pglist_ent = 0; | 302 | pglist_ent = 0; |
300 | for (i = 0; i < nused; i++) { | 303 | for (i = 0; i < nused; i++) { |
301 | unsigned long pteval = ~0UL; | 304 | unsigned long pteval = ~0UL; |
@@ -380,8 +383,8 @@ static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int n | |||
380 | { | 383 | { |
381 | struct pcidev_cookie *pcp; | 384 | struct pcidev_cookie *pcp; |
382 | struct pci_iommu *iommu; | 385 | struct pci_iommu *iommu; |
383 | unsigned long flags, npages, prot, devhandle; | 386 | unsigned long flags, npages, prot; |
384 | u32 dma_base; | 387 | u32 devhandle, dma_base; |
385 | struct scatterlist *sgtmp; | 388 | struct scatterlist *sgtmp; |
386 | long entry; | 389 | long entry; |
387 | int used; | 390 | int used; |
@@ -451,9 +454,9 @@ static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in | |||
451 | { | 454 | { |
452 | struct pcidev_cookie *pcp; | 455 | struct pcidev_cookie *pcp; |
453 | struct pci_iommu *iommu; | 456 | struct pci_iommu *iommu; |
454 | unsigned long flags, i, npages, devhandle; | 457 | unsigned long flags, i, npages; |
455 | long entry; | 458 | long entry; |
456 | u32 bus_addr; | 459 | u32 devhandle, bus_addr; |
457 | 460 | ||
458 | if (unlikely(direction == PCI_DMA_NONE)) { | 461 | if (unlikely(direction == PCI_DMA_NONE)) { |
459 | if (printk_ratelimit()) | 462 | if (printk_ratelimit()) |
@@ -805,7 +808,8 @@ static void probe_existing_entries(struct pci_pbm_info *pbm, | |||
805 | struct pci_iommu *iommu) | 808 | struct pci_iommu *iommu) |
806 | { | 809 | { |
807 | struct pci_iommu_arena *arena = &iommu->arena; | 810 | struct pci_iommu_arena *arena = &iommu->arena; |
808 | unsigned long i, devhandle; | 811 | unsigned long i; |
812 | u32 devhandle; | ||
809 | 813 | ||
810 | devhandle = pbm->devhandle; | 814 | devhandle = pbm->devhandle; |
811 | for (i = 0; i < arena->limit; i++) { | 815 | for (i = 0; i < arena->limit; i++) { |
@@ -906,7 +910,7 @@ static void pci_sun4v_get_bus_range(struct pci_pbm_info *pbm) | |||
906 | 910 | ||
907 | } | 911 | } |
908 | 912 | ||
909 | static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node, unsigned int devhandle) | 913 | static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node, u32 devhandle) |
910 | { | 914 | { |
911 | struct pci_pbm_info *pbm; | 915 | struct pci_pbm_info *pbm; |
912 | int err, i; | 916 | int err, i; |
@@ -978,7 +982,8 @@ void sun4v_pci_init(int node, char *model_name) | |||
978 | struct pci_controller_info *p; | 982 | struct pci_controller_info *p; |
979 | struct pci_iommu *iommu; | 983 | struct pci_iommu *iommu; |
980 | struct linux_prom64_registers regs; | 984 | struct linux_prom64_registers regs; |
981 | unsigned int devhandle; | 985 | u32 devhandle; |
986 | int i; | ||
982 | 987 | ||
983 | prom_getproperty(node, "reg", (char *)®s, sizeof(regs)); | 988 | prom_getproperty(node, "reg", (char *)®s, sizeof(regs)); |
984 | devhandle = (regs.phys_addr >> 32UL) & 0x0fffffff; | 989 | devhandle = (regs.phys_addr >> 32UL) & 0x0fffffff; |
@@ -999,26 +1004,32 @@ void sun4v_pci_init(int node, char *model_name) | |||
999 | } | 1004 | } |
1000 | } | 1005 | } |
1001 | 1006 | ||
1002 | p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC); | 1007 | for (i = 0; i < NR_CPUS; i++) { |
1003 | if (!p) { | 1008 | unsigned long page = get_zeroed_page(GFP_ATOMIC); |
1004 | prom_printf("SUN4V_PCI: Fatal memory allocation error.\n"); | 1009 | |
1005 | prom_halt(); | 1010 | if (!page) |
1011 | goto fatal_memory_error; | ||
1012 | |||
1013 | per_cpu(iommu_pglists, i).pglist = (u64 *) page; | ||
1006 | } | 1014 | } |
1015 | |||
1016 | p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC); | ||
1017 | if (!p) | ||
1018 | goto fatal_memory_error; | ||
1019 | |||
1007 | memset(p, 0, sizeof(*p)); | 1020 | memset(p, 0, sizeof(*p)); |
1008 | 1021 | ||
1009 | iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC); | 1022 | iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC); |
1010 | if (!iommu) { | 1023 | if (!iommu) |
1011 | prom_printf("SCHIZO: Fatal memory allocation error.\n"); | 1024 | goto fatal_memory_error; |
1012 | prom_halt(); | 1025 | |
1013 | } | ||
1014 | memset(iommu, 0, sizeof(*iommu)); | 1026 | memset(iommu, 0, sizeof(*iommu)); |
1015 | p->pbm_A.iommu = iommu; | 1027 | p->pbm_A.iommu = iommu; |
1016 | 1028 | ||
1017 | iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC); | 1029 | iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC); |
1018 | if (!iommu) { | 1030 | if (!iommu) |
1019 | prom_printf("SCHIZO: Fatal memory allocation error.\n"); | 1031 | goto fatal_memory_error; |
1020 | prom_halt(); | 1032 | |
1021 | } | ||
1022 | memset(iommu, 0, sizeof(*iommu)); | 1033 | memset(iommu, 0, sizeof(*iommu)); |
1023 | p->pbm_B.iommu = iommu; | 1034 | p->pbm_B.iommu = iommu; |
1024 | 1035 | ||
@@ -1040,4 +1051,9 @@ void sun4v_pci_init(int node, char *model_name) | |||
1040 | pci_memspace_mask = 0x7fffffffUL; | 1051 | pci_memspace_mask = 0x7fffffffUL; |
1041 | 1052 | ||
1042 | pci_sun4v_pbm_init(p, node, devhandle); | 1053 | pci_sun4v_pbm_init(p, node, devhandle); |
1054 | return; | ||
1055 | |||
1056 | fatal_memory_error: | ||
1057 | prom_printf("SUN4V_PCI: Fatal memory allocation error.\n"); | ||
1058 | prom_halt(); | ||
1043 | } | 1059 | } |