Diffstat (limited to 'arch/alpha/kernel/pci_iommu.c')
-rw-r--r--	arch/alpha/kernel/pci_iommu.c | 64 ++++++++++++++++++++++++++++++++++++++++++++++----------------------
1 file changed, 42 insertions(+), 22 deletions(-)
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index 26d3789dfdd0..4e1c08636edd 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -10,6 +10,7 @@
 #include <linux/scatterlist.h>
 #include <linux/log2.h>
 #include <linux/dma-mapping.h>
+#include <linux/iommu-helper.h>
 
 #include <asm/io.h>
 #include <asm/hwrpb.h>
@@ -31,7 +32,6 @@
 #endif
 
 #define DEBUG_NODIRECT 0
-#define DEBUG_FORCEDAC 0
 
 #define ISA_DMA_MASK 0x00ffffff
 
@@ -128,37 +128,55 @@ iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
 
 /* Must be called with the arena lock held */
 static long
-iommu_arena_find_pages(struct pci_iommu_arena *arena, long n, long mask)
+iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
+		       long n, long mask)
 {
 	unsigned long *ptes;
 	long i, p, nent;
+	int pass = 0;
+	unsigned long base;
+	unsigned long boundary_size;
+
+	base = arena->dma_base >> PAGE_SHIFT;
+	if (dev) {
+		boundary_size = dma_get_seg_boundary(dev) + 1;
+		boundary_size >>= PAGE_SHIFT;
+	} else {
+		boundary_size = 1UL << (32 - PAGE_SHIFT);
+	}
 
 	/* Search forward for the first mask-aligned sequence of N free ptes */
 	ptes = arena->ptes;
 	nent = arena->size >> PAGE_SHIFT;
-	p = (arena->next_entry + mask) & ~mask;
+	p = ALIGN(arena->next_entry, mask + 1);
 	i = 0;
+
+again:
 	while (i < n && p+i < nent) {
+		if (!i && iommu_is_span_boundary(p, n, base, boundary_size)) {
+			p = ALIGN(p + 1, mask + 1);
+			goto again;
+		}
+
 		if (ptes[p+i])
-			p = (p + i + 1 + mask) & ~mask, i = 0;
+			p = ALIGN(p + i + 1, mask + 1), i = 0;
 		else
 			i = i + 1;
 	}
 
 	if (i < n) {
-		/* Reached the end.  Flush the TLB and restart the
-		   search from the beginning.  */
-		alpha_mv.mv_pci_tbi(arena->hose, 0, -1);
-
-		p = 0, i = 0;
-		while (i < n && p+i < nent) {
-			if (ptes[p+i])
-				p = (p + i + 1 + mask) & ~mask, i = 0;
-			else
-				i = i + 1;
-		}
-
-		if (i < n)
+		if (pass < 1) {
+			/*
+			 * Reached the end.  Flush the TLB and restart
+			 * the search from the beginning.
+			 */
+			alpha_mv.mv_pci_tbi(arena->hose, 0, -1);
+
+			pass++;
+			p = 0;
+			i = 0;
+			goto again;
+		} else
 			return -1;
 	}
 
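The new iommu_is_span_boundary() check above is what keeps an allocation from straddling a device's DMA segment boundary; the helper comes from the <linux/iommu-helper.h> include added earlier in this patch. A minimal user-space sketch of what I understand its masking arithmetic to be (crosses_boundary() here is an illustration, not the kernel source):

#include <assert.h>
#include <stdio.h>

/* True if the nr-page range starting at PTE index 'index' would cross a
 * boundary_size-aligned boundary.  'shift' is the arena's DMA base in
 * pages; boundary_size must be a power of two for the masking to work. */
static int crosses_boundary(unsigned long index, unsigned long nr,
                            unsigned long shift, unsigned long boundary_size)
{
	unsigned long offset = (shift + index) & (boundary_size - 1);
	return offset + nr > boundary_size;
}

int main(void)
{
	/* A 4 GB boundary with 8 KB pages: 1 << (32 - 13) pages. */
	unsigned long boundary_size = 1UL << 19;

	/* A 4-page mapping ending exactly at the boundary is fine... */
	assert(!crosses_boundary(boundary_size - 4, 4, 0, boundary_size));
	/* ...but starting one page later would straddle it. */
	assert(crosses_boundary(boundary_size - 3, 4, 0, boundary_size));

	printf("boundary checks behave as expected\n");
	return 0;
}

The switch from the open-coded (p + mask) & ~mask to ALIGN(p, mask + 1) is a readability cleanup rather than a behavior change: for a power-of-two alignment a, the kernel's ALIGN(x, a) expands to the same ((x) + (a) - 1) & ~((a) - 1) arithmetic.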
@@ -168,7 +186,8 @@ iommu_arena_find_pages(struct pci_iommu_arena *arena, long n, long mask)
 }
 
 static long
-iommu_arena_alloc(struct pci_iommu_arena *arena, long n, unsigned int align)
+iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
+		  unsigned int align)
 {
 	unsigned long flags;
 	unsigned long *ptes;
@@ -179,7 +198,7 @@ iommu_arena_alloc(struct pci_iommu_arena *arena, long n, unsigned int align)
 	/* Search for N empty ptes */
 	ptes = arena->ptes;
 	mask = max(align, arena->align_entry) - 1;
-	p = iommu_arena_find_pages(arena, n, mask);
+	p = iommu_arena_find_pages(dev, arena, n, mask);
 	if (p < 0) {
 		spin_unlock_irqrestore(&arena->lock, flags);
 		return -1;
@@ -229,6 +248,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
 	unsigned long paddr;
 	dma_addr_t ret;
 	unsigned int align = 0;
+	struct device *dev = pdev ? &pdev->dev : NULL;
 
 	paddr = __pa(cpu_addr);
 
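To put numbers on the boundary_size computation this dev pointer feeds into, assuming alpha's 8 KB pages (PAGE_SHIFT == 13), a 64-bit unsigned long, and a device whose dma_get_seg_boundary() returns the default 0xffffffff mask: both branches in iommu_arena_find_pages() come out to the same 4 GB window, so passing a NULL dev for devices without a struct device is safe. A hypothetical check:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* Branch taken when dev != NULL, assuming the default 0xffffffff
	 * segment boundary mask: */
	unsigned long from_dev = (0xffffffffUL + 1) >> 13;	/* 0x80000 pages */

	/* Fallback branch taken when dev == NULL: */
	unsigned long fallback = 1UL << (32 - 13);		/* 0x80000 pages */

	assert(from_dev == fallback);
	printf("boundary window: %#lx pages (4 GB)\n", from_dev);
	return 0;
}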
@@ -276,7 +296,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
 	/* Force allocation to 64KB boundary for ISA bridges. */
 	if (pdev && pdev == isa_bridge)
 		align = 8;
-	dma_ofs = iommu_arena_alloc(arena, npages, align);
+	dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
 	if (dma_ofs < 0) {
 		printk(KERN_WARNING "pci_map_single failed: "
 		       "could not allocate dma page tables\n");
@@ -563,7 +583,7 @@ sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
 
 	paddr &= ~PAGE_MASK;
 	npages = calc_npages(paddr + size);
-	dma_ofs = iommu_arena_alloc(arena, npages, 0);
+	dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
 	if (dma_ofs < 0) {
 		/* If we attempted a direct map above but failed, die. */
 		if (leader->dma_address == 0)
@@ -830,7 +850,7 @@ iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
 
 	/* Search for N empty ptes. */
 	ptes = arena->ptes;
-	p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
+	p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
 	if (p < 0) {
 		spin_unlock_irqrestore(&arena->lock, flags);
 		return -1;
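Note that iommu_reserve() passes a NULL device: it reserves arena pages with no particular device in hand, so the search falls back to the built-in 4 GB boundary in iommu_arena_find_pages() rather than any per-device segment limit.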