Diffstat (limited to 'arch/alpha')
-rw-r--r--   arch/alpha/kernel/pci_iommu.c   40
1 file changed, 34 insertions(+), 6 deletions(-)
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index e54f829528cb..54540c369c90 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -126,13 +126,34 @@ iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
 	return iommu_arena_new_node(0, hose, base, window_size, align);
 }
 
+static inline int is_span_boundary(unsigned int index, unsigned int nr,
+				   unsigned long shift,
+				   unsigned long boundary_size)
+{
+	shift = (shift + index) & (boundary_size - 1);
+	return shift + nr > boundary_size;
+}
+
 /* Must be called with the arena lock held */
 static long
-iommu_arena_find_pages(struct pci_iommu_arena *arena, long n, long mask)
+iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
+		       long n, long mask)
 {
 	unsigned long *ptes;
 	long i, p, nent;
 	int pass = 0;
+	unsigned long base;
+	unsigned long boundary_size;
+
+	BUG_ON(arena->dma_base & ~PAGE_MASK);
+	base = arena->dma_base >> PAGE_SHIFT;
+	if (dev)
+		boundary_size = ALIGN(dma_get_max_seg_size(dev) + 1, PAGE_SIZE)
+			>> PAGE_SHIFT;
+	else
+		boundary_size = ALIGN(1UL << 32, PAGE_SIZE) >> PAGE_SHIFT;
+
+	BUG_ON(!is_power_of_2(boundary_size));
 
 	/* Search forward for the first mask-aligned sequence of N free ptes */
 	ptes = arena->ptes;
@@ -142,6 +163,11 @@ iommu_arena_find_pages(struct pci_iommu_arena *arena, long n, long mask)
 
 again:
 	while (i < n && p+i < nent) {
+		if (!i && is_span_boundary(p, n, base, boundary_size)) {
+			p = ALIGN(p + 1, mask + 1);
+			goto again;
+		}
+
 		if (ptes[p+i])
 			p = ALIGN(p + i + 1, mask + 1), i = 0;
 		else
@@ -170,7 +196,8 @@ again:
 }
 
 static long
-iommu_arena_alloc(struct pci_iommu_arena *arena, long n, unsigned int align)
+iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
+		  unsigned int align)
 {
 	unsigned long flags;
 	unsigned long *ptes;
@@ -181,7 +208,7 @@ iommu_arena_alloc(struct pci_iommu_arena *arena, long n, unsigned int align)
 	/* Search for N empty ptes */
 	ptes = arena->ptes;
 	mask = max(align, arena->align_entry) - 1;
-	p = iommu_arena_find_pages(arena, n, mask);
+	p = iommu_arena_find_pages(dev, arena, n, mask);
 	if (p < 0) {
 		spin_unlock_irqrestore(&arena->lock, flags);
 		return -1;
@@ -231,6 +258,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
 	unsigned long paddr;
 	dma_addr_t ret;
 	unsigned int align = 0;
+	struct device *dev = pdev ? &pdev->dev : NULL;
 
 	paddr = __pa(cpu_addr);
 
@@ -278,7 +306,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
 	/* Force allocation to 64KB boundary for ISA bridges. */
 	if (pdev && pdev == isa_bridge)
 		align = 8;
-	dma_ofs = iommu_arena_alloc(arena, npages, align);
+	dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
 	if (dma_ofs < 0) {
 		printk(KERN_WARNING "pci_map_single failed: "
 		       "could not allocate dma page tables\n");
@@ -565,7 +593,7 @@ sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
 
 	paddr &= ~PAGE_MASK;
 	npages = calc_npages(paddr + size);
-	dma_ofs = iommu_arena_alloc(arena, npages, 0);
+	dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
 	if (dma_ofs < 0) {
 		/* If we attempted a direct map above but failed, die. */
 		if (leader->dma_address == 0)
@@ -832,7 +860,7 @@ iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
 
 	/* Search for N empty ptes. */
 	ptes = arena->ptes;
-	p = iommu_arena_find_pages(arena, pg_count, align_mask);
+	p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
 	if (p < 0) {
 		spin_unlock_irqrestore(&arena->lock, flags);
 		return -1;
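
The core of this patch is the is_span_boundary() helper: a candidate placement is rejected whenever its pages would straddle a boundary_size-aligned window (the device's maximum segment size when a device is known, otherwise 4 GB), and the search then skips ahead with ALIGN(p + 1, mask + 1) and retries. The following is a minimal standalone sketch of that check in ordinary userspace C with made-up numbers; it mirrors the helper added above but is an illustration, not the kernel code itself.

/*
 * Standalone sketch of the boundary check (assumed example, not kernel code).
 * Pages [base + index, base + index + nr) must not cross a boundary that is
 * aligned to boundary_size pages; boundary_size is a power of two, so the
 * mask below acts as a modulo.
 */
#include <assert.h>
#include <stdio.h>

static inline int is_span_boundary(unsigned int index, unsigned int nr,
				   unsigned long base,
				   unsigned long boundary_size)
{
	unsigned long offset = (base + index) & (boundary_size - 1);

	return offset + nr > boundary_size;
}

int main(void)
{
	/* Hypothetical arena: based at page 0x100, 16-page boundary windows. */
	unsigned long base = 0x100;
	unsigned long boundary = 16;

	/* 8 pages starting at offset 4 stay inside one 16-page window. */
	assert(!is_span_boundary(4, 8, base, boundary));

	/* 8 pages starting at offset 12 would straddle the next boundary,
	 * which is exactly the case the allocator now skips and retries. */
	assert(is_span_boundary(12, 8, base, boundary));

	printf("boundary checks behave as expected\n");
	return 0;
}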