author     Linus Torvalds <torvalds@g5.osdl.org>   2005-10-14 20:17:04 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>   2005-10-14 20:17:04 -0400
commit     f8cc5756dee3c2a4c56a7f089291f5ae0232747f (patch)
tree       c56914922a40cca1a48ac0191f2f5d7fb224ad55
parent     bf7c7decb9752a3cb644b435cb354c8948606680 (diff)
parent     b4d1b825785847cddee6d104113da913f2ca8efb (diff)

Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6
-rw-r--r--  arch/sparc64/kernel/pci_iommu.c    363
-rw-r--r--  arch/sparc64/kernel/pci_psycho.c    44
-rw-r--r--  arch/sparc64/kernel/pci_sabre.c     39
-rw-r--r--  arch/sparc64/kernel/pci_schizo.c    57
-rw-r--r--  arch/sparc64/kernel/smp.c            7
-rw-r--r--  arch/sparc64/mm/ultra.S             16
-rw-r--r--  arch/sparc64/prom/misc.c            12
-rw-r--r--  drivers/scsi/qlogicpti.c            39
-rw-r--r--  include/asm-sparc64/pbm.h           30
9 files changed, 215 insertions(+), 392 deletions(-)
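
This merge brings David Miller's sparc-2.6 tree into mainline. The bulk of it is a rewrite of the sparc64 PCI IOMMU allocator: the old per-cluster scheme (alloc_streaming_cluster()/alloc_consistent_cluster() with PBM_NCLUSTERS bookkeeping) is replaced by a single bitmap "arena" allocator modeled on the ppc64 IOMMU code, and the per-controller setup duplicated in psycho, sabre and schizo is folded into a common pci_iommu_table_init(). The remaining patches drop the xcall_promstop cross-call used by prom_halt(), and teach qlogicpti to read INQUIRY data through a scatterlist instead of BUG()ing on sg commands.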
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c
index 425c60cfea19..a11910be1013 100644
--- a/arch/sparc64/kernel/pci_iommu.c
+++ b/arch/sparc64/kernel/pci_iommu.c
@@ -49,12 +49,6 @@ static void __iommu_flushall(struct pci_iommu *iommu)
49 49
50 /* Ensure completion of previous PIO writes. */ 50 /* Ensure completion of previous PIO writes. */
51 (void) pci_iommu_read(iommu->write_complete_reg); 51 (void) pci_iommu_read(iommu->write_complete_reg);
52
53 /* Now update everyone's flush point. */
54 for (entry = 0; entry < PBM_NCLUSTERS; entry++) {
55 iommu->alloc_info[entry].flush =
56 iommu->alloc_info[entry].next;
57 }
58} 52}
59 53
60#define IOPTE_CONSISTENT(CTX) \ 54#define IOPTE_CONSISTENT(CTX) \
@@ -80,120 +74,117 @@ static void inline iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte)
80 iopte_val(*iopte) = val; 74 iopte_val(*iopte) = val;
81} 75}
82 76
83void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize) 77/* Based largely upon the ppc64 iommu allocator. */
78static long pci_arena_alloc(struct pci_iommu *iommu, unsigned long npages)
84{ 79{
85 int i; 80 struct pci_iommu_arena *arena = &iommu->arena;
86 81 unsigned long n, i, start, end, limit;
87 tsbsize /= sizeof(iopte_t); 82 int pass;
88 83
89 for (i = 0; i < tsbsize; i++) 84 limit = arena->limit;
90 iopte_make_dummy(iommu, &iommu->page_table[i]); 85 start = arena->hint;
91} 86 pass = 0;
92 87
93static iopte_t *alloc_streaming_cluster(struct pci_iommu *iommu, unsigned long npages) 88again:
94{ 89 n = find_next_zero_bit(arena->map, limit, start);
95 iopte_t *iopte, *limit, *first; 90 end = n + npages;
96 unsigned long cnum, ent, flush_point; 91 if (unlikely(end >= limit)) {
97 92 if (likely(pass < 1)) {
98 cnum = 0; 93 limit = start;
99 while ((1UL << cnum) < npages) 94 start = 0;
100 cnum++; 95 __iommu_flushall(iommu);
101 iopte = (iommu->page_table + 96 pass++;
102 (cnum << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS))); 97 goto again;
103 98 } else {
104 if (cnum == 0) 99 /* Scanned the whole thing, give up. */
105 limit = (iommu->page_table + 100 return -1;
106 iommu->lowest_consistent_map);
107 else
108 limit = (iopte +
109 (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
110
111 iopte += ((ent = iommu->alloc_info[cnum].next) << cnum);
112 flush_point = iommu->alloc_info[cnum].flush;
113
114 first = iopte;
115 for (;;) {
116 if (IOPTE_IS_DUMMY(iommu, iopte)) {
117 if ((iopte + (1 << cnum)) >= limit)
118 ent = 0;
119 else
120 ent = ent + 1;
121 iommu->alloc_info[cnum].next = ent;
122 if (ent == flush_point)
123 __iommu_flushall(iommu);
124 break;
125 } 101 }
126 iopte += (1 << cnum); 102 }
127 ent++; 103
128 if (iopte >= limit) { 104 for (i = n; i < end; i++) {
129 iopte = (iommu->page_table + 105 if (test_bit(i, arena->map)) {
130 (cnum << 106 start = i + 1;
131 (iommu->page_table_sz_bits - PBM_LOGCLUSTERS))); 107 goto again;
132 ent = 0;
133 } 108 }
134 if (ent == flush_point)
135 __iommu_flushall(iommu);
136 if (iopte == first)
137 goto bad;
138 } 109 }
139 110
140 /* I've got your streaming cluster right here buddy boy... */ 111 for (i = n; i < end; i++)
141 return iopte; 112 __set_bit(i, arena->map);
142 113
143bad: 114 arena->hint = end;
144 printk(KERN_EMERG "pci_iommu: alloc_streaming_cluster of npages(%ld) failed!\n", 115
145 npages); 116 return n;
146 return NULL;
147} 117}
148 118
149static void free_streaming_cluster(struct pci_iommu *iommu, dma_addr_t base, 119static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
150 unsigned long npages, unsigned long ctx)
151{ 120{
152 unsigned long cnum, ent; 121 unsigned long i;
153 122
154 cnum = 0; 123 for (i = base; i < (base + npages); i++)
155 while ((1UL << cnum) < npages) 124 __clear_bit(i, arena->map);
156 cnum++; 125}
157 126
158 ent = (base << (32 - IO_PAGE_SHIFT + PBM_LOGCLUSTERS - iommu->page_table_sz_bits)) 127void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask)
159 >> (32 + PBM_LOGCLUSTERS + cnum - iommu->page_table_sz_bits); 128{
129 unsigned long i, tsbbase, order, sz, num_tsb_entries;
130
131 num_tsb_entries = tsbsize / sizeof(iopte_t);
132
133 /* Setup initial software IOMMU state. */
134 spin_lock_init(&iommu->lock);
135 iommu->ctx_lowest_free = 1;
136 iommu->page_table_map_base = dma_offset;
137 iommu->dma_addr_mask = dma_addr_mask;
138
139 /* Allocate and initialize the free area map. */
140 sz = num_tsb_entries / 8;
141 sz = (sz + 7UL) & ~7UL;
142 iommu->arena.map = kmalloc(sz, GFP_KERNEL);
143 if (!iommu->arena.map) {
144 prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
145 prom_halt();
146 }
147 memset(iommu->arena.map, 0, sz);
148 iommu->arena.limit = num_tsb_entries;
160 149
161 /* If the global flush might not have caught this entry, 150 /* Allocate and initialize the dummy page which we
162 * adjust the flush point such that we will flush before 151 * set inactive IO PTEs to point to.
163 * ever trying to reuse it.
164 */ 152 */
165#define between(X,Y,Z) (((Z) - (Y)) >= ((X) - (Y))) 153 iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
166 if (between(ent, iommu->alloc_info[cnum].next, iommu->alloc_info[cnum].flush)) 154 if (!iommu->dummy_page) {
167 iommu->alloc_info[cnum].flush = ent; 155 prom_printf("PCI_IOMMU: Error, gfp(dummy_page) failed.\n");
168#undef between 156 prom_halt();
157 }
158 memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
159 iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
160
161 /* Now allocate and setup the IOMMU page table itself. */
162 order = get_order(tsbsize);
163 tsbbase = __get_free_pages(GFP_KERNEL, order);
164 if (!tsbbase) {
165 prom_printf("PCI_IOMMU: Error, gfp(tsb) failed.\n");
166 prom_halt();
167 }
168 iommu->page_table = (iopte_t *)tsbbase;
169
170 for (i = 0; i < num_tsb_entries; i++)
171 iopte_make_dummy(iommu, &iommu->page_table[i]);
169} 172}
170 173
171/* We allocate consistent mappings from the end of cluster zero. */ 174static inline iopte_t *alloc_npages(struct pci_iommu *iommu, unsigned long npages)
172static iopte_t *alloc_consistent_cluster(struct pci_iommu *iommu, unsigned long npages)
173{ 175{
174 iopte_t *iopte; 176 long entry;
175 177
176 iopte = iommu->page_table + (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)); 178 entry = pci_arena_alloc(iommu, npages);
177 while (iopte > iommu->page_table) { 179 if (unlikely(entry < 0))
178 iopte--; 180 return NULL;
179 if (IOPTE_IS_DUMMY(iommu, iopte)) {
180 unsigned long tmp = npages;
181 181
182 while (--tmp) { 182 return iommu->page_table + entry;
183 iopte--; 183}
184 if (!IOPTE_IS_DUMMY(iommu, iopte))
185 break;
186 }
187 if (tmp == 0) {
188 u32 entry = (iopte - iommu->page_table);
189 184
190 if (entry < iommu->lowest_consistent_map) 185static inline void free_npages(struct pci_iommu *iommu, dma_addr_t base, unsigned long npages)
191 iommu->lowest_consistent_map = entry; 186{
192 return iopte; 187 pci_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
193 }
194 }
195 }
196 return NULL;
197} 188}
198 189
199static int iommu_alloc_ctx(struct pci_iommu *iommu) 190static int iommu_alloc_ctx(struct pci_iommu *iommu)
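
The new pci_arena_alloc() keeps one bit per IOMMU page in arena->map and scans it linearly from a moving hint, wrapping around at most once and issuing a single __iommu_flushall() on that wrap; this replaces the old per-cluster flush-point tracking. Below is a minimal userspace sketch of that scheme, assuming a byte-per-entry map instead of a real bitmap and made-up names throughout — it is illustrative only, not the kernel code above.

/* Minimal arena allocator sketch (illustrative only): linear scan from a
 * moving hint, one wrap-around retry with a TLB-flush stand-in, free by
 * clearing entries.  Uses a byte per entry rather than a real bitmap.
 */
#include <stdio.h>
#include <string.h>

#define ARENA_ENTRIES 64                  /* one entry per IOMMU page */

struct arena {
	unsigned char map[ARENA_ENTRIES]; /* 0 = free, 1 = in use */
	unsigned long hint;
	unsigned long limit;
};

static void flushall(void) { }            /* stand-in for __iommu_flushall() */

static long arena_alloc(struct arena *a, unsigned long npages)
{
	unsigned long start = a->hint, limit = a->limit, n, i;
	int pass = 0;

again:
	for (n = start; n + npages <= limit; n++) {
		for (i = 0; i < npages; i++)
			if (a->map[n + i])
				break;
		if (i == npages) {                /* found a free run */
			memset(&a->map[n], 1, npages);
			a->hint = n + npages;
			return (long)n;
		}
		n += i;                           /* jump past the busy entry */
	}
	if (pass++ == 0) {                        /* wrap once, flush stale TLB */
		limit = start;
		start = 0;
		flushall();
		goto again;
	}
	return -1;                                /* arena exhausted */
}

static void arena_free(struct arena *a, unsigned long base, unsigned long npages)
{
	memset(&a->map[base], 0, npages);
}

int main(void)
{
	struct arena a = { .hint = 0, .limit = ARENA_ENTRIES };
	long e1 = arena_alloc(&a, 4);             /* -> 0          */
	long e2 = arena_alloc(&a, 8);             /* -> 4, hint 12 */

	arena_free(&a, (unsigned long)e1, 4);
	printf("%ld %ld %ld\n", e1, e2, arena_alloc(&a, 2)); /* 0 4 12 */
	return 0;
}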
@@ -233,7 +224,7 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad
233 struct pcidev_cookie *pcp; 224 struct pcidev_cookie *pcp;
234 struct pci_iommu *iommu; 225 struct pci_iommu *iommu;
235 iopte_t *iopte; 226 iopte_t *iopte;
236 unsigned long flags, order, first_page, ctx; 227 unsigned long flags, order, first_page;
237 void *ret; 228 void *ret;
238 int npages; 229 int npages;
239 230
@@ -251,9 +242,10 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad
251 iommu = pcp->pbm->iommu; 242 iommu = pcp->pbm->iommu;
252 243
253 spin_lock_irqsave(&iommu->lock, flags); 244 spin_lock_irqsave(&iommu->lock, flags);
254 iopte = alloc_consistent_cluster(iommu, size >> IO_PAGE_SHIFT); 245 iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
255 if (iopte == NULL) { 246 spin_unlock_irqrestore(&iommu->lock, flags);
256 spin_unlock_irqrestore(&iommu->lock, flags); 247
248 if (unlikely(iopte == NULL)) {
257 free_pages(first_page, order); 249 free_pages(first_page, order);
258 return NULL; 250 return NULL;
259 } 251 }
@@ -262,31 +254,15 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad
262 ((iopte - iommu->page_table) << IO_PAGE_SHIFT)); 254 ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
263 ret = (void *) first_page; 255 ret = (void *) first_page;
264 npages = size >> IO_PAGE_SHIFT; 256 npages = size >> IO_PAGE_SHIFT;
265 ctx = 0;
266 if (iommu->iommu_ctxflush)
267 ctx = iommu_alloc_ctx(iommu);
268 first_page = __pa(first_page); 257 first_page = __pa(first_page);
269 while (npages--) { 258 while (npages--) {
270 iopte_val(*iopte) = (IOPTE_CONSISTENT(ctx) | 259 iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
271 IOPTE_WRITE | 260 IOPTE_WRITE |
272 (first_page & IOPTE_PAGE)); 261 (first_page & IOPTE_PAGE));
273 iopte++; 262 iopte++;
274 first_page += IO_PAGE_SIZE; 263 first_page += IO_PAGE_SIZE;
275 } 264 }
276 265
277 {
278 int i;
279 u32 daddr = *dma_addrp;
280
281 npages = size >> IO_PAGE_SHIFT;
282 for (i = 0; i < npages; i++) {
283 pci_iommu_write(iommu->iommu_flush, daddr);
284 daddr += IO_PAGE_SIZE;
285 }
286 }
287
288 spin_unlock_irqrestore(&iommu->lock, flags);
289
290 return ret; 266 return ret;
291} 267}
292 268
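
With the arena in place, pci_alloc_consistent() only holds the IOMMU lock around alloc_npages() itself; it no longer reserves a context or flushes each page by hand, since stale IOTLB entries are flushed wholesale inside pci_arena_alloc() when the search wraps.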
@@ -296,7 +272,7 @@ void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_
296 struct pcidev_cookie *pcp; 272 struct pcidev_cookie *pcp;
297 struct pci_iommu *iommu; 273 struct pci_iommu *iommu;
298 iopte_t *iopte; 274 iopte_t *iopte;
299 unsigned long flags, order, npages, i, ctx; 275 unsigned long flags, order, npages;
300 276
301 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; 277 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
302 pcp = pdev->sysdata; 278 pcp = pdev->sysdata;
@@ -306,46 +282,7 @@ void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_
306 282
307 spin_lock_irqsave(&iommu->lock, flags); 283 spin_lock_irqsave(&iommu->lock, flags);
308 284
309 if ((iopte - iommu->page_table) == 285 free_npages(iommu, dvma, npages);
310 iommu->lowest_consistent_map) {
311 iopte_t *walk = iopte + npages;
312 iopte_t *limit;
313
314 limit = (iommu->page_table +
315 (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
316 while (walk < limit) {
317 if (!IOPTE_IS_DUMMY(iommu, walk))
318 break;
319 walk++;
320 }
321 iommu->lowest_consistent_map =
322 (walk - iommu->page_table);
323 }
324
325 /* Data for consistent mappings cannot enter the streaming
326 * buffers, so we only need to update the TSB. We flush
327 * the IOMMU here as well to prevent conflicts with the
328 * streaming mapping deferred tlb flush scheme.
329 */
330
331 ctx = 0;
332 if (iommu->iommu_ctxflush)
333 ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
334
335 for (i = 0; i < npages; i++, iopte++)
336 iopte_make_dummy(iommu, iopte);
337
338 if (iommu->iommu_ctxflush) {
339 pci_iommu_write(iommu->iommu_ctxflush, ctx);
340 } else {
341 for (i = 0; i < npages; i++) {
342 u32 daddr = dvma + (i << IO_PAGE_SHIFT);
343
344 pci_iommu_write(iommu->iommu_flush, daddr);
345 }
346 }
347
348 iommu_free_ctx(iommu, ctx);
349 286
350 spin_unlock_irqrestore(&iommu->lock, flags); 287 spin_unlock_irqrestore(&iommu->lock, flags);
351 288
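
pci_free_consistent() correspondingly shrinks to a single free_npages() call under the lock: the lowest_consistent_map bookkeeping and the explicit per-page flush are gone, and the freed ioptes simply keep their contents until the arena hands those entries out again.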
@@ -372,25 +309,27 @@ dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direct
372 iommu = pcp->pbm->iommu; 309 iommu = pcp->pbm->iommu;
373 strbuf = &pcp->pbm->stc; 310 strbuf = &pcp->pbm->stc;
374 311
375 if (direction == PCI_DMA_NONE) 312 if (unlikely(direction == PCI_DMA_NONE))
376 BUG(); 313 goto bad_no_ctx;
377 314
378 oaddr = (unsigned long)ptr; 315 oaddr = (unsigned long)ptr;
379 npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); 316 npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
380 npages >>= IO_PAGE_SHIFT; 317 npages >>= IO_PAGE_SHIFT;
381 318
382 spin_lock_irqsave(&iommu->lock, flags); 319 spin_lock_irqsave(&iommu->lock, flags);
320 base = alloc_npages(iommu, npages);
321 ctx = 0;
322 if (iommu->iommu_ctxflush)
323 ctx = iommu_alloc_ctx(iommu);
324 spin_unlock_irqrestore(&iommu->lock, flags);
383 325
384 base = alloc_streaming_cluster(iommu, npages); 326 if (unlikely(!base))
385 if (base == NULL)
386 goto bad; 327 goto bad;
328
387 bus_addr = (iommu->page_table_map_base + 329 bus_addr = (iommu->page_table_map_base +
388 ((base - iommu->page_table) << IO_PAGE_SHIFT)); 330 ((base - iommu->page_table) << IO_PAGE_SHIFT));
389 ret = bus_addr | (oaddr & ~IO_PAGE_MASK); 331 ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
390 base_paddr = __pa(oaddr & IO_PAGE_MASK); 332 base_paddr = __pa(oaddr & IO_PAGE_MASK);
391 ctx = 0;
392 if (iommu->iommu_ctxflush)
393 ctx = iommu_alloc_ctx(iommu);
394 if (strbuf->strbuf_enabled) 333 if (strbuf->strbuf_enabled)
395 iopte_protection = IOPTE_STREAMING(ctx); 334 iopte_protection = IOPTE_STREAMING(ctx);
396 else 335 else
@@ -401,12 +340,13 @@ dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direct
401 for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE) 340 for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
402 iopte_val(*base) = iopte_protection | base_paddr; 341 iopte_val(*base) = iopte_protection | base_paddr;
403 342
404 spin_unlock_irqrestore(&iommu->lock, flags);
405
406 return ret; 343 return ret;
407 344
408bad: 345bad:
409 spin_unlock_irqrestore(&iommu->lock, flags); 346 iommu_free_ctx(iommu, ctx);
347bad_no_ctx:
348 if (printk_ratelimit())
349 WARN_ON(1);
410 return PCI_DMA_ERROR_CODE; 350 return PCI_DMA_ERROR_CODE;
411} 351}
412 352
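
pci_map_single() now fails soft: a bad direction or an exhausted arena produces a rate-limited WARN_ON() and a PCI_DMA_ERROR_CODE return instead of a BUG(), and pci_map_sg() below checks exactly that sentinel on its single-entry fast path. A hedged caller-side sketch follows; the function name and buffer are made up for illustration, only the mapping calls and the sentinel come from this patch.

/* Hypothetical caller: the mapping routines now return PCI_DMA_ERROR_CODE
 * on failure, so the result must be checked before programming hardware.
 */
#include <linux/pci.h>

static int my_start_io(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t handle = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);

	if (handle == PCI_DMA_ERROR_CODE)
		return -ENOMEM;          /* arena exhausted or bad direction */

	/* ... hand 'handle' to the device, wait for completion ... */

	pci_unmap_single(pdev, handle, len, PCI_DMA_TODEVICE);
	return 0;
}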
@@ -481,10 +421,13 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
481 struct pci_iommu *iommu; 421 struct pci_iommu *iommu;
482 struct pci_strbuf *strbuf; 422 struct pci_strbuf *strbuf;
483 iopte_t *base; 423 iopte_t *base;
484 unsigned long flags, npages, ctx; 424 unsigned long flags, npages, ctx, i;
485 425
486 if (direction == PCI_DMA_NONE) 426 if (unlikely(direction == PCI_DMA_NONE)) {
487 BUG(); 427 if (printk_ratelimit())
428 WARN_ON(1);
429 return;
430 }
488 431
489 pcp = pdev->sysdata; 432 pcp = pdev->sysdata;
490 iommu = pcp->pbm->iommu; 433 iommu = pcp->pbm->iommu;
@@ -510,13 +453,14 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
510 453
511 /* Step 1: Kick data out of streaming buffers if necessary. */ 454 /* Step 1: Kick data out of streaming buffers if necessary. */
512 if (strbuf->strbuf_enabled) 455 if (strbuf->strbuf_enabled)
513 pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction); 456 pci_strbuf_flush(strbuf, iommu, bus_addr, ctx,
457 npages, direction);
514 458
515 /* Step 2: Clear out first TSB entry. */ 459 /* Step 2: Clear out TSB entries. */
516 iopte_make_dummy(iommu, base); 460 for (i = 0; i < npages; i++)
461 iopte_make_dummy(iommu, base + i);
517 462
518 free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base, 463 free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
519 npages, ctx);
520 464
521 iommu_free_ctx(iommu, ctx); 465 iommu_free_ctx(iommu, ctx);
522 466
@@ -621,6 +565,8 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int
621 pci_map_single(pdev, 565 pci_map_single(pdev,
622 (page_address(sglist->page) + sglist->offset), 566 (page_address(sglist->page) + sglist->offset),
623 sglist->length, direction); 567 sglist->length, direction);
568 if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
569 return 0;
624 sglist->dma_length = sglist->length; 570 sglist->dma_length = sglist->length;
625 return 1; 571 return 1;
626 } 572 }
@@ -629,21 +575,29 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int
629 iommu = pcp->pbm->iommu; 575 iommu = pcp->pbm->iommu;
630 strbuf = &pcp->pbm->stc; 576 strbuf = &pcp->pbm->stc;
631 577
632 if (direction == PCI_DMA_NONE) 578 if (unlikely(direction == PCI_DMA_NONE))
633 BUG(); 579 goto bad_no_ctx;
634 580
635 /* Step 1: Prepare scatter list. */ 581 /* Step 1: Prepare scatter list. */
636 582
637 npages = prepare_sg(sglist, nelems); 583 npages = prepare_sg(sglist, nelems);
638 584
639 /* Step 2: Allocate a cluster. */ 585 /* Step 2: Allocate a cluster and context, if necessary. */
640 586
641 spin_lock_irqsave(&iommu->lock, flags); 587 spin_lock_irqsave(&iommu->lock, flags);
642 588
643 base = alloc_streaming_cluster(iommu, npages); 589 base = alloc_npages(iommu, npages);
590 ctx = 0;
591 if (iommu->iommu_ctxflush)
592 ctx = iommu_alloc_ctx(iommu);
593
594 spin_unlock_irqrestore(&iommu->lock, flags);
595
644 if (base == NULL) 596 if (base == NULL)
645 goto bad; 597 goto bad;
646 dma_base = iommu->page_table_map_base + ((base - iommu->page_table) << IO_PAGE_SHIFT); 598
599 dma_base = iommu->page_table_map_base +
600 ((base - iommu->page_table) << IO_PAGE_SHIFT);
647 601
648 /* Step 3: Normalize DMA addresses. */ 602 /* Step 3: Normalize DMA addresses. */
649 used = nelems; 603 used = nelems;
@@ -656,30 +610,28 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int
656 } 610 }
657 used = nelems - used; 611 used = nelems - used;
658 612
659 /* Step 4: Choose a context if necessary. */ 613 /* Step 4: Create the mappings. */
660 ctx = 0;
661 if (iommu->iommu_ctxflush)
662 ctx = iommu_alloc_ctx(iommu);
663
664 /* Step 5: Create the mappings. */
665 if (strbuf->strbuf_enabled) 614 if (strbuf->strbuf_enabled)
666 iopte_protection = IOPTE_STREAMING(ctx); 615 iopte_protection = IOPTE_STREAMING(ctx);
667 else 616 else
668 iopte_protection = IOPTE_CONSISTENT(ctx); 617 iopte_protection = IOPTE_CONSISTENT(ctx);
669 if (direction != PCI_DMA_TODEVICE) 618 if (direction != PCI_DMA_TODEVICE)
670 iopte_protection |= IOPTE_WRITE; 619 iopte_protection |= IOPTE_WRITE;
671 fill_sg (base, sglist, used, nelems, iopte_protection); 620
621 fill_sg(base, sglist, used, nelems, iopte_protection);
622
672#ifdef VERIFY_SG 623#ifdef VERIFY_SG
673 verify_sglist(sglist, nelems, base, npages); 624 verify_sglist(sglist, nelems, base, npages);
674#endif 625#endif
675 626
676 spin_unlock_irqrestore(&iommu->lock, flags);
677
678 return used; 627 return used;
679 628
680bad: 629bad:
681 spin_unlock_irqrestore(&iommu->lock, flags); 630 iommu_free_ctx(iommu, ctx);
682 return PCI_DMA_ERROR_CODE; 631bad_no_ctx:
632 if (printk_ratelimit())
633 WARN_ON(1);
634 return 0;
683} 635}
684 636
685/* Unmap a set of streaming mode DMA translations. */ 637/* Unmap a set of streaming mode DMA translations. */
@@ -692,8 +644,10 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
692 unsigned long flags, ctx, i, npages; 644 unsigned long flags, ctx, i, npages;
693 u32 bus_addr; 645 u32 bus_addr;
694 646
695 if (direction == PCI_DMA_NONE) 647 if (unlikely(direction == PCI_DMA_NONE)) {
696 BUG(); 648 if (printk_ratelimit())
649 WARN_ON(1);
650 }
697 651
698 pcp = pdev->sysdata; 652 pcp = pdev->sysdata;
699 iommu = pcp->pbm->iommu; 653 iommu = pcp->pbm->iommu;
@@ -705,7 +659,8 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
705 if (sglist[i].dma_length == 0) 659 if (sglist[i].dma_length == 0)
706 break; 660 break;
707 i--; 661 i--;
708 npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - bus_addr) >> IO_PAGE_SHIFT; 662 npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
663 bus_addr) >> IO_PAGE_SHIFT;
709 664
710 base = iommu->page_table + 665 base = iommu->page_table +
711 ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT); 666 ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
@@ -726,11 +681,11 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
726 if (strbuf->strbuf_enabled) 681 if (strbuf->strbuf_enabled)
727 pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction); 682 pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
728 683
729 /* Step 2: Clear out first TSB entry. */ 684 /* Step 2: Clear out the TSB entries. */
730 iopte_make_dummy(iommu, base); 685 for (i = 0; i < npages; i++)
686 iopte_make_dummy(iommu, base + i);
731 687
732 free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base, 688 free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
733 npages, ctx);
734 689
735 iommu_free_ctx(iommu, ctx); 690 iommu_free_ctx(iommu, ctx);
736 691
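
The unmap paths change in the same spirit: pci_unmap_single() and pci_unmap_sg() now dummy out every TSB entry of the mapping rather than just the first, return the range with free_npages(), and warn instead of BUG() when handed PCI_DMA_NONE.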
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c
index 6ed1ef25e0ac..c03ed5f49d31 100644
--- a/arch/sparc64/kernel/pci_psycho.c
+++ b/arch/sparc64/kernel/pci_psycho.c
@@ -1207,13 +1207,9 @@ static void psycho_scan_bus(struct pci_controller_info *p)
1207static void psycho_iommu_init(struct pci_controller_info *p) 1207static void psycho_iommu_init(struct pci_controller_info *p)
1208{ 1208{
1209 struct pci_iommu *iommu = p->pbm_A.iommu; 1209 struct pci_iommu *iommu = p->pbm_A.iommu;
1210 unsigned long tsbbase, i; 1210 unsigned long i;
1211 u64 control; 1211 u64 control;
1212 1212
1213 /* Setup initial software IOMMU state. */
1214 spin_lock_init(&iommu->lock);
1215 iommu->ctx_lowest_free = 1;
1216
1217 /* Register addresses. */ 1213 /* Register addresses. */
1218 iommu->iommu_control = p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL; 1214 iommu->iommu_control = p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL;
1219 iommu->iommu_tsbbase = p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE; 1215 iommu->iommu_tsbbase = p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE;
@@ -1240,40 +1236,10 @@ static void psycho_iommu_init(struct pci_controller_info *p)
1240 /* Leave diag mode enabled for full-flushing done 1236 /* Leave diag mode enabled for full-flushing done
1241 * in pci_iommu.c 1237 * in pci_iommu.c
1242 */ 1238 */
1239 pci_iommu_table_init(iommu, IO_TSB_SIZE, 0xc0000000, 0xffffffff);
1243 1240
1244 iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0); 1241 psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE,
1245 if (!iommu->dummy_page) { 1242 __pa(iommu->page_table));
1246 prom_printf("PSYCHO_IOMMU: Error, gfp(dummy_page) failed.\n");
1247 prom_halt();
1248 }
1249 memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
1250 iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
1251
1252 /* Using assumed page size 8K with 128K entries we need 1MB iommu page
1253 * table (128K ioptes * 8 bytes per iopte). This is
1254 * page order 7 on UltraSparc.
1255 */
1256 tsbbase = __get_free_pages(GFP_KERNEL, get_order(IO_TSB_SIZE));
1257 if (!tsbbase) {
1258 prom_printf("PSYCHO_IOMMU: Error, gfp(tsb) failed.\n");
1259 prom_halt();
1260 }
1261 iommu->page_table = (iopte_t *)tsbbase;
1262 iommu->page_table_sz_bits = 17;
1263 iommu->page_table_map_base = 0xc0000000;
1264 iommu->dma_addr_mask = 0xffffffff;
1265 pci_iommu_table_init(iommu, IO_TSB_SIZE);
1266
1267 /* We start with no consistent mappings. */
1268 iommu->lowest_consistent_map =
1269 1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS);
1270
1271 for (i = 0; i < PBM_NCLUSTERS; i++) {
1272 iommu->alloc_info[i].flush = 0;
1273 iommu->alloc_info[i].next = 0;
1274 }
1275
1276 psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE, __pa(tsbbase));
1277 1243
1278 control = psycho_read(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL); 1244 control = psycho_read(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL);
1279 control &= ~(PSYCHO_IOMMU_CTRL_TSBSZ | PSYCHO_IOMMU_CTRL_TBWSZ); 1245 control &= ~(PSYCHO_IOMMU_CTRL_TSBSZ | PSYCHO_IOMMU_CTRL_TBWSZ);
@@ -1281,7 +1247,7 @@ static void psycho_iommu_init(struct pci_controller_info *p)
1281 psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL, control); 1247 psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL, control);
1282 1248
1283 /* If necessary, hook us up for starfire IRQ translations. */ 1249 /* If necessary, hook us up for starfire IRQ translations. */
1284 if(this_is_starfire) 1250 if (this_is_starfire)
1285 p->starfire_cookie = starfire_hookup(p->pbm_A.portid); 1251 p->starfire_cookie = starfire_hookup(p->pbm_A.portid);
1286 else 1252 else
1287 p->starfire_cookie = NULL; 1253 p->starfire_cookie = NULL;
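
On the controller side, psycho (like sabre and schizo below) stops open-coding the dummy page, TSB allocation and cluster bookkeeping; it calls the extended pci_iommu_table_init(iommu, tsbsize, dma_offset, dma_addr_mask) and then just programs the hardware TSB base from iommu->page_table. For psycho's 128K-entry TSB that is still the 1MB, order-7 table the old comment described, plus a 16KB allocation bitmap (128K entries at one bit each).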
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c
index 0ee6bd5b9ac6..da8e1364194f 100644
--- a/arch/sparc64/kernel/pci_sabre.c
+++ b/arch/sparc64/kernel/pci_sabre.c
@@ -1267,13 +1267,9 @@ static void sabre_iommu_init(struct pci_controller_info *p,
1267 u32 dma_mask) 1267 u32 dma_mask)
1268{ 1268{
1269 struct pci_iommu *iommu = p->pbm_A.iommu; 1269 struct pci_iommu *iommu = p->pbm_A.iommu;
1270 unsigned long tsbbase, i, order; 1270 unsigned long i;
1271 u64 control; 1271 u64 control;
1272 1272
1273 /* Setup initial software IOMMU state. */
1274 spin_lock_init(&iommu->lock);
1275 iommu->ctx_lowest_free = 1;
1276
1277 /* Register addresses. */ 1273 /* Register addresses. */
1278 iommu->iommu_control = p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL; 1274 iommu->iommu_control = p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL;
1279 iommu->iommu_tsbbase = p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE; 1275 iommu->iommu_tsbbase = p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE;
@@ -1295,26 +1291,10 @@ static void sabre_iommu_init(struct pci_controller_info *p,
1295 /* Leave diag mode enabled for full-flushing done 1291 /* Leave diag mode enabled for full-flushing done
1296 * in pci_iommu.c 1292 * in pci_iommu.c
1297 */ 1293 */
1294 pci_iommu_table_init(iommu, tsbsize * 1024 * 8, dvma_offset, dma_mask);
1298 1295
1299 iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0); 1296 sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE,
1300 if (!iommu->dummy_page) { 1297 __pa(iommu->page_table));
1301 prom_printf("PSYCHO_IOMMU: Error, gfp(dummy_page) failed.\n");
1302 prom_halt();
1303 }
1304 memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
1305 iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
1306
1307 tsbbase = __get_free_pages(GFP_KERNEL, order = get_order(tsbsize * 1024 * 8));
1308 if (!tsbbase) {
1309 prom_printf("SABRE_IOMMU: Error, gfp(tsb) failed.\n");
1310 prom_halt();
1311 }
1312 iommu->page_table = (iopte_t *)tsbbase;
1313 iommu->page_table_map_base = dvma_offset;
1314 iommu->dma_addr_mask = dma_mask;
1315 pci_iommu_table_init(iommu, PAGE_SIZE << order);
1316
1317 sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE, __pa(tsbbase));
1318 1298
1319 control = sabre_read(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL); 1299 control = sabre_read(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL);
1320 control &= ~(SABRE_IOMMUCTRL_TSBSZ | SABRE_IOMMUCTRL_TBWSZ); 1300 control &= ~(SABRE_IOMMUCTRL_TSBSZ | SABRE_IOMMUCTRL_TBWSZ);
@@ -1322,11 +1302,9 @@ static void sabre_iommu_init(struct pci_controller_info *p,
1322 switch(tsbsize) { 1302 switch(tsbsize) {
1323 case 64: 1303 case 64:
1324 control |= SABRE_IOMMU_TSBSZ_64K; 1304 control |= SABRE_IOMMU_TSBSZ_64K;
1325 iommu->page_table_sz_bits = 16;
1326 break; 1305 break;
1327 case 128: 1306 case 128:
1328 control |= SABRE_IOMMU_TSBSZ_128K; 1307 control |= SABRE_IOMMU_TSBSZ_128K;
1329 iommu->page_table_sz_bits = 17;
1330 break; 1308 break;
1331 default: 1309 default:
1332 prom_printf("iommu_init: Illegal TSB size %d\n", tsbsize); 1310 prom_printf("iommu_init: Illegal TSB size %d\n", tsbsize);
@@ -1334,15 +1312,6 @@ static void sabre_iommu_init(struct pci_controller_info *p,
1334 break; 1312 break;
1335 } 1313 }
1336 sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL, control); 1314 sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL, control);
1337
1338 /* We start with no consistent mappings. */
1339 iommu->lowest_consistent_map =
1340 1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS);
1341
1342 for (i = 0; i < PBM_NCLUSTERS; i++) {
1343 iommu->alloc_info[i].flush = 0;
1344 iommu->alloc_info[i].next = 0;
1345 }
1346} 1315}
1347 1316
1348static void pbm_register_toplevel_resources(struct pci_controller_info *p, 1317static void pbm_register_toplevel_resources(struct pci_controller_info *p,
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c
index cae5b61fe2f0..d8c4e0919b4e 100644
--- a/arch/sparc64/kernel/pci_schizo.c
+++ b/arch/sparc64/kernel/pci_schizo.c
@@ -1765,7 +1765,7 @@ static void schizo_pbm_strbuf_init(struct pci_pbm_info *pbm)
1765static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm) 1765static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
1766{ 1766{
1767 struct pci_iommu *iommu = pbm->iommu; 1767 struct pci_iommu *iommu = pbm->iommu;
1768 unsigned long tsbbase, i, tagbase, database, order; 1768 unsigned long i, tagbase, database;
1769 u32 vdma[2], dma_mask; 1769 u32 vdma[2], dma_mask;
1770 u64 control; 1770 u64 control;
1771 int err, tsbsize; 1771 int err, tsbsize;
@@ -1800,10 +1800,6 @@ static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
1800 prom_halt(); 1800 prom_halt();
1801 }; 1801 };
1802 1802
1803 /* Setup initial software IOMMU state. */
1804 spin_lock_init(&iommu->lock);
1805 iommu->ctx_lowest_free = 1;
1806
1807 /* Register addresses, SCHIZO has iommu ctx flushing. */ 1803 /* Register addresses, SCHIZO has iommu ctx flushing. */
1808 iommu->iommu_control = pbm->pbm_regs + SCHIZO_IOMMU_CONTROL; 1804 iommu->iommu_control = pbm->pbm_regs + SCHIZO_IOMMU_CONTROL;
1809 iommu->iommu_tsbbase = pbm->pbm_regs + SCHIZO_IOMMU_TSBBASE; 1805 iommu->iommu_tsbbase = pbm->pbm_regs + SCHIZO_IOMMU_TSBBASE;
@@ -1832,56 +1828,9 @@ static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
1832 /* Leave diag mode enabled for full-flushing done 1828 /* Leave diag mode enabled for full-flushing done
1833 * in pci_iommu.c 1829 * in pci_iommu.c
1834 */ 1830 */
1831 pci_iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask);
1835 1832
1836 iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0); 1833 schizo_write(iommu->iommu_tsbbase, __pa(iommu->page_table));
1837 if (!iommu->dummy_page) {
1838 prom_printf("PSYCHO_IOMMU: Error, gfp(dummy_page) failed.\n");
1839 prom_halt();
1840 }
1841 memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
1842 iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
1843
1844 /* Using assumed page size 8K with 128K entries we need 1MB iommu page
1845 * table (128K ioptes * 8 bytes per iopte). This is
1846 * page order 7 on UltraSparc.
1847 */
1848 order = get_order(tsbsize * 8 * 1024);
1849 tsbbase = __get_free_pages(GFP_KERNEL, order);
1850 if (!tsbbase) {
1851 prom_printf("%s: Error, gfp(tsb) failed.\n", pbm->name);
1852 prom_halt();
1853 }
1854
1855 iommu->page_table = (iopte_t *)tsbbase;
1856 iommu->page_table_map_base = vdma[0];
1857 iommu->dma_addr_mask = dma_mask;
1858 pci_iommu_table_init(iommu, PAGE_SIZE << order);
1859
1860 switch (tsbsize) {
1861 case 64:
1862 iommu->page_table_sz_bits = 16;
1863 break;
1864
1865 case 128:
1866 iommu->page_table_sz_bits = 17;
1867 break;
1868
1869 default:
1870 prom_printf("iommu_init: Illegal TSB size %d\n", tsbsize);
1871 prom_halt();
1872 break;
1873 };
1874
1875 /* We start with no consistent mappings. */
1876 iommu->lowest_consistent_map =
1877 1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS);
1878
1879 for (i = 0; i < PBM_NCLUSTERS; i++) {
1880 iommu->alloc_info[i].flush = 0;
1881 iommu->alloc_info[i].next = 0;
1882 }
1883
1884 schizo_write(iommu->iommu_tsbbase, __pa(tsbbase));
1885 1834
1886 control = schizo_read(iommu->iommu_control); 1835 control = schizo_read(iommu->iommu_control);
1887 control &= ~(SCHIZO_IOMMU_CTRL_TSBSZ | SCHIZO_IOMMU_CTRL_TBWSZ); 1836 control &= ~(SCHIZO_IOMMU_CTRL_TSBSZ | SCHIZO_IOMMU_CTRL_TBWSZ);
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 590df5a16f5a..b137fd63f5e1 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -1001,13 +1001,6 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs)
1001 preempt_enable(); 1001 preempt_enable();
1002} 1002}
1003 1003
1004extern unsigned long xcall_promstop;
1005
1006void smp_promstop_others(void)
1007{
1008 smp_cross_call(&xcall_promstop, 0, 0, 0);
1009}
1010
1011#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier 1004#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier
1012#define prof_counter(__cpu) cpu_data(__cpu).counter 1005#define prof_counter(__cpu) cpu_data(__cpu).counter
1013 1006
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
index 058b8126c1a7..e4c9151fa116 100644
--- a/arch/sparc64/mm/ultra.S
+++ b/arch/sparc64/mm/ultra.S
@@ -453,22 +453,6 @@ xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
453 nop 453 nop
454 nop 454 nop
455 455
456 .globl xcall_promstop
457xcall_promstop:
458 rdpr %pstate, %g2
459 wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate
460 rdpr %pil, %g2
461 wrpr %g0, 15, %pil
462 sethi %hi(109f), %g7
463 b,pt %xcc, etrap_irq
464109: or %g7, %lo(109b), %g7
465 flushw
466 call prom_stopself
467 nop
468 /* We should not return, just spin if we do... */
4691: b,a,pt %xcc, 1b
470 nop
471
472 .data 456 .data
473 457
474errata32_hwbug: 458errata32_hwbug:
diff --git a/arch/sparc64/prom/misc.c b/arch/sparc64/prom/misc.c
index 9b895faf077b..87f5cfce23bb 100644
--- a/arch/sparc64/prom/misc.c
+++ b/arch/sparc64/prom/misc.c
@@ -68,19 +68,11 @@ void prom_cmdline(void)
68 local_irq_restore(flags); 68 local_irq_restore(flags);
69} 69}
70 70
71#ifdef CONFIG_SMP
72extern void smp_promstop_others(void);
73#endif
74
75/* Drop into the prom, but completely terminate the program. 71/* Drop into the prom, but completely terminate the program.
76 * No chance of continuing. 72 * No chance of continuing.
77 */ 73 */
78void prom_halt(void) 74void prom_halt(void)
79{ 75{
80#ifdef CONFIG_SMP
81 smp_promstop_others();
82 udelay(8000);
83#endif
84again: 76again:
85 p1275_cmd("exit", P1275_INOUT(0, 0)); 77 p1275_cmd("exit", P1275_INOUT(0, 0));
86 goto again; /* PROM is out to get me -DaveM */ 78 goto again; /* PROM is out to get me -DaveM */
@@ -88,10 +80,6 @@ again:
88 80
89void prom_halt_power_off(void) 81void prom_halt_power_off(void)
90{ 82{
91#ifdef CONFIG_SMP
92 smp_promstop_others();
93 udelay(8000);
94#endif
95 p1275_cmd("SUNW,power-off", P1275_INOUT(0, 0)); 83 p1275_cmd("SUNW,power-off", P1275_INOUT(0, 0));
96 84
97 /* if nothing else helps, we just halt */ 85 /* if nothing else helps, we just halt */
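
The PROM side loses the SMP stop dance: xcall_promstop and smp_promstop_others() are deleted, so prom_halt() and prom_halt_power_off() now call straight into the PROM without first cross-calling the other CPUs (and without the 8ms udelay that gave them time to drop in).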
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index a917ab7475ac..1fd5fc6d0fe3 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -1119,6 +1119,36 @@ static inline void update_can_queue(struct Scsi_Host *host, u_int in_ptr, u_int
1119 host->sg_tablesize = QLOGICPTI_MAX_SG(num_free); 1119 host->sg_tablesize = QLOGICPTI_MAX_SG(num_free);
1120} 1120}
1121 1121
1122static unsigned int scsi_rbuf_get(struct scsi_cmnd *cmd, unsigned char **buf_out)
1123{
1124 unsigned char *buf;
1125 unsigned int buflen;
1126
1127 if (cmd->use_sg) {
1128 struct scatterlist *sg;
1129
1130 sg = (struct scatterlist *) cmd->request_buffer;
1131 buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
1132 buflen = sg->length;
1133 } else {
1134 buf = cmd->request_buffer;
1135 buflen = cmd->request_bufflen;
1136 }
1137
1138 *buf_out = buf;
1139 return buflen;
1140}
1141
1142static void scsi_rbuf_put(struct scsi_cmnd *cmd, unsigned char *buf)
1143{
1144 if (cmd->use_sg) {
1145 struct scatterlist *sg;
1146
1147 sg = (struct scatterlist *) cmd->request_buffer;
1148 kunmap_atomic(buf - sg->offset, KM_IRQ0);
1149 }
1150}
1151
1122/* 1152/*
1123 * Until we scan the entire bus with inquiries, go throught this fella... 1153 * Until we scan the entire bus with inquiries, go throught this fella...
1124 */ 1154 */
@@ -1145,11 +1175,9 @@ static void ourdone(struct scsi_cmnd *Cmnd)
1145 int ok = host_byte(Cmnd->result) == DID_OK; 1175 int ok = host_byte(Cmnd->result) == DID_OK;
1146 if (Cmnd->cmnd[0] == 0x12 && ok) { 1176 if (Cmnd->cmnd[0] == 0x12 && ok) {
1147 unsigned char *iqd; 1177 unsigned char *iqd;
1178 unsigned int iqd_len;
1148 1179
1149 if (Cmnd->use_sg != 0) 1180 iqd_len = scsi_rbuf_get(Cmnd, &iqd);
1150 BUG();
1151
1152 iqd = ((unsigned char *)Cmnd->buffer);
1153 1181
1154 /* tags handled in midlayer */ 1182 /* tags handled in midlayer */
1155 /* enable sync mode? */ 1183 /* enable sync mode? */
@@ -1163,6 +1191,9 @@ static void ourdone(struct scsi_cmnd *Cmnd)
1163 if (iqd[7] & 0x20) { 1191 if (iqd[7] & 0x20) {
1164 qpti->dev_param[tgt].device_flags |= 0x20; 1192 qpti->dev_param[tgt].device_flags |= 0x20;
1165 } 1193 }
1194
1195 scsi_rbuf_put(Cmnd, iqd);
1196
1166 qpti->sbits |= (1 << tgt); 1197 qpti->sbits |= (1 << tgt);
1167 } else if (!ok) { 1198 } else if (!ok) {
1168 qpti->sbits |= (1 << tgt); 1199 qpti->sbits |= (1 << tgt);
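
In qlogicpti, ourdone() used to BUG() on scatter/gather INQUIRY commands and read the data from Cmnd->buffer directly; the new scsi_rbuf_get()/scsi_rbuf_put() pair hides whether the midlayer handed down a linear buffer or a scatterlist, kmap_atomic()ing the first sg page when needed and unmapping it once the INQUIRY bytes have been parsed.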
diff --git a/include/asm-sparc64/pbm.h b/include/asm-sparc64/pbm.h
index 38bbbccb4068..dd35a2c7798a 100644
--- a/include/asm-sparc64/pbm.h
+++ b/include/asm-sparc64/pbm.h
@@ -27,23 +27,27 @@
27 * PCI bus. 27 * PCI bus.
28 */ 28 */
29 29
30#define PBM_LOGCLUSTERS 3
31#define PBM_NCLUSTERS (1 << PBM_LOGCLUSTERS)
32
33struct pci_controller_info; 30struct pci_controller_info;
34 31
35/* This contains the software state necessary to drive a PCI 32/* This contains the software state necessary to drive a PCI
36 * controller's IOMMU. 33 * controller's IOMMU.
37 */ 34 */
35struct pci_iommu_arena {
36 unsigned long *map;
37 unsigned int hint;
38 unsigned int limit;
39};
40
38struct pci_iommu { 41struct pci_iommu {
39 /* This protects the controller's IOMMU and all 42 /* This protects the controller's IOMMU and all
40 * streaming buffers underneath. 43 * streaming buffers underneath.
41 */ 44 */
42 spinlock_t lock; 45 spinlock_t lock;
43 46
47 struct pci_iommu_arena arena;
48
44 /* IOMMU page table, a linear array of ioptes. */ 49 /* IOMMU page table, a linear array of ioptes. */
45 iopte_t *page_table; /* The page table itself. */ 50 iopte_t *page_table; /* The page table itself. */
46 int page_table_sz_bits; /* log2 of ow many pages does it map? */
47 51
48 /* Base PCI memory space address where IOMMU mappings 52 /* Base PCI memory space address where IOMMU mappings
49 * begin. 53 * begin.
@@ -62,12 +66,6 @@ struct pci_iommu {
62 */ 66 */
63 unsigned long write_complete_reg; 67 unsigned long write_complete_reg;
64 68
65 /* The lowest used consistent mapping entry. Since
66 * we allocate consistent maps out of cluster 0 this
67 * is relative to the beginning of closter 0.
68 */
69 u32 lowest_consistent_map;
70
71 /* In order to deal with some buggy third-party PCI bridges that 69 /* In order to deal with some buggy third-party PCI bridges that
72 * do wrong prefetching, we never mark valid mappings as invalid. 70 * do wrong prefetching, we never mark valid mappings as invalid.
73 * Instead we point them at this dummy page. 71 * Instead we point them at this dummy page.
@@ -75,16 +73,6 @@ struct pci_iommu {
75 unsigned long dummy_page; 73 unsigned long dummy_page;
76 unsigned long dummy_page_pa; 74 unsigned long dummy_page_pa;
77 75
78 /* If PBM_NCLUSTERS is ever decreased to 4 or lower,
79 * or if largest supported page_table_sz * 8K goes above
80 * 2GB, you must increase the size of the type of
81 * these counters. You have been duly warned. -DaveM
82 */
83 struct {
84 u16 next;
85 u16 flush;
86 } alloc_info[PBM_NCLUSTERS];
87
88 /* CTX allocation. */ 76 /* CTX allocation. */
89 unsigned long ctx_lowest_free; 77 unsigned long ctx_lowest_free;
90 unsigned long ctx_bitmap[IOMMU_NUM_CTXS / (sizeof(unsigned long) * 8)]; 78 unsigned long ctx_bitmap[IOMMU_NUM_CTXS / (sizeof(unsigned long) * 8)];
@@ -102,7 +90,7 @@ struct pci_iommu {
102 u32 dma_addr_mask; 90 u32 dma_addr_mask;
103}; 91};
104 92
105extern void pci_iommu_table_init(struct pci_iommu *, int); 93extern void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask);
106 94
107/* This describes a PCI bus module's streaming buffer. */ 95/* This describes a PCI bus module's streaming buffer. */
108struct pci_strbuf { 96struct pci_strbuf {
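
The header change ties it all together: PBM_LOGCLUSTERS/PBM_NCLUSTERS, lowest_consistent_map, page_table_sz_bits and the alloc_info[] array disappear from struct pci_iommu, a three-field struct pci_iommu_arena (map, hint, limit) takes their place, and the pci_iommu_table_init() prototype grows the dma_offset and dma_addr_mask arguments that the controller drivers now pass in.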