path: root/arch/sparc64/kernel/sbus.c
Diffstat (limited to 'arch/sparc64/kernel/sbus.c')
 arch/sparc64/kernel/sbus.c | 568 +++----------------------------------------
 1 file changed, 24 insertions(+), 544 deletions(-)
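The patch below deletes the SBUS-private DMA mapping code and switches this
file to the shared sparc64 IOMMU layer: the bus-level struct sbus_info
container goes away, and each device instead reaches its IOMMU and
streaming-buffer state through generic device archdata. A minimal sketch of
the access-pattern change (the helper names are illustrative, not part of
the patch):

	/* Old code went through the per-bus container:
	 *	struct sbus_info *info = sdev->bus->iommu;
	 *	struct iommu *iommu = &info->iommu;
	 * New code reads the pointers installed by sbus_iommu_init():
	 */
	static struct iommu *sbus_dev_iommu(struct sbus_dev *sdev)
	{
		return sdev->ofdev.dev.archdata.iommu;
	}

	static struct strbuf *sbus_dev_strbuf(struct sbus_dev *sdev)
	{
		return sdev->ofdev.dev.archdata.stc;
	}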
diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c
index a1fd9bcc0b87..d1fb13ba02b5 100644
--- a/arch/sparc64/kernel/sbus.c
+++ b/arch/sparc64/kernel/sbus.c
@@ -26,11 +26,6 @@
 
 #define MAP_BASE	((u32)0xc0000000)
 
-struct sbus_info {
-	struct iommu	iommu;
-	struct strbuf	strbuf;
-};
-
 /* Offsets from iommu_regs */
 #define SYSIO_IOMMUREG_BASE	0x2400UL
 #define IOMMU_CONTROL	(0x2400UL - 0x2400UL)	/* IOMMU control register */
@@ -44,19 +39,6 @@ struct sbus_info {
 
 #define IOMMU_DRAM_VALID	(1UL << 30UL)
 
-static void __iommu_flushall(struct iommu *iommu)
-{
-	unsigned long tag;
-	int entry;
-
-	tag = iommu->iommu_control + (IOMMU_TAGDIAG - IOMMU_CONTROL);
-	for (entry = 0; entry < 16; entry++) {
-		upa_writeq(0, tag);
-		tag += 8UL;
-	}
-	upa_readq(iommu->write_complete_reg);
-}
-
 /* Offsets from strbuf_regs */
 #define SYSIO_STRBUFREG_BASE	0x2800UL
 #define STRBUF_CONTROL	(0x2800UL - 0x2800UL)	/* Control */
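The 16-entry tag-diagnostic flush removed here is not lost: a later hunk has
sbus_iommu_init() record the tag register base in iommu->iommu_tags, so the
shared allocator can perform the same flush when its bitmap search wraps. A
hedged sketch of that flush, built only from fields visible in this diff
(the function name is illustrative):

	static void iommu_flushall_sketch(struct iommu *iommu)
	{
		/* iommu->iommu_tags points at the IOMMU_TAGDIAG area,
		 * as set up in sbus_iommu_init() below.
		 */
		unsigned long tag = iommu->iommu_tags;
		int entry;

		for (entry = 0; entry < 16; entry++) {
			upa_writeq(0, tag);	/* invalidate one TLB tag */
			tag += 8UL;
		}
		/* Read back to force completion of the PIO writes. */
		upa_readq(iommu->write_complete_reg);
	}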
@@ -69,511 +51,10 @@ static void __iommu_flushall(struct iommu *iommu)
 
 #define STRBUF_TAG_VALID	0x02UL
 
-static void sbus_strbuf_flush(struct iommu *iommu, struct strbuf *strbuf, u32 base, unsigned long npages, int direction)
-{
-	unsigned long n;
-	int limit;
-
-	n = npages;
-	while (n--)
-		upa_writeq(base + (n << IO_PAGE_SHIFT), strbuf->strbuf_pflush);
-
-	/* If the device could not have possibly put dirty data into
-	 * the streaming cache, no flush-flag synchronization needs
-	 * to be performed.
-	 */
-	if (direction == SBUS_DMA_TODEVICE)
-		return;
-
-	*(strbuf->strbuf_flushflag) = 0UL;
-
-	/* Whoopee cushion! */
-	upa_writeq(strbuf->strbuf_flushflag_pa, strbuf->strbuf_fsync);
-	upa_readq(iommu->write_complete_reg);
-
-	limit = 100000;
-	while (*(strbuf->strbuf_flushflag) == 0UL) {
-		limit--;
-		if (!limit)
-			break;
-		udelay(1);
-		rmb();
-	}
-	if (!limit)
-		printk(KERN_WARNING "sbus_strbuf_flush: flushflag timeout "
-		       "vaddr[%08x] npages[%ld]\n",
-		       base, npages);
-}
-
-/* Based largely upon the ppc64 iommu allocator. */
-static long sbus_arena_alloc(struct iommu *iommu, unsigned long npages)
-{
-	struct iommu_arena *arena = &iommu->arena;
-	unsigned long n, i, start, end, limit;
-	int pass;
-
-	limit = arena->limit;
-	start = arena->hint;
-	pass = 0;
-
-again:
-	n = find_next_zero_bit(arena->map, limit, start);
-	end = n + npages;
-	if (unlikely(end >= limit)) {
-		if (likely(pass < 1)) {
-			limit = start;
-			start = 0;
-			__iommu_flushall(iommu);
-			pass++;
-			goto again;
-		} else {
-			/* Scanned the whole thing, give up. */
-			return -1;
-		}
-	}
-
-	for (i = n; i < end; i++) {
-		if (test_bit(i, arena->map)) {
-			start = i + 1;
-			goto again;
-		}
-	}
-
-	for (i = n; i < end; i++)
-		__set_bit(i, arena->map);
-
-	arena->hint = end;
-
-	return n;
-}
-
-static void sbus_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
-{
-	unsigned long i;
-
-	for (i = base; i < (base + npages); i++)
-		__clear_bit(i, arena->map);
-}
-
-static void sbus_iommu_table_init(struct iommu *iommu, unsigned int tsbsize)
-{
-	unsigned long tsbbase, order, sz, num_tsb_entries;
-
-	num_tsb_entries = tsbsize / sizeof(iopte_t);
-
-	/* Setup initial software IOMMU state. */
-	spin_lock_init(&iommu->lock);
-	iommu->page_table_map_base = MAP_BASE;
-
-	/* Allocate and initialize the free area map. */
-	sz = num_tsb_entries / 8;
-	sz = (sz + 7UL) & ~7UL;
-	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
-	if (!iommu->arena.map) {
-		prom_printf("SBUS_IOMMU: Error, kmalloc(arena.map) failed.\n");
-		prom_halt();
-	}
-	iommu->arena.limit = num_tsb_entries;
-
-	/* Now allocate and setup the IOMMU page table itself. */
-	order = get_order(tsbsize);
-	tsbbase = __get_free_pages(GFP_KERNEL, order);
-	if (!tsbbase) {
-		prom_printf("IOMMU: Error, gfp(tsb) failed.\n");
-		prom_halt();
-	}
-	iommu->page_table = (iopte_t *)tsbbase;
-	memset(iommu->page_table, 0, tsbsize);
-}
-
-static inline iopte_t *alloc_npages(struct iommu *iommu, unsigned long npages)
-{
-	long entry;
-
-	entry = sbus_arena_alloc(iommu, npages);
-	if (unlikely(entry < 0))
-		return NULL;
-
-	return iommu->page_table + entry;
-}
-
-static inline void free_npages(struct iommu *iommu, dma_addr_t base, unsigned long npages)
-{
-	sbus_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
-}
-
-void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma_addr)
-{
-	struct sbus_info *info;
-	struct iommu *iommu;
-	iopte_t *iopte;
-	unsigned long flags, order, first_page;
-	void *ret;
-	int npages;
-
-	size = IO_PAGE_ALIGN(size);
-	order = get_order(size);
-	if (order >= 10)
-		return NULL;
-
-	first_page = __get_free_pages(GFP_KERNEL|__GFP_COMP, order);
-	if (first_page == 0UL)
-		return NULL;
-	memset((char *)first_page, 0, PAGE_SIZE << order);
-
-	info = sdev->bus->iommu;
-	iommu = &info->iommu;
-
-	spin_lock_irqsave(&iommu->lock, flags);
-	iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
-	spin_unlock_irqrestore(&iommu->lock, flags);
-
-	if (unlikely(iopte == NULL)) {
-		free_pages(first_page, order);
-		return NULL;
-	}
-
-	*dvma_addr = (iommu->page_table_map_base +
-		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
-	ret = (void *) first_page;
-	npages = size >> IO_PAGE_SHIFT;
-	first_page = __pa(first_page);
-	while (npages--) {
-		iopte_val(*iopte) = (IOPTE_VALID | IOPTE_CACHE |
-				     IOPTE_WRITE |
-				     (first_page & IOPTE_PAGE));
-		iopte++;
-		first_page += IO_PAGE_SIZE;
-	}
-
-	return ret;
-}
-
-void sbus_free_consistent(struct sbus_dev *sdev, size_t size, void *cpu, dma_addr_t dvma)
-{
-	struct sbus_info *info;
-	struct iommu *iommu;
-	iopte_t *iopte;
-	unsigned long flags, order, npages;
-
-	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
-	info = sdev->bus->iommu;
-	iommu = &info->iommu;
-	iopte = iommu->page_table +
-		((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
-
-	spin_lock_irqsave(&iommu->lock, flags);
-
-	free_npages(iommu, dvma - iommu->page_table_map_base, npages);
-
-	spin_unlock_irqrestore(&iommu->lock, flags);
-
-	order = get_order(size);
-	if (order < 10)
-		free_pages((unsigned long)cpu, order);
-}
-
-dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t sz, int direction)
-{
-	struct sbus_info *info;
-	struct iommu *iommu;
-	iopte_t *base;
-	unsigned long flags, npages, oaddr;
-	unsigned long i, base_paddr;
-	u32 bus_addr, ret;
-	unsigned long iopte_protection;
-
-	info = sdev->bus->iommu;
-	iommu = &info->iommu;
-
-	if (unlikely(direction == SBUS_DMA_NONE))
-		BUG();
-
-	oaddr = (unsigned long)ptr;
-	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
-	npages >>= IO_PAGE_SHIFT;
-
-	spin_lock_irqsave(&iommu->lock, flags);
-	base = alloc_npages(iommu, npages);
-	spin_unlock_irqrestore(&iommu->lock, flags);
-
-	if (unlikely(!base))
-		BUG();
-
-	bus_addr = (iommu->page_table_map_base +
-		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
-	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
-	base_paddr = __pa(oaddr & IO_PAGE_MASK);
-
-	iopte_protection = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
-	if (direction != SBUS_DMA_TODEVICE)
-		iopte_protection |= IOPTE_WRITE;
-
-	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
-		iopte_val(*base) = iopte_protection | base_paddr;
-
-	return ret;
-}
-
-void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, int direction)
-{
-	struct sbus_info *info = sdev->bus->iommu;
-	struct iommu *iommu = &info->iommu;
-	struct strbuf *strbuf = &info->strbuf;
-	iopte_t *base;
-	unsigned long flags, npages, i;
-
-	if (unlikely(direction == SBUS_DMA_NONE))
-		BUG();
-
-	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
-	npages >>= IO_PAGE_SHIFT;
-	base = iommu->page_table +
-		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
-
-	bus_addr &= IO_PAGE_MASK;
-
-	spin_lock_irqsave(&iommu->lock, flags);
-	sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction);
-	for (i = 0; i < npages; i++)
-		iopte_val(base[i]) = 0UL;
-	free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
-	spin_unlock_irqrestore(&iommu->lock, flags);
-}
-
-#define SG_ENT_PHYS_ADDRESS(SG)	\
-	(__pa(page_address((SG)->page)) + (SG)->offset)
-
-static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
-			   int nused, int nelems, unsigned long iopte_protection)
-{
-	struct scatterlist *dma_sg = sg;
-	struct scatterlist *sg_end = sg + nelems;
-	int i;
-
-	for (i = 0; i < nused; i++) {
-		unsigned long pteval = ~0UL;
-		u32 dma_npages;
-
-		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
-			      dma_sg->dma_length +
-			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
-		do {
-			unsigned long offset;
-			signed int len;
-
-			/* If we are here, we know we have at least one
-			 * more page to map.  So walk forward until we
-			 * hit a page crossing, and begin creating new
-			 * mappings from that spot.
-			 */
-			for (;;) {
-				unsigned long tmp;
-
-				tmp = SG_ENT_PHYS_ADDRESS(sg);
-				len = sg->length;
-				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
-					pteval = tmp & IO_PAGE_MASK;
-					offset = tmp & (IO_PAGE_SIZE - 1UL);
-					break;
-				}
-				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
-					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
-					offset = 0UL;
-					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
-					break;
-				}
-				sg++;
-			}
-
-			pteval = iopte_protection | (pteval & IOPTE_PAGE);
-			while (len > 0) {
-				*iopte++ = __iopte(pteval);
-				pteval += IO_PAGE_SIZE;
-				len -= (IO_PAGE_SIZE - offset);
-				offset = 0;
-				dma_npages--;
-			}
-
-			pteval = (pteval & IOPTE_PAGE) + len;
-			sg++;
-
-			/* Skip over any tail mappings we've fully mapped,
-			 * adjusting pteval along the way.  Stop when we
-			 * detect a page crossing event.
-			 */
-			while (sg < sg_end &&
-			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
-			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
-			       ((pteval ^
-				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
-				pteval += sg->length;
-				sg++;
-			}
-			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
-				pteval = ~0UL;
-		} while (dma_npages != 0);
-		dma_sg++;
-	}
-}
-
-int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
-{
-	struct sbus_info *info;
-	struct iommu *iommu;
-	unsigned long flags, npages, iopte_protection;
-	iopte_t *base;
-	u32 dma_base;
-	struct scatterlist *sgtmp;
-	int used;
-
-	/* Fast path single entry scatterlists. */
-	if (nelems == 1) {
-		sglist->dma_address =
-			sbus_map_single(sdev,
-					(page_address(sglist->page) + sglist->offset),
-					sglist->length, direction);
-		sglist->dma_length = sglist->length;
-		return 1;
-	}
-
-	info = sdev->bus->iommu;
-	iommu = &info->iommu;
-
-	if (unlikely(direction == SBUS_DMA_NONE))
-		BUG();
-
-	npages = prepare_sg(sglist, nelems);
-
-	spin_lock_irqsave(&iommu->lock, flags);
-	base = alloc_npages(iommu, npages);
-	spin_unlock_irqrestore(&iommu->lock, flags);
-
-	if (unlikely(base == NULL))
-		BUG();
-
-	dma_base = iommu->page_table_map_base +
-		((base - iommu->page_table) << IO_PAGE_SHIFT);
-
-	/* Normalize DVMA addresses. */
-	used = nelems;
-
-	sgtmp = sglist;
-	while (used && sgtmp->dma_length) {
-		sgtmp->dma_address += dma_base;
-		sgtmp++;
-		used--;
-	}
-	used = nelems - used;
-
-	iopte_protection = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
-	if (direction != SBUS_DMA_TODEVICE)
-		iopte_protection |= IOPTE_WRITE;
-
-	fill_sg(base, sglist, used, nelems, iopte_protection);
-
-#ifdef VERIFY_SG
-	verify_sglist(sglist, nelems, base, npages);
-#endif
-
-	return used;
-}
-
-void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
-{
-	struct sbus_info *info;
-	struct iommu *iommu;
-	struct strbuf *strbuf;
-	iopte_t *base;
-	unsigned long flags, i, npages;
-	u32 bus_addr;
-
-	if (unlikely(direction == SBUS_DMA_NONE))
-		BUG();
-
-	info = sdev->bus->iommu;
-	iommu = &info->iommu;
-	strbuf = &info->strbuf;
-
-	bus_addr = sglist->dma_address & IO_PAGE_MASK;
-
-	for (i = 1; i < nelems; i++)
-		if (sglist[i].dma_length == 0)
-			break;
-	i--;
-	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
-		  bus_addr) >> IO_PAGE_SHIFT;
-
-	base = iommu->page_table +
-		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
-
-	spin_lock_irqsave(&iommu->lock, flags);
-	sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction);
-	for (i = 0; i < npages; i++)
-		iopte_val(base[i]) = 0UL;
-	free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
-	spin_unlock_irqrestore(&iommu->lock, flags);
-}
-
-void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, int direction)
-{
-	struct sbus_info *info;
-	struct iommu *iommu;
-	struct strbuf *strbuf;
-	unsigned long flags, npages;
-
-	info = sdev->bus->iommu;
-	iommu = &info->iommu;
-	strbuf = &info->strbuf;
-
-	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
-	npages >>= IO_PAGE_SHIFT;
-	bus_addr &= IO_PAGE_MASK;
-
-	spin_lock_irqsave(&iommu->lock, flags);
-	sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction);
-	spin_unlock_irqrestore(&iommu->lock, flags);
-}
-
-void sbus_dma_sync_single_for_device(struct sbus_dev *sdev, dma_addr_t base, size_t size, int direction)
-{
-}
-
-void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
-{
-	struct sbus_info *info;
-	struct iommu *iommu;
-	struct strbuf *strbuf;
-	unsigned long flags, npages, i;
-	u32 bus_addr;
-
-	info = sdev->bus->iommu;
-	iommu = &info->iommu;
-	strbuf = &info->strbuf;
-
-	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
-	for (i = 0; i < nelems; i++) {
-		if (!sglist[i].dma_length)
-			break;
-	}
-	i--;
-	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
-		  - bus_addr) >> IO_PAGE_SHIFT;
-
-	spin_lock_irqsave(&iommu->lock, flags);
-	sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction);
-	spin_unlock_irqrestore(&iommu->lock, flags);
-}
-
-void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
-{
-}
-
 /* Enable 64-bit DVMA mode for the given device. */
 void sbus_set_sbus64(struct sbus_dev *sdev, int bursts)
 {
-	struct sbus_info *info = sdev->bus->iommu;
-	struct iommu *iommu = &info->iommu;
+	struct iommu *iommu = sdev->ofdev.dev.archdata.iommu;
 	int slot = sdev->slot;
 	unsigned long cfg_reg;
 	u64 val;
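All of the mapping helpers deleted above shared one invariant, which the
generic allocator must preserve for this fixed DVMA window: an IOPTE's index
in the page table and its DVMA address convert back and forth with
MAP_BASE (0xc0000000) as the origin. A sketch of that arithmetic, lifted
from the removed code (the helper names are illustrative):

	static dma_addr_t iopte_to_dvma(struct iommu *iommu, iopte_t *iopte)
	{
		return iommu->page_table_map_base +
			((iopte - iommu->page_table) << IO_PAGE_SHIFT);
	}

	static iopte_t *dvma_to_iopte(struct iommu *iommu, dma_addr_t dvma)
	{
		return iommu->page_table +
			((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
	}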
@@ -713,8 +194,7 @@ static unsigned long sysio_imap_to_iclr(unsigned long imap)
 unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
 {
 	struct sbus_bus *sbus = (struct sbus_bus *)buscookie;
-	struct sbus_info *info = sbus->iommu;
-	struct iommu *iommu = &info->iommu;
+	struct iommu *iommu = sbus->ofdev.dev.archdata.iommu;
 	unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
 	unsigned long imap, iclr;
 	int sbus_level = 0;
@@ -776,8 +256,7 @@ unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
 static irqreturn_t sysio_ue_handler(int irq, void *dev_id)
 {
 	struct sbus_bus *sbus = dev_id;
-	struct sbus_info *info = sbus->iommu;
-	struct iommu *iommu = &info->iommu;
+	struct iommu *iommu = sbus->ofdev.dev.archdata.iommu;
 	unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
 	unsigned long afsr_reg, afar_reg;
 	unsigned long afsr, afar, error_bits;
@@ -849,8 +328,7 @@ static irqreturn_t sysio_ue_handler(int irq, void *dev_id)
 static irqreturn_t sysio_ce_handler(int irq, void *dev_id)
 {
 	struct sbus_bus *sbus = dev_id;
-	struct sbus_info *info = sbus->iommu;
-	struct iommu *iommu = &info->iommu;
+	struct iommu *iommu = sbus->ofdev.dev.archdata.iommu;
 	unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
 	unsigned long afsr_reg, afar_reg;
 	unsigned long afsr, afar, error_bits;
@@ -927,8 +405,7 @@ static irqreturn_t sysio_ce_handler(int irq, void *dev_id)
 static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id)
 {
 	struct sbus_bus *sbus = dev_id;
-	struct sbus_info *info = sbus->iommu;
-	struct iommu *iommu = &info->iommu;
+	struct iommu *iommu = sbus->ofdev.dev.archdata.iommu;
 	unsigned long afsr_reg, afar_reg, reg_base;
 	unsigned long afsr, afar, error_bits;
 	int reported;
@@ -995,8 +472,7 @@ static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id)
 
 static void __init sysio_register_error_handlers(struct sbus_bus *sbus)
 {
-	struct sbus_info *info = sbus->iommu;
-	struct iommu *iommu = &info->iommu;
+	struct iommu *iommu = sbus->ofdev.dev.archdata.iommu;
 	unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
 	unsigned int irq;
 	u64 control;
@@ -1041,7 +517,6 @@ static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
 {
 	const struct linux_prom64_registers *pr;
 	struct device_node *dp;
-	struct sbus_info *info;
 	struct iommu *iommu;
 	struct strbuf *strbuf;
 	unsigned long regs, reg_base;
@@ -1054,25 +529,28 @@ static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
 
 	pr = of_get_property(dp, "reg", NULL);
 	if (!pr) {
-		prom_printf("sbus_iommu_init: Cannot map SYSIO control registers.\n");
+		prom_printf("sbus_iommu_init: Cannot map SYSIO "
+			    "control registers.\n");
 		prom_halt();
 	}
 	regs = pr->phys_addr;
 
-	info = kzalloc(sizeof(*info), GFP_ATOMIC);
-	if (info == NULL) {
-		prom_printf("sbus_iommu_init: Fatal error, "
-			    "kmalloc(info) failed\n");
-		prom_halt();
-	}
+	iommu = kzalloc(sizeof(*iommu), GFP_ATOMIC);
+	if (!iommu)
+		goto fatal_memory_error;
+	strbuf = kzalloc(sizeof(*strbuf), GFP_ATOMIC);
+	if (!strbuf)
+		goto fatal_memory_error;
 
-	iommu = &info->iommu;
-	strbuf = &info->strbuf;
+	sbus->ofdev.dev.archdata.iommu = iommu;
+	sbus->ofdev.dev.archdata.stc = strbuf;
 
 	reg_base = regs + SYSIO_IOMMUREG_BASE;
 	iommu->iommu_control = reg_base + IOMMU_CONTROL;
 	iommu->iommu_tsbbase = reg_base + IOMMU_TSBBASE;
 	iommu->iommu_flush = reg_base + IOMMU_FLUSH;
+	iommu->iommu_tags = iommu->iommu_control +
+		(IOMMU_TAGDIAG - IOMMU_CONTROL);
 
 	reg_base = regs + SYSIO_STRBUFREG_BASE;
 	strbuf->strbuf_control = reg_base + STRBUF_CONTROL;
@@ -1093,14 +571,12 @@ static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
 	 */
 	iommu->write_complete_reg = regs + 0x2000UL;
 
-	/* Link into SYSIO software state. */
-	sbus->iommu = info;
-
 	printk("SYSIO: UPA portID %x, at %016lx\n",
 	       sbus->portid, regs);
 
 	/* Setup for TSB_SIZE=7, TBW_SIZE=0, MMU_DE=1, MMU_EN=1 */
-	sbus_iommu_table_init(iommu, IO_TSB_SIZE);
+	if (iommu_table_init(iommu, IO_TSB_SIZE, MAP_BASE, 0xffffffff))
+		goto fatal_memory_error;
 
 	control = upa_readq(iommu->iommu_control);
 	control = ((7UL << 16UL) |
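Unlike the removed sbus_iommu_table_init(), which called prom_halt() itself,
the shared initializer reports failure to its caller, which now branches to
the fatal_memory_error label added at the end of the function. The assumed
contract, inferred from this call site and the code it replaces:

	/* int iommu_table_init(struct iommu *iommu, int tsbsize,
	 *                      u32 dma_offset, u32 dma_addr_mask);
	 *
	 * Sets iommu->page_table_map_base = dma_offset (MAP_BASE here),
	 * sizes the arena bitmap from tsbsize / sizeof(iopte_t), and
	 * returns nonzero on allocation failure rather than halting.
	 */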
@@ -1157,6 +633,10 @@ static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
 		starfire_hookup(sbus->portid);
 
 	sysio_register_error_handlers(sbus);
+	return;
+
+fatal_memory_error:
+	prom_printf("sbus_iommu_init: Fatal memory allocation error.\n");
 }
 
 void sbus_fill_device_irq(struct sbus_dev *sdev)