Diffstat (limited to 'arch/sparc64/kernel/sbus.c')
-rw-r--r--  arch/sparc64/kernel/sbus.c  243
1 file changed, 136 insertions(+), 107 deletions(-)
diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c
index 14f78fb5e890..3b05428cc909 100644
--- a/arch/sparc64/kernel/sbus.c
+++ b/arch/sparc64/kernel/sbus.c
@@ -26,23 +26,9 @@
 
 #define MAP_BASE	((u32)0xc0000000)
 
-struct sbus_iommu_arena {
-	unsigned long	*map;
-	unsigned int	hint;
-	unsigned int	limit;
-};
-
-struct sbus_iommu {
-	spinlock_t	lock;
-
-	struct sbus_iommu_arena	arena;
-
-	iopte_t		*page_table;
-	unsigned long	strbuf_regs;
-	unsigned long	iommu_regs;
-	unsigned long	sbus_control_reg;
-
-	volatile unsigned long	strbuf_flushflag;
+struct sbus_info {
+	struct iommu	iommu;
+	struct strbuf	strbuf;
 };
 
 /* Offsets from iommu_regs */
@@ -58,16 +44,17 @@ struct sbus_iommu {
 
 #define IOMMU_DRAM_VALID	(1UL << 30UL)
 
-static void __iommu_flushall(struct sbus_iommu *iommu)
+static void __iommu_flushall(struct iommu *iommu)
 {
-	unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;
+	unsigned long tag;
 	int entry;
 
+	tag = iommu->iommu_control + (IOMMU_TAGDIAG - IOMMU_CONTROL);
 	for (entry = 0; entry < 16; entry++) {
 		upa_writeq(0, tag);
 		tag += 8UL;
 	}
-	upa_readq(iommu->sbus_control_reg);
+	upa_readq(iommu->write_complete_reg);
 }
 
 /* Offsets from strbuf_regs */
@@ -82,15 +69,14 @@ static void __iommu_flushall(struct sbus_iommu *iommu)
 
 #define STRBUF_TAG_VALID	0x02UL
 
-static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages, int direction)
+static void sbus_strbuf_flush(struct iommu *iommu, struct strbuf *strbuf, u32 base, unsigned long npages, int direction)
 {
 	unsigned long n;
 	int limit;
 
 	n = npages;
 	while (n--)
-		upa_writeq(base + (n << IO_PAGE_SHIFT),
-			   iommu->strbuf_regs + STRBUF_PFLUSH);
+		upa_writeq(base + (n << IO_PAGE_SHIFT), strbuf->strbuf_pflush);
 
 	/* If the device could not have possibly put dirty data into
 	 * the streaming cache, no flush-flag synchronization needs
@@ -99,15 +85,14 @@ static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long
 	if (direction == SBUS_DMA_TODEVICE)
 		return;
 
-	iommu->strbuf_flushflag = 0UL;
+	*(strbuf->strbuf_flushflag) = 0UL;
 
 	/* Whoopee cushion! */
-	upa_writeq(__pa(&iommu->strbuf_flushflag),
-		   iommu->strbuf_regs + STRBUF_FSYNC);
-	upa_readq(iommu->sbus_control_reg);
+	upa_writeq(strbuf->strbuf_flushflag_pa, strbuf->strbuf_fsync);
+	upa_readq(iommu->write_complete_reg);
 
 	limit = 100000;
-	while (iommu->strbuf_flushflag == 0UL) {
+	while (*(strbuf->strbuf_flushflag) == 0UL) {
 		limit--;
 		if (!limit)
 			break;
@@ -121,9 +106,9 @@ static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long
 }
 
 /* Based largely upon the ppc64 iommu allocator. */
-static long sbus_arena_alloc(struct sbus_iommu *iommu, unsigned long npages)
+static long sbus_arena_alloc(struct iommu *iommu, unsigned long npages)
 {
-	struct sbus_iommu_arena *arena = &iommu->arena;
+	struct iommu_arena *arena = &iommu->arena;
 	unsigned long n, i, start, end, limit;
 	int pass;
 
@@ -162,7 +147,7 @@ again:
 	return n;
 }
 
-static void sbus_arena_free(struct sbus_iommu_arena *arena, unsigned long base, unsigned long npages)
+static void sbus_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
 {
 	unsigned long i;
 
@@ -170,7 +155,7 @@ static void sbus_arena_free(struct sbus_iommu_arena *arena, unsigned long base,
 		__clear_bit(i, arena->map);
 }
 
-static void sbus_iommu_table_init(struct sbus_iommu *iommu, unsigned int tsbsize)
+static void sbus_iommu_table_init(struct iommu *iommu, unsigned int tsbsize)
 {
 	unsigned long tsbbase, order, sz, num_tsb_entries;
 
@@ -178,13 +163,14 @@ static void sbus_iommu_table_init(struct sbus_iommu *iommu, unsigned int tsbsize
 
 	/* Setup initial software IOMMU state. */
 	spin_lock_init(&iommu->lock);
+	iommu->page_table_map_base = MAP_BASE;
 
 	/* Allocate and initialize the free area map. */
 	sz = num_tsb_entries / 8;
 	sz = (sz + 7UL) & ~7UL;
 	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
 	if (!iommu->arena.map) {
-		prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
+		prom_printf("SBUS_IOMMU: Error, kmalloc(arena.map) failed.\n");
 		prom_halt();
 	}
 	iommu->arena.limit = num_tsb_entries;
@@ -200,7 +186,7 @@ static void sbus_iommu_table_init(struct sbus_iommu *iommu, unsigned int tsbsize
 	memset(iommu->page_table, 0, tsbsize);
 }
 
-static inline iopte_t *alloc_npages(struct sbus_iommu *iommu, unsigned long npages)
+static inline iopte_t *alloc_npages(struct iommu *iommu, unsigned long npages)
 {
 	long entry;
 
@@ -211,14 +197,15 @@ static inline iopte_t *alloc_npages(struct sbus_iommu *iommu, unsigned long npag
 	return iommu->page_table + entry;
 }
 
-static inline void free_npages(struct sbus_iommu *iommu, dma_addr_t base, unsigned long npages)
+static inline void free_npages(struct iommu *iommu, dma_addr_t base, unsigned long npages)
 {
 	sbus_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
 }
 
 void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma_addr)
 {
-	struct sbus_iommu *iommu;
+	struct sbus_info *info;
+	struct iommu *iommu;
 	iopte_t *iopte;
 	unsigned long flags, order, first_page;
 	void *ret;
@@ -234,7 +221,8 @@ void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma
 		return NULL;
 	memset((char *)first_page, 0, PAGE_SIZE << order);
 
-	iommu = sdev->bus->iommu;
+	info = sdev->bus->iommu;
+	iommu = &info->iommu;
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
@@ -245,7 +233,7 @@ void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma
 		return NULL;
 	}
 
-	*dvma_addr = (MAP_BASE +
+	*dvma_addr = (iommu->page_table_map_base +
 		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
 	ret = (void *) first_page;
 	npages = size >> IO_PAGE_SHIFT;
@@ -263,18 +251,20 @@ void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma
 
 void sbus_free_consistent(struct sbus_dev *sdev, size_t size, void *cpu, dma_addr_t dvma)
 {
-	struct sbus_iommu *iommu;
+	struct sbus_info *info;
+	struct iommu *iommu;
 	iopte_t *iopte;
 	unsigned long flags, order, npages;
 
 	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
-	iommu = sdev->bus->iommu;
+	info = sdev->bus->iommu;
+	iommu = &info->iommu;
 	iopte = iommu->page_table +
-		((dvma - MAP_BASE) >> IO_PAGE_SHIFT);
+		((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
 
 	spin_lock_irqsave(&iommu->lock, flags);
 
-	free_npages(iommu, dvma - MAP_BASE, npages);
+	free_npages(iommu, dvma - iommu->page_table_map_base, npages);
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
@@ -285,14 +275,16 @@ void sbus_free_consistent(struct sbus_dev *sdev, size_t size, void *cpu, dma_add
 
 dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t sz, int direction)
 {
-	struct sbus_iommu *iommu;
+	struct sbus_info *info;
+	struct iommu *iommu;
 	iopte_t *base;
 	unsigned long flags, npages, oaddr;
 	unsigned long i, base_paddr;
 	u32 bus_addr, ret;
 	unsigned long iopte_protection;
 
-	iommu = sdev->bus->iommu;
+	info = sdev->bus->iommu;
+	iommu = &info->iommu;
 
 	if (unlikely(direction == SBUS_DMA_NONE))
 		BUG();
@@ -308,7 +300,7 @@ dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t sz, int dire
 	if (unlikely(!base))
 		BUG();
 
-	bus_addr = (MAP_BASE +
+	bus_addr = (iommu->page_table_map_base +
 		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
 	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
 	base_paddr = __pa(oaddr & IO_PAGE_MASK);
@@ -325,7 +317,9 @@ dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t sz, int dire
 
 void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, int direction)
 {
-	struct sbus_iommu *iommu = sdev->bus->iommu;
+	struct sbus_info *info = sdev->bus->iommu;
+	struct iommu *iommu = &info->iommu;
+	struct strbuf *strbuf = &info->strbuf;
 	iopte_t *base;
 	unsigned long flags, npages, i;
 
@@ -335,15 +329,15 @@ void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, in
 	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
 	npages >>= IO_PAGE_SHIFT;
 	base = iommu->page_table +
-		((bus_addr - MAP_BASE) >> IO_PAGE_SHIFT);
+		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
 
 	bus_addr &= IO_PAGE_MASK;
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	sbus_strbuf_flush(iommu, bus_addr, npages, direction);
+	sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction);
 	for (i = 0; i < npages; i++)
 		iopte_val(base[i]) = 0UL;
-	free_npages(iommu, bus_addr - MAP_BASE, npages);
+	free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -425,7 +419,8 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
 
 int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
 {
-	struct sbus_iommu *iommu;
+	struct sbus_info *info;
+	struct iommu *iommu;
 	unsigned long flags, npages, iopte_protection;
 	iopte_t *base;
 	u32 dma_base;
@@ -442,7 +437,8 @@ int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, i
 		return 1;
 	}
 
-	iommu = sdev->bus->iommu;
+	info = sdev->bus->iommu;
+	iommu = &info->iommu;
 
 	if (unlikely(direction == SBUS_DMA_NONE))
 		BUG();
@@ -456,7 +452,7 @@ int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, i
 	if (unlikely(base == NULL))
 		BUG();
 
-	dma_base = MAP_BASE +
+	dma_base = iommu->page_table_map_base +
 		((base - iommu->page_table) << IO_PAGE_SHIFT);
 
 	/* Normalize DVMA addresses. */
@@ -485,7 +481,9 @@ int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, i
 
 void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
 {
-	struct sbus_iommu *iommu;
+	struct sbus_info *info;
+	struct iommu *iommu;
+	struct strbuf *strbuf;
 	iopte_t *base;
 	unsigned long flags, i, npages;
 	u32 bus_addr;
@@ -493,7 +491,9 @@ void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems
 	if (unlikely(direction == SBUS_DMA_NONE))
 		BUG();
 
-	iommu = sdev->bus->iommu;
+	info = sdev->bus->iommu;
+	iommu = &info->iommu;
+	strbuf = &info->strbuf;
 
 	bus_addr = sglist->dma_address & IO_PAGE_MASK;
 
@@ -505,29 +505,33 @@ void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems
 					bus_addr) >> IO_PAGE_SHIFT;
 
 	base = iommu->page_table +
-		((bus_addr - MAP_BASE) >> IO_PAGE_SHIFT);
+		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	sbus_strbuf_flush(iommu, bus_addr, npages, direction);
+	sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction);
 	for (i = 0; i < npages; i++)
 		iopte_val(base[i]) = 0UL;
-	free_npages(iommu, bus_addr - MAP_BASE, npages);
+	free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
 void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, int direction)
 {
-	struct sbus_iommu *iommu;
+	struct sbus_info *info;
+	struct iommu *iommu;
+	struct strbuf *strbuf;
 	unsigned long flags, npages;
 
-	iommu = sdev->bus->iommu;
+	info = sdev->bus->iommu;
+	iommu = &info->iommu;
+	strbuf = &info->strbuf;
 
 	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
 	npages >>= IO_PAGE_SHIFT;
 	bus_addr &= IO_PAGE_MASK;
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	sbus_strbuf_flush(iommu, bus_addr, npages, direction);
+	sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -537,11 +541,15 @@ void sbus_dma_sync_single_for_device(struct sbus_dev *sdev, dma_addr_t base, siz
 
 void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
 {
-	struct sbus_iommu *iommu;
+	struct sbus_info *info;
+	struct iommu *iommu;
+	struct strbuf *strbuf;
 	unsigned long flags, npages, i;
 	u32 bus_addr;
 
-	iommu = sdev->bus->iommu;
+	info = sdev->bus->iommu;
+	iommu = &info->iommu;
+	strbuf = &info->strbuf;
 
 	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
 	for (i = 0; i < nelems; i++) {
@@ -553,7 +561,7 @@ void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sglist,
 		  - bus_addr) >> IO_PAGE_SHIFT;
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	sbus_strbuf_flush(iommu, bus_addr, npages, direction);
+	sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -564,12 +572,13 @@ void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev, struct scatterlist *sg,
 /* Enable 64-bit DVMA mode for the given device. */
 void sbus_set_sbus64(struct sbus_dev *sdev, int bursts)
 {
-	struct sbus_iommu *iommu = sdev->bus->iommu;
+	struct sbus_info *info = sdev->bus->iommu;
+	struct iommu *iommu = &info->iommu;
 	int slot = sdev->slot;
 	unsigned long cfg_reg;
 	u64 val;
 
-	cfg_reg = iommu->sbus_control_reg;
+	cfg_reg = iommu->write_complete_reg;
 	switch (slot) {
 	case 0:
 		cfg_reg += 0x20UL;
@@ -704,8 +713,9 @@ static unsigned long sysio_imap_to_iclr(unsigned long imap)
 unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
 {
 	struct sbus_bus *sbus = (struct sbus_bus *)buscookie;
-	struct sbus_iommu *iommu = sbus->iommu;
-	unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
+	struct sbus_info *info = sbus->iommu;
+	struct iommu *iommu = &info->iommu;
+	unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
 	unsigned long imap, iclr;
 	int sbus_level = 0;
 
@@ -766,8 +776,9 @@ unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
 static irqreturn_t sysio_ue_handler(int irq, void *dev_id)
 {
 	struct sbus_bus *sbus = dev_id;
-	struct sbus_iommu *iommu = sbus->iommu;
-	unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
+	struct sbus_info *info = sbus->iommu;
+	struct iommu *iommu = &info->iommu;
+	unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
 	unsigned long afsr_reg, afar_reg;
 	unsigned long afsr, afar, error_bits;
 	int reported;
@@ -838,8 +849,9 @@ static irqreturn_t sysio_ue_handler(int irq, void *dev_id)
 static irqreturn_t sysio_ce_handler(int irq, void *dev_id)
 {
 	struct sbus_bus *sbus = dev_id;
-	struct sbus_iommu *iommu = sbus->iommu;
-	unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
+	struct sbus_info *info = sbus->iommu;
+	struct iommu *iommu = &info->iommu;
+	unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
 	unsigned long afsr_reg, afar_reg;
 	unsigned long afsr, afar, error_bits;
 	int reported;
@@ -915,12 +927,13 @@ static irqreturn_t sysio_ce_handler(int irq, void *dev_id)
 static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id)
 {
 	struct sbus_bus *sbus = dev_id;
-	struct sbus_iommu *iommu = sbus->iommu;
+	struct sbus_info *info = sbus->iommu;
+	struct iommu *iommu = &info->iommu;
 	unsigned long afsr_reg, afar_reg, reg_base;
 	unsigned long afsr, afar, error_bits;
 	int reported;
 
-	reg_base = iommu->sbus_control_reg - 0x2000UL;
+	reg_base = iommu->write_complete_reg - 0x2000UL;
 	afsr_reg = reg_base + SYSIO_SBUS_AFSR;
 	afar_reg = reg_base + SYSIO_SBUS_AFAR;
 
@@ -982,8 +995,9 @@ static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id)
 
 static void __init sysio_register_error_handlers(struct sbus_bus *sbus)
 {
-	struct sbus_iommu *iommu = sbus->iommu;
-	unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
+	struct sbus_info *info = sbus->iommu;
+	struct iommu *iommu = &info->iommu;
+	unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
 	unsigned int irq;
 	u64 control;
 
@@ -1017,18 +1031,20 @@ static void __init sysio_register_error_handlers(struct sbus_bus *sbus)
 			       SYSIO_ECNTRL_CEEN),
 		   reg_base + ECC_CONTROL);
 
-	control = upa_readq(iommu->sbus_control_reg);
+	control = upa_readq(iommu->write_complete_reg);
 	control |= 0x100UL; /* SBUS Error Interrupt Enable */
-	upa_writeq(control, iommu->sbus_control_reg);
+	upa_writeq(control, iommu->write_complete_reg);
 }
 
 /* Boot time initialization. */
 static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
 {
-	struct linux_prom64_registers *pr;
+	const struct linux_prom64_registers *pr;
 	struct device_node *dp;
-	struct sbus_iommu *iommu;
-	unsigned long regs;
+	struct sbus_info *info;
+	struct iommu *iommu;
+	struct strbuf *strbuf;
+	unsigned long regs, reg_base;
 	u64 control;
 	int i;
 
@@ -1043,33 +1059,42 @@ static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
 	}
 	regs = pr->phys_addr;
 
-	iommu = kmalloc(sizeof(*iommu) + SMP_CACHE_BYTES, GFP_ATOMIC);
-	if (iommu == NULL) {
-		prom_printf("sbus_iommu_init: Fatal error, kmalloc(iommu) failed\n");
+	info = kzalloc(sizeof(*info), GFP_ATOMIC);
+	if (info == NULL) {
+		prom_printf("sbus_iommu_init: Fatal error, "
+			    "kmalloc(info) failed\n");
 		prom_halt();
 	}
 
-	/* Align on E$ line boundary. */
-	iommu = (struct sbus_iommu *)
-		(((unsigned long)iommu + (SMP_CACHE_BYTES - 1UL)) &
-		 ~(SMP_CACHE_BYTES - 1UL));
+	iommu = &info->iommu;
+	strbuf = &info->strbuf;
 
-	memset(iommu, 0, sizeof(*iommu));
+	reg_base = regs + SYSIO_IOMMUREG_BASE;
+	iommu->iommu_control = reg_base + IOMMU_CONTROL;
+	iommu->iommu_tsbbase = reg_base + IOMMU_TSBBASE;
+	iommu->iommu_flush = reg_base + IOMMU_FLUSH;
 
-	/* Setup spinlock. */
-	spin_lock_init(&iommu->lock);
+	reg_base = regs + SYSIO_STRBUFREG_BASE;
+	strbuf->strbuf_control = reg_base + STRBUF_CONTROL;
+	strbuf->strbuf_pflush = reg_base + STRBUF_PFLUSH;
+	strbuf->strbuf_fsync = reg_base + STRBUF_FSYNC;
 
-	/* Init register offsets. */
-	iommu->iommu_regs = regs + SYSIO_IOMMUREG_BASE;
-	iommu->strbuf_regs = regs + SYSIO_STRBUFREG_BASE;
+	strbuf->strbuf_enabled = 1;
+
+	strbuf->strbuf_flushflag = (volatile unsigned long *)
+		((((unsigned long)&strbuf->__flushflag_buf[0])
+		  + 63UL)
+		 & ~63UL);
+	strbuf->strbuf_flushflag_pa = (unsigned long)
+		__pa(strbuf->strbuf_flushflag);
 
 	/* The SYSIO SBUS control register is used for dummy reads
 	 * in order to ensure write completion.
 	 */
-	iommu->sbus_control_reg = regs + 0x2000UL;
+	iommu->write_complete_reg = regs + 0x2000UL;
 
 	/* Link into SYSIO software state. */
-	sbus->iommu = iommu;
+	sbus->iommu = info;
 
 	printk("SYSIO: UPA portID %x, at %016lx\n",
 	       sbus->portid, regs);
@@ -1077,40 +1102,44 @@ static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
 	/* Setup for TSB_SIZE=7, TBW_SIZE=0, MMU_DE=1, MMU_EN=1 */
 	sbus_iommu_table_init(iommu, IO_TSB_SIZE);
 
-	control = upa_readq(iommu->iommu_regs + IOMMU_CONTROL);
+	control = upa_readq(iommu->iommu_control);
 	control = ((7UL << 16UL) |
 		   (0UL << 2UL) |
 		   (1UL << 1UL) |
 		   (1UL << 0UL));
-	upa_writeq(control, iommu->iommu_regs + IOMMU_CONTROL);
+	upa_writeq(control, iommu->iommu_control);
 
 	/* Clean out any cruft in the IOMMU using
 	 * diagnostic accesses.
 	 */
 	for (i = 0; i < 16; i++) {
-		unsigned long dram = iommu->iommu_regs + IOMMU_DRAMDIAG;
-		unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;
+		unsigned long dram, tag;
+
+		dram = iommu->iommu_control + (IOMMU_DRAMDIAG - IOMMU_CONTROL);
+		tag = iommu->iommu_control + (IOMMU_TAGDIAG - IOMMU_CONTROL);
 
 		dram += (unsigned long)i * 8UL;
 		tag += (unsigned long)i * 8UL;
 		upa_writeq(0, dram);
 		upa_writeq(0, tag);
 	}
-	upa_readq(iommu->sbus_control_reg);
+	upa_readq(iommu->write_complete_reg);
 
 	/* Give the TSB to SYSIO. */
-	upa_writeq(__pa(iommu->page_table), iommu->iommu_regs + IOMMU_TSBBASE);
+	upa_writeq(__pa(iommu->page_table), iommu->iommu_tsbbase);
 
 	/* Setup streaming buffer, DE=1 SB_EN=1 */
 	control = (1UL << 1UL) | (1UL << 0UL);
-	upa_writeq(control, iommu->strbuf_regs + STRBUF_CONTROL);
+	upa_writeq(control, strbuf->strbuf_control);
 
 	/* Clear out the tags using diagnostics. */
 	for (i = 0; i < 16; i++) {
 		unsigned long ptag, ltag;
 
-		ptag = iommu->strbuf_regs + STRBUF_PTAGDIAG;
-		ltag = iommu->strbuf_regs + STRBUF_LTAGDIAG;
+		ptag = strbuf->strbuf_control +
+			(STRBUF_PTAGDIAG - STRBUF_CONTROL);
+		ltag = strbuf->strbuf_control +
+			(STRBUF_LTAGDIAG - STRBUF_CONTROL);
 		ptag += (unsigned long)i * 8UL;
 		ltag += (unsigned long)i * 8UL;
 
@@ -1119,9 +1148,9 @@ static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
 	}
 
 	/* Enable DVMA arbitration for all devices/slots. */
-	control = upa_readq(iommu->sbus_control_reg);
+	control = upa_readq(iommu->write_complete_reg);
 	control |= 0x3fUL;
-	upa_writeq(control, iommu->sbus_control_reg);
+	upa_writeq(control, iommu->write_complete_reg);
 
 	/* Now some Xfire specific grot... */
 	if (this_is_starfire)
@@ -1133,7 +1162,7 @@ static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
 void sbus_fill_device_irq(struct sbus_dev *sdev)
 {
 	struct device_node *dp = of_find_node_by_phandle(sdev->prom_node);
-	struct linux_prom_irqs *irqs;
+	const struct linux_prom_irqs *irqs;
 
 	irqs = of_get_property(dp, "interrupts", NULL);
 	if (!irqs) {