-rw-r--r--	arch/sparc64/kernel/sbus.c	229
1 files changed, 132 insertions, 97 deletions
diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c
index 279758d56b29..3b05428cc909 100644
--- a/arch/sparc64/kernel/sbus.c
+++ b/arch/sparc64/kernel/sbus.c
@@ -26,17 +26,9 @@
 
 #define MAP_BASE	((u32)0xc0000000)
 
-struct sbus_iommu {
-	spinlock_t		lock;
-
-	struct iommu_arena	arena;
-
-	iopte_t			*page_table;
-	unsigned long		strbuf_regs;
-	unsigned long		iommu_regs;
-	unsigned long		sbus_control_reg;
-
-	volatile unsigned long	strbuf_flushflag;
+struct sbus_info {
+	struct iommu		iommu;
+	struct strbuf		strbuf;
 };
 
 /* Offsets from iommu_regs */
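[Editor's note] The bus-private struct sbus_iommu becomes a thin container around the shared struct iommu and struct strbuf state. As orientation only, here is a standalone sketch of the shapes involved, reconstructed from the fields this patch touches; the authoritative definitions live in the sparc64 headers, carry more members, and use kernel types, so the stand-in typedefs below exist only so the sketch compiles on its own.

/* Editor's sketch, not part of the patch. */
#include <stdint.h>

typedef uint64_t iopte_t;	/* stand-in for the kernel's iopte_t */
typedef int spinlock_t;		/* stand-in for the kernel's spinlock_t */

struct iommu_arena {
	unsigned long	*map;	/* allocation bitmap, one bit per IOPTE */
	unsigned int	hint;
	unsigned int	limit;	/* number of TSB entries */
};

struct iommu {
	spinlock_t	lock;
	struct iommu_arena arena;
	iopte_t		*page_table;
	uint32_t	page_table_map_base;	/* MAP_BASE for this bus */
	unsigned long	iommu_control;		/* register addresses, not offsets */
	unsigned long	iommu_tsbbase;
	unsigned long	iommu_flush;
	unsigned long	write_complete_reg;	/* was sbus_control_reg */
};

struct strbuf {
	int		strbuf_enabled;
	unsigned long	strbuf_control;		/* register addresses */
	unsigned long	strbuf_pflush;
	unsigned long	strbuf_fsync;
	unsigned long	strbuf_flushflag_pa;
	volatile unsigned long *strbuf_flushflag;
	volatile unsigned long __flushflag_buf[16];	/* room for 64-byte alignment */
};

struct sbus_info {		/* what sbus->iommu points at after this patch */
	struct iommu	iommu;
	struct strbuf	strbuf;
};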
@@ -52,16 +44,17 @@ struct sbus_iommu {
 
 #define IOMMU_DRAM_VALID	(1UL << 30UL)
 
-static void __iommu_flushall(struct sbus_iommu *iommu)
+static void __iommu_flushall(struct iommu *iommu)
 {
-	unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;
+	unsigned long tag;
 	int entry;
 
+	tag = iommu->iommu_control + (IOMMU_TAGDIAG - IOMMU_CONTROL);
 	for (entry = 0; entry < 16; entry++) {
 		upa_writeq(0, tag);
 		tag += 8UL;
 	}
 	upa_readq(iommu->write_complete_reg);
 }
 
 /* Offsets from strbuf_regs */
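[Editor's note] After the conversion, struct iommu stores the address of each register it needs (iommu_control, iommu_tsbbase, iommu_flush) rather than a register-block base, so other registers such as the tag diagnostics are reached by adding the difference of two offsets to a stored address, as in the new line above. A standalone sketch of that idiom, with purely illustrative offset and base values:

#include <assert.h>

/* Illustrative values only; the real offsets are the "Offsets from
 * iommu_regs" defined earlier in sbus.c.
 */
#define IOMMU_CONTROL	0x0000UL
#define IOMMU_TAGDIAG	0x4580UL

int main(void)
{
	unsigned long regs = 0x40000000UL;	/* example register block base */
	unsigned long iommu_control = regs + IOMMU_CONTROL;

	/* Reaching another register from a stored register address is the
	 * same as reaching it from the block base.
	 */
	assert(iommu_control + (IOMMU_TAGDIAG - IOMMU_CONTROL) ==
	       regs + IOMMU_TAGDIAG);
	return 0;
}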
@@ -76,15 +69,14 @@ static void __iommu_flushall(struct sbus_iommu *iommu)
 
 #define STRBUF_TAG_VALID	0x02UL
 
-static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages, int direction)
+static void sbus_strbuf_flush(struct iommu *iommu, struct strbuf *strbuf, u32 base, unsigned long npages, int direction)
 {
 	unsigned long n;
 	int limit;
 
 	n = npages;
 	while (n--)
-		upa_writeq(base + (n << IO_PAGE_SHIFT),
-			   iommu->strbuf_regs + STRBUF_PFLUSH);
+		upa_writeq(base + (n << IO_PAGE_SHIFT), strbuf->strbuf_pflush);
 
 	/* If the device could not have possibly put dirty data into
 	 * the streaming cache, no flush-flag synchronization needs
@@ -93,15 +85,14 @@ static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long
 	if (direction == SBUS_DMA_TODEVICE)
 		return;
 
-	iommu->strbuf_flushflag = 0UL;
+	*(strbuf->strbuf_flushflag) = 0UL;
 
 	/* Whoopee cushion! */
-	upa_writeq(__pa(&iommu->strbuf_flushflag),
-		   iommu->strbuf_regs + STRBUF_FSYNC);
-	upa_readq(iommu->sbus_control_reg);
+	upa_writeq(strbuf->strbuf_flushflag_pa, strbuf->strbuf_fsync);
+	upa_readq(iommu->write_complete_reg);
 
 	limit = 100000;
-	while (iommu->strbuf_flushflag == 0UL) {
+	while (*(strbuf->strbuf_flushflag) == 0UL) {
 		limit--;
 		if (!limit)
 			break;
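[Editor's note] The flush-flag handshake itself is unchanged by this patch, only relocated into struct strbuf: clear a flag word in memory, write its physical address to the FSYNC register so the hardware can set it once the flush has drained, force write completion with a dummy read, then poll with a bounded spin. A reduced, standalone sketch of that sequence; mmio_write64, mmio_read64 and virt_to_phys_addr are hypothetical stand-ins for upa_writeq, upa_readq and __pa.

#include <stdint.h>

/* Hypothetical stand-ins for upa_writeq()/upa_readq()/__pa(). */
extern void mmio_write64(uint64_t val, unsigned long reg);
extern uint64_t mmio_read64(unsigned long reg);
extern unsigned long virt_to_phys_addr(const volatile void *p);

static void strbuf_flush_sync(volatile unsigned long *flushflag,
			      unsigned long fsync_reg,
			      unsigned long write_complete_reg)
{
	int limit = 100000;

	*flushflag = 0UL;				/* arm the flag */
	mmio_write64(virt_to_phys_addr(flushflag), fsync_reg);
	mmio_read64(write_complete_reg);		/* ensure the writes have landed */

	while (*flushflag == 0UL) {			/* bounded poll, as above */
		if (!--limit)
			break;
	}
}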
@@ -115,7 +106,7 @@ static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long
 }
 
 /* Based largely upon the ppc64 iommu allocator. */
-static long sbus_arena_alloc(struct sbus_iommu *iommu, unsigned long npages)
+static long sbus_arena_alloc(struct iommu *iommu, unsigned long npages)
 {
 	struct iommu_arena *arena = &iommu->arena;
 	unsigned long n, i, start, end, limit;
@@ -164,7 +155,7 @@ static void sbus_arena_free(struct iommu_arena *arena, unsigned long base, unsig
 		__clear_bit(i, arena->map);
 }
 
-static void sbus_iommu_table_init(struct sbus_iommu *iommu, unsigned int tsbsize)
+static void sbus_iommu_table_init(struct iommu *iommu, unsigned int tsbsize)
 {
 	unsigned long tsbbase, order, sz, num_tsb_entries;
 
@@ -172,13 +163,14 @@ static void sbus_iommu_table_init(struct sbus_iommu *iommu, unsigned int tsbsize
 
 	/* Setup initial software IOMMU state. */
 	spin_lock_init(&iommu->lock);
+	iommu->page_table_map_base = MAP_BASE;
 
 	/* Allocate and initialize the free area map. */
 	sz = num_tsb_entries / 8;
 	sz = (sz + 7UL) & ~7UL;
 	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
 	if (!iommu->arena.map) {
-		prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
+		prom_printf("SBUS_IOMMU: Error, kmalloc(arena.map) failed.\n");
 		prom_halt();
 	}
 	iommu->arena.limit = num_tsb_entries;
@@ -194,7 +186,7 @@ static void sbus_iommu_table_init(struct sbus_iommu *iommu, unsigned int tsbsize
 	memset(iommu->page_table, 0, tsbsize);
 }
 
-static inline iopte_t *alloc_npages(struct sbus_iommu *iommu, unsigned long npages)
+static inline iopte_t *alloc_npages(struct iommu *iommu, unsigned long npages)
 {
 	long entry;
 
@@ -205,14 +197,15 @@ static inline iopte_t *alloc_npages(struct sbus_iommu *iommu, unsigned long npag
 	return iommu->page_table + entry;
 }
 
-static inline void free_npages(struct sbus_iommu *iommu, dma_addr_t base, unsigned long npages)
+static inline void free_npages(struct iommu *iommu, dma_addr_t base, unsigned long npages)
 {
 	sbus_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
 }
 
 void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma_addr)
 {
-	struct sbus_iommu *iommu;
+	struct sbus_info *info;
+	struct iommu *iommu;
 	iopte_t *iopte;
 	unsigned long flags, order, first_page;
 	void *ret;
@@ -228,7 +221,8 @@ void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma
 		return NULL;
 	memset((char *)first_page, 0, PAGE_SIZE << order);
 
-	iommu = sdev->bus->iommu;
+	info = sdev->bus->iommu;
+	iommu = &info->iommu;
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
@@ -239,7 +233,7 @@ void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma
 		return NULL;
 	}
 
-	*dvma_addr = (MAP_BASE +
+	*dvma_addr = (iommu->page_table_map_base +
 		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
 	ret = (void *) first_page;
 	npages = size >> IO_PAGE_SHIFT;
@@ -257,18 +251,20 @@ void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma
 
 void sbus_free_consistent(struct sbus_dev *sdev, size_t size, void *cpu, dma_addr_t dvma)
 {
-	struct sbus_iommu *iommu;
+	struct sbus_info *info;
+	struct iommu *iommu;
 	iopte_t *iopte;
 	unsigned long flags, order, npages;
 
 	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
-	iommu = sdev->bus->iommu;
+	info = sdev->bus->iommu;
+	iommu = &info->iommu;
 	iopte = iommu->page_table +
-		((dvma - MAP_BASE) >> IO_PAGE_SHIFT);
+		((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
 
 	spin_lock_irqsave(&iommu->lock, flags);
 
-	free_npages(iommu, dvma - MAP_BASE, npages);
+	free_npages(iommu, dvma - iommu->page_table_map_base, npages);
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
@@ -279,14 +275,16 @@ void sbus_free_consistent(struct sbus_dev *sdev, size_t size, void *cpu, dma_add
 
 dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t sz, int direction)
 {
-	struct sbus_iommu *iommu;
+	struct sbus_info *info;
+	struct iommu *iommu;
 	iopte_t *base;
 	unsigned long flags, npages, oaddr;
 	unsigned long i, base_paddr;
 	u32 bus_addr, ret;
 	unsigned long iopte_protection;
 
-	iommu = sdev->bus->iommu;
+	info = sdev->bus->iommu;
+	iommu = &info->iommu;
 
 	if (unlikely(direction == SBUS_DMA_NONE))
 		BUG();
@@ -302,7 +300,7 @@ dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t sz, int dire
 	if (unlikely(!base))
 		BUG();
 
-	bus_addr = (MAP_BASE +
+	bus_addr = (iommu->page_table_map_base +
 		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
 	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
 	base_paddr = __pa(oaddr & IO_PAGE_MASK);
@@ -319,7 +317,9 @@ dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t sz, int dire
 
 void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, int direction)
 {
-	struct sbus_iommu *iommu = sdev->bus->iommu;
+	struct sbus_info *info = sdev->bus->iommu;
+	struct iommu *iommu = &info->iommu;
+	struct strbuf *strbuf = &info->strbuf;
 	iopte_t *base;
 	unsigned long flags, npages, i;
 
@@ -329,15 +329,15 @@ void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, in
 	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
 	npages >>= IO_PAGE_SHIFT;
 	base = iommu->page_table +
-		((bus_addr - MAP_BASE) >> IO_PAGE_SHIFT);
+		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
 
 	bus_addr &= IO_PAGE_MASK;
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	sbus_strbuf_flush(iommu, bus_addr, npages, direction);
+	sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction);
 	for (i = 0; i < npages; i++)
 		iopte_val(base[i]) = 0UL;
-	free_npages(iommu, bus_addr - MAP_BASE, npages);
+	free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
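[Editor's note] All of the map/unmap paths translate between a DVMA (bus) address and an IOPTE slot with the same arithmetic, now expressed through iommu->page_table_map_base instead of the bare MAP_BASE constant. A standalone sketch of that arithmetic; IO_PAGE_SHIFT is assumed to be 13 here (8 KB IO pages, the sparc64 value) and the helper names are the editor's own.

#include <assert.h>
#include <stdint.h>

#define IO_PAGE_SHIFT	13
#define IO_PAGE_SIZE	(1UL << IO_PAGE_SHIFT)
#define IO_PAGE_MASK	(~(IO_PAGE_SIZE - 1UL))
#define IO_PAGE_ALIGN(a) (((a) + IO_PAGE_SIZE - 1UL) & IO_PAGE_MASK)

/* Index of the IOPTE backing a DVMA (bus) address. */
static unsigned long iopte_index(uint32_t bus_addr, uint32_t map_base)
{
	return (unsigned long)(bus_addr - map_base) >> IO_PAGE_SHIFT;
}

/* Number of IO pages spanned by [bus_addr, bus_addr + sz). */
static unsigned long iopte_npages(unsigned long bus_addr, unsigned long sz)
{
	return (IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK))
		>> IO_PAGE_SHIFT;
}

int main(void)
{
	uint32_t map_base = 0xc0000000u;	/* MAP_BASE in the patch */

	assert(iopte_index(map_base, map_base) == 0);
	assert(iopte_index(map_base + 3 * IO_PAGE_SIZE, map_base) == 3);
	/* A 100-byte buffer straddling a page boundary needs two IOPTEs. */
	assert(iopte_npages(IO_PAGE_SIZE - 50, 100) == 2);
	return 0;
}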
@@ -419,7 +419,8 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
 
 int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
 {
-	struct sbus_iommu *iommu;
+	struct sbus_info *info;
+	struct iommu *iommu;
 	unsigned long flags, npages, iopte_protection;
 	iopte_t *base;
 	u32 dma_base;
@@ -436,7 +437,8 @@ int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, i
 		return 1;
 	}
 
-	iommu = sdev->bus->iommu;
+	info = sdev->bus->iommu;
+	iommu = &info->iommu;
 
 	if (unlikely(direction == SBUS_DMA_NONE))
 		BUG();
@@ -450,7 +452,7 @@ int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, i
 	if (unlikely(base == NULL))
 		BUG();
 
-	dma_base = MAP_BASE +
+	dma_base = iommu->page_table_map_base +
 		((base - iommu->page_table) << IO_PAGE_SHIFT);
 
 	/* Normalize DVMA addresses. */
@@ -479,7 +481,9 @@ int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, i
 
 void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
 {
-	struct sbus_iommu *iommu;
+	struct sbus_info *info;
+	struct iommu *iommu;
+	struct strbuf *strbuf;
 	iopte_t *base;
 	unsigned long flags, i, npages;
 	u32 bus_addr;
@@ -487,7 +491,9 @@ void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems
 	if (unlikely(direction == SBUS_DMA_NONE))
 		BUG();
 
-	iommu = sdev->bus->iommu;
+	info = sdev->bus->iommu;
+	iommu = &info->iommu;
+	strbuf = &info->strbuf;
 
 	bus_addr = sglist->dma_address & IO_PAGE_MASK;
 
@@ -499,29 +505,33 @@ void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems
 		  bus_addr) >> IO_PAGE_SHIFT;
 
 	base = iommu->page_table +
-		((bus_addr - MAP_BASE) >> IO_PAGE_SHIFT);
+		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	sbus_strbuf_flush(iommu, bus_addr, npages, direction);
+	sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction);
 	for (i = 0; i < npages; i++)
 		iopte_val(base[i]) = 0UL;
-	free_npages(iommu, bus_addr - MAP_BASE, npages);
+	free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
 void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, int direction)
 {
-	struct sbus_iommu *iommu;
+	struct sbus_info *info;
+	struct iommu *iommu;
+	struct strbuf *strbuf;
 	unsigned long flags, npages;
 
-	iommu = sdev->bus->iommu;
+	info = sdev->bus->iommu;
+	iommu = &info->iommu;
+	strbuf = &info->strbuf;
 
 	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
 	npages >>= IO_PAGE_SHIFT;
 	bus_addr &= IO_PAGE_MASK;
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	sbus_strbuf_flush(iommu, bus_addr, npages, direction);
+	sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -531,11 +541,15 @@ void sbus_dma_sync_single_for_device(struct sbus_dev *sdev, dma_addr_t base, siz
 
 void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
 {
-	struct sbus_iommu *iommu;
+	struct sbus_info *info;
+	struct iommu *iommu;
+	struct strbuf *strbuf;
 	unsigned long flags, npages, i;
 	u32 bus_addr;
 
-	iommu = sdev->bus->iommu;
+	info = sdev->bus->iommu;
+	iommu = &info->iommu;
+	strbuf = &info->strbuf;
 
 	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
 	for (i = 0; i < nelems; i++) {
@@ -547,7 +561,7 @@ void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sglist,
 			  - bus_addr) >> IO_PAGE_SHIFT;
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	sbus_strbuf_flush(iommu, bus_addr, npages, direction);
+	sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -558,12 +572,13 @@ void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev, struct scatterlist *sg,
 /* Enable 64-bit DVMA mode for the given device. */
 void sbus_set_sbus64(struct sbus_dev *sdev, int bursts)
 {
-	struct sbus_iommu *iommu = sdev->bus->iommu;
+	struct sbus_info *info = sdev->bus->iommu;
+	struct iommu *iommu = &info->iommu;
 	int slot = sdev->slot;
 	unsigned long cfg_reg;
 	u64 val;
 
-	cfg_reg = iommu->sbus_control_reg;
+	cfg_reg = iommu->write_complete_reg;
 	switch (slot) {
 	case 0:
 		cfg_reg += 0x20UL;
@@ -698,8 +713,9 @@ static unsigned long sysio_imap_to_iclr(unsigned long imap)
 unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
 {
 	struct sbus_bus *sbus = (struct sbus_bus *)buscookie;
-	struct sbus_iommu *iommu = sbus->iommu;
-	unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
+	struct sbus_info *info = sbus->iommu;
+	struct iommu *iommu = &info->iommu;
+	unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
 	unsigned long imap, iclr;
 	int sbus_level = 0;
 
@@ -760,8 +776,9 @@ unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
 static irqreturn_t sysio_ue_handler(int irq, void *dev_id)
 {
 	struct sbus_bus *sbus = dev_id;
-	struct sbus_iommu *iommu = sbus->iommu;
-	unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
+	struct sbus_info *info = sbus->iommu;
+	struct iommu *iommu = &info->iommu;
+	unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
 	unsigned long afsr_reg, afar_reg;
 	unsigned long afsr, afar, error_bits;
 	int reported;
@@ -832,8 +849,9 @@ static irqreturn_t sysio_ue_handler(int irq, void *dev_id)
 static irqreturn_t sysio_ce_handler(int irq, void *dev_id)
 {
 	struct sbus_bus *sbus = dev_id;
-	struct sbus_iommu *iommu = sbus->iommu;
-	unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
+	struct sbus_info *info = sbus->iommu;
+	struct iommu *iommu = &info->iommu;
+	unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
 	unsigned long afsr_reg, afar_reg;
 	unsigned long afsr, afar, error_bits;
 	int reported;
@@ -909,12 +927,13 @@ static irqreturn_t sysio_ce_handler(int irq, void *dev_id)
 static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id)
 {
 	struct sbus_bus *sbus = dev_id;
-	struct sbus_iommu *iommu = sbus->iommu;
+	struct sbus_info *info = sbus->iommu;
+	struct iommu *iommu = &info->iommu;
 	unsigned long afsr_reg, afar_reg, reg_base;
 	unsigned long afsr, afar, error_bits;
 	int reported;
 
-	reg_base = iommu->sbus_control_reg - 0x2000UL;
+	reg_base = iommu->write_complete_reg - 0x2000UL;
 	afsr_reg = reg_base + SYSIO_SBUS_AFSR;
 	afar_reg = reg_base + SYSIO_SBUS_AFAR;
 
@@ -976,8 +995,9 @@ static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id)
 
 static void __init sysio_register_error_handlers(struct sbus_bus *sbus)
 {
-	struct sbus_iommu *iommu = sbus->iommu;
-	unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
+	struct sbus_info *info = sbus->iommu;
+	struct iommu *iommu = &info->iommu;
+	unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
 	unsigned int irq;
 	u64 control;
 
@@ -1011,9 +1031,9 @@ static void __init sysio_register_error_handlers(struct sbus_bus *sbus)
 			     SYSIO_ECNTRL_CEEN),
 		   reg_base + ECC_CONTROL);
 
-	control = upa_readq(iommu->sbus_control_reg);
+	control = upa_readq(iommu->write_complete_reg);
 	control |= 0x100UL; /* SBUS Error Interrupt Enable */
-	upa_writeq(control, iommu->sbus_control_reg);
+	upa_writeq(control, iommu->write_complete_reg);
 }
 
 /* Boot time initialization. */
@@ -1021,8 +1041,10 @@ static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
 {
 	const struct linux_prom64_registers *pr;
 	struct device_node *dp;
-	struct sbus_iommu *iommu;
-	unsigned long regs;
+	struct sbus_info *info;
+	struct iommu *iommu;
+	struct strbuf *strbuf;
+	unsigned long regs, reg_base;
 	u64 control;
 	int i;
 
@@ -1037,33 +1059,42 @@ static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
 	}
 	regs = pr->phys_addr;
 
-	iommu = kmalloc(sizeof(*iommu) + SMP_CACHE_BYTES, GFP_ATOMIC);
-	if (iommu == NULL) {
-		prom_printf("sbus_iommu_init: Fatal error, kmalloc(iommu) failed\n");
+	info = kzalloc(sizeof(*info), GFP_ATOMIC);
+	if (info == NULL) {
+		prom_printf("sbus_iommu_init: Fatal error, "
+			    "kmalloc(info) failed\n");
 		prom_halt();
 	}
 
-	/* Align on E$ line boundary. */
-	iommu = (struct sbus_iommu *)
-		(((unsigned long)iommu + (SMP_CACHE_BYTES - 1UL)) &
-		 ~(SMP_CACHE_BYTES - 1UL));
+	iommu = &info->iommu;
+	strbuf = &info->strbuf;
 
-	memset(iommu, 0, sizeof(*iommu));
+	reg_base = regs + SYSIO_IOMMUREG_BASE;
+	iommu->iommu_control = reg_base + IOMMU_CONTROL;
+	iommu->iommu_tsbbase = reg_base + IOMMU_TSBBASE;
+	iommu->iommu_flush = reg_base + IOMMU_FLUSH;
 
-	/* Setup spinlock. */
-	spin_lock_init(&iommu->lock);
+	reg_base = regs + SYSIO_STRBUFREG_BASE;
+	strbuf->strbuf_control = reg_base + STRBUF_CONTROL;
+	strbuf->strbuf_pflush = reg_base + STRBUF_PFLUSH;
+	strbuf->strbuf_fsync = reg_base + STRBUF_FSYNC;
+
+	strbuf->strbuf_enabled = 1;
 
-	/* Init register offsets. */
-	iommu->iommu_regs = regs + SYSIO_IOMMUREG_BASE;
-	iommu->strbuf_regs = regs + SYSIO_STRBUFREG_BASE;
+	strbuf->strbuf_flushflag = (volatile unsigned long *)
+		((((unsigned long)&strbuf->__flushflag_buf[0])
+		  + 63UL)
+		 & ~63UL);
+	strbuf->strbuf_flushflag_pa = (unsigned long)
+		__pa(strbuf->strbuf_flushflag);
 
 	/* The SYSIO SBUS control register is used for dummy reads
 	 * in order to ensure write completion.
 	 */
-	iommu->sbus_control_reg = regs + 0x2000UL;
+	iommu->write_complete_reg = regs + 0x2000UL;
 
 	/* Link into SYSIO software state. */
-	sbus->iommu = iommu;
+	sbus->iommu = info;
 
 	printk("SYSIO: UPA portID %x, at %016lx\n",
 	       sbus->portid, regs);
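[Editor's note] Instead of cache-aligning the whole kmalloc'ed blob as the old code did, the flush flag now gets a 64-byte-aligned slot carved out of the embedded __flushflag_buf, and its physical address is precomputed once so the flush path no longer has to call __pa on every sync. The round-up is the usual (x + 63) & ~63 seen in the hunk above; a standalone check of that arithmetic, with an editor-named helper:

#include <assert.h>

static unsigned long align_up_64(unsigned long addr)
{
	return (addr + 63UL) & ~63UL;
}

int main(void)
{
	assert(align_up_64(0x1000) == 0x1000);	/* already aligned */
	assert(align_up_64(0x1001) == 0x1040);	/* rounds up to the next 64-byte boundary */
	assert(align_up_64(0x103f) == 0x1040);
	return 0;
}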
@@ -1071,40 +1102,44 @@ static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
 	/* Setup for TSB_SIZE=7, TBW_SIZE=0, MMU_DE=1, MMU_EN=1 */
 	sbus_iommu_table_init(iommu, IO_TSB_SIZE);
 
-	control = upa_readq(iommu->iommu_regs + IOMMU_CONTROL);
+	control = upa_readq(iommu->iommu_control);
 	control = ((7UL << 16UL) |
 		   (0UL << 2UL) |
 		   (1UL << 1UL) |
 		   (1UL << 0UL));
-	upa_writeq(control, iommu->iommu_regs + IOMMU_CONTROL);
+	upa_writeq(control, iommu->iommu_control);
 
 	/* Clean out any cruft in the IOMMU using
 	 * diagnostic accesses.
 	 */
 	for (i = 0; i < 16; i++) {
-		unsigned long dram = iommu->iommu_regs + IOMMU_DRAMDIAG;
-		unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;
+		unsigned long dram, tag;
+
+		dram = iommu->iommu_control + (IOMMU_DRAMDIAG - IOMMU_CONTROL);
+		tag = iommu->iommu_control + (IOMMU_TAGDIAG - IOMMU_CONTROL);
 
 		dram += (unsigned long)i * 8UL;
 		tag += (unsigned long)i * 8UL;
 		upa_writeq(0, dram);
 		upa_writeq(0, tag);
 	}
-	upa_readq(iommu->sbus_control_reg);
+	upa_readq(iommu->write_complete_reg);
 
 	/* Give the TSB to SYSIO. */
-	upa_writeq(__pa(iommu->page_table), iommu->iommu_regs + IOMMU_TSBBASE);
+	upa_writeq(__pa(iommu->page_table), iommu->iommu_tsbbase);
 
 	/* Setup streaming buffer, DE=1 SB_EN=1 */
 	control = (1UL << 1UL) | (1UL << 0UL);
-	upa_writeq(control, iommu->strbuf_regs + STRBUF_CONTROL);
+	upa_writeq(control, strbuf->strbuf_control);
 
 	/* Clear out the tags using diagnostics. */
 	for (i = 0; i < 16; i++) {
 		unsigned long ptag, ltag;
 
-		ptag = iommu->strbuf_regs + STRBUF_PTAGDIAG;
-		ltag = iommu->strbuf_regs + STRBUF_LTAGDIAG;
+		ptag = strbuf->strbuf_control +
+			(STRBUF_PTAGDIAG - STRBUF_CONTROL);
+		ltag = strbuf->strbuf_control +
+			(STRBUF_LTAGDIAG - STRBUF_CONTROL);
 		ptag += (unsigned long)i * 8UL;
 		ltag += (unsigned long)i * 8UL;
 
@@ -1113,9 +1148,9 @@ static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
 	}
 
 	/* Enable DVMA arbitration for all devices/slots. */
-	control = upa_readq(iommu->sbus_control_reg);
+	control = upa_readq(iommu->write_complete_reg);
 	control |= 0x3fUL;
-	upa_writeq(control, iommu->sbus_control_reg);
+	upa_writeq(control, iommu->write_complete_reg);
 
 	/* Now some Xfire specific grot... */
 	if (this_is_starfire)
