Diffstat (limited to 'arch/sparc/kernel/ioport.c')
-rw-r--r--  arch/sparc/kernel/ioport.c  43
1 file changed, 19 insertions(+), 24 deletions(-)
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index f6158c4a3995..aa73b3b71e85 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -300,11 +300,10 @@ void __init sbus_fill_device_irq(struct sbus_dev *sdev)
  * Allocate a chunk of memory suitable for DMA.
  * Typically devices use them for control blocks.
  * CPU may access them without any explicit flushing.
- *
- * XXX Some clever people know that sdev is not used and supply NULL. Watch.
  */
-void *sbus_alloc_consistent(struct sbus_dev *sdev, long len, u32 *dma_addrp)
+void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp)
 {
+	struct of_device *op = to_of_device(dev);
 	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
 	unsigned long va;
 	struct resource *res;
@@ -341,10 +340,7 @@ void *sbus_alloc_consistent(struct sbus_dev *sdev, long len, u32 *dma_addrp)
 	if (mmu_map_dma_area(dma_addrp, va, res->start, len_total) != 0)
 		goto err_noiommu;
 
-	/* Set the resource name, if known. */
-	if (sdev) {
-		res->name = sdev->prom_name;
-	}
+	res->name = op->node->name;
 
 	return (void *)(unsigned long)res->start;
 
@@ -358,7 +354,7 @@ err_nopages:
 	return NULL;
 }
 
-void sbus_free_consistent(struct sbus_dev *sdev, long n, void *p, u32 ba)
+void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
 {
 	struct resource *res;
 	struct page *pgv;
@@ -396,8 +392,10 @@ void sbus_free_consistent(struct sbus_dev *sdev, long n, void *p, u32 ba)
  * CPU view of this memory may be inconsistent with
  * a device view and explicit flushing is necessary.
  */
-dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *va, size_t len, int direction)
+dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int direction)
 {
+	struct sbus_dev *sdev = to_sbus_device(dev);
+
 	/* XXX why are some lengths signed, others unsigned? */
 	if (len <= 0) {
 		return 0;
@@ -409,13 +407,16 @@ dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *va, size_t len, int dire
 	return mmu_get_scsi_one(va, len, sdev->bus);
 }
 
-void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t ba, size_t n, int direction)
+void sbus_unmap_single(struct device *dev, dma_addr_t ba, size_t n, int direction)
 {
+	struct sbus_dev *sdev = to_sbus_device(dev);
 	mmu_release_scsi_one(ba, n, sdev->bus);
 }
 
-int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
+int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
 {
+	struct sbus_dev *sdev = to_sbus_device(dev);
+
 	mmu_get_scsi_sgl(sg, n, sdev->bus);
 
 	/*
@@ -425,16 +426,19 @@ int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direct
 	return n;
 }
 
-void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
+void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
 {
+	struct sbus_dev *sdev = to_sbus_device(dev);
+
 	mmu_release_scsi_sgl(sg, n, sdev->bus);
 }
 
 /*
  */
-void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t ba, size_t size, int direction)
+void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba, size_t size, int direction)
 {
 #if 0
+	struct sbus_dev *sdev = to_sbus_device(dev);
 	unsigned long va;
 	struct resource *res;
 
@@ -452,9 +456,10 @@ void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t ba, size_t s
 #endif
 }
 
-void sbus_dma_sync_single_for_device(struct sbus_dev *sdev, dma_addr_t ba, size_t size, int direction)
+void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba, size_t size, int direction)
 {
 #if 0
+	struct sbus_dev *sdev = to_sbus_device(dev);
 	unsigned long va;
 	struct resource *res;
 
@@ -472,16 +477,6 @@ void sbus_dma_sync_single_for_device(struct sbus_dev *sdev, dma_addr_t ba, size_
 #endif
 }
 
-void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
-{
-	printk("sbus_dma_sync_sg_for_cpu: not implemented yet\n");
-}
-
-void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
-{
-	printk("sbus_dma_sync_sg_for_device: not implemented yet\n");
-}
-
 /* Support code for sbus_init(). */
 /*
  * XXX This functions appears to be a distorted version of
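For readers following the API change: the diff converts the SBUS DMA helpers from taking a struct sbus_dev to taking the generic struct device, resolving the device-specific state internally via to_of_device()/to_sbus_device(). Below is a minimal sketch of how a driver call site would adapt, assuming an of_device probe routine of the same era; the function name mydrv_probe, the buffer size, and the error handling are illustrative assumptions, not taken from this diff.

/*
 * Sketch only: a hypothetical caller of the new struct device based
 * prototypes shown above. Names (mydrv_probe, op, dvma, cpu_buf) are
 * illustrative assumptions.
 */
#include <linux/errno.h>
#include <linux/of_device.h>
#include <asm/sbus.h>

static int __devinit mydrv_probe(struct of_device *op,
				 const struct of_device_id *match)
{
	u32 dvma;
	void *cpu_buf;

	/* Old callers passed a struct sbus_dev *; now the generic
	 * struct device embedded in the of_device is passed instead. */
	cpu_buf = sbus_alloc_consistent(&op->dev, PAGE_SIZE, &dvma);
	if (!cpu_buf)
		return -ENOMEM;

	/* ... program the device with dvma, access cpu_buf from the CPU ... */

	sbus_free_consistent(&op->dev, PAGE_SIZE, cpu_buf, dvma);
	return 0;
}

The same pattern applies to the streaming helpers (sbus_map_single, sbus_map_sg and their unmap counterparts): the caller hands in &op->dev, and the implementation recovers the bus information it needs from the device pointer.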