Diffstat (limited to 'arch/tile/kernel/pci-dma.c')
 arch/tile/kernel/pci-dma.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
index 24e0f8c21f2f..569bb6dd154a 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -329,7 +329,7 @@ tile_dma_supported(struct device *dev, u64 mask)
 	return 1;
 }
 
-static struct dma_map_ops tile_default_dma_map_ops = {
+static const struct dma_map_ops tile_default_dma_map_ops = {
 	.alloc = tile_dma_alloc_coherent,
 	.free = tile_dma_free_coherent,
 	.map_page = tile_dma_map_page,
@@ -344,7 +344,7 @@ static struct dma_map_ops tile_default_dma_map_ops = {
 	.dma_supported = tile_dma_supported
 };
 
-struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops;
+const struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops;
 EXPORT_SYMBOL(tile_dma_map_ops);
 
 /* Generic PCI DMA mapping functions */
@@ -516,7 +516,7 @@ tile_pci_dma_supported(struct device *dev, u64 mask)
 	return 1;
 }
 
-static struct dma_map_ops tile_pci_default_dma_map_ops = {
+static const struct dma_map_ops tile_pci_default_dma_map_ops = {
 	.alloc = tile_pci_dma_alloc_coherent,
 	.free = tile_pci_dma_free_coherent,
 	.map_page = tile_pci_dma_map_page,
@@ -531,7 +531,7 @@ static struct dma_map_ops tile_pci_default_dma_map_ops = {
 	.dma_supported = tile_pci_dma_supported
 };
 
-struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops;
+const struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops;
 EXPORT_SYMBOL(gx_pci_dma_map_ops);
 
 /* PCI DMA mapping functions for legacy PCI devices */
@@ -552,7 +552,7 @@ static void tile_swiotlb_free_coherent(struct device *dev, size_t size,
 	swiotlb_free_coherent(dev, size, vaddr, dma_addr);
 }
 
-static struct dma_map_ops pci_swiotlb_dma_ops = {
+static const struct dma_map_ops pci_swiotlb_dma_ops = {
 	.alloc = tile_swiotlb_alloc_coherent,
 	.free = tile_swiotlb_free_coherent,
 	.map_page = swiotlb_map_page,
@@ -567,7 +567,7 @@ static struct dma_map_ops pci_swiotlb_dma_ops = {
 	.mapping_error = swiotlb_dma_mapping_error,
 };
 
-static struct dma_map_ops pci_hybrid_dma_ops = {
+static const struct dma_map_ops pci_hybrid_dma_ops = {
 	.alloc = tile_swiotlb_alloc_coherent,
 	.free = tile_swiotlb_free_coherent,
 	.map_page = tile_pci_dma_map_page,
@@ -582,18 +582,18 @@ static struct dma_map_ops pci_hybrid_dma_ops = {
 	.dma_supported = tile_pci_dma_supported
 };
 
-struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops;
-struct dma_map_ops *gx_hybrid_pci_dma_map_ops = &pci_hybrid_dma_ops;
+const struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops;
+const struct dma_map_ops *gx_hybrid_pci_dma_map_ops = &pci_hybrid_dma_ops;
 #else
-struct dma_map_ops *gx_legacy_pci_dma_map_ops;
-struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
+const struct dma_map_ops *gx_legacy_pci_dma_map_ops;
+const struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
 #endif
 EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops);
 EXPORT_SYMBOL(gx_hybrid_pci_dma_map_ops);
 
 int dma_set_mask(struct device *dev, u64 mask)
 {
-	struct dma_map_ops *dma_ops = get_dma_ops(dev);
+	const struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
 	/*
 	 * For PCI devices with 64-bit DMA addressing capability, promote
@@ -623,7 +623,7 @@ EXPORT_SYMBOL(dma_set_mask);
 #ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
 int dma_set_coherent_mask(struct device *dev, u64 mask)
 {
-	struct dma_map_ops *dma_ops = get_dma_ops(dev);
+	const struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
 	/*
 	 * For PCI devices with 64-bit DMA addressing capability, promote
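
Taken together, every hunk in this diff is the same mechanical change: struct dma_map_ops becomes const struct dma_map_ops at each table definition, each exported pointer, and each local obtained from get_dma_ops(). A minimal standalone sketch of the pattern being applied, using a hypothetical frob_ops table rather than anything from this file:

/* Hypothetical ops table, for illustration only; not part of the commit. */
struct frob_ops {
	int (*start)(void);
	void (*stop)(void);
};

static int frob_default_start(void) { return 0; }
static void frob_default_stop(void) { }

/*
 * Declaring the table const lets the compiler place it in read-only
 * data (.rodata), so its function pointers can no longer be
 * overwritten at run time.
 */
static const struct frob_ops frob_default_ops = {
	.start = frob_default_start,
	.stop  = frob_default_stop,
};

/*
 * The published pointer itself stays reassignable (it can be switched
 * to a different table at init time), but what it points at is
 * read-only: hence "const struct frob_ops *", not
 * "struct frob_ops *const".
 */
const struct frob_ops *frob_ops = &frob_default_ops;

This compiles as an ordinary C translation unit. The same shape is what lets tile_dma_map_ops and the gx_*_dma_map_ops pointers above keep being assigned while the ops tables they point at move into read-only data.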