author     David Daney <ddaney@caviumnetworks.com>      2008-12-11 18:33:36 -0500
committer  Ralf Baechle <ralf@linux-mips.org>           2009-01-11 04:57:24 -0500
commit     843aef4930b9953c9ca624a990b201440304b56f (patch)
tree       9debbaa7d9caa8c73db65ea2674e7ed26e285893 /arch/mips/include/asm
parent     ec454d8c4fee3b2feb87e594d806c0987c5dd538 (diff)
MIPS: Adjust the dma-common.c platform hooks.
We add a dev parameter to plat_unmap_dma_mem(), and hooks for
plat_dma_supported() and plat_extra_sync_for_device(), which should be
no-op changes for all existing targets.
Signed-off-by: David Daney <ddaney@caviumnetworks.com>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
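To make the intent of the new hooks concrete, below is a rough sketch of how the
common MIPS DMA code (arch/mips/mm/dma-default.c) is expected to call them. This
is an illustration only, not the verbatim in-tree implementation: the function
bodies and the elided cache-maintenance steps are assumptions for the example;
only the plat_*() hook names and signatures come from this patch.

	/*
	 * Illustrative sketch of the call sites in arch/mips/mm/dma-default.c.
	 * Everything except the plat_*() hooks is simplified/assumed.
	 */
	#include <linux/device.h>
	#include <linux/dma-mapping.h>
	#include <dma-coherence.h>	/* per-platform plat_*() hook definitions */

	int dma_supported(struct device *dev, u64 mask)
	{
		/* The platform decides which DMA masks it can satisfy. */
		return plat_dma_supported(dev, mask);
	}

	int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
	{
		return plat_dma_mapping_error(dev, dma_addr);
	}

	void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
			      enum dma_data_direction direction)
	{
		/* ... cache maintenance for non-coherent devices elided ... */

		/*
		 * The device pointer is now passed through, so platforms can
		 * key off it; Jazz, for instance, frees its VDMA mapping in
		 * this hook.
		 */
		plat_unmap_dma_mem(dev, dma_addr);
	}

	void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
					size_t size, enum dma_data_direction direction)
	{
		/* ... per-direction cache writeback/invalidate elided ... */

		/* Extra platform-specific ordering or flushing; no-op by default. */
		plat_extra_sync_for_device(dev);
	}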
Diffstat (limited to 'arch/mips/include/asm')
-rw-r--r--   arch/mips/include/asm/mach-generic/dma-coherence.h | 26
-rw-r--r--   arch/mips/include/asm/mach-ip27/dma-coherence.h    | 26
-rw-r--r--   arch/mips/include/asm/mach-ip32/dma-coherence.h    | 26
-rw-r--r--   arch/mips/include/asm/mach-jazz/dma-coherence.h    | 26
-rw-r--r--   arch/mips/include/asm/mach-lemote/dma-coherence.h  | 26
5 files changed, 125 insertions(+), 5 deletions(-)
diff --git a/arch/mips/include/asm/mach-generic/dma-coherence.h b/arch/mips/include/asm/mach-generic/dma-coherence.h
index 76e04e7feb84..36c611b6c597 100644
--- a/arch/mips/include/asm/mach-generic/dma-coherence.h
+++ b/arch/mips/include/asm/mach-generic/dma-coherence.h
@@ -28,10 +28,34 @@ static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
 	return dma_addr;
 }
 
-static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
+static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
 {
 }
 
+static inline int plat_dma_supported(struct device *dev, u64 mask)
+{
+	/*
+	 * we fall back to GFP_DMA when the mask isn't all 1s,
+	 * so we can't guarantee allocations that must be
+	 * within a tighter range than GFP_DMA..
+	 */
+	if (mask < DMA_BIT_MASK(24))
+		return 0;
+
+	return 1;
+}
+
+static inline void plat_extra_sync_for_device(struct device *dev)
+{
+	return;
+}
+
+static inline int plat_dma_mapping_error(struct device *dev,
+	dma_addr_t dma_addr)
+{
+	return 0;
+}
+
 static inline int plat_device_is_coherent(struct device *dev)
 {
 #ifdef CONFIG_DMA_COHERENT
diff --git a/arch/mips/include/asm/mach-ip27/dma-coherence.h b/arch/mips/include/asm/mach-ip27/dma-coherence.h
index ed7e6222dc15..4c21bfca10c3 100644
--- a/arch/mips/include/asm/mach-ip27/dma-coherence.h
+++ b/arch/mips/include/asm/mach-ip27/dma-coherence.h
@@ -38,10 +38,34 @@ static unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
 	return dma_addr & ~(0xffUL << 56);
 }
 
-static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
+static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
 {
 }
 
+static inline int plat_dma_supported(struct device *dev, u64 mask)
+{
+	/*
+	 * we fall back to GFP_DMA when the mask isn't all 1s,
+	 * so we can't guarantee allocations that must be
+	 * within a tighter range than GFP_DMA..
+	 */
+	if (mask < DMA_BIT_MASK(24))
+		return 0;
+
+	return 1;
+}
+
+static inline void plat_extra_sync_for_device(struct device *dev)
+{
+	return;
+}
+
+static inline int plat_dma_mapping_error(struct device *dev,
+	dma_addr_t dma_addr)
+{
+	return 0;
+}
+
 static inline int plat_device_is_coherent(struct device *dev)
 {
 	return 1;		/* IP27 non-cohernet mode is unsupported */
diff --git a/arch/mips/include/asm/mach-ip32/dma-coherence.h b/arch/mips/include/asm/mach-ip32/dma-coherence.h
index a5511ebb2d53..7ae40f4b1c80 100644
--- a/arch/mips/include/asm/mach-ip32/dma-coherence.h
+++ b/arch/mips/include/asm/mach-ip32/dma-coherence.h
@@ -60,10 +60,34 @@ static unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
 	return addr;
 }
 
-static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
+static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
 {
 }
 
+static inline int plat_dma_supported(struct device *dev, u64 mask)
+{
+	/*
+	 * we fall back to GFP_DMA when the mask isn't all 1s,
+	 * so we can't guarantee allocations that must be
+	 * within a tighter range than GFP_DMA..
+	 */
+	if (mask < DMA_BIT_MASK(24))
+		return 0;
+
+	return 1;
+}
+
+static inline void plat_extra_sync_for_device(struct device *dev)
+{
+	return;
+}
+
+static inline int plat_dma_mapping_error(struct device *dev,
+	dma_addr_t dma_addr)
+{
+	return 0;
+}
+
 static inline int plat_device_is_coherent(struct device *dev)
 {
 	return 0;		/* IP32 is non-cohernet */
diff --git a/arch/mips/include/asm/mach-jazz/dma-coherence.h b/arch/mips/include/asm/mach-jazz/dma-coherence.h
index d66979a124a8..1c7cd27efa7b 100644
--- a/arch/mips/include/asm/mach-jazz/dma-coherence.h
+++ b/arch/mips/include/asm/mach-jazz/dma-coherence.h
@@ -27,11 +27,35 @@ static unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
 	return vdma_log2phys(dma_addr);
 }
 
-static void plat_unmap_dma_mem(dma_addr_t dma_addr)
+static void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
 {
 	vdma_free(dma_addr);
 }
 
+static inline int plat_dma_supported(struct device *dev, u64 mask)
+{
+	/*
+	 * we fall back to GFP_DMA when the mask isn't all 1s,
+	 * so we can't guarantee allocations that must be
+	 * within a tighter range than GFP_DMA..
+	 */
+	if (mask < DMA_BIT_MASK(24))
+		return 0;
+
+	return 1;
+}
+
+static inline void plat_extra_sync_for_device(struct device *dev)
+{
+	return;
+}
+
+static inline int plat_dma_mapping_error(struct device *dev,
+	dma_addr_t dma_addr)
+{
+	return 0;
+}
+
 static inline int plat_device_is_coherent(struct device *dev)
 {
 	return 0;
diff --git a/arch/mips/include/asm/mach-lemote/dma-coherence.h b/arch/mips/include/asm/mach-lemote/dma-coherence.h
index 7e914777ebc4..38fad7dfe7da 100644
--- a/arch/mips/include/asm/mach-lemote/dma-coherence.h
+++ b/arch/mips/include/asm/mach-lemote/dma-coherence.h
@@ -30,10 +30,34 @@ static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
 	return dma_addr & 0x7fffffff;
 }
 
-static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
+static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
 {
 }
 
+static inline int plat_dma_supported(struct device *dev, u64 mask)
+{
+	/*
+	 * we fall back to GFP_DMA when the mask isn't all 1s,
+	 * so we can't guarantee allocations that must be
+	 * within a tighter range than GFP_DMA..
+	 */
+	if (mask < DMA_BIT_MASK(24))
+		return 0;
+
+	return 1;
+}
+
+static inline void plat_extra_sync_for_device(struct device *dev)
+{
+	return;
+}
+
+static inline int plat_dma_mapping_error(struct device *dev,
+	dma_addr_t dma_addr)
+{
+	return 0;
+}
+
 static inline int plat_device_is_coherent(struct device *dev)
 {
 	return 0;