author     Jon Mason <jdmason@us.ibm.com>            2006-06-26 07:58:08 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>     2006-06-26 13:48:18 -0400
commit     a3c042a0f022dade8e02bf6c9be5d2379d7e133c (patch)
tree       0f690d13444c7c77c90da64ea9fb2373a21f7a4f /include/asm-x86_64
parent     8d4f6b93a4aaa6b56b600cd1165c971f4395e4b3 (diff)
[PATCH] x86_64: Calgary IOMMU - move valid_dma_direction into the callers
Based on Andi Kleen's comments on the original Calgary patch, move
valid_dma_direction into the calling functions.
Signed-off-by: Muli Ben-Yehuda <muli@il.ibm.com>
Signed-off-by: Jon Mason <jdmason@us.ibm.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
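For readers skimming the diff below: the predicate accepts exactly the three data-transfer directions, so a caller that passes DMA_NONE or an uninitialized value now dies in the inline wrapper via BUG_ON() instead of reaching the per-IOMMU dma_ops backend. Keeping the check in the header wrappers means every backend (nommu, swiotlb, GART, and the new Calgary code) inherits it without duplicating the test. Below is a minimal user-space sketch of the same pattern; the map_single() wrapper, the enum values, and the use of assert() in place of BUG_ON() are illustrative stand-ins, not the kernel code:

    #include <assert.h>
    #include <stdio.h>

    /* Stand-in values matching the kernel's enum dma_data_direction. */
    enum {
            DMA_BIDIRECTIONAL = 0,
            DMA_TO_DEVICE     = 1,
            DMA_FROM_DEVICE   = 2,
            DMA_NONE          = 3,
    };

    /* The predicate this patch adds to dma-mapping.h: the three data
     * directions pass; DMA_NONE or any out-of-range value is rejected. */
    static inline int valid_dma_direction(int dma_direction)
    {
            return ((dma_direction == DMA_BIDIRECTIONAL) ||
                    (dma_direction == DMA_TO_DEVICE) ||
                    (dma_direction == DMA_FROM_DEVICE));
    }

    /* Hypothetical wrapper mirroring dma_map_single() after this patch:
     * validate in the caller-facing helper, then dispatch to the backend. */
    static unsigned long map_single(void *ptr, unsigned long size, int direction)
    {
            assert(valid_dma_direction(direction)); /* kernel: BUG_ON(!...) */
            (void)size;
            return (unsigned long)ptr; /* placeholder for dma_ops->map_single() */
    }

    int main(void)
    {
            char buf[64];

            map_single(buf, sizeof(buf), DMA_TO_DEVICE); /* passes the check */
            /* map_single(buf, sizeof(buf), DMA_NONE); would trip the assert */
            printf("direction check passed\n");
            return 0;
    }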
Diffstat (limited to 'include/asm-x86_64')
-rw-r--r--  include/asm-x86_64/dma-mapping.h | 17 +++++++++++++++++
1 file changed, 17 insertions(+), 0 deletions(-)
diff --git a/include/asm-x86_64/dma-mapping.h b/include/asm-x86_64/dma-mapping.h
index 498f66df36b9..b6da83dcc7a6 100644
--- a/include/asm-x86_64/dma-mapping.h
+++ b/include/asm-x86_64/dma-mapping.h
@@ -55,6 +55,13 @@ extern dma_addr_t bad_dma_address;
 extern struct dma_mapping_ops* dma_ops;
 extern int iommu_merge;
 
+static inline int valid_dma_direction(int dma_direction)
+{
+        return ((dma_direction == DMA_BIDIRECTIONAL) ||
+                (dma_direction == DMA_TO_DEVICE) ||
+                (dma_direction == DMA_FROM_DEVICE));
+}
+
 static inline int dma_mapping_error(dma_addr_t dma_addr)
 {
         if (dma_ops->mapping_error)
@@ -72,6 +79,7 @@ static inline dma_addr_t
 dma_map_single(struct device *hwdev, void *ptr, size_t size,
                int direction)
 {
+        BUG_ON(!valid_dma_direction(direction));
         return dma_ops->map_single(hwdev, ptr, size, direction);
 }
 
@@ -79,6 +87,7 @@ static inline void
 dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
                  int direction)
 {
+        BUG_ON(!valid_dma_direction(direction));
         dma_ops->unmap_single(dev, addr, size, direction);
 }
 
@@ -91,6 +100,7 @@ static inline void
 dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                         size_t size, int direction)
 {
+        BUG_ON(!valid_dma_direction(direction));
         if (dma_ops->sync_single_for_cpu)
                 dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
                                              direction);
@@ -101,6 +111,7 @@ static inline void
 dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
                            size_t size, int direction)
 {
+        BUG_ON(!valid_dma_direction(direction));
         if (dma_ops->sync_single_for_device)
                 dma_ops->sync_single_for_device(hwdev, dma_handle, size,
                                                 direction);
@@ -111,6 +122,7 @@ static inline void
 dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                               unsigned long offset, size_t size, int direction)
 {
+        BUG_ON(!valid_dma_direction(direction));
         if (dma_ops->sync_single_range_for_cpu) {
                 dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, size, direction);
         }
@@ -122,6 +134,7 @@ static inline void
 dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
                                  unsigned long offset, size_t size, int direction)
 {
+        BUG_ON(!valid_dma_direction(direction));
         if (dma_ops->sync_single_range_for_device)
                 dma_ops->sync_single_range_for_device(hwdev, dma_handle,
                                                       offset, size, direction);
@@ -133,6 +146,7 @@ static inline void
 dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                     int nelems, int direction)
 {
+        BUG_ON(!valid_dma_direction(direction));
         if (dma_ops->sync_sg_for_cpu)
                 dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
         flush_write_buffers();
@@ -142,6 +156,7 @@ static inline void
 dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                        int nelems, int direction)
 {
+        BUG_ON(!valid_dma_direction(direction));
         if (dma_ops->sync_sg_for_device) {
                 dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
         }
@@ -152,6 +167,7 @@ dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 static inline int
 dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction)
 {
+        BUG_ON(!valid_dma_direction(direction));
         return dma_ops->map_sg(hwdev, sg, nents, direction);
 }
 
@@ -159,6 +175,7 @@ static inline void
 dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
              int direction)
 {
+        BUG_ON(!valid_dma_direction(direction));
         dma_ops->unmap_sg(hwdev, sg, nents, direction);
 }
 