Diffstat (limited to 'drivers/message/i2o/memory.c')
-rw-r--r--   drivers/message/i2o/memory.c   313
1 file changed, 313 insertions, 0 deletions
diff --git a/drivers/message/i2o/memory.c b/drivers/message/i2o/memory.c
new file mode 100644
index 000000000000..f5cc95c564e2
--- /dev/null
+++ b/drivers/message/i2o/memory.c
@@ -0,0 +1,313 @@
/*
 * Functions to handle I2O memory
 *
 * Pulled from the inlines in i2o headers and uninlined
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/module.h>
#include <linux/i2o.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/slab.h>
#include "core.h"

/* Protects our 32/64bit mask switching */
static DEFINE_MUTEX(mem_lock);

/**
 * i2o_sg_tablesize - Calculate the maximum number of elements in a SGL
 * @c: I2O controller for which the calculation should be done
 * @body_size: maximum body size used for message in 32-bit words.
 *
 * Return the maximum number of SG elements in a SG list.
 */
u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size)
{
	i2o_status_block *sb = c->status_block.virt;
	/* everything here is counted in 32-bit words */
	u16 sg_count =
	    sb->inbound_frame_size - (sizeof(struct i2o_message) / 4) -
	    body_size;

	if (c->pae_support) {
		/*
		 * For 64-bit, an SG attribute element must be added, and
		 * each SG element needs 12 bytes instead of 8.
		 */
		sg_count -= 2;
		sg_count /= 3;
	} else
		sg_count /= 2;

	if (c->short_req && (sg_count > 8))
		sg_count = 8;

	return sg_count;
}
EXPORT_SYMBOL_GPL(i2o_sg_tablesize);
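
/*
 * Worked example with assumed values: for a controller reporting an
 * inbound_frame_size of 32 words, a 4-word message header (assuming
 * sizeof(struct i2o_message) == 16 bytes) and a body_size of 6 words,
 * 32 - 4 - 6 = 22 words remain for the SG list.  Without PAE that is
 * 22 / 2 = 11 two-word SG elements; with PAE it is (22 - 2) / 3 = 6
 * three-word elements, after reserving two words for the SG attribute
 * element.  A controller with short_req set would cap the first result
 * at 8.
 */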

/**
 * i2o_dma_map_single - Map pointer to controller and fill in I2O message.
 * @c: I2O controller
 * @ptr: pointer to the data which should be mapped
 * @size: size of data in bytes
 * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
 * @sg_ptr: pointer to the SG list inside the I2O message
 *
 * This function does all necessary DMA handling and also writes the I2O
 * SGL elements into the I2O message. For details on DMA handling see also
 * dma_map_single(). The pointer sg_ptr will only be advanced to the end of
 * the SG list if the mapping was successful.
 *
 * Returns a DMA address which must be checked for failures using
 * dma_mapping_error().
 */
dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
			      size_t size,
			      enum dma_data_direction direction,
			      u32 **sg_ptr)
{
	u32 sg_flags;
	u32 *mptr = *sg_ptr;
	dma_addr_t dma_addr;

	switch (direction) {
	case DMA_TO_DEVICE:
		sg_flags = 0xd4000000;
		break;
	case DMA_FROM_DEVICE:
		sg_flags = 0xd0000000;
		break;
	default:
		return 0;
	}

	dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction);
	if (!dma_mapping_error(&c->pdev->dev, dma_addr)) {
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
		if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
			*mptr++ = cpu_to_le32(0x7C020002);
			*mptr++ = cpu_to_le32(PAGE_SIZE);
		}
#endif

		*mptr++ = cpu_to_le32(sg_flags | size);
		*mptr++ = cpu_to_le32(i2o_dma_low(dma_addr));
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
		if ((sizeof(dma_addr_t) > 4) && c->pae_support)
			*mptr++ = cpu_to_le32(i2o_dma_high(dma_addr));
#endif
		*sg_ptr = mptr;
	}
	return dma_addr;
}
EXPORT_SYMBOL_GPL(i2o_dma_map_single);
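
/*
 * Minimal usage sketch (hypothetical caller, not part of this file): map
 * a buffer for a read, let the helper append the SG element behind the
 * message payload, and bail out if the mapping failed.  buf, len and
 * mptr are assumed to be set up by the surrounding driver code.
 */
static int example_map_buffer(struct i2o_controller *c, void *buf,
			      size_t len, u32 **mptr)
{
	dma_addr_t addr;

	addr = i2o_dma_map_single(c, buf, len, DMA_FROM_DEVICE, mptr);
	if (dma_mapping_error(&c->pdev->dev, addr))
		return -ENOMEM;	/* *mptr is left untouched on failure */

	return 0;
}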

/**
 * i2o_dma_map_sg - Map a SG List to controller and fill in I2O message.
 * @c: I2O controller
 * @sg: SG list to be mapped
 * @sg_count: number of elements in the SG list
 * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
 * @sg_ptr: pointer to the SG list inside the I2O message
 *
 * This function does all necessary DMA handling and also writes the I2O
 * SGL elements into the I2O message. For details on DMA handling see also
 * dma_map_sg(). The pointer sg_ptr will only be advanced to the end of the
 * SG list if the mapping was successful.
 *
 * Returns 0 on failure or 1 on success.
 */
int i2o_dma_map_sg(struct i2o_controller *c, struct scatterlist *sg,
		   int sg_count, enum dma_data_direction direction,
		   u32 **sg_ptr)
{
	u32 sg_flags;
	u32 *mptr = *sg_ptr;

	switch (direction) {
	case DMA_TO_DEVICE:
		sg_flags = 0x14000000;
		break;
	case DMA_FROM_DEVICE:
		sg_flags = 0x10000000;
		break;
	default:
		return 0;
	}

	sg_count = dma_map_sg(&c->pdev->dev, sg, sg_count, direction);
	if (!sg_count)
		return 0;

#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
	if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
		*mptr++ = cpu_to_le32(0x7C020002);
		*mptr++ = cpu_to_le32(PAGE_SIZE);
	}
#endif

	while (sg_count-- > 0) {
		if (!sg_count)
			sg_flags |= 0xC0000000;	/* mark the last element */
		*mptr++ = cpu_to_le32(sg_flags | sg_dma_len(sg));
		*mptr++ = cpu_to_le32(i2o_dma_low(sg_dma_address(sg)));
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
		if ((sizeof(dma_addr_t) > 4) && c->pae_support)
			*mptr++ = cpu_to_le32(i2o_dma_high(sg_dma_address(sg)));
#endif
		sg = sg_next(sg);
	}
	*sg_ptr = mptr;

	return 1;
}
EXPORT_SYMBOL_GPL(i2o_dma_map_sg);
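
/*
 * Usage sketch (hypothetical caller): map a scatterlist obtained from an
 * upper layer straight into the outgoing message.  The helper advances
 * mptr past the SGL it wrote and tags the final element with the
 * end-of-list bits (0xC0000000), so the caller only checks the return
 * value.
 */
static int example_map_request(struct i2o_controller *c,
			       struct scatterlist *sg, int sg_count,
			       u32 **mptr)
{
	if (!i2o_dma_map_sg(c, sg, sg_count, DMA_TO_DEVICE, mptr))
		return -ENOMEM;	/* nothing was written through mptr */

	return 0;
}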

/**
 * i2o_dma_alloc - Allocate DMA memory
 * @dev: struct device pointer to the PCI device of the I2O controller
 * @addr: i2o_dma struct which should get the DMA buffer
 * @len: length of the new DMA memory
 *
 * Allocate coherent DMA memory and write the pointers into addr.
 *
 * Returns 0 on success or -ENOMEM on failure.
 */
int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr, size_t len)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int dma_64 = 0;

	mutex_lock(&mem_lock);
	if ((sizeof(dma_addr_t) > 4) && (pdev->dma_mask == DMA_64BIT_MASK)) {
		dma_64 = 1;
		/* drop to a 32-bit mask so the buffer lands below 4 GB */
		if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
			mutex_unlock(&mem_lock);
			return -ENOMEM;
		}
	}

	addr->virt = dma_alloc_coherent(dev, len, &addr->phys, GFP_KERNEL);

	if ((sizeof(dma_addr_t) > 4) && dma_64)
		if (pci_set_dma_mask(pdev, DMA_64BIT_MASK))
			printk(KERN_WARNING "i2o: unable to set 64-bit DMA\n");
	mutex_unlock(&mem_lock);

	if (!addr->virt)
		return -ENOMEM;

	memset(addr->virt, 0, len);
	addr->len = len;

	return 0;
}
EXPORT_SYMBOL_GPL(i2o_dma_alloc);
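
/*
 * Sketch of the typical allocate/use/free cycle (hypothetical buffer and
 * length): the i2o_dma struct carries virt, phys and len together, so the
 * matching i2o_dma_free() needs no size argument.
 */
static int example_status_buffer(struct device *dev)
{
	struct i2o_dma tmp;

	if (i2o_dma_alloc(dev, &tmp, 4096))
		return -ENOMEM;

	/* ... hand tmp.phys to the controller, read results via tmp.virt ... */

	i2o_dma_free(dev, &tmp);
	return 0;
}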

/**
 * i2o_dma_free - Free DMA memory
 * @dev: struct device pointer to the PCI device of the I2O controller
 * @addr: i2o_dma struct which contains the DMA buffer
 *
 * Free coherent DMA memory and set the virtual address in addr to NULL.
 */
void i2o_dma_free(struct device *dev, struct i2o_dma *addr)
{
	if (addr->virt) {
		if (addr->phys)
			dma_free_coherent(dev, addr->len, addr->virt,
					  addr->phys);
		else
			kfree(addr->virt);
		addr->virt = NULL;
	}
}
EXPORT_SYMBOL_GPL(i2o_dma_free);

/**
 * i2o_dma_realloc - Realloc DMA memory
 * @dev: struct device pointer to the PCI device of the I2O controller
 * @addr: pointer to a i2o_dma struct DMA buffer
 * @len: new length of memory
 *
 * If there was something allocated in addr, free it first. If len > 0,
 * then try to allocate it and write the addresses back to the addr
 * structure. If len == 0, set the virtual address to NULL.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr, size_t len)
{
	i2o_dma_free(dev, addr);

	if (len)
		return i2o_dma_alloc(dev, addr, len);

	return 0;
}
EXPORT_SYMBOL_GPL(i2o_dma_realloc);
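
/*
 * Behaviour sketch (hypothetical size): because i2o_dma_realloc() frees
 * the old buffer before allocating the new one, contents are not
 * preserved across a resize, and len == 0 degenerates to a plain free.
 */
static int example_resize(struct device *dev, struct i2o_dma *buf)
{
	return i2o_dma_realloc(dev, buf, 8192);	/* old data is discarded */
}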

/**
 * i2o_pool_alloc - Allocate a slab cache and mempool
 * @pool: pointer to struct i2o_pool to write data into.
 * @name: name which is used to identify cache
 * @size: size of each object
 * @min_nr: minimum number of objects
 *
 * First allocates a slab cache with name and size. Then allocates a
 * mempool which uses the slab cache for allocation and freeing.
 *
 * Returns 0 on success or negative error code on failure.
 */
int i2o_pool_alloc(struct i2o_pool *pool, const char *name,
		   size_t size, int min_nr)
{
	pool->name = kstrdup(name, GFP_KERNEL);
	if (!pool->name)
		goto exit;

	pool->slab =
	    kmem_cache_create(pool->name, size, 0, SLAB_HWCACHE_ALIGN, NULL);
	if (!pool->slab)
		goto free_name;

	pool->mempool = mempool_create_slab_pool(min_nr, pool->slab);
	if (!pool->mempool)
		goto free_slab;

	return 0;

free_slab:
	kmem_cache_destroy(pool->slab);

free_name:
	kfree(pool->name);

exit:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(i2o_pool_alloc);
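
/*
 * Usage sketch (hypothetical pool name, object size and minimum): create
 * the pool once, then draw objects from it.  The mempool guarantees
 * forward progress for up to min_nr concurrent objects even under memory
 * pressure.
 */
static int example_use_pool(struct i2o_pool *pool)
{
	void *ctx;

	if (i2o_pool_alloc(pool, "example_ctx", 64, 4))
		return -ENOMEM;

	ctx = mempool_alloc(pool->mempool, GFP_KERNEL);
	if (ctx)
		mempool_free(ctx, pool->mempool);

	i2o_pool_free(pool);
	return 0;
}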

/**
 * i2o_pool_free - Free slab cache and mempool again
 * @pool: pointer to struct i2o_pool which should be freed
 *
 * Note that you have to return all objects to the mempool before
 * calling i2o_pool_free().
 */
void i2o_pool_free(struct i2o_pool *pool)
{
	mempool_destroy(pool->mempool);
	kmem_cache_destroy(pool->slab);
	kfree(pool->name);
}
EXPORT_SYMBOL_GPL(i2o_pool_free);