author    Tom Tucker <tom@opengridcomputing.com>        2006-09-22 18:22:48 -0400
committer Roland Dreier <rolandd@cisco.com>             2006-09-22 18:22:48 -0400
commit    f94b533d091a42da92d908eb7b3f9ade1923f90d (patch)
tree      e8deed557c293bdb5eeaf8ca87ddda69e1cf3586 /drivers/infiniband/hw/amso1100/c2_mm.c
parent    07ebafbaaa72aa6a35472879008f5a1d1d469a0c (diff)
RDMA/amso1100: Add driver for Ammasso 1100 RNIC
Add a driver for the Ammasso 1100 gigabit ethernet RNIC.
Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/hw/amso1100/c2_mm.c')
-rw-r--r--   drivers/infiniband/hw/amso1100/c2_mm.c   375
1 file changed, 375 insertions, 0 deletions
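The interesting part of this file is how a memory region's page buffer list (PBL) reaches the adapter: the initial CCWR_NSMR_REGISTER message carries as many 64-bit page addresses (PBEs) as fit after the WR header, and any remainder is streamed in CCWR_NSMR_PBL messages, of which only the final one is flagged MEM_PBL_COMPLETE and generates a reply. The stand-alone C sketch below illustrates just that chunking arithmetic; MSG_SIZE and WR_HDR_SIZE are assumed example values, not the driver's real sizes (those come from c2dev->req_vq.msg_size and the c2wr request structures).

#include <stdio.h>

/* Illustrative sizes only; see the lead-in above. */
#define MSG_SIZE	1024u	/* bytes per request-queue message (assumed) */
#define WR_HDR_SIZE	64u	/* bytes of WR header per message (assumed)  */

int main(void)
{
	unsigned int pbe_count = (MSG_SIZE - WR_HDR_SIZE) / sizeof(unsigned long long);
	unsigned int pbl_depth = 1000;	/* page-buffer entries left to send */
	unsigned int msg = 0;

	while (pbl_depth) {
		unsigned int count = pbl_depth < pbe_count ? pbl_depth : pbe_count;

		printf("message %u carries %u PBEs%s\n", ++msg, count,
		       count == pbl_depth ? " [MEM_PBL_COMPLETE]" : "");
		pbl_depth -= count;
	}
	return 0;
}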
diff --git a/drivers/infiniband/hw/amso1100/c2_mm.c b/drivers/infiniband/hw/amso1100/c2_mm.c
new file mode 100644
index 000000000000..1e4f46493fcb
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_mm.c
@@ -0,0 +1,375 @@
/*
 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "c2.h"
#include "c2_vq.h"

#define PBL_VIRT 1
#define PBL_PHYS 2

/*
 * Send all the PBL messages to convey the remainder of the PBL
 * Wait for the adapter's reply on the last one.
 * This is indicated by setting the MEM_PBL_COMPLETE in the flags.
 *
 * NOTE: vq_req is _not_ freed by this function.  The VQ Host
 * Reply buffer _is_ freed by this function.
 */
static int
send_pbl_messages(struct c2_dev *c2dev, u32 stag_index,
		  unsigned long va, u32 pbl_depth,
		  struct c2_vq_req *vq_req, int pbl_type)
{
	u32 pbe_count;			/* amt that fits in a PBL msg */
	u32 count;			/* amt in this PBL MSG. */
	struct c2wr_nsmr_pbl_req *wr;	/* PBL WR ptr */
	struct c2wr_nsmr_pbl_rep *reply;	/* reply ptr */
	int err, pbl_virt, pbl_index, i;

	switch (pbl_type) {
	case PBL_VIRT:
		pbl_virt = 1;
		break;
	case PBL_PHYS:
		pbl_virt = 0;
		break;
	default:
		return -EINVAL;
	}

	pbe_count = (c2dev->req_vq.msg_size -
		     sizeof(struct c2wr_nsmr_pbl_req)) / sizeof(u64);
	wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
	if (!wr) {
		return -ENOMEM;
	}
	c2_wr_set_id(wr, CCWR_NSMR_PBL);

	/*
	 * Only the last PBL message will generate a reply from the verbs,
	 * so we set the context to 0 indicating there is no kernel verbs
	 * handler blocked awaiting this reply.
	 */
	wr->hdr.context = 0;
	wr->rnic_handle = c2dev->adapter_handle;
	wr->stag_index = stag_index;	/* already swapped */
	wr->flags = 0;
	pbl_index = 0;
	while (pbl_depth) {
		count = min(pbe_count, pbl_depth);
		wr->addrs_length = cpu_to_be32(count);

		/*
		 * If this is the last message, then reference the
		 * vq request struct because we will wait for a reply.
		 * Also mark this PBL msg as the last one.
		 */
		if (count == pbl_depth) {
			/*
			 * reference the request struct.  dereferenced in the
			 * int handler.
			 */
			vq_req_get(c2dev, vq_req);
			wr->flags = cpu_to_be32(MEM_PBL_COMPLETE);

			/*
			 * This is the last PBL message.
			 * Set the context to our VQ Request Object so we can
			 * wait for the reply.
			 */
			wr->hdr.context = (unsigned long) vq_req;
		}

		/*
		 * If pbl_virt is set then va is a virtual address
		 * that describes a virtually contiguous memory
		 * allocation.  The wr needs the start of each virtual page
		 * to be converted to the corresponding physical address
		 * of the page.  If pbl_virt is not set then va is an array
		 * of physical addresses and there is no conversion to do.
		 * Just fill in the wr with what is in the array.
		 */
		for (i = 0; i < count; i++) {
			if (pbl_virt) {
				va += PAGE_SIZE;
			} else {
				wr->paddrs[i] =
				    cpu_to_be64(((u64 *)va)[pbl_index + i]);
			}
		}

		/*
		 * Send WR to adapter
		 */
		err = vq_send_wr(c2dev, (union c2wr *) wr);
		if (err) {
			/* Only the final message holds a vq_req reference. */
			if (count == pbl_depth) {
				vq_req_put(c2dev, vq_req);
			}
			goto bail0;
		}
		pbl_depth -= count;
		pbl_index += count;
	}

	/*
	 * Now wait for the reply...
	 */
	err = vq_wait_for_reply(c2dev, vq_req);
	if (err) {
		goto bail0;
	}

	/*
	 * Process reply
	 */
	reply = (struct c2wr_nsmr_pbl_rep *) (unsigned long) vq_req->reply_msg;
	if (!reply) {
		err = -ENOMEM;
		goto bail0;
	}

	err = c2_errno(reply);

	vq_repbuf_free(c2dev, reply);
bail0:
	kfree(wr);
	return err;
}

#define C2_PBL_MAX_DEPTH 131072
int
c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list,
			   int page_size, int pbl_depth, u32 length,
			   u32 offset, u64 *va, enum c2_acf acf,
			   struct c2_mr *mr)
{
	struct c2_vq_req *vq_req;
	struct c2wr_nsmr_register_req *wr;
	struct c2wr_nsmr_register_rep *reply;
	u16 flags;
	int i, pbe_count, count;
	int err;

	if (!va || !length || !addr_list || !pbl_depth)
		return -EINVAL;

	/*
	 * Verify PBL depth is within RNIC max
	 */
	if (pbl_depth > C2_PBL_MAX_DEPTH) {
		return -EINVAL;
	}

	/*
	 * allocate verbs request object
	 */
	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		return -ENOMEM;

	wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
	if (!wr) {
		err = -ENOMEM;
		goto bail0;
	}

	/*
	 * build the WR
	 */
	c2_wr_set_id(wr, CCWR_NSMR_REGISTER);
	wr->hdr.context = (unsigned long) vq_req;
	wr->rnic_handle = c2dev->adapter_handle;

	flags = (acf | MEM_VA_BASED | MEM_REMOTE);

	/*
	 * compute how many pbes can fit in the message
	 */
	pbe_count = (c2dev->req_vq.msg_size -
		     sizeof(struct c2wr_nsmr_register_req)) / sizeof(u64);

	if (pbl_depth <= pbe_count) {
		flags |= MEM_PBL_COMPLETE;
	}
	wr->flags = cpu_to_be16(flags);
	wr->stag_key = 0;		/* stag_key */
	wr->va = cpu_to_be64(*va);
	wr->pd_id = mr->pd->pd_id;
	wr->pbe_size = cpu_to_be32(page_size);
	wr->length = cpu_to_be32(length);
	wr->pbl_depth = cpu_to_be32(pbl_depth);
	wr->fbo = cpu_to_be32(offset);
	count = min(pbl_depth, pbe_count);
	wr->addrs_length = cpu_to_be32(count);

	/*
	 * fill out the PBL for this message
	 */
	for (i = 0; i < count; i++) {
		wr->paddrs[i] = cpu_to_be64(addr_list[i]);
	}

	/*
	 * reference the request struct
	 */
	vq_req_get(c2dev, vq_req);

	/*
	 * send the WR to the adapter
	 */
	err = vq_send_wr(c2dev, (union c2wr *) wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail1;
	}

	/*
	 * wait for reply from adapter
	 */
	err = vq_wait_for_reply(c2dev, vq_req);
	if (err) {
		goto bail1;
	}

	/*
	 * process reply
	 */
	reply =
	    (struct c2wr_nsmr_register_rep *) (unsigned long) (vq_req->reply_msg);
	if (!reply) {
		err = -ENOMEM;
		goto bail1;
	}
	if ((err = c2_errno(reply))) {
		goto bail2;
	}
	/* *p_pb_entries = be32_to_cpu(reply->pbl_depth); */
	mr->ibmr.lkey = mr->ibmr.rkey = be32_to_cpu(reply->stag_index);
	vq_repbuf_free(c2dev, reply);

	/*
	 * if there are still more PBEs we need to send them to
	 * the adapter and wait for a reply on the final one.
	 * reuse vq_req for this purpose.
	 */
	pbl_depth -= count;
	if (pbl_depth) {

		vq_req->reply_msg = (unsigned long) NULL;
		atomic_set(&vq_req->reply_ready, 0);
		err = send_pbl_messages(c2dev,
					cpu_to_be32(mr->ibmr.lkey),
					(unsigned long) &addr_list[i],
					pbl_depth, vq_req, PBL_PHYS);
		if (err) {
			goto bail1;
		}
	}

	vq_req_free(c2dev, vq_req);
	kfree(wr);

	return err;

bail2:
	vq_repbuf_free(c2dev, reply);
bail1:
	kfree(wr);
bail0:
	vq_req_free(c2dev, vq_req);
	return err;
}

int c2_stag_dealloc(struct c2_dev *c2dev, u32 stag_index)
{
	struct c2_vq_req *vq_req;	/* verbs request object */
	struct c2wr_stag_dealloc_req wr;	/* work request */
	struct c2wr_stag_dealloc_rep *reply;	/* WR reply */
	int err;

	/*
	 * allocate verbs request object
	 */
	vq_req = vq_req_alloc(c2dev);
	if (!vq_req) {
		return -ENOMEM;
	}

	/*
	 * Build the WR
	 */
	c2_wr_set_id(&wr, CCWR_STAG_DEALLOC);
	wr.hdr.context = (u64) (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.stag_index = cpu_to_be32(stag_index);

	/*
	 * reference the request struct.  dereferenced in the int handler.
	 */
	vq_req_get(c2dev, vq_req);

	/*
	 * Send WR to adapter
	 */
	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail0;
	}

	/*
	 * Wait for reply from adapter
	 */
	err = vq_wait_for_reply(c2dev, vq_req);
	if (err) {
		goto bail0;
	}

	/*
	 * Process reply
	 */
	reply = (struct c2wr_stag_dealloc_rep *) (unsigned long) vq_req->reply_msg;
	if (!reply) {
		err = -ENOMEM;
		goto bail0;
	}

	err = c2_errno(reply);

	vq_repbuf_free(c2dev, reply);
bail0:
	vq_req_free(c2dev, vq_req);
	return err;
}
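All three routines above follow the same verbs-request discipline: allocate a request object, take an extra reference just before posting a WR that expects a reply, drop that reference if the post fails (no reply path will do it), and release the reply buffer and request object afterwards. The following stand-alone C sketch mocks that reference counting; mock_vq_req, the mock_req_* helpers, and the post_failed flag are illustrative stand-ins, not the real c2_vq API.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for struct c2_vq_req; the real driver uses an
 * atomic_t refcount and frees the object via vq_req_free(). */
struct mock_vq_req {
	int refcount;
	unsigned long reply_msg;
};

static struct mock_vq_req *mock_req_alloc(void)
{
	struct mock_vq_req *r = calloc(1, sizeof(*r));

	if (r)
		r->refcount = 1;	/* the caller's reference */
	return r;
}

static void mock_req_get(struct mock_vq_req *r)
{
	r->refcount++;
}

static void mock_req_put(struct mock_vq_req *r)
{
	if (--r->refcount == 0)
		free(r);
}

int main(void)
{
	struct mock_vq_req *req = mock_req_alloc();
	int post_failed = 0;	/* flip to 1 to simulate a failed WR post */

	if (!req)
		return 1;

	mock_req_get(req);	/* reply handler is expected to drop this */
	if (post_failed) {
		mock_req_put(req);	/* no reply will arrive; undo the get */
	} else {
		/* ...wait for the reply; the reply path drops its reference... */
		mock_req_put(req);
	}

	printf("refcount after reply handling: %d\n", req->refcount);
	mock_req_put(req);	/* release the caller's reference (frees req) */
	return 0;
}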