diff options
author | Bryan O'Sullivan <bos@pathscale.com> | 2006-03-29 18:23:36 -0500 |
---|---|---|
committer | Roland Dreier <rolandd@cisco.com> | 2006-03-31 16:14:20 -0500 |
commit | cef1cce5c87d84f76e44f0e7b4de72ab3818ac3a (patch) | |
tree | c9cb13413cae9dd636a699e3ec7d41882fd8e514 /drivers/infiniband | |
parent | 97f9efbc47f0b1bc88abac8724b505f0794a48d0 (diff) |
IB/ipath: misc infiniband code, part 1
Completion queues, local and remote memory keys, and memory region
support.
Signed-off-by: Bryan O'Sullivan <bos@pathscale.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r-- | drivers/infiniband/hw/ipath/ipath_cq.c | 295 | ||||
-rw-r--r-- | drivers/infiniband/hw/ipath/ipath_keys.c | 236 | ||||
-rw-r--r-- | drivers/infiniband/hw/ipath/ipath_mr.c | 383 | ||||
-rw-r--r-- | drivers/infiniband/hw/ipath/ipath_srq.c | 273 |
4 files changed, 1187 insertions, 0 deletions
diff --git a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/infiniband/hw/ipath/ipath_cq.c new file mode 100644 index 000000000000..7ece1135ddfe --- /dev/null +++ b/drivers/infiniband/hw/ipath/ipath_cq.c | |||
@@ -0,0 +1,295 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | #include <linux/err.h> | ||
34 | #include <linux/vmalloc.h> | ||
35 | |||
36 | #include "ipath_verbs.h" | ||
37 | |||
38 | /** | ||
39 | * ipath_cq_enter - add a new entry to the completion queue | ||
40 | * @cq: completion queue | ||
41 | * @entry: work completion entry to add | ||
42 | * @sig: true if @entry is a solicitated entry | ||
43 | * | ||
44 | * This may be called with one of the qp->s_lock or qp->r_rq.lock held. | ||
45 | */ | ||
46 | void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited) | ||
47 | { | ||
48 | unsigned long flags; | ||
49 | u32 next; | ||
50 | |||
51 | spin_lock_irqsave(&cq->lock, flags); | ||
52 | |||
53 | if (cq->head == cq->ibcq.cqe) | ||
54 | next = 0; | ||
55 | else | ||
56 | next = cq->head + 1; | ||
57 | if (unlikely(next == cq->tail)) { | ||
58 | spin_unlock_irqrestore(&cq->lock, flags); | ||
59 | if (cq->ibcq.event_handler) { | ||
60 | struct ib_event ev; | ||
61 | |||
62 | ev.device = cq->ibcq.device; | ||
63 | ev.element.cq = &cq->ibcq; | ||
64 | ev.event = IB_EVENT_CQ_ERR; | ||
65 | cq->ibcq.event_handler(&ev, cq->ibcq.cq_context); | ||
66 | } | ||
67 | return; | ||
68 | } | ||
69 | cq->queue[cq->head] = *entry; | ||
70 | cq->head = next; | ||
71 | |||
72 | if (cq->notify == IB_CQ_NEXT_COMP || | ||
73 | (cq->notify == IB_CQ_SOLICITED && solicited)) { | ||
74 | cq->notify = IB_CQ_NONE; | ||
75 | cq->triggered++; | ||
76 | /* | ||
77 | * This will cause send_complete() to be called in | ||
78 | * another thread. | ||
79 | */ | ||
80 | tasklet_hi_schedule(&cq->comptask); | ||
81 | } | ||
82 | |||
83 | spin_unlock_irqrestore(&cq->lock, flags); | ||
84 | |||
85 | if (entry->status != IB_WC_SUCCESS) | ||
86 | to_idev(cq->ibcq.device)->n_wqe_errs++; | ||
87 | } | ||
88 | |||
89 | /** | ||
90 | * ipath_poll_cq - poll for work completion entries | ||
91 | * @ibcq: the completion queue to poll | ||
92 | * @num_entries: the maximum number of entries to return | ||
93 | * @entry: pointer to array where work completions are placed | ||
94 | * | ||
95 | * Returns the number of completion entries polled. | ||
96 | * | ||
97 | * This may be called from interrupt context. Also called by ib_poll_cq() | ||
98 | * in the generic verbs code. | ||
99 | */ | ||
100 | int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) | ||
101 | { | ||
102 | struct ipath_cq *cq = to_icq(ibcq); | ||
103 | unsigned long flags; | ||
104 | int npolled; | ||
105 | |||
106 | spin_lock_irqsave(&cq->lock, flags); | ||
107 | |||
108 | for (npolled = 0; npolled < num_entries; ++npolled, ++entry) { | ||
109 | if (cq->tail == cq->head) | ||
110 | break; | ||
111 | *entry = cq->queue[cq->tail]; | ||
112 | if (cq->tail == cq->ibcq.cqe) | ||
113 | cq->tail = 0; | ||
114 | else | ||
115 | cq->tail++; | ||
116 | } | ||
117 | |||
118 | spin_unlock_irqrestore(&cq->lock, flags); | ||
119 | |||
120 | return npolled; | ||
121 | } | ||
122 | |||
123 | static void send_complete(unsigned long data) | ||
124 | { | ||
125 | struct ipath_cq *cq = (struct ipath_cq *)data; | ||
126 | |||
127 | /* | ||
128 | * The completion handler will most likely rearm the notification | ||
129 | * and poll for all pending entries. If a new completion entry | ||
130 | * is added while we are in this routine, tasklet_hi_schedule() | ||
131 | * won't call us again until we return so we check triggered to | ||
132 | * see if we need to call the handler again. | ||
133 | */ | ||
134 | for (;;) { | ||
135 | u8 triggered = cq->triggered; | ||
136 | |||
137 | cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); | ||
138 | |||
139 | if (cq->triggered == triggered) | ||
140 | return; | ||
141 | } | ||
142 | } | ||
143 | |||
144 | /** | ||
145 | * ipath_create_cq - create a completion queue | ||
146 | * @ibdev: the device this completion queue is attached to | ||
147 | * @entries: the minimum size of the completion queue | ||
148 | * @context: unused by the InfiniPath driver | ||
149 | * @udata: unused by the InfiniPath driver | ||
150 | * | ||
151 | * Returns a pointer to the completion queue or negative errno values | ||
152 | * for failure. | ||
153 | * | ||
154 | * Called by ib_create_cq() in the generic verbs code. | ||
155 | */ | ||
156 | struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, | ||
157 | struct ib_ucontext *context, | ||
158 | struct ib_udata *udata) | ||
159 | { | ||
160 | struct ipath_cq *cq; | ||
161 | struct ib_wc *wc; | ||
162 | struct ib_cq *ret; | ||
163 | |||
164 | /* | ||
165 | * Need to use vmalloc() if we want to support large #s of | ||
166 | * entries. | ||
167 | */ | ||
168 | cq = kmalloc(sizeof(*cq), GFP_KERNEL); | ||
169 | if (!cq) { | ||
170 | ret = ERR_PTR(-ENOMEM); | ||
171 | goto bail; | ||
172 | } | ||
173 | |||
174 | /* | ||
175 | * Need to use vmalloc() if we want to support large #s of entries. | ||
176 | */ | ||
177 | wc = vmalloc(sizeof(*wc) * (entries + 1)); | ||
178 | if (!wc) { | ||
179 | kfree(cq); | ||
180 | ret = ERR_PTR(-ENOMEM); | ||
181 | goto bail; | ||
182 | } | ||
183 | /* | ||
184 | * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe. | ||
185 | * The number of entries should be >= the number requested or return | ||
186 | * an error. | ||
187 | */ | ||
188 | cq->ibcq.cqe = entries; | ||
189 | cq->notify = IB_CQ_NONE; | ||
190 | cq->triggered = 0; | ||
191 | spin_lock_init(&cq->lock); | ||
192 | tasklet_init(&cq->comptask, send_complete, (unsigned long)cq); | ||
193 | cq->head = 0; | ||
194 | cq->tail = 0; | ||
195 | cq->queue = wc; | ||
196 | |||
197 | ret = &cq->ibcq; | ||
198 | |||
199 | bail: | ||
200 | return ret; | ||
201 | } | ||
202 | |||
203 | /** | ||
204 | * ipath_destroy_cq - destroy a completion queue | ||
205 | * @ibcq: the completion queue to destroy. | ||
206 | * | ||
207 | * Returns 0 for success. | ||
208 | * | ||
209 | * Called by ib_destroy_cq() in the generic verbs code. | ||
210 | */ | ||
211 | int ipath_destroy_cq(struct ib_cq *ibcq) | ||
212 | { | ||
213 | struct ipath_cq *cq = to_icq(ibcq); | ||
214 | |||
215 | tasklet_kill(&cq->comptask); | ||
216 | vfree(cq->queue); | ||
217 | kfree(cq); | ||
218 | |||
219 | return 0; | ||
220 | } | ||
221 | |||
222 | /** | ||
223 | * ipath_req_notify_cq - change the notification type for a completion queue | ||
224 | * @ibcq: the completion queue | ||
225 | * @notify: the type of notification to request | ||
226 | * | ||
227 | * Returns 0 for success. | ||
228 | * | ||
229 | * This may be called from interrupt context. Also called by | ||
230 | * ib_req_notify_cq() in the generic verbs code. | ||
231 | */ | ||
232 | int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) | ||
233 | { | ||
234 | struct ipath_cq *cq = to_icq(ibcq); | ||
235 | unsigned long flags; | ||
236 | |||
237 | spin_lock_irqsave(&cq->lock, flags); | ||
238 | /* | ||
239 | * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow | ||
240 | * any other transitions. | ||
241 | */ | ||
242 | if (cq->notify != IB_CQ_NEXT_COMP) | ||
243 | cq->notify = notify; | ||
244 | spin_unlock_irqrestore(&cq->lock, flags); | ||
245 | return 0; | ||
246 | } | ||
247 | |||
248 | int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) | ||
249 | { | ||
250 | struct ipath_cq *cq = to_icq(ibcq); | ||
251 | struct ib_wc *wc, *old_wc; | ||
252 | u32 n; | ||
253 | int ret; | ||
254 | |||
255 | /* | ||
256 | * Need to use vmalloc() if we want to support large #s of entries. | ||
257 | */ | ||
258 | wc = vmalloc(sizeof(*wc) * (cqe + 1)); | ||
259 | if (!wc) { | ||
260 | ret = -ENOMEM; | ||
261 | goto bail; | ||
262 | } | ||
263 | |||
264 | spin_lock_irq(&cq->lock); | ||
265 | if (cq->head < cq->tail) | ||
266 | n = cq->ibcq.cqe + 1 + cq->head - cq->tail; | ||
267 | else | ||
268 | n = cq->head - cq->tail; | ||
269 | if (unlikely((u32)cqe < n)) { | ||
270 | spin_unlock_irq(&cq->lock); | ||
271 | vfree(wc); | ||
272 | ret = -EOVERFLOW; | ||
273 | goto bail; | ||
274 | } | ||
275 | for (n = 0; cq->tail != cq->head; n++) { | ||
276 | wc[n] = cq->queue[cq->tail]; | ||
277 | if (cq->tail == cq->ibcq.cqe) | ||
278 | cq->tail = 0; | ||
279 | else | ||
280 | cq->tail++; | ||
281 | } | ||
282 | cq->ibcq.cqe = cqe; | ||
283 | cq->head = n; | ||
284 | cq->tail = 0; | ||
285 | old_wc = cq->queue; | ||
286 | cq->queue = wc; | ||
287 | spin_unlock_irq(&cq->lock); | ||
288 | |||
289 | vfree(old_wc); | ||
290 | |||
291 | ret = 0; | ||
292 | |||
293 | bail: | ||
294 | return ret; | ||
295 | } | ||
diff --git a/drivers/infiniband/hw/ipath/ipath_keys.c b/drivers/infiniband/hw/ipath/ipath_keys.c new file mode 100644 index 000000000000..aa33b0e9f2f6 --- /dev/null +++ b/drivers/infiniband/hw/ipath/ipath_keys.c | |||
@@ -0,0 +1,236 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | #include <asm/io.h> | ||
34 | |||
35 | #include "ipath_verbs.h" | ||
36 | |||
37 | /** | ||
38 | * ipath_alloc_lkey - allocate an lkey | ||
39 | * @rkt: lkey table in which to allocate the lkey | ||
40 | * @mr: memory region that this lkey protects | ||
41 | * | ||
42 | * Returns 1 if successful, otherwise returns 0. | ||
43 | */ | ||
44 | |||
45 | int ipath_alloc_lkey(struct ipath_lkey_table *rkt, struct ipath_mregion *mr) | ||
46 | { | ||
47 | unsigned long flags; | ||
48 | u32 r; | ||
49 | u32 n; | ||
50 | int ret; | ||
51 | |||
52 | spin_lock_irqsave(&rkt->lock, flags); | ||
53 | |||
54 | /* Find the next available LKEY */ | ||
55 | r = n = rkt->next; | ||
56 | for (;;) { | ||
57 | if (rkt->table[r] == NULL) | ||
58 | break; | ||
59 | r = (r + 1) & (rkt->max - 1); | ||
60 | if (r == n) { | ||
61 | spin_unlock_irqrestore(&rkt->lock, flags); | ||
62 | _VERBS_INFO("LKEY table full\n"); | ||
63 | ret = 0; | ||
64 | goto bail; | ||
65 | } | ||
66 | } | ||
67 | rkt->next = (r + 1) & (rkt->max - 1); | ||
68 | /* | ||
69 | * Make sure lkey is never zero which is reserved to indicate an | ||
70 | * unrestricted LKEY. | ||
71 | */ | ||
72 | rkt->gen++; | ||
73 | mr->lkey = (r << (32 - ib_ipath_lkey_table_size)) | | ||
74 | ((((1 << (24 - ib_ipath_lkey_table_size)) - 1) & rkt->gen) | ||
75 | << 8); | ||
76 | if (mr->lkey == 0) { | ||
77 | mr->lkey |= 1 << 8; | ||
78 | rkt->gen++; | ||
79 | } | ||
80 | rkt->table[r] = mr; | ||
81 | spin_unlock_irqrestore(&rkt->lock, flags); | ||
82 | |||
83 | ret = 1; | ||
84 | |||
85 | bail: | ||
86 | return ret; | ||
87 | } | ||
88 | |||
89 | /** | ||
90 | * ipath_free_lkey - free an lkey | ||
91 | * @rkt: table from which to free the lkey | ||
92 | * @lkey: lkey id to free | ||
93 | */ | ||
94 | void ipath_free_lkey(struct ipath_lkey_table *rkt, u32 lkey) | ||
95 | { | ||
96 | unsigned long flags; | ||
97 | u32 r; | ||
98 | |||
99 | if (lkey == 0) | ||
100 | return; | ||
101 | r = lkey >> (32 - ib_ipath_lkey_table_size); | ||
102 | spin_lock_irqsave(&rkt->lock, flags); | ||
103 | rkt->table[r] = NULL; | ||
104 | spin_unlock_irqrestore(&rkt->lock, flags); | ||
105 | } | ||
106 | |||
107 | /** | ||
108 | * ipath_lkey_ok - check IB SGE for validity and initialize | ||
109 | * @rkt: table containing lkey to check SGE against | ||
110 | * @isge: outgoing internal SGE | ||
111 | * @sge: SGE to check | ||
112 | * @acc: access flags | ||
113 | * | ||
114 | * Return 1 if valid and successful, otherwise returns 0. | ||
115 | * | ||
116 | * Check the IB SGE for validity and initialize our internal version | ||
117 | * of it. | ||
118 | */ | ||
119 | int ipath_lkey_ok(struct ipath_lkey_table *rkt, struct ipath_sge *isge, | ||
120 | struct ib_sge *sge, int acc) | ||
121 | { | ||
122 | struct ipath_mregion *mr; | ||
123 | size_t off; | ||
124 | int ret; | ||
125 | |||
126 | /* | ||
127 | * We use LKEY == zero to mean a physical kmalloc() address. | ||
128 | * This is a bit of a hack since we rely on dma_map_single() | ||
129 | * being reversible by calling bus_to_virt(). | ||
130 | */ | ||
131 | if (sge->lkey == 0) { | ||
132 | isge->mr = NULL; | ||
133 | isge->vaddr = bus_to_virt(sge->addr); | ||
134 | isge->length = sge->length; | ||
135 | isge->sge_length = sge->length; | ||
136 | ret = 1; | ||
137 | goto bail; | ||
138 | } | ||
139 | spin_lock(&rkt->lock); | ||
140 | mr = rkt->table[(sge->lkey >> (32 - ib_ipath_lkey_table_size))]; | ||
141 | spin_unlock(&rkt->lock); | ||
142 | if (unlikely(mr == NULL || mr->lkey != sge->lkey)) { | ||
143 | ret = 0; | ||
144 | goto bail; | ||
145 | } | ||
146 | |||
147 | off = sge->addr - mr->user_base; | ||
148 | if (unlikely(sge->addr < mr->user_base || | ||
149 | off + sge->length > mr->length || | ||
150 | (mr->access_flags & acc) != acc)) { | ||
151 | ret = 0; | ||
152 | goto bail; | ||
153 | } | ||
154 | |||
155 | off += mr->offset; | ||
156 | isge->mr = mr; | ||
157 | isge->m = 0; | ||
158 | isge->n = 0; | ||
159 | while (off >= mr->map[isge->m]->segs[isge->n].length) { | ||
160 | off -= mr->map[isge->m]->segs[isge->n].length; | ||
161 | isge->n++; | ||
162 | if (isge->n >= IPATH_SEGSZ) { | ||
163 | isge->m++; | ||
164 | isge->n = 0; | ||
165 | } | ||
166 | } | ||
167 | isge->vaddr = mr->map[isge->m]->segs[isge->n].vaddr + off; | ||
168 | isge->length = mr->map[isge->m]->segs[isge->n].length - off; | ||
169 | isge->sge_length = sge->length; | ||
170 | |||
171 | ret = 1; | ||
172 | |||
173 | bail: | ||
174 | return ret; | ||
175 | } | ||
176 | |||
177 | /** | ||
178 | * ipath_rkey_ok - check the IB virtual address, length, and RKEY | ||
179 | * @dev: infiniband device | ||
180 | * @ss: SGE state | ||
181 | * @len: length of data | ||
182 | * @vaddr: virtual address to place data | ||
183 | * @rkey: rkey to check | ||
184 | * @acc: access flags | ||
185 | * | ||
186 | * Return 1 if successful, otherwise 0. | ||
187 | * | ||
188 | * The QP r_rq.lock should be held. | ||
189 | */ | ||
190 | int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss, | ||
191 | u32 len, u64 vaddr, u32 rkey, int acc) | ||
192 | { | ||
193 | struct ipath_lkey_table *rkt = &dev->lk_table; | ||
194 | struct ipath_sge *sge = &ss->sge; | ||
195 | struct ipath_mregion *mr; | ||
196 | size_t off; | ||
197 | int ret; | ||
198 | |||
199 | spin_lock(&rkt->lock); | ||
200 | mr = rkt->table[(rkey >> (32 - ib_ipath_lkey_table_size))]; | ||
201 | spin_unlock(&rkt->lock); | ||
202 | if (unlikely(mr == NULL || mr->lkey != rkey)) { | ||
203 | ret = 0; | ||
204 | goto bail; | ||
205 | } | ||
206 | |||
207 | off = vaddr - mr->iova; | ||
208 | if (unlikely(vaddr < mr->iova || off + len > mr->length || | ||
209 | (mr->access_flags & acc) == 0)) { | ||
210 | ret = 0; | ||
211 | goto bail; | ||
212 | } | ||
213 | |||
214 | off += mr->offset; | ||
215 | sge->mr = mr; | ||
216 | sge->m = 0; | ||
217 | sge->n = 0; | ||
218 | while (off >= mr->map[sge->m]->segs[sge->n].length) { | ||
219 | off -= mr->map[sge->m]->segs[sge->n].length; | ||
220 | sge->n++; | ||
221 | if (sge->n >= IPATH_SEGSZ) { | ||
222 | sge->m++; | ||
223 | sge->n = 0; | ||
224 | } | ||
225 | } | ||
226 | sge->vaddr = mr->map[sge->m]->segs[sge->n].vaddr + off; | ||
227 | sge->length = mr->map[sge->m]->segs[sge->n].length - off; | ||
228 | sge->sge_length = len; | ||
229 | ss->sg_list = NULL; | ||
230 | ss->num_sge = 1; | ||
231 | |||
232 | ret = 1; | ||
233 | |||
234 | bail: | ||
235 | return ret; | ||
236 | } | ||
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c new file mode 100644 index 000000000000..69ffec66d45d --- /dev/null +++ b/drivers/infiniband/hw/ipath/ipath_mr.c | |||
@@ -0,0 +1,383 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | #include <rdma/ib_pack.h> | ||
34 | #include <rdma/ib_smi.h> | ||
35 | |||
36 | #include "ipath_verbs.h" | ||
37 | |||
38 | /** | ||
39 | * ipath_get_dma_mr - get a DMA memory region | ||
40 | * @pd: protection domain for this memory region | ||
41 | * @acc: access flags | ||
42 | * | ||
43 | * Returns the memory region on success, otherwise returns an errno. | ||
44 | */ | ||
45 | struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc) | ||
46 | { | ||
47 | struct ipath_mr *mr; | ||
48 | struct ib_mr *ret; | ||
49 | |||
50 | mr = kzalloc(sizeof *mr, GFP_KERNEL); | ||
51 | if (!mr) { | ||
52 | ret = ERR_PTR(-ENOMEM); | ||
53 | goto bail; | ||
54 | } | ||
55 | |||
56 | mr->mr.access_flags = acc; | ||
57 | ret = &mr->ibmr; | ||
58 | |||
59 | bail: | ||
60 | return ret; | ||
61 | } | ||
62 | |||
63 | static struct ipath_mr *alloc_mr(int count, | ||
64 | struct ipath_lkey_table *lk_table) | ||
65 | { | ||
66 | struct ipath_mr *mr; | ||
67 | int m, i = 0; | ||
68 | |||
69 | /* Allocate struct plus pointers to first level page tables. */ | ||
70 | m = (count + IPATH_SEGSZ - 1) / IPATH_SEGSZ; | ||
71 | mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL); | ||
72 | if (!mr) | ||
73 | goto done; | ||
74 | |||
75 | /* Allocate first level page tables. */ | ||
76 | for (; i < m; i++) { | ||
77 | mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL); | ||
78 | if (!mr->mr.map[i]) | ||
79 | goto bail; | ||
80 | } | ||
81 | mr->mr.mapsz = m; | ||
82 | |||
83 | /* | ||
84 | * ib_reg_phys_mr() will initialize mr->ibmr except for | ||
85 | * lkey and rkey. | ||
86 | */ | ||
87 | if (!ipath_alloc_lkey(lk_table, &mr->mr)) | ||
88 | goto bail; | ||
89 | mr->ibmr.rkey = mr->ibmr.lkey = mr->mr.lkey; | ||
90 | |||
91 | goto done; | ||
92 | |||
93 | bail: | ||
94 | while (i) { | ||
95 | i--; | ||
96 | kfree(mr->mr.map[i]); | ||
97 | } | ||
98 | kfree(mr); | ||
99 | mr = NULL; | ||
100 | |||
101 | done: | ||
102 | return mr; | ||
103 | } | ||
104 | |||
105 | /** | ||
106 | * ipath_reg_phys_mr - register a physical memory region | ||
107 | * @pd: protection domain for this memory region | ||
108 | * @buffer_list: pointer to the list of physical buffers to register | ||
109 | * @num_phys_buf: the number of physical buffers to register | ||
110 | * @iova_start: the starting address passed over IB which maps to this MR | ||
111 | * | ||
112 | * Returns the memory region on success, otherwise returns an errno. | ||
113 | */ | ||
114 | struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd, | ||
115 | struct ib_phys_buf *buffer_list, | ||
116 | int num_phys_buf, int acc, u64 *iova_start) | ||
117 | { | ||
118 | struct ipath_mr *mr; | ||
119 | int n, m, i; | ||
120 | struct ib_mr *ret; | ||
121 | |||
122 | mr = alloc_mr(num_phys_buf, &to_idev(pd->device)->lk_table); | ||
123 | if (mr == NULL) { | ||
124 | ret = ERR_PTR(-ENOMEM); | ||
125 | goto bail; | ||
126 | } | ||
127 | |||
128 | mr->mr.user_base = *iova_start; | ||
129 | mr->mr.iova = *iova_start; | ||
130 | mr->mr.length = 0; | ||
131 | mr->mr.offset = 0; | ||
132 | mr->mr.access_flags = acc; | ||
133 | mr->mr.max_segs = num_phys_buf; | ||
134 | |||
135 | m = 0; | ||
136 | n = 0; | ||
137 | for (i = 0; i < num_phys_buf; i++) { | ||
138 | mr->mr.map[m]->segs[n].vaddr = | ||
139 | phys_to_virt(buffer_list[i].addr); | ||
140 | mr->mr.map[m]->segs[n].length = buffer_list[i].size; | ||
141 | mr->mr.length += buffer_list[i].size; | ||
142 | n++; | ||
143 | if (n == IPATH_SEGSZ) { | ||
144 | m++; | ||
145 | n = 0; | ||
146 | } | ||
147 | } | ||
148 | |||
149 | ret = &mr->ibmr; | ||
150 | |||
151 | bail: | ||
152 | return ret; | ||
153 | } | ||
154 | |||
155 | /** | ||
156 | * ipath_reg_user_mr - register a userspace memory region | ||
157 | * @pd: protection domain for this memory region | ||
158 | * @region: the user memory region | ||
159 | * @mr_access_flags: access flags for this memory region | ||
160 | * @udata: unused by the InfiniPath driver | ||
161 | * | ||
162 | * Returns the memory region on success, otherwise returns an errno. | ||
163 | */ | ||
164 | struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, struct ib_umem *region, | ||
165 | int mr_access_flags, struct ib_udata *udata) | ||
166 | { | ||
167 | struct ipath_mr *mr; | ||
168 | struct ib_umem_chunk *chunk; | ||
169 | int n, m, i; | ||
170 | struct ib_mr *ret; | ||
171 | |||
172 | n = 0; | ||
173 | list_for_each_entry(chunk, ®ion->chunk_list, list) | ||
174 | n += chunk->nents; | ||
175 | |||
176 | mr = alloc_mr(n, &to_idev(pd->device)->lk_table); | ||
177 | if (!mr) { | ||
178 | ret = ERR_PTR(-ENOMEM); | ||
179 | goto bail; | ||
180 | } | ||
181 | |||
182 | mr->mr.user_base = region->user_base; | ||
183 | mr->mr.iova = region->virt_base; | ||
184 | mr->mr.length = region->length; | ||
185 | mr->mr.offset = region->offset; | ||
186 | mr->mr.access_flags = mr_access_flags; | ||
187 | mr->mr.max_segs = n; | ||
188 | |||
189 | m = 0; | ||
190 | n = 0; | ||
191 | list_for_each_entry(chunk, ®ion->chunk_list, list) { | ||
192 | for (i = 0; i < chunk->nmap; i++) { | ||
193 | mr->mr.map[m]->segs[n].vaddr = | ||
194 | page_address(chunk->page_list[i].page); | ||
195 | mr->mr.map[m]->segs[n].length = region->page_size; | ||
196 | n++; | ||
197 | if (n == IPATH_SEGSZ) { | ||
198 | m++; | ||
199 | n = 0; | ||
200 | } | ||
201 | } | ||
202 | } | ||
203 | ret = &mr->ibmr; | ||
204 | |||
205 | bail: | ||
206 | return ret; | ||
207 | } | ||
208 | |||
209 | /** | ||
210 | * ipath_dereg_mr - unregister and free a memory region | ||
211 | * @ibmr: the memory region to free | ||
212 | * | ||
213 | * Returns 0 on success. | ||
214 | * | ||
215 | * Note that this is called to free MRs created by ipath_get_dma_mr() | ||
216 | * or ipath_reg_user_mr(). | ||
217 | */ | ||
218 | int ipath_dereg_mr(struct ib_mr *ibmr) | ||
219 | { | ||
220 | struct ipath_mr *mr = to_imr(ibmr); | ||
221 | int i; | ||
222 | |||
223 | ipath_free_lkey(&to_idev(ibmr->device)->lk_table, ibmr->lkey); | ||
224 | i = mr->mr.mapsz; | ||
225 | while (i) { | ||
226 | i--; | ||
227 | kfree(mr->mr.map[i]); | ||
228 | } | ||
229 | kfree(mr); | ||
230 | return 0; | ||
231 | } | ||
232 | |||
233 | /** | ||
234 | * ipath_alloc_fmr - allocate a fast memory region | ||
235 | * @pd: the protection domain for this memory region | ||
236 | * @mr_access_flags: access flags for this memory region | ||
237 | * @fmr_attr: fast memory region attributes | ||
238 | * | ||
239 | * Returns the memory region on success, otherwise returns an errno. | ||
240 | */ | ||
241 | struct ib_fmr *ipath_alloc_fmr(struct ib_pd *pd, int mr_access_flags, | ||
242 | struct ib_fmr_attr *fmr_attr) | ||
243 | { | ||
244 | struct ipath_fmr *fmr; | ||
245 | int m, i = 0; | ||
246 | struct ib_fmr *ret; | ||
247 | |||
248 | /* Allocate struct plus pointers to first level page tables. */ | ||
249 | m = (fmr_attr->max_pages + IPATH_SEGSZ - 1) / IPATH_SEGSZ; | ||
250 | fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL); | ||
251 | if (!fmr) | ||
252 | goto bail; | ||
253 | |||
254 | /* Allocate first level page tables. */ | ||
255 | for (; i < m; i++) { | ||
256 | fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0], | ||
257 | GFP_KERNEL); | ||
258 | if (!fmr->mr.map[i]) | ||
259 | goto bail; | ||
260 | } | ||
261 | fmr->mr.mapsz = m; | ||
262 | |||
263 | /* | ||
264 | * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey & | ||
265 | * rkey. | ||
266 | */ | ||
267 | if (!ipath_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr)) | ||
268 | goto bail; | ||
269 | fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mr.lkey; | ||
270 | /* | ||
271 | * Resources are allocated but no valid mapping (RKEY can't be | ||
272 | * used). | ||
273 | */ | ||
274 | fmr->mr.user_base = 0; | ||
275 | fmr->mr.iova = 0; | ||
276 | fmr->mr.length = 0; | ||
277 | fmr->mr.offset = 0; | ||
278 | fmr->mr.access_flags = mr_access_flags; | ||
279 | fmr->mr.max_segs = fmr_attr->max_pages; | ||
280 | fmr->page_shift = fmr_attr->page_shift; | ||
281 | |||
282 | ret = &fmr->ibfmr; | ||
283 | goto done; | ||
284 | |||
285 | bail: | ||
286 | while (i) | ||
287 | kfree(fmr->mr.map[--i]); | ||
288 | kfree(fmr); | ||
289 | ret = ERR_PTR(-ENOMEM); | ||
290 | |||
291 | done: | ||
292 | return ret; | ||
293 | } | ||
294 | |||
295 | /** | ||
296 | * ipath_map_phys_fmr - set up a fast memory region | ||
297 | * @ibmfr: the fast memory region to set up | ||
298 | * @page_list: the list of pages to associate with the fast memory region | ||
299 | * @list_len: the number of pages to associate with the fast memory region | ||
300 | * @iova: the virtual address of the start of the fast memory region | ||
301 | * | ||
302 | * This may be called from interrupt context. | ||
303 | */ | ||
304 | |||
305 | int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 * page_list, | ||
306 | int list_len, u64 iova) | ||
307 | { | ||
308 | struct ipath_fmr *fmr = to_ifmr(ibfmr); | ||
309 | struct ipath_lkey_table *rkt; | ||
310 | unsigned long flags; | ||
311 | int m, n, i; | ||
312 | u32 ps; | ||
313 | int ret; | ||
314 | |||
315 | if (list_len > fmr->mr.max_segs) { | ||
316 | ret = -EINVAL; | ||
317 | goto bail; | ||
318 | } | ||
319 | rkt = &to_idev(ibfmr->device)->lk_table; | ||
320 | spin_lock_irqsave(&rkt->lock, flags); | ||
321 | fmr->mr.user_base = iova; | ||
322 | fmr->mr.iova = iova; | ||
323 | ps = 1 << fmr->page_shift; | ||
324 | fmr->mr.length = list_len * ps; | ||
325 | m = 0; | ||
326 | n = 0; | ||
327 | ps = 1 << fmr->page_shift; | ||
328 | for (i = 0; i < list_len; i++) { | ||
329 | fmr->mr.map[m]->segs[n].vaddr = phys_to_virt(page_list[i]); | ||
330 | fmr->mr.map[m]->segs[n].length = ps; | ||
331 | if (++n == IPATH_SEGSZ) { | ||
332 | m++; | ||
333 | n = 0; | ||
334 | } | ||
335 | } | ||
336 | spin_unlock_irqrestore(&rkt->lock, flags); | ||
337 | ret = 0; | ||
338 | |||
339 | bail: | ||
340 | return ret; | ||
341 | } | ||
342 | |||
343 | /** | ||
344 | * ipath_unmap_fmr - unmap fast memory regions | ||
345 | * @fmr_list: the list of fast memory regions to unmap | ||
346 | * | ||
347 | * Returns 0 on success. | ||
348 | */ | ||
349 | int ipath_unmap_fmr(struct list_head *fmr_list) | ||
350 | { | ||
351 | struct ipath_fmr *fmr; | ||
352 | struct ipath_lkey_table *rkt; | ||
353 | unsigned long flags; | ||
354 | |||
355 | list_for_each_entry(fmr, fmr_list, ibfmr.list) { | ||
356 | rkt = &to_idev(fmr->ibfmr.device)->lk_table; | ||
357 | spin_lock_irqsave(&rkt->lock, flags); | ||
358 | fmr->mr.user_base = 0; | ||
359 | fmr->mr.iova = 0; | ||
360 | fmr->mr.length = 0; | ||
361 | spin_unlock_irqrestore(&rkt->lock, flags); | ||
362 | } | ||
363 | return 0; | ||
364 | } | ||
365 | |||
366 | /** | ||
367 | * ipath_dealloc_fmr - deallocate a fast memory region | ||
368 | * @ibfmr: the fast memory region to deallocate | ||
369 | * | ||
370 | * Returns 0 on success. | ||
371 | */ | ||
372 | int ipath_dealloc_fmr(struct ib_fmr *ibfmr) | ||
373 | { | ||
374 | struct ipath_fmr *fmr = to_ifmr(ibfmr); | ||
375 | int i; | ||
376 | |||
377 | ipath_free_lkey(&to_idev(ibfmr->device)->lk_table, ibfmr->lkey); | ||
378 | i = fmr->mr.mapsz; | ||
379 | while (i) | ||
380 | kfree(fmr->mr.map[--i]); | ||
381 | kfree(fmr); | ||
382 | return 0; | ||
383 | } | ||
diff --git a/drivers/infiniband/hw/ipath/ipath_srq.c b/drivers/infiniband/hw/ipath/ipath_srq.c new file mode 100644 index 000000000000..01c4c6c56118 --- /dev/null +++ b/drivers/infiniband/hw/ipath/ipath_srq.c | |||
@@ -0,0 +1,273 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | #include <linux/err.h> | ||
34 | #include <linux/vmalloc.h> | ||
35 | |||
36 | #include "ipath_verbs.h" | ||
37 | |||
38 | /** | ||
39 | * ipath_post_srq_receive - post a receive on a shared receive queue | ||
40 | * @ibsrq: the SRQ to post the receive on | ||
41 | * @wr: the list of work requests to post | ||
42 | * @bad_wr: the first WR to cause a problem is put here | ||
43 | * | ||
44 | * This may be called from interrupt context. | ||
45 | */ | ||
46 | int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr, | ||
47 | struct ib_recv_wr **bad_wr) | ||
48 | { | ||
49 | struct ipath_srq *srq = to_isrq(ibsrq); | ||
50 | struct ipath_ibdev *dev = to_idev(ibsrq->device); | ||
51 | unsigned long flags; | ||
52 | int ret; | ||
53 | |||
54 | for (; wr; wr = wr->next) { | ||
55 | struct ipath_rwqe *wqe; | ||
56 | u32 next; | ||
57 | int i, j; | ||
58 | |||
59 | if (wr->num_sge > srq->rq.max_sge) { | ||
60 | *bad_wr = wr; | ||
61 | ret = -ENOMEM; | ||
62 | goto bail; | ||
63 | } | ||
64 | |||
65 | spin_lock_irqsave(&srq->rq.lock, flags); | ||
66 | next = srq->rq.head + 1; | ||
67 | if (next >= srq->rq.size) | ||
68 | next = 0; | ||
69 | if (next == srq->rq.tail) { | ||
70 | spin_unlock_irqrestore(&srq->rq.lock, flags); | ||
71 | *bad_wr = wr; | ||
72 | ret = -ENOMEM; | ||
73 | goto bail; | ||
74 | } | ||
75 | |||
76 | wqe = get_rwqe_ptr(&srq->rq, srq->rq.head); | ||
77 | wqe->wr_id = wr->wr_id; | ||
78 | wqe->sg_list[0].mr = NULL; | ||
79 | wqe->sg_list[0].vaddr = NULL; | ||
80 | wqe->sg_list[0].length = 0; | ||
81 | wqe->sg_list[0].sge_length = 0; | ||
82 | wqe->length = 0; | ||
83 | for (i = 0, j = 0; i < wr->num_sge; i++) { | ||
84 | /* Check LKEY */ | ||
85 | if (to_ipd(srq->ibsrq.pd)->user && | ||
86 | wr->sg_list[i].lkey == 0) { | ||
87 | spin_unlock_irqrestore(&srq->rq.lock, | ||
88 | flags); | ||
89 | *bad_wr = wr; | ||
90 | ret = -EINVAL; | ||
91 | goto bail; | ||
92 | } | ||
93 | if (wr->sg_list[i].length == 0) | ||
94 | continue; | ||
95 | if (!ipath_lkey_ok(&dev->lk_table, | ||
96 | &wqe->sg_list[j], | ||
97 | &wr->sg_list[i], | ||
98 | IB_ACCESS_LOCAL_WRITE)) { | ||
99 | spin_unlock_irqrestore(&srq->rq.lock, | ||
100 | flags); | ||
101 | *bad_wr = wr; | ||
102 | ret = -EINVAL; | ||
103 | goto bail; | ||
104 | } | ||
105 | wqe->length += wr->sg_list[i].length; | ||
106 | j++; | ||
107 | } | ||
108 | wqe->num_sge = j; | ||
109 | srq->rq.head = next; | ||
110 | spin_unlock_irqrestore(&srq->rq.lock, flags); | ||
111 | } | ||
112 | ret = 0; | ||
113 | |||
114 | bail: | ||
115 | return ret; | ||
116 | } | ||
117 | |||
118 | /** | ||
119 | * ipath_create_srq - create a shared receive queue | ||
120 | * @ibpd: the protection domain of the SRQ to create | ||
121 | * @attr: the attributes of the SRQ | ||
122 | * @udata: not used by the InfiniPath verbs driver | ||
123 | */ | ||
124 | struct ib_srq *ipath_create_srq(struct ib_pd *ibpd, | ||
125 | struct ib_srq_init_attr *srq_init_attr, | ||
126 | struct ib_udata *udata) | ||
127 | { | ||
128 | struct ipath_srq *srq; | ||
129 | u32 sz; | ||
130 | struct ib_srq *ret; | ||
131 | |||
132 | if (srq_init_attr->attr.max_sge < 1) { | ||
133 | ret = ERR_PTR(-EINVAL); | ||
134 | goto bail; | ||
135 | } | ||
136 | |||
137 | srq = kmalloc(sizeof(*srq), GFP_KERNEL); | ||
138 | if (!srq) { | ||
139 | ret = ERR_PTR(-ENOMEM); | ||
140 | goto bail; | ||
141 | } | ||
142 | |||
143 | /* | ||
144 | * Need to use vmalloc() if we want to support large #s of entries. | ||
145 | */ | ||
146 | srq->rq.size = srq_init_attr->attr.max_wr + 1; | ||
147 | sz = sizeof(struct ipath_sge) * srq_init_attr->attr.max_sge + | ||
148 | sizeof(struct ipath_rwqe); | ||
149 | srq->rq.wq = vmalloc(srq->rq.size * sz); | ||
150 | if (!srq->rq.wq) { | ||
151 | kfree(srq); | ||
152 | ret = ERR_PTR(-ENOMEM); | ||
153 | goto bail; | ||
154 | } | ||
155 | |||
156 | /* | ||
157 | * ib_create_srq() will initialize srq->ibsrq. | ||
158 | */ | ||
159 | spin_lock_init(&srq->rq.lock); | ||
160 | srq->rq.head = 0; | ||
161 | srq->rq.tail = 0; | ||
162 | srq->rq.max_sge = srq_init_attr->attr.max_sge; | ||
163 | srq->limit = srq_init_attr->attr.srq_limit; | ||
164 | |||
165 | ret = &srq->ibsrq; | ||
166 | |||
167 | bail: | ||
168 | return ret; | ||
169 | } | ||
170 | |||
/**
 * ipath_modify_srq - modify a shared receive queue
 * @ibsrq: the SRQ to modify
 * @attr: the new attributes of the SRQ
 * @attr_mask: indicates which attributes to modify
 *
 * IB_SRQ_LIMIT updates the limit-event watermark; IB_SRQ_MAX_WR
 * resizes the ring, copying any pending WQEs into the new buffer.
 *
 * Returns 0 on success, -EINVAL if the new size cannot hold the
 * pending entries (or shrinks max_sge), or -ENOMEM if the new ring
 * cannot be allocated.
 */
int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask)
{
	struct ipath_srq *srq = to_isrq(ibsrq);
	unsigned long flags;
	int ret;

	if (attr_mask & IB_SRQ_LIMIT) {
		spin_lock_irqsave(&srq->rq.lock, flags);
		srq->limit = attr->srq_limit;
		spin_unlock_irqrestore(&srq->rq.lock, flags);
	}
	if (attr_mask & IB_SRQ_MAX_WR) {
		/* One extra slot keeps head == tail meaning "empty". */
		u32 size = attr->max_wr + 1;
		struct ipath_rwqe *wq, *p;
		u32 n;
		u32 sz;

		/* Existing WQEs may already use up to rq.max_sge SGEs,
		 * so the per-entry SGE capacity must not shrink. */
		if (attr->max_sge < srq->rq.max_sge) {
			ret = -EINVAL;
			goto bail;
		}

		/* Allocate the new ring before taking the lock so the
		 * copy below runs without blocking. */
		sz = sizeof(struct ipath_rwqe) +
			attr->max_sge * sizeof(struct ipath_sge);
		wq = vmalloc(size * sz);
		if (!wq) {
			ret = -ENOMEM;
			goto bail;
		}

		spin_lock_irqsave(&srq->rq.lock, flags);
		/* n = number of WQEs currently pending in the ring. */
		if (srq->rq.head < srq->rq.tail)
			n = srq->rq.size + srq->rq.head - srq->rq.tail;
		else
			n = srq->rq.head - srq->rq.tail;
		/* The new ring must hold the pending entries and stay
		 * above the limit watermark. */
		if (size <= n || size <= srq->limit) {
			spin_unlock_irqrestore(&srq->rq.lock, flags);
			vfree(wq);
			ret = -EINVAL;
			goto bail;
		}
		/* Copy pending WQEs, compacting them to the front of
		 * the new ring.  Entries are sz bytes apart because the
		 * new max_sge may differ from the old one, so
		 * get_rwqe_ptr() strides cannot be reused here. */
		n = 0;
		p = wq;
		while (srq->rq.tail != srq->rq.head) {
			struct ipath_rwqe *wqe;
			int i;

			wqe = get_rwqe_ptr(&srq->rq, srq->rq.tail);
			p->wr_id = wqe->wr_id;
			p->length = wqe->length;
			p->num_sge = wqe->num_sge;
			for (i = 0; i < wqe->num_sge; i++)
				p->sg_list[i] = wqe->sg_list[i];
			n++;
			p = (struct ipath_rwqe *)((char *) p + sz);
			if (++srq->rq.tail >= srq->rq.size)
				srq->rq.tail = 0;
		}
		vfree(srq->rq.wq);
		srq->rq.wq = wq;
		srq->rq.size = size;
		/* n entries were copied to slots [0, n), so the new
		 * head is n and the new tail is 0. */
		srq->rq.head = n;
		srq->rq.tail = 0;
		srq->rq.max_sge = attr->max_sge;
		spin_unlock_irqrestore(&srq->rq.lock, flags);
	}

	ret = 0;

bail:
	return ret;
}
250 | |||
251 | int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr) | ||
252 | { | ||
253 | struct ipath_srq *srq = to_isrq(ibsrq); | ||
254 | |||
255 | attr->max_wr = srq->rq.size - 1; | ||
256 | attr->max_sge = srq->rq.max_sge; | ||
257 | attr->srq_limit = srq->limit; | ||
258 | return 0; | ||
259 | } | ||
260 | |||
261 | /** | ||
262 | * ipath_destroy_srq - destroy a shared receive queue | ||
263 | * @ibsrq: the SRQ to destroy | ||
264 | */ | ||
265 | int ipath_destroy_srq(struct ib_srq *ibsrq) | ||
266 | { | ||
267 | struct ipath_srq *srq = to_isrq(ibsrq); | ||
268 | |||
269 | vfree(srq->rq.wq); | ||
270 | kfree(srq); | ||
271 | |||
272 | return 0; | ||
273 | } | ||