author	Faisal Latif <faisal.latif@intel.com>	2016-01-20 14:40:09 -0500
committer	Doug Ledford <dledford@redhat.com>	2016-02-29 17:10:53 -0500
commit	d37498417947cb2299fc749ae4af1d204c768cba (patch)
tree	987296c0bdc391ff0e830fb19cf2858dfc625e4f
parent	4e9042e647ff083239984c4051c6d1a4f927ecc3 (diff)
i40iw: add files for iwarp interface
i40iw_verbs.[ch] handle the iwarp interface.
Changes since v2:
Made infiniband interface changes for 4.5
removed i40iw_reg_phys_mr() for 4.5
adopted the changes Christoph Hellwig made for nes
in i40iw_get_dma_mr().
Changes since v1:
Following modifications based on Christoph Hellwig's feedback:
-removed kmap() calls and moved the code to i40iw_cm.c
-cleaned up some of the casts
Acked-by: Anjali Singhai Jain <anjali.singhai@intel.com>
Acked-by: Shannon Nelson <shannon.nelson@intel.com>
Signed-off-by: Faisal Latif <faisal.latif@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
 drivers/infiniband/hw/i40iw/i40iw_ucontext.h |  107
 drivers/infiniband/hw/i40iw/i40iw_verbs.c    | 2434
 drivers/infiniband/hw/i40iw/i40iw_verbs.h    |  173
 3 files changed, 2714 insertions, 0 deletions
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ucontext.h b/drivers/infiniband/hw/i40iw/i40iw_ucontext.h
new file mode 100644
index 000000000000..12acd688def4
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_ucontext.h
@@ -0,0 +1,107 @@
/*
 * Copyright (c) 2006 - 2016 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#ifndef I40IW_USER_CONTEXT_H
#define I40IW_USER_CONTEXT_H

#include <linux/types.h>

#define I40IW_ABI_USERSPACE_VER 4
#define I40IW_ABI_KERNEL_VER    4
struct i40iw_alloc_ucontext_req {
	__u32 reserved32;
	__u8 userspace_ver;
	__u8 reserved8[3];
};

struct i40iw_alloc_ucontext_resp {
	__u32 max_pds;	/* maximum pds allowed for this user process */
	__u32 max_qps;	/* maximum qps allowed for this user process */
	__u32 wq_size;	/* size of the WQs (sq+rq) allocated to the mmaped area */
	__u8 kernel_ver;
	__u8 reserved[3];
};

struct i40iw_alloc_pd_resp {
	__u32 pd_id;
	__u8 reserved[4];
};

struct i40iw_create_cq_req {
	__u64 user_cq_buffer;
	__u64 user_shadow_area;
};

struct i40iw_create_qp_req {
	__u64 user_wqe_buffers;
	__u64 user_compl_ctx;

	/* UDA QP PHB */
	__u64 user_sq_phb;	/* place for VA of the sq phb buff */
	__u64 user_rq_phb;	/* place for VA of the rq phb buff */
};

enum i40iw_memreg_type {
	IW_MEMREG_TYPE_MEM = 0x0000,
	IW_MEMREG_TYPE_QP = 0x0001,
	IW_MEMREG_TYPE_CQ = 0x0002,
};

struct i40iw_mem_reg_req {
	__u16 reg_type;	/* Memory, QP or CQ */
	__u16 cq_pages;
	__u16 rq_pages;
	__u16 sq_pages;
};

struct i40iw_create_cq_resp {
	__u32 cq_id;
	__u32 cq_size;
	__u32 mmap_db_index;
	__u32 reserved;
};

struct i40iw_create_qp_resp {
	__u32 qp_id;
	__u32 actual_sq_size;
	__u32 actual_rq_size;
	__u32 i40iw_drv_opt;
	__u16 push_idx;
	__u8 lsmm;
	__u8 rsvd2;
};

#endif
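The two ucontext structs above carry the ABI handshake: userspace sends its version in the request and checks the kernel's version in the response. A minimal provider-side sketch of that handshake follows; the helper name and error handling are illustrative, not part of this patch.

```c
#include <stdio.h>
#include "i40iw_ucontext.h"

/* Hypothetical provider-library check around the alloc_ucontext
 * command: fill in our ABI version before issuing the command,
 * then verify the version the kernel reports back. */
static int i40iw_abi_handshake(struct i40iw_alloc_ucontext_req *req,
			       const struct i40iw_alloc_ucontext_resp *resp)
{
	req->userspace_ver = I40IW_ABI_USERSPACE_VER;
	/* ... issue IB_USER_VERBS_CMD_GET_CONTEXT here ... */
	if (resp->kernel_ver != I40IW_ABI_KERNEL_VER) {
		fprintf(stderr, "i40iw: kernel ABI %d, expected %d\n",
			resp->kernel_ver, I40IW_ABI_KERNEL_VER);
		return -1;
	}
	return 0;
}
```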
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
new file mode 100644
index 000000000000..c5c9805c9cb3
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -0,0 +1,2434 @@
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
*   copyright notice, this list of conditions and the following
*   disclaimer.
*
* - Redistributions in binary form must reproduce the above
*   copyright notice, this list of conditions and the following
*   disclaimer in the documentation and/or other materials
*   provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <asm/byteorder.h>
#include <net/ip.h>
#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include "i40iw.h"

/**
 * i40iw_query_device - get device attributes
 * @ibdev: device pointer from stack
 * @props: returning device attributes
 * @udata: user data
 */
static int i40iw_query_device(struct ib_device *ibdev,
			      struct ib_device_attr *props,
			      struct ib_udata *udata)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);

	if (udata->inlen || udata->outlen)
		return -EINVAL;
	memset(props, 0, sizeof(*props));
	ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr);
	props->fw_ver = I40IW_FW_VERSION;
	props->device_cap_flags = iwdev->device_cap_flags;
	props->vendor_id = iwdev->vendor_id;
	props->vendor_part_id = iwdev->vendor_part_id;
	props->hw_ver = (u32)iwdev->sc_dev.hw_rev;
	props->max_mr_size = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
	props->max_qp = iwdev->max_qp;
	props->max_qp_wr = (I40IW_MAX_WQ_ENTRIES >> 2) - 1;
	props->max_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
	props->max_cq = iwdev->max_cq;
	props->max_cqe = iwdev->max_cqe;
	props->max_mr = iwdev->max_mr;
	props->max_pd = iwdev->max_pd;
	props->max_sge_rd = 1;
	props->max_qp_rd_atom = I40IW_MAX_IRD_SIZE;
	props->max_qp_init_rd_atom = props->max_qp_rd_atom;
	props->atomic_cap = IB_ATOMIC_NONE;
	props->max_map_per_fmr = 1;
	return 0;
}

/**
 * i40iw_query_port - get port attributes
 * @ibdev: device pointer from stack
 * @port: port number for query
 * @props: returning device attributes
 */
static int i40iw_query_port(struct ib_device *ibdev,
			    u8 port,
			    struct ib_port_attr *props)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct net_device *netdev = iwdev->netdev;

	memset(props, 0, sizeof(*props));

	props->max_mtu = IB_MTU_4096;
	if (netdev->mtu >= 4096)
		props->active_mtu = IB_MTU_4096;
	else if (netdev->mtu >= 2048)
		props->active_mtu = IB_MTU_2048;
	else if (netdev->mtu >= 1024)
		props->active_mtu = IB_MTU_1024;
	else if (netdev->mtu >= 512)
		props->active_mtu = IB_MTU_512;
	else
		props->active_mtu = IB_MTU_256;

	props->lid = 1;
	if (netif_carrier_ok(iwdev->netdev))
		props->state = IB_PORT_ACTIVE;
	else
		props->state = IB_PORT_DOWN;
	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
		IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_width = IB_WIDTH_4X;
	props->active_speed = 1;
	props->max_msg_sz = 0x80000000;
	return 0;
}

/**
 * i40iw_alloc_ucontext - Allocate the user context data structure
 * @ibdev: device pointer from stack
 * @udata: user data
 *
 * This keeps track of all objects associated with a particular
 * user-mode client.
 */
static struct ib_ucontext *i40iw_alloc_ucontext(struct ib_device *ibdev,
						struct ib_udata *udata)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct i40iw_alloc_ucontext_req req;
	struct i40iw_alloc_ucontext_resp uresp;
	struct i40iw_ucontext *ucontext;

	if (ib_copy_from_udata(&req, udata, sizeof(req)))
		return ERR_PTR(-EINVAL);

	if (req.userspace_ver != I40IW_ABI_USERSPACE_VER) {
		i40iw_pr_err("Invalid userspace driver version detected. Detected version %d, should be %d\n",
			     req.userspace_ver, I40IW_ABI_USERSPACE_VER);
		return ERR_PTR(-EINVAL);
	}

	memset(&uresp, 0, sizeof(uresp));
	uresp.max_qps = iwdev->max_qp;
	uresp.max_pds = iwdev->max_pd;
	uresp.wq_size = iwdev->max_qp_wr * 2;
	uresp.kernel_ver = I40IW_ABI_KERNEL_VER;

	ucontext = kzalloc(sizeof(*ucontext), GFP_KERNEL);
	if (!ucontext)
		return ERR_PTR(-ENOMEM);

	ucontext->iwdev = iwdev;

	if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
		kfree(ucontext);
		return ERR_PTR(-EFAULT);
	}

	INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
	spin_lock_init(&ucontext->cq_reg_mem_list_lock);
	INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
	spin_lock_init(&ucontext->qp_reg_mem_list_lock);

	return &ucontext->ibucontext;
}

/**
 * i40iw_dealloc_ucontext - deallocate the user context data structure
 * @context: user context created during alloc
 */
static int i40iw_dealloc_ucontext(struct ib_ucontext *context)
{
	struct i40iw_ucontext *ucontext = to_ucontext(context);
	unsigned long flags;

	spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
	if (!list_empty(&ucontext->cq_reg_mem_list)) {
		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
		return -EBUSY;
	}
	spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
	spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
	if (!list_empty(&ucontext->qp_reg_mem_list)) {
		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
		return -EBUSY;
	}
	spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);

	kfree(ucontext);
	return 0;
}

/**
 * i40iw_mmap - user memory map
 * @context: context created during alloc
 * @vma: kernel info for user memory map
 */
static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct i40iw_ucontext *ucontext;
	u64 db_addr_offset;
	u64 push_offset;

	ucontext = to_ucontext(context);
	if (ucontext->iwdev->sc_dev.is_pf) {
		db_addr_offset = I40IW_DB_ADDR_OFFSET;
		push_offset = I40IW_PUSH_OFFSET;
		if (vma->vm_pgoff)
			vma->vm_pgoff += I40IW_PF_FIRST_PUSH_PAGE_INDEX - 1;
	} else {
		db_addr_offset = I40IW_VF_DB_ADDR_OFFSET;
		push_offset = I40IW_VF_PUSH_OFFSET;
		if (vma->vm_pgoff)
			vma->vm_pgoff += I40IW_VF_FIRST_PUSH_PAGE_INDEX - 1;
	}

	vma->vm_pgoff += db_addr_offset >> PAGE_SHIFT;

	if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		vma->vm_private_data = ucontext;
	} else {
		if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		else
			vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	}

	if (io_remap_pfn_range(vma, vma->vm_start,
			       vma->vm_pgoff + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> PAGE_SHIFT),
			       PAGE_SIZE, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
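Seen from userspace, page offset 0 of this mapping selects the doorbell page (mapped non-cached) and non-zero page offsets select push pages. A minimal sketch of the matching userspace call, assuming cmd_fd is the open uverbs file descriptor obtained from the provider library:

```c
#include <stdint.h>
#include <unistd.h>
#include <sys/mman.h>

/* Map the i40iw doorbell page: page offset 0 takes the
 * db_addr_offset branch in i40iw_mmap() above. */
static volatile uint32_t *i40iw_map_doorbell(int cmd_fd)
{
	void *db = mmap(NULL, sysconf(_SC_PAGESIZE),
			PROT_READ | PROT_WRITE, MAP_SHARED, cmd_fd, 0);

	return (db == MAP_FAILED) ? NULL : (volatile uint32_t *)db;
}
```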

/**
 * i40iw_alloc_push_page - allocate a push page for qp
 * @iwdev: iwarp device
 * @qp: hardware control qp
 */
static void i40iw_alloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)
{
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	enum i40iw_status_code status;

	if (qp->push_idx != I40IW_INVALID_PUSH_PAGE_INDEX)
		return;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return;

	atomic_inc(&cqp_request->refcount);

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
	cqp_info->post_sq = 1;

	cqp_info->in.u.manage_push_page.info.qs_handle = dev->qs_handle;
	cqp_info->in.u.manage_push_page.info.free_page = 0;
	cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
	cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;

	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (!status)
		qp->push_idx = cqp_request->compl_info.op_ret_val;
	else
		i40iw_pr_err("CQP-OP Push page fail");
	i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
}

/**
 * i40iw_dealloc_push_page - free a push page for qp
 * @iwdev: iwarp device
 * @qp: hardware control qp
 */
static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)
{
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	enum i40iw_status_code status;

	if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX)
		return;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
	cqp_info->post_sq = 1;

	cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx;
	cqp_info->in.u.manage_push_page.info.qs_handle = dev->qs_handle;
	cqp_info->in.u.manage_push_page.info.free_page = 1;
	cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
	cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;

	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (!status)
		qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
	else
		i40iw_pr_err("CQP-OP Push page fail");
}

/**
 * i40iw_alloc_pd - allocate protection domain
 * @ibdev: device pointer from stack
 * @context: user context created during alloc
 * @udata: user data
 */
static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev,
				    struct ib_ucontext *context,
				    struct ib_udata *udata)
{
	struct i40iw_pd *iwpd;
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_alloc_pd_resp uresp;
	struct i40iw_sc_pd *sc_pd;
	u32 pd_id = 0;
	int err;

	err = i40iw_alloc_resource(iwdev, iwdev->allocated_pds,
				   iwdev->max_pd, &pd_id, &iwdev->next_pd);
	if (err) {
		i40iw_pr_err("alloc resource failed\n");
		return ERR_PTR(err);
	}

	iwpd = kzalloc(sizeof(*iwpd), GFP_KERNEL);
	if (!iwpd) {
		err = -ENOMEM;
		goto free_res;
	}

	sc_pd = &iwpd->sc_pd;
	dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id);

	if (context) {
		memset(&uresp, 0, sizeof(uresp));
		uresp.pd_id = pd_id;
		if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
			err = -EFAULT;
			goto error;
		}
	}

	i40iw_add_pdusecount(iwpd);
	return &iwpd->ibpd;
error:
	kfree(iwpd);
free_res:
	i40iw_free_resource(iwdev, iwdev->allocated_pds, pd_id);
	return ERR_PTR(err);
}

/**
 * i40iw_dealloc_pd - deallocate pd
 * @ibpd: ptr of pd to be deallocated
 */
static int i40iw_dealloc_pd(struct ib_pd *ibpd)
{
	struct i40iw_pd *iwpd = to_iwpd(ibpd);
	struct i40iw_device *iwdev = to_iwdev(ibpd->device);

	i40iw_rem_pdusecount(iwpd, iwdev);
	return 0;
}

/**
 * i40iw_qp_roundup - return rounded-up qp ring size
 * @wr_ring_size: ring size to round up
 */
static int i40iw_qp_roundup(u32 wr_ring_size)
{
	int scount = 1;

	if (wr_ring_size < I40IWQP_SW_MIN_WQSIZE)
		wr_ring_size = I40IWQP_SW_MIN_WQSIZE;

	for (wr_ring_size--; scount <= 16; scount *= 2)
		wr_ring_size |= wr_ring_size >> scount;
	return ++wr_ring_size;
}
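The loop above is the classic bit-smearing round-up: decrement, OR the value with progressively wider right-shifts of itself until every bit below the top set bit is 1, then increment. A standalone illustration (the minimum of 4 stands in for I40IWQP_SW_MIN_WQSIZE, whose actual value lives in the driver headers):

```c
#include <stdio.h>
#include <stdint.h>

static uint32_t roundup_pow2(uint32_t n)
{
	uint32_t s;

	if (n < 4)		/* stand-in for I40IWQP_SW_MIN_WQSIZE */
		n = 4;
	for (n--, s = 1; s <= 16; s *= 2)
		n |= n >> s;	/* smear the top set bit downward */
	return ++n;
}

int main(void)
{
	printf("%u %u %u\n", roundup_pow2(3), roundup_pow2(100),
	       roundup_pow2(1024));	/* prints: 4 128 1024 */
	return 0;
}
```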

/**
 * i40iw_get_pbl - Retrieve pbl from a list given a virtual
 * address
 * @va: user virtual address
 * @pbl_list: pbl list to search in (QP's or CQ's)
 */
static struct i40iw_pbl *i40iw_get_pbl(unsigned long va,
				       struct list_head *pbl_list)
{
	struct i40iw_pbl *iwpbl;

	list_for_each_entry(iwpbl, pbl_list, list) {
		if (iwpbl->user_base == va) {
			list_del(&iwpbl->list);
			return iwpbl;
		}
	}
	return NULL;
}

/**
 * i40iw_free_qp_resources - free up memory resources for qp
 * @iwdev: iwarp device
 * @iwqp: qp ptr (user or kernel)
 * @qp_num: qp number assigned
 */
void i40iw_free_qp_resources(struct i40iw_device *iwdev,
			     struct i40iw_qp *iwqp,
			     u32 qp_num)
{
	i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
	if (qp_num)
		i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num);
	i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->q2_ctx_mem);
	i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->kqp.dma_mem);
	kfree(iwqp->kqp.wrid_mem);
	iwqp->kqp.wrid_mem = NULL;
	kfree(iwqp->allocated_buffer);
	iwqp->allocated_buffer = NULL;
}

/**
 * i40iw_clean_cqes - clean cq entries for qp
 * @iwqp: qp ptr (user or kernel)
 * @iwcq: cq ptr
 */
static void i40iw_clean_cqes(struct i40iw_qp *iwqp, struct i40iw_cq *iwcq)
{
	struct i40iw_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;

	ukcq->ops.iw_cq_clean(&iwqp->sc_qp.qp_uk, ukcq);
}

/**
 * i40iw_destroy_qp - destroy qp
 * @ibqp: qp's ib pointer also to get to device's qp address
 */
static int i40iw_destroy_qp(struct ib_qp *ibqp)
{
	struct i40iw_qp *iwqp = to_iwqp(ibqp);

	iwqp->destroyed = 1;

	if (iwqp->ibqp_state >= IB_QPS_INIT && iwqp->ibqp_state < IB_QPS_RTS)
		i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 0, 0, 0);

	if (!iwqp->user_mode) {
		if (iwqp->iwscq) {
			i40iw_clean_cqes(iwqp, iwqp->iwscq);
			if (iwqp->iwrcq != iwqp->iwscq)
				i40iw_clean_cqes(iwqp, iwqp->iwrcq);
		}
	}

	i40iw_rem_ref(&iwqp->ibqp);
	return 0;
}

/**
 * i40iw_setup_virt_qp - setup for allocation of virtual qp
 * @iwdev: iwarp device
 * @iwqp: qp ptr
 * @init_info: initialize info to return
 */
static int i40iw_setup_virt_qp(struct i40iw_device *iwdev,
			       struct i40iw_qp *iwqp,
			       struct i40iw_qp_init_info *init_info)
{
	struct i40iw_pbl *iwpbl = iwqp->iwpbl;
	struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;

	iwqp->page = qpmr->sq_page;
	init_info->shadow_area_pa = cpu_to_le64(qpmr->shadow);
	if (iwpbl->pbl_allocated) {
		init_info->virtual_map = true;
		init_info->sq_pa = qpmr->sq_pbl.idx;
		init_info->rq_pa = qpmr->rq_pbl.idx;
	} else {
		init_info->sq_pa = qpmr->sq_pbl.addr;
		init_info->rq_pa = qpmr->rq_pbl.addr;
	}
	return 0;
}

/**
 * i40iw_setup_kmode_qp - setup initialization for kernel mode qp
 * @iwdev: iwarp device
 * @iwqp: qp ptr (user or kernel)
 * @info: initialize info to return
 */
static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
				struct i40iw_qp *iwqp,
				struct i40iw_qp_init_info *info)
{
	struct i40iw_dma_mem *mem = &iwqp->kqp.dma_mem;
	u32 sqdepth, rqdepth;
	u32 sq_size, rq_size;
	u8 sqshift, rqshift;
	u32 size;
	enum i40iw_status_code status;
	struct i40iw_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;

	ukinfo->max_sq_frag_cnt = I40IW_MAX_WQ_FRAGMENT_COUNT;

	sq_size = i40iw_qp_roundup(ukinfo->sq_size + 1);
	rq_size = i40iw_qp_roundup(ukinfo->rq_size + 1);

	status = i40iw_get_wqe_shift(sq_size, ukinfo->max_sq_frag_cnt, &sqshift);
	if (!status)
		status = i40iw_get_wqe_shift(rq_size, ukinfo->max_rq_frag_cnt, &rqshift);

	if (status)
		return -ENOSYS;

	sqdepth = sq_size << sqshift;
	rqdepth = rq_size << rqshift;

	size = sqdepth * sizeof(struct i40iw_sq_uk_wr_trk_info) + (rqdepth << 3);
	iwqp->kqp.wrid_mem = kzalloc(size, GFP_KERNEL);

	ukinfo->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)iwqp->kqp.wrid_mem;
	if (!ukinfo->sq_wrtrk_array)
		return -ENOMEM;

	ukinfo->rq_wrid_array = (u64 *)&ukinfo->sq_wrtrk_array[sqdepth];

	size = (sqdepth + rqdepth) * I40IW_QP_WQE_MIN_SIZE;
	size += (I40IW_SHADOW_AREA_SIZE << 3);

	status = i40iw_allocate_dma_mem(iwdev->sc_dev.hw, mem, size, 256);
	if (status) {
		kfree(ukinfo->sq_wrtrk_array);
		ukinfo->sq_wrtrk_array = NULL;
		return -ENOMEM;
	}

	ukinfo->sq = mem->va;
	info->sq_pa = mem->pa;

	ukinfo->rq = &ukinfo->sq[sqdepth];
	info->rq_pa = info->sq_pa + (sqdepth * I40IW_QP_WQE_MIN_SIZE);

	ukinfo->shadow_area = ukinfo->rq[rqdepth].elem;
	info->shadow_area_pa = info->rq_pa + (rqdepth * I40IW_QP_WQE_MIN_SIZE);

	ukinfo->sq_size = sq_size;
	ukinfo->rq_size = rq_size;
	ukinfo->qp_id = iwqp->ibqp.qp_num;
	return 0;
}
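The kernel-mode path above packs the SQ ring, RQ ring, and shadow area into a single DMA allocation, with a separate kzalloc for the work-request tracking arrays. A worked sketch of the same arithmetic; the 32-byte WQE size, 8-quadword shadow area, and shift values are assumptions for illustration, not values read from the driver headers:

```c
#include <stdio.h>
#include <stdint.h>

#define WQE_MIN_SIZE	32u	/* assumed I40IW_QP_WQE_MIN_SIZE */
#define SHADOW_QWORDS	8u	/* assumed I40IW_SHADOW_AREA_SIZE */

int main(void)
{
	uint32_t sq_size = 128, rq_size = 128;	/* already rounded up */
	uint8_t sqshift = 1, rqshift = 0;	/* as if from i40iw_get_wqe_shift() */
	uint32_t sqdepth = sq_size << sqshift;
	uint32_t rqdepth = rq_size << rqshift;

	/* one contiguous DMA region: [sq wqes][rq wqes][shadow area] */
	uint32_t rq_off = sqdepth * WQE_MIN_SIZE;
	uint32_t shadow_off = (sqdepth + rqdepth) * WQE_MIN_SIZE;
	uint32_t total = shadow_off + (SHADOW_QWORDS << 3);

	printf("rq@%u shadow@%u total=%u bytes\n", rq_off, shadow_off, total);
	return 0;
}
```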

/**
 * i40iw_create_qp - create qp
 * @ibpd: ptr of pd
 * @init_attr: attributes for qp
 * @udata: user data for create qp
 */
static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata)
{
	struct i40iw_pd *iwpd = to_iwpd(ibpd);
	struct i40iw_device *iwdev = to_iwdev(ibpd->device);
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_qp *iwqp;
	struct i40iw_ucontext *ucontext;
	struct i40iw_create_qp_req req;
	struct i40iw_create_qp_resp uresp;
	u32 qp_num = 0;
	void *mem;
	enum i40iw_status_code ret;
	int err_code;
	int sq_size;
	int rq_size;
	struct i40iw_sc_qp *qp;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_qp_init_info init_info;
	struct i40iw_create_qp_info *qp_info;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;

	struct i40iw_qp_host_ctx_info *ctx_info;
	struct i40iwarp_offload_info *iwarp_info;
	unsigned long flags;

	if (init_attr->create_flags)
		return ERR_PTR(-EINVAL);
	if (init_attr->cap.max_inline_data > I40IW_MAX_INLINE_DATA_SIZE)
		init_attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;

	memset(&init_info, 0, sizeof(init_info));

	sq_size = init_attr->cap.max_send_wr;
	rq_size = init_attr->cap.max_recv_wr;

	init_info.qp_uk_init_info.sq_size = sq_size;
	init_info.qp_uk_init_info.rq_size = rq_size;
	init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
	init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;

	mem = kzalloc(sizeof(*iwqp), GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	iwqp = (struct i40iw_qp *)mem;
	qp = &iwqp->sc_qp;
	qp->back_qp = (void *)iwqp;
	qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;

	iwqp->ctx_info.iwarp_info = &iwqp->iwarp_info;

	if (i40iw_allocate_dma_mem(dev->hw,
				   &iwqp->q2_ctx_mem,
				   I40IW_Q2_BUFFER_SIZE + I40IW_QP_CTX_SIZE,
				   256)) {
		i40iw_pr_err("dma_mem failed\n");
		err_code = -ENOMEM;
		goto error;
	}

	init_info.q2 = iwqp->q2_ctx_mem.va;
	init_info.q2_pa = iwqp->q2_ctx_mem.pa;

	init_info.host_ctx = (void *)init_info.q2 + I40IW_Q2_BUFFER_SIZE;
	init_info.host_ctx_pa = init_info.q2_pa + I40IW_Q2_BUFFER_SIZE;

	err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_qps, iwdev->max_qp,
					&qp_num, &iwdev->next_qp);
	if (err_code) {
		i40iw_pr_err("qp resource\n");
		goto error;
	}

	iwqp->allocated_buffer = mem;
	iwqp->iwdev = iwdev;
	iwqp->iwpd = iwpd;
	iwqp->ibqp.qp_num = qp_num;
	qp = &iwqp->sc_qp;
	iwqp->iwscq = to_iwcq(init_attr->send_cq);
	iwqp->iwrcq = to_iwcq(init_attr->recv_cq);

	iwqp->host_ctx.va = init_info.host_ctx;
	iwqp->host_ctx.pa = init_info.host_ctx_pa;
	iwqp->host_ctx.size = I40IW_QP_CTX_SIZE;

	init_info.pd = &iwpd->sc_pd;
	init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
	iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;

	if (init_attr->qp_type != IB_QPT_RC) {
		err_code = -ENOSYS;
		goto error;
	}
	if (iwdev->push_mode)
		i40iw_alloc_push_page(iwdev, qp);
	if (udata) {
		err_code = ib_copy_from_udata(&req, udata, sizeof(req));
		if (err_code) {
			i40iw_pr_err("ib_copy_from_udata\n");
			goto error;
		}
		iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
		if (ibpd->uobject && ibpd->uobject->context) {
			iwqp->user_mode = 1;
			ucontext = to_ucontext(ibpd->uobject->context);

			if (req.user_wqe_buffers) {
				spin_lock_irqsave(
					&ucontext->qp_reg_mem_list_lock, flags);
				iwqp->iwpbl = i40iw_get_pbl(
					(unsigned long)req.user_wqe_buffers,
					&ucontext->qp_reg_mem_list);
				spin_unlock_irqrestore(
					&ucontext->qp_reg_mem_list_lock, flags);

				if (!iwqp->iwpbl) {
					err_code = -ENODATA;
					i40iw_pr_err("no pbl info\n");
					goto error;
				}
			}
		}
		err_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info);
	} else {
		err_code = i40iw_setup_kmode_qp(iwdev, iwqp, &init_info);
	}

	if (err_code) {
		i40iw_pr_err("setup qp failed\n");
		goto error;
	}

	init_info.type = I40IW_QP_TYPE_IWARP;
	ret = dev->iw_priv_qp_ops->qp_init(qp, &init_info);
	if (ret) {
		err_code = -EPROTO;
		i40iw_pr_err("qp_init fail\n");
		goto error;
	}
	ctx_info = &iwqp->ctx_info;
	iwarp_info = &iwqp->iwarp_info;
	iwarp_info->rd_enable = true;
	iwarp_info->wr_rdresp_en = true;
	if (!iwqp->user_mode)
		iwarp_info->priv_mode_en = true;
	iwarp_info->ddp_ver = 1;
	iwarp_info->rdmap_ver = 1;

	ctx_info->iwarp_info_valid = true;
	ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
	ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
	if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX) {
		ctx_info->push_mode_en = false;
	} else {
		ctx_info->push_mode_en = true;
		ctx_info->push_idx = qp->push_idx;
	}

	ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
					     (u64 *)iwqp->host_ctx.va,
					     ctx_info);
	ctx_info->iwarp_info_valid = false;
	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request) {
		err_code = -ENOMEM;
		goto error;
	}
	cqp_info = &cqp_request->info;
	qp_info = &cqp_request->info.in.u.qp_create.info;

	memset(qp_info, 0, sizeof(*qp_info));

	qp_info->cq_num_valid = true;
	qp_info->next_iwarp_state = I40IW_QP_STATE_IDLE;

	cqp_info->cqp_cmd = OP_QP_CREATE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_create.qp = qp;
	cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
	ret = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (ret) {
		i40iw_pr_err("CQP-OP QP create fail");
		err_code = -EACCES;
		goto error;
	}

	i40iw_add_ref(&iwqp->ibqp);
	spin_lock_init(&iwqp->lock);
	iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
	iwdev->qp_table[qp_num] = iwqp;
	i40iw_add_pdusecount(iwqp->iwpd);
	if (ibpd->uobject && udata) {
		memset(&uresp, 0, sizeof(uresp));
		uresp.actual_sq_size = sq_size;
		uresp.actual_rq_size = rq_size;
		uresp.qp_id = qp_num;
		uresp.push_idx = qp->push_idx;
		err_code = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (err_code) {
			i40iw_pr_err("copy_to_udata failed\n");
			i40iw_destroy_qp(&iwqp->ibqp);
			/* let the completion of the qp destroy free the qp */
			return ERR_PTR(err_code);
		}
	}

	return &iwqp->ibqp;
error:
	i40iw_free_qp_resources(iwdev, iwqp, qp_num);
	kfree(mem);
	return ERR_PTR(err_code);
}
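For reference, the userspace trigger for this path is an ordinary libibverbs QP create; the provider library marshals struct i40iw_create_qp_req into the command and unpacks struct i40iw_create_qp_resp from the reply. A minimal sketch (the sizes are arbitrary; note that only RC QPs are accepted above):

```c
#include <stddef.h>
#include <infiniband/verbs.h>

static struct ibv_qp *create_rc_qp(struct ibv_pd *pd, struct ibv_cq *cq)
{
	struct ibv_qp_init_attr attr = {
		.send_cq = cq,
		.recv_cq = cq,
		.qp_type = IBV_QPT_RC,	/* i40iw_create_qp() rejects other types */
		.cap = {
			.max_send_wr = 32,
			.max_recv_wr = 32,
			.max_send_sge = 2,
			.max_recv_sge = 2,
		},
	};

	return ibv_create_qp(pd, &attr);
}
```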

/**
 * i40iw_query_qp - query qp attributes
 * @ibqp: qp pointer
 * @attr: attributes pointer
 * @attr_mask: Not used
 * @init_attr: qp attributes to return
 */
static int i40iw_query_qp(struct ib_qp *ibqp,
			  struct ib_qp_attr *attr,
			  int attr_mask,
			  struct ib_qp_init_attr *init_attr)
{
	struct i40iw_qp *iwqp = to_iwqp(ibqp);
	struct i40iw_sc_qp *qp = &iwqp->sc_qp;

	attr->qp_access_flags = 0;
	attr->cap.max_send_wr = qp->qp_uk.sq_size;
	attr->cap.max_recv_wr = qp->qp_uk.rq_size;
	attr->cap.max_recv_sge = 1;
	attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
	init_attr->event_handler = iwqp->ibqp.event_handler;
	init_attr->qp_context = iwqp->ibqp.qp_context;
	init_attr->send_cq = iwqp->ibqp.send_cq;
	init_attr->recv_cq = iwqp->ibqp.recv_cq;
	init_attr->srq = iwqp->ibqp.srq;
	init_attr->cap = attr->cap;
	return 0;
}

/**
 * i40iw_hw_modify_qp - setup cqp for modify qp
 * @iwdev: iwarp device
 * @iwqp: qp ptr (user or kernel)
 * @info: info for modify qp
 * @wait: flag to wait or not for modify qp completion
 */
void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp,
			struct i40iw_modify_qp_info *info, bool wait)
{
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_modify_qp_info *m_info;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	m_info = &cqp_info->in.u.qp_modify.info;
	memcpy(m_info, info, sizeof(*m_info));
	cqp_info->cqp_cmd = OP_QP_MODIFY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
	cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Modify QP fail");
}

/**
 * i40iw_modify_qp - modify qp request
 * @ibqp: qp's pointer for modify
 * @attr: access attributes
 * @attr_mask: state mask
 * @udata: user data
 */
int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
	struct i40iw_qp *iwqp = to_iwqp(ibqp);
	struct i40iw_device *iwdev = iwqp->iwdev;
	struct i40iw_qp_host_ctx_info *ctx_info;
	struct i40iwarp_offload_info *iwarp_info;
	struct i40iw_modify_qp_info info;
	u8 issue_modify_qp = 0;
	u8 dont_wait = 0;
	u32 err;
	unsigned long flags;

	memset(&info, 0, sizeof(info));
	ctx_info = &iwqp->ctx_info;
	iwarp_info = &iwqp->iwarp_info;

	spin_lock_irqsave(&iwqp->lock, flags);

	if (attr_mask & IB_QP_STATE) {
		switch (attr->qp_state) {
		case IB_QPS_INIT:
		case IB_QPS_RTR:
			if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_IDLE) {
				err = -EINVAL;
				goto exit;
			}
			if (iwqp->iwarp_state == I40IW_QP_STATE_INVALID) {
				info.next_iwarp_state = I40IW_QP_STATE_IDLE;
				issue_modify_qp = 1;
			}
			break;
		case IB_QPS_RTS:
			if ((iwqp->iwarp_state > (u32)I40IW_QP_STATE_RTS) ||
			    (!iwqp->cm_id)) {
				err = -EINVAL;
				goto exit;
			}

			issue_modify_qp = 1;
			iwqp->hw_tcp_state = I40IW_TCP_STATE_ESTABLISHED;
			iwqp->hte_added = 1;
			info.next_iwarp_state = I40IW_QP_STATE_RTS;
			info.tcp_ctx_valid = true;
			info.ord_valid = true;
			info.arp_cache_idx_valid = true;
			info.cq_num_valid = true;
			break;
		case IB_QPS_SQD:
			if (iwqp->hw_iwarp_state > (u32)I40IW_QP_STATE_RTS) {
				err = 0;
				goto exit;
			}
			if ((iwqp->iwarp_state == (u32)I40IW_QP_STATE_CLOSING) ||
			    (iwqp->iwarp_state < (u32)I40IW_QP_STATE_RTS)) {
				err = 0;
				goto exit;
			}
			if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_CLOSING) {
				err = -EINVAL;
				goto exit;
			}
			info.next_iwarp_state = I40IW_QP_STATE_CLOSING;
			issue_modify_qp = 1;
			break;
		case IB_QPS_SQE:
			if (iwqp->iwarp_state >= (u32)I40IW_QP_STATE_TERMINATE) {
				err = -EINVAL;
				goto exit;
			}
			info.next_iwarp_state = I40IW_QP_STATE_TERMINATE;
			issue_modify_qp = 1;
			break;
		case IB_QPS_ERR:
		case IB_QPS_RESET:
			if (iwqp->iwarp_state == (u32)I40IW_QP_STATE_ERROR) {
				err = -EINVAL;
				goto exit;
			}
			if (iwqp->sc_qp.term_flags)
				del_timer(&iwqp->terminate_timer);
			info.next_iwarp_state = I40IW_QP_STATE_ERROR;
			if ((iwqp->hw_tcp_state > I40IW_TCP_STATE_CLOSED) &&
			    iwdev->iw_status &&
			    (iwqp->hw_tcp_state != I40IW_TCP_STATE_TIME_WAIT))
				info.reset_tcp_conn = true;
			else
				dont_wait = 1;
			issue_modify_qp = 1;
			info.next_iwarp_state = I40IW_QP_STATE_ERROR;
			break;
		default:
			err = -EINVAL;
			goto exit;
		}

		iwqp->ibqp_state = attr->qp_state;

		if (issue_modify_qp)
			iwqp->iwarp_state = info.next_iwarp_state;
		else
			info.next_iwarp_state = iwqp->iwarp_state;
	}
	if (attr_mask & IB_QP_ACCESS_FLAGS) {
		ctx_info->iwarp_info_valid = true;
		if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
			iwarp_info->wr_rdresp_en = true;
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
			iwarp_info->wr_rdresp_en = true;
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
			iwarp_info->rd_enable = true;
		if (attr->qp_access_flags & IB_ACCESS_MW_BIND)
			iwarp_info->bind_en = true;

		if (iwqp->user_mode) {
			iwarp_info->rd_enable = true;
			iwarp_info->wr_rdresp_en = true;
			iwarp_info->priv_mode_en = false;
		}
	}

	if (ctx_info->iwarp_info_valid) {
		struct i40iw_sc_dev *dev = &iwdev->sc_dev;
		int ret;

		ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
		ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
		ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
						     (u64 *)iwqp->host_ctx.va,
						     ctx_info);
		if (ret) {
			i40iw_pr_err("setting QP context\n");
			err = -EINVAL;
			goto exit;
		}
	}

	spin_unlock_irqrestore(&iwqp->lock, flags);

	if (issue_modify_qp)
		i40iw_hw_modify_qp(iwdev, iwqp, &info, true);

	if (issue_modify_qp && (iwqp->ibqp_state > IB_QPS_RTS)) {
		if (dont_wait) {
			if (iwqp->cm_id && iwqp->hw_tcp_state) {
				spin_lock_irqsave(&iwqp->lock, flags);
				iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;
				iwqp->last_aeq = I40IW_AE_RESET_SENT;
				spin_unlock_irqrestore(&iwqp->lock, flags);
			}
		}
	}
	return 0;
exit:
	spin_unlock_irqrestore(&iwqp->lock, flags);
	return err;
}

/**
 * cq_free_resources - free up resources for cq
 * @iwdev: iwarp device
 * @iwcq: cq ptr
 */
static void cq_free_resources(struct i40iw_device *iwdev, struct i40iw_cq *iwcq)
{
	struct i40iw_sc_cq *cq = &iwcq->sc_cq;

	if (!iwcq->user_mode)
		i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwcq->kmem);
	i40iw_free_resource(iwdev, iwdev->allocated_cqs, cq->cq_uk.cq_id);
}

/**
 * cq_wq_destroy - send cq destroy cqp
 * @iwdev: iwarp device
 * @cq: hardware control cq
 */
static void cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
{
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;

	cqp_info->cqp_cmd = OP_CQ_DESTROY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.cq_destroy.cq = cq;
	cqp_info->in.u.cq_destroy.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Destroy CQ fail");
}

/**
 * i40iw_destroy_cq - destroy cq
 * @ib_cq: cq pointer
 */
static int i40iw_destroy_cq(struct ib_cq *ib_cq)
{
	struct i40iw_cq *iwcq;
	struct i40iw_device *iwdev;
	struct i40iw_sc_cq *cq;

	if (!ib_cq) {
		i40iw_pr_err("ib_cq == NULL\n");
		return 0;
	}

	iwcq = to_iwcq(ib_cq);
	iwdev = to_iwdev(ib_cq->device);
	cq = &iwcq->sc_cq;
	cq_wq_destroy(iwdev, cq);
	cq_free_resources(iwdev, iwcq);
	kfree(iwcq);
	return 0;
}

/**
 * i40iw_create_cq - create cq
 * @ibdev: device pointer from stack
 * @attr: attributes for cq
 * @context: user context created during alloc
 * @udata: user data
 */
static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
				     const struct ib_cq_init_attr *attr,
				     struct ib_ucontext *context,
				     struct ib_udata *udata)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct i40iw_cq *iwcq;
	struct i40iw_pbl *iwpbl;
	u32 cq_num = 0;
	struct i40iw_sc_cq *cq;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_cq_init_info info;
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
	unsigned long flags;
	int err_code;
	int entries = attr->cqe;

	if (entries > iwdev->max_cqe)
		return ERR_PTR(-EINVAL);

	iwcq = kzalloc(sizeof(*iwcq), GFP_KERNEL);
	if (!iwcq)
		return ERR_PTR(-ENOMEM);

	memset(&info, 0, sizeof(info));

	err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_cqs,
					iwdev->max_cq, &cq_num,
					&iwdev->next_cq);
	if (err_code)
		goto error;

	cq = &iwcq->sc_cq;
	cq->back_cq = (void *)iwcq;
	spin_lock_init(&iwcq->lock);

	info.dev = dev;
	ukinfo->cq_size = max(entries, 4);
	ukinfo->cq_id = cq_num;
	iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
	info.ceqe_mask = 0;
	info.ceq_id = 0;
	info.ceq_id_valid = true;
	info.ceqe_mask = 1;
	info.type = I40IW_CQ_TYPE_IWARP;
	if (context) {
		struct i40iw_ucontext *ucontext;
		struct i40iw_create_cq_req req;
		struct i40iw_cq_mr *cqmr;

		memset(&req, 0, sizeof(req));
		iwcq->user_mode = true;
		ucontext = to_ucontext(context);
		if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req))) {
			err_code = -EFAULT;
			goto cq_free_resources;
		}

		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
		iwpbl = i40iw_get_pbl((unsigned long)req.user_cq_buffer,
				      &ucontext->cq_reg_mem_list);
		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
		if (!iwpbl) {
			err_code = -EPROTO;
			goto cq_free_resources;
		}

		iwcq->iwpbl = iwpbl;
		iwcq->cq_mem_size = 0;
		cqmr = &iwpbl->cq_mr;
		info.shadow_area_pa = cpu_to_le64(cqmr->shadow);
		if (iwpbl->pbl_allocated) {
			info.virtual_map = true;
			info.pbl_chunk_size = 1;
			info.first_pm_pbl_idx = cqmr->cq_pbl.idx;
		} else {
			info.cq_base_pa = cqmr->cq_pbl.addr;
		}
	} else {
		/* Kmode allocations */
		int rsize;
		int shadow;

		rsize = info.cq_uk_init_info.cq_size * sizeof(struct i40iw_cqe);
		rsize = round_up(rsize, 256);
		shadow = I40IW_SHADOW_AREA_SIZE << 3;
		status = i40iw_allocate_dma_mem(dev->hw, &iwcq->kmem,
						rsize + shadow, 256);
		if (status) {
			err_code = -ENOMEM;
			goto cq_free_resources;
		}
		ukinfo->cq_base = iwcq->kmem.va;
		info.cq_base_pa = iwcq->kmem.pa;
		info.shadow_area_pa = info.cq_base_pa + rsize;
		ukinfo->shadow_area = iwcq->kmem.va + rsize;
	}

	if (dev->iw_priv_cq_ops->cq_init(cq, &info)) {
		i40iw_pr_err("init cq fail\n");
		err_code = -EPROTO;
		goto cq_free_resources;
	}

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request) {
		err_code = -ENOMEM;
		goto cq_free_resources;
	}

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_CQ_CREATE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.cq_create.cq = cq;
	cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status) {
1209 | i40iw_pr_err("CQP-OP Create QP fail"); | ||
		err_code = -EPROTO;
		goto cq_free_resources;
	}

	if (context) {
		struct i40iw_create_cq_resp resp;

		memset(&resp, 0, sizeof(resp));
		resp.cq_id = info.cq_uk_init_info.cq_id;
		resp.cq_size = info.cq_uk_init_info.cq_size;
		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
			i40iw_pr_err("copy to user data\n");
			err_code = -EPROTO;
			goto cq_destroy;
		}
	}

	return (struct ib_cq *)iwcq;

cq_destroy:
	cq_wq_destroy(iwdev, cq);
cq_free_resources:
	cq_free_resources(iwdev, iwcq);
error:
	kfree(iwcq);
	return ERR_PTR(err_code);
}

/**
 * i40iw_get_user_access - get hw access from IB access
 * @acc: IB access to return hw access
 */
static inline u16 i40iw_get_user_access(int acc)
{
	u16 access = 0;

	access |= (acc & IB_ACCESS_LOCAL_WRITE) ? I40IW_ACCESS_FLAGS_LOCALWRITE : 0;
	access |= (acc & IB_ACCESS_REMOTE_WRITE) ? I40IW_ACCESS_FLAGS_REMOTEWRITE : 0;
	access |= (acc & IB_ACCESS_REMOTE_READ) ? I40IW_ACCESS_FLAGS_REMOTEREAD : 0;
	access |= (acc & IB_ACCESS_MW_BIND) ? I40IW_ACCESS_FLAGS_BIND_WINDOW : 0;
	return access;
}

/**
 * i40iw_free_stag - free stag resource
 * @iwdev: iwarp device
 * @stag: stag to free
 */
static void i40iw_free_stag(struct i40iw_device *iwdev, u32 stag)
{
	u32 stag_idx;

	stag_idx = (stag & iwdev->mr_stagmask) >> I40IW_CQPSQ_STAG_IDX_SHIFT;
	i40iw_free_resource(iwdev, iwdev->allocated_mrs, stag_idx);
}

/**
 * i40iw_create_stag - create random stag
 * @iwdev: iwarp device
 */
static u32 i40iw_create_stag(struct i40iw_device *iwdev)
{
	u32 stag = 0;
	u32 stag_index = 0;
	u32 next_stag_index;
	u32 driver_key;
	u32 random;
	u8 consumer_key;
	int ret;

	get_random_bytes(&random, sizeof(random));
	consumer_key = (u8)random;

	driver_key = random & ~iwdev->mr_stagmask;
	next_stag_index = (random & iwdev->mr_stagmask) >> 8;
	next_stag_index %= iwdev->max_mr;

	ret = i40iw_alloc_resource(iwdev,
				   iwdev->allocated_mrs, iwdev->max_mr,
				   &stag_index, &next_stag_index);
	if (!ret) {
		stag = stag_index << I40IW_CQPSQ_STAG_IDX_SHIFT;
		stag |= driver_key;
		stag += (u32)consumer_key;
	}
	return stag;
}
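A stag therefore decomposes into three fields: the allocated resource index shifted into the masked bits, a random driver key in the bits outside the mask, and a random 8-bit consumer key added at the bottom. A standalone illustration; the mask and shift values below are assumptions, not the driver's actual constants:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t stagmask = 0x03ffff00;	/* assumed iwdev->mr_stagmask */
	uint32_t random = 0xdeadbeef;	/* stand-in for get_random_bytes() */
	uint8_t consumer_key = (uint8_t)random;
	uint32_t driver_key = random & ~stagmask;
	uint32_t stag_index = 0x42;	/* as if from i40iw_alloc_resource() */

	/* assumed I40IW_CQPSQ_STAG_IDX_SHIFT == 8 */
	uint32_t stag = (stag_index << 8) | driver_key;

	stag += (uint32_t)consumer_key;
	printf("stag = 0x%08x\n", stag);
	return 0;
}
```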

/**
 * i40iw_next_pbl_addr - Get next pbl address
 * @palloc: Pointer to allocated pbles
 * @pbl: pointer to a pble
 * @pinfo: info pointer
 * @idx: index
 */
static inline u64 *i40iw_next_pbl_addr(struct i40iw_pble_alloc *palloc,
				       u64 *pbl,
				       struct i40iw_pble_info **pinfo,
				       u32 *idx)
{
	*idx += 1;
	if ((!(*pinfo)) || (*idx != (*pinfo)->cnt))
		return ++pbl;
	*idx = 0;
	(*pinfo)++;
	return (u64 *)(*pinfo)->addr;
}

/**
 * i40iw_copy_user_pgaddrs - copy user page addresses into pbles
 * @iwmr: iwmr for IB's user page addresses
 * @pbl: pble pointer to save 1 level or 0 level pble
 * @level: indicates level 0, 1 or 2
 */
static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
				    u64 *pbl,
				    enum i40iw_pble_level level)
{
	struct ib_umem *region = iwmr->region;
	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
	int chunk_pages, entry, pg_shift, i;
	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
	struct i40iw_pble_info *pinfo;
	struct scatterlist *sg;
	u32 idx = 0;

	pinfo = (level == I40IW_LEVEL_1) ? NULL : palloc->level2.leaf;
	pg_shift = ffs(region->page_size) - 1;
	for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
		chunk_pages = sg_dma_len(sg) >> pg_shift;
		if ((iwmr->type == IW_MEMREG_TYPE_QP) &&
		    !iwpbl->qp_mr.sq_page)
			iwpbl->qp_mr.sq_page = sg_page(sg);
		for (i = 0; i < chunk_pages; i++) {
			*pbl = cpu_to_le64(sg_dma_address(sg) + region->page_size * i);
			pbl = i40iw_next_pbl_addr(palloc, pbl, &pinfo, &idx);
		}
	}
}

/**
 * i40iw_setup_pbles - copy user page addresses to pbles
 * @iwdev: iwarp device
 * @iwmr: mr pointer for this memory registration
 * @use_pbles: flag to use pbles or direct memory (level 0)
 */
1356 | static int i40iw_setup_pbles(struct i40iw_device *iwdev, | ||
1357 | struct i40iw_mr *iwmr, | ||
1358 | bool use_pbles) | ||
1359 | { | ||
1360 | struct i40iw_pbl *iwpbl = &iwmr->iwpbl; | ||
1361 | struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc; | ||
1362 | struct i40iw_pble_info *pinfo; | ||
1363 | u64 *pbl; | ||
1364 | enum i40iw_status_code status; | ||
1365 | enum i40iw_pble_level level = I40IW_LEVEL_1; | ||
1366 | |||
1367 | if (!use_pbles && (iwmr->page_cnt > MAX_SAVE_PAGE_ADDRS)) | ||
1368 | return -ENOMEM; | ||
1369 | |||
1370 | if (use_pbles) { | ||
1371 | mutex_lock(&iwdev->pbl_mutex); | ||
1372 | status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt); | ||
1373 | mutex_unlock(&iwdev->pbl_mutex); | ||
1374 | if (status) | ||
1375 | return -ENOMEM; | ||
1376 | |||
1377 | iwpbl->pbl_allocated = true; | ||
1378 | level = palloc->level; | ||
1379 | pinfo = (level == I40IW_LEVEL_1) ? &palloc->level1 : palloc->level2.leaf; | ||
1380 | pbl = (u64 *)pinfo->addr; | ||
1381 | } else { | ||
1382 | pbl = iwmr->pgaddrmem; | ||
1383 | } | ||
1384 | |||
1385 | i40iw_copy_user_pgaddrs(iwmr, pbl, level); | ||
1386 | return 0; | ||
1387 | } | ||
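i40iw_setup_pbles() thus has two paths: up to MAX_SAVE_PAGE_ADDRS page addresses (4, per i40iw_verbs.h below) fit directly in iwmr->pgaddrmem as a level-0 list, while anything larger must be carved from the shared pble pool under pbl_mutex. A sketch of just that decision (helper name hypothetical):

#define SKETCH_MAX_SAVE_PAGE_ADDRS 4	/* mirrors MAX_SAVE_PAGE_ADDRS */

/* Returns 0 for the in-struct level-0 list, 1 for the pble pool,
 * -1 for the combination the code above rejects with -ENOMEM. */
static int sketch_pbl_path(int use_pbles, unsigned int page_cnt)
{
	if (!use_pbles && page_cnt > SKETCH_MAX_SAVE_PAGE_ADDRS)
		return -1;
	return use_pbles ? 1 : 0;
}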
1388 | |||
1389 | /** | ||
1390 | * i40iw_handle_q_mem - handle memory for qp and cq | ||
1391 | * @iwdev: iwarp device | ||
1392 | * @req: information for q memory management | ||
1393 | * @iwpbl: pble struct | ||
1394 | * @use_pbles: flag to use pble | ||
1395 | */ | ||
1396 | static int i40iw_handle_q_mem(struct i40iw_device *iwdev, | ||
1397 | struct i40iw_mem_reg_req *req, | ||
1398 | struct i40iw_pbl *iwpbl, | ||
1399 | bool use_pbles) | ||
1400 | { | ||
1401 | struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc; | ||
1402 | struct i40iw_mr *iwmr = iwpbl->iwmr; | ||
1403 | struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr; | ||
1404 | struct i40iw_cq_mr *cqmr = &iwpbl->cq_mr; | ||
1405 | struct i40iw_hmc_pble *hmc_p; | ||
1406 | u64 *arr = iwmr->pgaddrmem; | ||
1407 | int err; | ||
1408 | int total; | ||
1409 | |||
1410 | total = req->sq_pages + req->rq_pages + req->cq_pages; | ||
1411 | |||
1412 | err = i40iw_setup_pbles(iwdev, iwmr, use_pbles); | ||
1413 | if (err) | ||
1414 | return err; | ||
1415 | if (use_pbles && (palloc->level != I40IW_LEVEL_1)) { | ||
1416 | i40iw_free_pble(iwdev->pble_rsrc, palloc); | ||
1417 | iwpbl->pbl_allocated = false; | ||
1418 | return -ENOMEM; | ||
1419 | } | ||
1420 | |||
1421 | if (use_pbles) | ||
1422 | arr = (u64 *)palloc->level1.addr; | ||
1423 | if (req->reg_type == IW_MEMREG_TYPE_QP) { | ||
1424 | hmc_p = &qpmr->sq_pbl; | ||
1425 | qpmr->shadow = (dma_addr_t)arr[total]; | ||
1426 | if (use_pbles) { | ||
1427 | hmc_p->idx = palloc->level1.idx; | ||
1428 | hmc_p = &qpmr->rq_pbl; | ||
1429 | hmc_p->idx = palloc->level1.idx + req->sq_pages; | ||
1430 | } else { | ||
1431 | hmc_p->addr = arr[0]; | ||
1432 | hmc_p = &qpmr->rq_pbl; | ||
1433 | hmc_p->addr = arr[1]; | ||
1434 | } | ||
1435 | } else { /* CQ */ | ||
1436 | hmc_p = &cqmr->cq_pbl; | ||
1437 | cqmr->shadow = (dma_addr_t)arr[total]; | ||
1438 | if (use_pbles) | ||
1439 | hmc_p->idx = palloc->level1.idx; | ||
1440 | else | ||
1441 | hmc_p->addr = arr[0]; | ||
1442 | } | ||
1443 | return err; | ||
1444 | } | ||
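For QP and CQ registrations the user submits one contiguous region whose page addresses land in arr[] in a fixed order: SQ pages first, then RQ pages, then CQ pages, with the shadow area's page at arr[total]. A sketch of the offsets the code above consumes (struct and names illustrative):

struct sketch_q_offsets {
	unsigned int sq_first;	/* arr[0], or level-1 pble idx + 0 */
	unsigned int rq_first;	/* idx + sq_pages, as set above    */
	unsigned int shadow;	/* arr[sq + rq + cq]               */
};

static struct sketch_q_offsets sketch_qp_offsets(unsigned int sq_pages,
						 unsigned int rq_pages,
						 unsigned int cq_pages)
{
	struct sketch_q_offsets o = {
		.sq_first = 0,
		.rq_first = sq_pages,
		.shadow   = sq_pages + rq_pages + cq_pages,
	};
	return o;
}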
1445 | |||
1446 | /** | ||
1447 | * i40iw_hwreg_mr - send cqp command for memory registration | ||
1448 | * @iwdev: iwarp device | ||
1449 | * @iwmr: iwarp mr pointer | ||
1450 | * @access: access for MR | ||
1451 | */ | ||
1452 | static int i40iw_hwreg_mr(struct i40iw_device *iwdev, | ||
1453 | struct i40iw_mr *iwmr, | ||
1454 | u16 access) | ||
1455 | { | ||
1456 | struct i40iw_pbl *iwpbl = &iwmr->iwpbl; | ||
1457 | struct i40iw_reg_ns_stag_info *stag_info; | ||
1458 | struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd); | ||
1459 | struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc; | ||
1460 | enum i40iw_status_code status; | ||
1461 | int err = 0; | ||
1462 | struct i40iw_cqp_request *cqp_request; | ||
1463 | struct cqp_commands_info *cqp_info; | ||
1464 | |||
1465 | cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true); | ||
1466 | if (!cqp_request) | ||
1467 | return -ENOMEM; | ||
1468 | |||
1469 | cqp_info = &cqp_request->info; | ||
1470 | stag_info = &cqp_info->in.u.mr_reg_non_shared.info; | ||
1471 | memset(stag_info, 0, sizeof(*stag_info)); | ||
1472 | stag_info->va = (void *)(unsigned long)iwpbl->user_base; | ||
1473 | stag_info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT; | ||
1474 | stag_info->stag_key = (u8)iwmr->stag; | ||
1475 | stag_info->total_len = iwmr->length; | ||
1476 | stag_info->access_rights = access; | ||
1477 | stag_info->pd_id = iwpd->sc_pd.pd_id; | ||
1478 | stag_info->addr_type = I40IW_ADDR_TYPE_VA_BASED; | ||
1479 | |||
1480 | if (iwmr->page_cnt > 1) { | ||
1481 | if (palloc->level == I40IW_LEVEL_1) { | ||
1482 | stag_info->first_pm_pbl_index = palloc->level1.idx; | ||
1483 | stag_info->chunk_size = 1; | ||
1484 | } else { | ||
1485 | stag_info->first_pm_pbl_index = palloc->level2.root.idx; | ||
1486 | stag_info->chunk_size = 3; | ||
1487 | } | ||
1488 | } else { | ||
1489 | stag_info->reg_addr_pa = iwmr->pgaddrmem[0]; | ||
1490 | } | ||
1491 | |||
1492 | cqp_info->cqp_cmd = OP_MR_REG_NON_SHARED; | ||
1493 | cqp_info->post_sq = 1; | ||
1494 | cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->sc_dev; | ||
1495 | cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request; | ||
1496 | |||
1497 | status = i40iw_handle_cqp_op(iwdev, cqp_request); | ||
1498 | if (status) { | ||
1499 | err = -ENOMEM; | ||
1500 | i40iw_pr_err("CQP-OP MR Reg fail"); | ||
1501 | } | ||
1502 | return err; | ||
1503 | } | ||
1504 | |||
1505 | /** | ||
1506 | * i40iw_reg_user_mr - Register a user memory region | ||
1507 | * @pd: ptr of pd | ||
1508 | * @start: virtual start address | ||
1509 | * @length: length of mr | ||
1510 | * @virt: virtual address | ||
1511 | * @acc: access of mr | ||
1512 | * @udata: user data | ||
1513 | */ | ||
1514 | static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd, | ||
1515 | u64 start, | ||
1516 | u64 length, | ||
1517 | u64 virt, | ||
1518 | int acc, | ||
1519 | struct ib_udata *udata) | ||
1520 | { | ||
1521 | struct i40iw_pd *iwpd = to_iwpd(pd); | ||
1522 | struct i40iw_device *iwdev = to_iwdev(pd->device); | ||
1523 | struct i40iw_ucontext *ucontext; | ||
1524 | struct i40iw_pble_alloc *palloc; | ||
1525 | struct i40iw_pbl *iwpbl; | ||
1526 | struct i40iw_mr *iwmr; | ||
1527 | struct ib_umem *region; | ||
1528 | struct i40iw_mem_reg_req req; | ||
1529 | u32 pbl_depth = 0; | ||
1530 | u32 stag = 0; | ||
1531 | u16 access; | ||
1532 | u32 region_length; | ||
1533 | bool use_pbles = false; | ||
1534 | unsigned long flags; | ||
1535 | int err = -ENOSYS; | ||
1536 | |||
1537 | region = ib_umem_get(pd->uobject->context, start, length, acc, 0); | ||
1538 | if (IS_ERR(region)) | ||
1539 | return (struct ib_mr *)region; | ||
1540 | |||
1541 | if (ib_copy_from_udata(&req, udata, sizeof(req))) { | ||
1542 | ib_umem_release(region); | ||
1543 | return ERR_PTR(-EFAULT); | ||
1544 | } | ||
1545 | |||
1546 | iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL); | ||
1547 | if (!iwmr) { | ||
1548 | ib_umem_release(region); | ||
1549 | return ERR_PTR(-ENOMEM); | ||
1550 | } | ||
1551 | |||
1552 | iwpbl = &iwmr->iwpbl; | ||
1553 | iwpbl->iwmr = iwmr; | ||
1554 | iwmr->region = region; | ||
1555 | iwmr->ibmr.pd = pd; | ||
1556 | iwmr->ibmr.device = pd->device; | ||
1557 | ucontext = to_ucontext(pd->uobject->context); | ||
1558 | region_length = region->length + (start & 0xfff); | ||
1559 | pbl_depth = region_length >> 12; | ||
1560 | pbl_depth += (region_length & (4096 - 1)) ? 1 : 0; | ||
1561 | iwmr->length = region->length; | ||
1562 | |||
1563 | iwpbl->user_base = virt; | ||
1564 | palloc = &iwpbl->pble_alloc; | ||
1565 | |||
1566 | iwmr->type = req.reg_type; | ||
1567 | iwmr->page_cnt = pbl_depth; | ||
1568 | |||
1569 | switch (req.reg_type) { | ||
1570 | case IW_MEMREG_TYPE_QP: | ||
1571 | use_pbles = ((req.sq_pages + req.rq_pages) > 2); | ||
1572 | err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles); | ||
1573 | if (err) | ||
1574 | goto error; | ||
1575 | spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags); | ||
1576 | list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list); | ||
1577 | spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); | ||
1578 | break; | ||
1579 | case IW_MEMREG_TYPE_CQ: | ||
1580 | use_pbles = (req.cq_pages > 1); | ||
1581 | err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles); | ||
1582 | if (err) | ||
1583 | goto error; | ||
1584 | |||
1585 | spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); | ||
1586 | list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list); | ||
1587 | spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); | ||
1588 | break; | ||
1589 | case IW_MEMREG_TYPE_MEM: | ||
1590 | access = I40IW_ACCESS_FLAGS_LOCALREAD; | ||
1591 | |||
1592 | use_pbles = (iwmr->page_cnt != 1); | ||
1593 | err = i40iw_setup_pbles(iwdev, iwmr, use_pbles); | ||
1594 | if (err) | ||
1595 | goto error; | ||
1596 | |||
1597 | access |= i40iw_get_user_access(acc); | ||
1598 | stag = i40iw_create_stag(iwdev); | ||
1599 | if (!stag) { | ||
1600 | err = -ENOMEM; | ||
1601 | goto error; | ||
1602 | } | ||
1603 | |||
1604 | iwmr->stag = stag; | ||
1605 | iwmr->ibmr.rkey = stag; | ||
1606 | iwmr->ibmr.lkey = stag; | ||
1607 | |||
1608 | err = i40iw_hwreg_mr(iwdev, iwmr, access); | ||
1609 | if (err) { | ||
1610 | i40iw_free_stag(iwdev, stag); | ||
1611 | goto error; | ||
1612 | } | ||
1613 | break; | ||
1614 | default: | ||
1615 | goto error; | ||
1616 | } | ||
1617 | |||
1618 | iwmr->type = req.reg_type; | ||
1619 | if (req.reg_type == IW_MEMREG_TYPE_MEM) | ||
1620 | i40iw_add_pdusecount(iwpd); | ||
1621 | return &iwmr->ibmr; | ||
1622 | |||
1623 | error: | ||
1624 | if (palloc->level != I40IW_LEVEL_0) | ||
1625 | i40iw_free_pble(iwdev->pble_rsrc, palloc); | ||
1626 | ib_umem_release(region); | ||
1627 | kfree(iwmr); | ||
1628 | return ERR_PTR(err); | ||
1629 | } | ||
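The IW_MEMREG_TYPE_MEM branch above is what an ordinary userspace ibv_reg_mr() lands in; the QP and CQ reg types are selected by the matching userspace provider via the vendor-private udata when queue memory is registered. A minimal libibverbs caller, as a sketch with error unwinding elided and an already-opened ibv_context assumed:

#include <stdlib.h>
#include <infiniband/verbs.h>

/* Sketch: register 1MB with an i40iw (or any verbs) device.
 * 'ctx' is assumed to be an already-opened struct ibv_context;
 * cleanup on the failure paths is elided for brevity. */
struct ibv_mr *sketch_reg_1mb(struct ibv_context *ctx)
{
	struct ibv_pd *pd = ibv_alloc_pd(ctx);
	void *buf;
	struct ibv_mr *mr;

	if (!pd)
		return NULL;
	if (posix_memalign(&buf, 4096, 1 << 20))
		return NULL;
	/* Drives i40iw_reg_user_mr() via the uverbs REG_MR command. */
	mr = ibv_reg_mr(pd, buf, 1 << 20,
			IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE);
	return mr;	/* mr->lkey/rkey come from the stag allocated above */
}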
1630 | |||
1631 | /** | ||
1632 | * i40iw_reg_phys_mr - register kernel physical memory | ||
1633 | * @pd: ibpd pointer | ||
1634 | * @addr: physical address of memory to register | ||
1635 | * @size: size of memory to register | ||
1636 | * @acc: Access rights | ||
1637 | * @iova_start: start of virtual address for physical buffers | ||
1638 | */ | ||
1639 | struct ib_mr *i40iw_reg_phys_mr(struct ib_pd *pd, | ||
1640 | u64 addr, | ||
1641 | u64 size, | ||
1642 | int acc, | ||
1643 | u64 *iova_start) | ||
1644 | { | ||
1645 | struct i40iw_pd *iwpd = to_iwpd(pd); | ||
1646 | struct i40iw_device *iwdev = to_iwdev(pd->device); | ||
1647 | struct i40iw_pbl *iwpbl; | ||
1648 | struct i40iw_mr *iwmr; | ||
1649 | enum i40iw_status_code status; | ||
1650 | u32 stag; | ||
1651 | u16 access = I40IW_ACCESS_FLAGS_LOCALREAD; | ||
1652 | int ret; | ||
1653 | |||
1654 | iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL); | ||
1655 | if (!iwmr) | ||
1656 | return ERR_PTR(-ENOMEM); | ||
1657 | iwmr->ibmr.pd = pd; | ||
1658 | iwmr->ibmr.device = pd->device; | ||
1659 | iwpbl = &iwmr->iwpbl; | ||
1660 | iwpbl->iwmr = iwmr; | ||
1661 | iwmr->type = IW_MEMREG_TYPE_MEM; | ||
1662 | iwpbl->user_base = *iova_start; | ||
1663 | stag = i40iw_create_stag(iwdev); | ||
1664 | if (!stag) { | ||
1665 | ret = -EOVERFLOW; | ||
1666 | goto err; | ||
1667 | } | ||
1668 | access |= i40iw_get_user_access(acc); | ||
1669 | iwmr->stag = stag; | ||
1670 | iwmr->ibmr.rkey = stag; | ||
1671 | iwmr->ibmr.lkey = stag; | ||
1672 | iwmr->page_cnt = 1; | ||
1673 | iwmr->pgaddrmem[0] = addr; | ||
1674 | status = i40iw_hwreg_mr(iwdev, iwmr, access); | ||
1675 | if (status) { | ||
1676 | i40iw_free_stag(iwdev, stag); | ||
1677 | ret = -ENOMEM; | ||
1678 | goto err; | ||
1679 | } | ||
1680 | |||
1681 | i40iw_add_pdusecount(iwpd); | ||
1682 | return &iwmr->ibmr; | ||
1683 | err: | ||
1684 | kfree(iwmr); | ||
1685 | return ERR_PTR(ret); | ||
1686 | } | ||
1687 | |||
1688 | /** | ||
1689 | * i40iw_get_dma_mr - register physical mem | ||
1690 | * @pd: ptr of pd | ||
1691 | * @acc: access for memory | ||
1692 | */ | ||
1693 | static struct ib_mr *i40iw_get_dma_mr(struct ib_pd *pd, int acc) | ||
1694 | { | ||
1695 | u64 kva = 0; | ||
1696 | |||
1697 | return i40iw_reg_phys_mr(pd, 0, 0xffffffffffULL, acc, &kva); | ||
1698 | } | ||
1699 | |||
1700 | /** | ||
1701 | * i40iw_del_memlist - delete pbl list entries for CQ/QP | ||
1702 | * @iwmr: iwmr for IB's user page addresses | ||
1703 | * @ucontext: ptr to user context | ||
1704 | */ | ||
1705 | static void i40iw_del_memlist(struct i40iw_mr *iwmr, | ||
1706 | struct i40iw_ucontext *ucontext) | ||
1707 | { | ||
1708 | struct i40iw_pbl *iwpbl = &iwmr->iwpbl; | ||
1709 | unsigned long flags; | ||
1710 | |||
1711 | switch (iwmr->type) { | ||
1712 | case IW_MEMREG_TYPE_CQ: | ||
1713 | spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); | ||
1714 | if (!list_empty(&ucontext->cq_reg_mem_list)) | ||
1715 | list_del(&iwpbl->list); | ||
1716 | spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); | ||
1717 | break; | ||
1718 | case IW_MEMREG_TYPE_QP: | ||
1719 | spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags); | ||
1720 | if (!list_empty(&ucontext->qp_reg_mem_list)) | ||
1721 | list_del(&iwpbl->list); | ||
1722 | spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); | ||
1723 | break; | ||
1724 | default: | ||
1725 | break; | ||
1726 | } | ||
1727 | } | ||
1728 | |||
1729 | /** | ||
1730 | * i40iw_dereg_mr - deregister mr | ||
1731 | * @ib_mr: mr ptr for dereg | ||
1732 | */ | ||
1733 | static int i40iw_dereg_mr(struct ib_mr *ib_mr) | ||
1734 | { | ||
1735 | struct ib_pd *ibpd = ib_mr->pd; | ||
1736 | struct i40iw_pd *iwpd = to_iwpd(ibpd); | ||
1737 | struct i40iw_mr *iwmr = to_iwmr(ib_mr); | ||
1738 | struct i40iw_device *iwdev = to_iwdev(ib_mr->device); | ||
1739 | enum i40iw_status_code status; | ||
1740 | struct i40iw_dealloc_stag_info *info; | ||
1741 | struct i40iw_pbl *iwpbl = &iwmr->iwpbl; | ||
1742 | struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc; | ||
1743 | struct i40iw_cqp_request *cqp_request; | ||
1744 | struct cqp_commands_info *cqp_info; | ||
1745 | u32 stag_idx; | ||
1746 | |||
1747 | if (iwmr->region) | ||
1748 | ib_umem_release(iwmr->region); | ||
1749 | |||
1750 | if (iwmr->type != IW_MEMREG_TYPE_MEM) { | ||
1751 | if (ibpd->uobject) { | ||
1752 | struct i40iw_ucontext *ucontext; | ||
1753 | |||
1754 | ucontext = to_ucontext(ibpd->uobject->context); | ||
1755 | i40iw_del_memlist(iwmr, ucontext); | ||
1756 | } | ||
1757 | if (iwpbl->pbl_allocated) | ||
1758 | i40iw_free_pble(iwdev->pble_rsrc, palloc); | ||
1759 | kfree(iwpbl->iwmr); | ||
1760 | iwpbl->iwmr = NULL; | ||
1761 | return 0; | ||
1762 | } | ||
1763 | |||
1764 | cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true); | ||
1765 | if (!cqp_request) | ||
1766 | return -ENOMEM; | ||
1767 | |||
1768 | cqp_info = &cqp_request->info; | ||
1769 | info = &cqp_info->in.u.dealloc_stag.info; | ||
1770 | memset(info, 0, sizeof(*info)); | ||
1771 | |||
1772 | info->pd_id = cpu_to_le32(iwpd->sc_pd.pd_id & 0x00007fff); | ||
1773 | info->stag_idx = RS_64_1(ib_mr->rkey, I40IW_CQPSQ_STAG_IDX_SHIFT); | ||
1774 | stag_idx = info->stag_idx; | ||
1775 | info->mr = true; | ||
1776 | if (iwpbl->pbl_allocated) | ||
1777 | info->dealloc_pbl = true; | ||
1778 | |||
1779 | cqp_info->cqp_cmd = OP_DEALLOC_STAG; | ||
1780 | cqp_info->post_sq = 1; | ||
1781 | cqp_info->in.u.dealloc_stag.dev = &iwdev->sc_dev; | ||
1782 | cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request; | ||
1783 | status = i40iw_handle_cqp_op(iwdev, cqp_request); | ||
1784 | if (status) | ||
1785 | i40iw_pr_err("CQP-OP dealloc failed for stag_idx = 0x%x\n", stag_idx); | ||
1786 | i40iw_rem_pdusecount(iwpd, iwdev); | ||
1787 | i40iw_free_stag(iwdev, iwmr->stag); | ||
1788 | if (iwpbl->pbl_allocated) | ||
1789 | i40iw_free_pble(iwdev->pble_rsrc, palloc); | ||
1790 | kfree(iwmr); | ||
1791 | return 0; | ||
1792 | } | ||
1793 | |||
1794 | /** | ||
1795 | * i40iw_show_rev | ||
1796 | */ | ||
1797 | static ssize_t i40iw_show_rev(struct device *dev, | ||
1798 | struct device_attribute *attr, char *buf) | ||
1799 | { | ||
1800 | struct i40iw_ib_device *iwibdev = container_of(dev, | ||
1801 | struct i40iw_ib_device, | ||
1802 | ibdev.dev); | ||
1803 | u32 hw_rev = iwibdev->iwdev->sc_dev.hw_rev; | ||
1804 | |||
1805 | return sprintf(buf, "%x\n", hw_rev); | ||
1806 | } | ||
1807 | |||
1808 | /** | ||
1809 | * i40iw_show_fw_ver | ||
1810 | */ | ||
1811 | static ssize_t i40iw_show_fw_ver(struct device *dev, | ||
1812 | struct device_attribute *attr, char *buf) | ||
1813 | { | ||
1814 | u32 firmware_version = I40IW_FW_VERSION; | ||
1815 | |||
1816 | return sprintf(buf, "%u.%u\n", firmware_version, | ||
1817 | (firmware_version & 0x000000ff)); | ||
1818 | } | ||
1819 | |||
1820 | /** | ||
1821 | * i40iw_show_hca | ||
1822 | */ | ||
1823 | static ssize_t i40iw_show_hca(struct device *dev, | ||
1824 | struct device_attribute *attr, char *buf) | ||
1825 | { | ||
1826 | return sprintf(buf, "I40IW\n"); | ||
1827 | } | ||
1828 | |||
1829 | /** | ||
1830 | * i40iw_show_board | ||
1831 | */ | ||
1832 | static ssize_t i40iw_show_board(struct device *dev, | ||
1833 | struct device_attribute *attr, | ||
1834 | char *buf) | ||
1835 | { | ||
1836 | return sprintf(buf, "%.*s\n", 32, "I40IW Board ID"); | ||
1837 | } | ||
1838 | |||
1839 | static DEVICE_ATTR(hw_rev, S_IRUGO, i40iw_show_rev, NULL); | ||
1840 | static DEVICE_ATTR(fw_ver, S_IRUGO, i40iw_show_fw_ver, NULL); | ||
1841 | static DEVICE_ATTR(hca_type, S_IRUGO, i40iw_show_hca, NULL); | ||
1842 | static DEVICE_ATTR(board_id, S_IRUGO, i40iw_show_board, NULL); | ||
1843 | |||
1844 | static struct device_attribute *i40iw_dev_attributes[] = { | ||
1845 | &dev_attr_hw_rev, | ||
1846 | &dev_attr_fw_ver, | ||
1847 | &dev_attr_hca_type, | ||
1848 | &dev_attr_board_id | ||
1849 | }; | ||
1850 | |||
1851 | /** | ||
1852 | * i40iw_copy_sg_list - copy sg list for qp | ||
1853 | * @sg_list: destination sg list | ||
1854 | * @sgl: source sg list to copy from | ||
1855 | * @num_sges: count of sg entries | ||
1856 | */ | ||
1857 | static void i40iw_copy_sg_list(struct i40iw_sge *sg_list, struct ib_sge *sgl, int num_sges) | ||
1858 | { | ||
1859 | unsigned int i; | ||
1860 | |||
1861 | for (i = 0; (i < num_sges) && (i < I40IW_MAX_WQ_FRAGMENT_COUNT); i++) { | ||
1862 | sg_list[i].tag_off = sgl[i].addr; | ||
1863 | sg_list[i].len = sgl[i].length; | ||
1864 | sg_list[i].stag = sgl[i].lkey; | ||
1865 | } | ||
1866 | } | ||
1867 | |||
1868 | /** | ||
1869 | * i40iw_post_send - post send work requests for kernel application | ||
1870 | * @ibqp: qp ptr for wr | ||
1871 | * @ib_wr: work request ptr | ||
1872 | * @bad_wr: returns the failing wr on error | ||
1873 | */ | ||
1874 | static int i40iw_post_send(struct ib_qp *ibqp, | ||
1875 | struct ib_send_wr *ib_wr, | ||
1876 | struct ib_send_wr **bad_wr) | ||
1877 | { | ||
1878 | struct i40iw_qp *iwqp; | ||
1879 | struct i40iw_qp_uk *ukqp; | ||
1880 | struct i40iw_post_sq_info info; | ||
1881 | enum i40iw_status_code ret; | ||
1882 | int err = 0; | ||
1883 | unsigned long flags; | ||
1884 | |||
1885 | iwqp = (struct i40iw_qp *)ibqp; | ||
1886 | ukqp = &iwqp->sc_qp.qp_uk; | ||
1887 | |||
1888 | spin_lock_irqsave(&iwqp->lock, flags); | ||
1889 | while (ib_wr) { | ||
1890 | memset(&info, 0, sizeof(info)); | ||
1891 | info.wr_id = (u64)(ib_wr->wr_id); | ||
1892 | if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all) | ||
1893 | info.signaled = true; | ||
1894 | if (ib_wr->send_flags & IB_SEND_FENCE) | ||
1895 | info.read_fence = true; | ||
1896 | |||
1897 | switch (ib_wr->opcode) { | ||
1898 | case IB_WR_SEND: | ||
1899 | if (ib_wr->send_flags & IB_SEND_SOLICITED) | ||
1900 | info.op_type = I40IW_OP_TYPE_SEND_SOL; | ||
1901 | else | ||
1902 | info.op_type = I40IW_OP_TYPE_SEND; | ||
1903 | |||
1904 | if (ib_wr->send_flags & IB_SEND_INLINE) { | ||
1905 | info.op.inline_send.data = (void *)(unsigned long)ib_wr->sg_list[0].addr; | ||
1906 | info.op.inline_send.len = ib_wr->sg_list[0].length; | ||
1907 | ret = ukqp->ops.iw_inline_send(ukqp, &info, rdma_wr(ib_wr)->rkey, false); | ||
1908 | } else { | ||
1909 | info.op.send.num_sges = ib_wr->num_sge; | ||
1910 | info.op.send.sg_list = (struct i40iw_sge *)ib_wr->sg_list; | ||
1911 | ret = ukqp->ops.iw_send(ukqp, &info, rdma_wr(ib_wr)->rkey, false); | ||
1912 | } | ||
1913 | |||
1914 | if (ret) | ||
1915 | err = -EIO; | ||
1916 | break; | ||
1917 | case IB_WR_RDMA_WRITE: | ||
1918 | info.op_type = I40IW_OP_TYPE_RDMA_WRITE; | ||
1919 | |||
1920 | if (ib_wr->send_flags & IB_SEND_INLINE) { | ||
1921 | info.op.inline_rdma_write.data = (void *)(unsigned long)ib_wr->sg_list[0].addr; | ||
1922 | info.op.inline_rdma_write.len = ib_wr->sg_list[0].length; | ||
1923 | info.op.inline_rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr; | ||
1924 | info.op.inline_rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey; | ||
1925 | info.op.inline_rdma_write.rem_addr.len = ib_wr->sg_list->length; | ||
1926 | ret = ukqp->ops.iw_inline_rdma_write(ukqp, &info, false); | ||
1927 | } else { | ||
1928 | info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list; | ||
1929 | info.op.rdma_write.num_lo_sges = ib_wr->num_sge; | ||
1930 | info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr; | ||
1931 | info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey; | ||
1932 | info.op.rdma_write.rem_addr.len = ib_wr->sg_list->length; | ||
1933 | ret = ukqp->ops.iw_rdma_write(ukqp, &info, false); | ||
1934 | } | ||
1935 | |||
1936 | if (ret) | ||
1937 | err = -EIO; | ||
1938 | break; | ||
1939 | case IB_WR_RDMA_READ: | ||
1940 | info.op_type = I40IW_OP_TYPE_RDMA_READ; | ||
1941 | info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr; | ||
1942 | info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey; | ||
1943 | info.op.rdma_read.rem_addr.len = ib_wr->sg_list->length; | ||
1944 | info.op.rdma_read.lo_addr.tag_off = ib_wr->sg_list->addr; | ||
1945 | info.op.rdma_read.lo_addr.stag = ib_wr->sg_list->lkey; | ||
1946 | info.op.rdma_read.lo_addr.len = ib_wr->sg_list->length; | ||
1947 | ret = ukqp->ops.iw_rdma_read(ukqp, &info, false, false); | ||
1948 | if (ret) | ||
1949 | err = -EIO; | ||
1950 | break; | ||
1951 | default: | ||
1952 | err = -EINVAL; | ||
1953 | i40iw_pr_err(" upost_send bad opcode = 0x%x\n", | ||
1954 | ib_wr->opcode); | ||
1955 | break; | ||
1956 | } | ||
1957 | |||
1958 | if (err) | ||
1959 | break; | ||
1960 | ib_wr = ib_wr->next; | ||
1961 | } | ||
1962 | |||
1963 | if (err) | ||
1964 | *bad_wr = ib_wr; | ||
1965 | else | ||
1966 | ukqp->ops.iw_qp_post_wr(ukqp); | ||
1967 | spin_unlock_irqrestore(&iwqp->lock, flags); | ||
1968 | |||
1969 | return err; | ||
1970 | } | ||
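A kernel consumer reaches i40iw_post_send() through ib_post_send(); per the switch above, only SEND, RDMA WRITE and RDMA READ opcodes succeed, and anything else comes back as -EINVAL with *bad_wr set. A hedged sketch of posting one signaled SEND (qp, lkey and a DMA-mapped buffer are assumed to be set up already):

#include <rdma/ib_verbs.h>

/* Sketch: post a single signaled SEND on an i40iw QP. 'dma_addr',
 * 'len' and 'lkey' are assumed to describe an already-registered,
 * DMA-mapped buffer. */
static int sketch_post_one_send(struct ib_qp *qp, u64 dma_addr,
				u32 len, u32 lkey)
{
	struct ib_sge sge = {
		.addr   = dma_addr,
		.length = len,
		.lkey   = lkey,
	};
	struct ib_send_wr wr = {
		.wr_id      = 0xcafe,
		.sg_list    = &sge,
		.num_sge    = 1,
		.opcode     = IB_WR_SEND,	/* maps to I40IW_OP_TYPE_SEND */
		.send_flags = IB_SEND_SIGNALED,	/* sets info.signaled above */
	};
	struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &wr, &bad_wr);
}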
1971 | |||
1972 | /** | ||
1973 | * i40iw_post_recv - post receive wr for kernel application | ||
1974 | * @ibqp: ib qp pointer | ||
1975 | * @ib_wr: work request for receive | ||
1976 | * @bad_wr: returns the wr that caused an error | ||
1977 | */ | ||
1978 | static int i40iw_post_recv(struct ib_qp *ibqp, | ||
1979 | struct ib_recv_wr *ib_wr, | ||
1980 | struct ib_recv_wr **bad_wr) | ||
1981 | { | ||
1982 | struct i40iw_qp *iwqp; | ||
1983 | struct i40iw_qp_uk *ukqp; | ||
1984 | struct i40iw_post_rq_info post_recv; | ||
1985 | struct i40iw_sge sg_list[I40IW_MAX_WQ_FRAGMENT_COUNT]; | ||
1986 | enum i40iw_status_code ret = 0; | ||
1987 | unsigned long flags; | ||
1988 | |||
1989 | iwqp = (struct i40iw_qp *)ibqp; | ||
1990 | ukqp = &iwqp->sc_qp.qp_uk; | ||
1991 | |||
1992 | memset(&post_recv, 0, sizeof(post_recv)); | ||
1993 | spin_lock_irqsave(&iwqp->lock, flags); | ||
1994 | while (ib_wr) { | ||
1995 | post_recv.num_sges = ib_wr->num_sge; | ||
1996 | post_recv.wr_id = ib_wr->wr_id; | ||
1997 | i40iw_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge); | ||
1998 | post_recv.sg_list = sg_list; | ||
1999 | ret = ukqp->ops.iw_post_receive(ukqp, &post_recv); | ||
2000 | if (ret) { | ||
2001 | i40iw_pr_err(" post_recv err %d\n", ret); | ||
2002 | *bad_wr = ib_wr; | ||
2003 | goto out; | ||
2004 | } | ||
2005 | ib_wr = ib_wr->next; | ||
2006 | } | ||
2007 | out: | ||
2008 | spin_unlock_irqrestore(&iwqp->lock, flags); | ||
2009 | if (ret) | ||
2010 | return -ENOSYS; | ||
2011 | return 0; | ||
2012 | } | ||
2013 | |||
2014 | /** | ||
2015 | * i40iw_poll_cq - poll cq for completion (kernel apps) | ||
2016 | * @ibcq: cq to poll | ||
2017 | * @num_entries: number of entries to poll | ||
2018 | * @entry: array of ib_wc entries to fill with completions | ||
2019 | */ | ||
2020 | static int i40iw_poll_cq(struct ib_cq *ibcq, | ||
2021 | int num_entries, | ||
2022 | struct ib_wc *entry) | ||
2023 | { | ||
2024 | struct i40iw_cq *iwcq; | ||
2025 | int cqe_count = 0; | ||
2026 | struct i40iw_cq_poll_info cq_poll_info; | ||
2027 | enum i40iw_status_code ret; | ||
2028 | struct i40iw_cq_uk *ukcq; | ||
2029 | struct i40iw_sc_qp *qp; | ||
2030 | unsigned long flags; | ||
2031 | |||
2032 | iwcq = (struct i40iw_cq *)ibcq; | ||
2033 | ukcq = &iwcq->sc_cq.cq_uk; | ||
2034 | |||
2035 | spin_lock_irqsave(&iwcq->lock, flags); | ||
2036 | while (cqe_count < num_entries) { | ||
2037 | ret = ukcq->ops.iw_cq_poll_completion(ukcq, &cq_poll_info, true); | ||
2038 | if (ret == I40IW_ERR_QUEUE_EMPTY) { | ||
2039 | break; | ||
2040 | } else if (ret) { | ||
2041 | if (!cqe_count) | ||
2042 | cqe_count = -1; | ||
2043 | break; | ||
2044 | } | ||
2045 | entry->wc_flags = 0; | ||
2046 | entry->wr_id = cq_poll_info.wr_id; | ||
2047 | if (!cq_poll_info.error) | ||
2048 | entry->status = IB_WC_SUCCESS; | ||
2049 | else | ||
2050 | entry->status = IB_WC_WR_FLUSH_ERR; | ||
2051 | |||
2052 | switch (cq_poll_info.op_type) { | ||
2053 | case I40IW_OP_TYPE_RDMA_WRITE: | ||
2054 | entry->opcode = IB_WC_RDMA_WRITE; | ||
2055 | break; | ||
2056 | case I40IW_OP_TYPE_RDMA_READ_INV_STAG: | ||
2057 | case I40IW_OP_TYPE_RDMA_READ: | ||
2058 | entry->opcode = IB_WC_RDMA_READ; | ||
2059 | break; | ||
2060 | case I40IW_OP_TYPE_SEND_SOL: | ||
2061 | case I40IW_OP_TYPE_SEND_SOL_INV: | ||
2062 | case I40IW_OP_TYPE_SEND_INV: | ||
2063 | case I40IW_OP_TYPE_SEND: | ||
2064 | entry->opcode = IB_WC_SEND; | ||
2065 | break; | ||
2066 | case I40IW_OP_TYPE_REC: | ||
2067 | entry->opcode = IB_WC_RECV; | ||
2068 | break; | ||
2069 | default: | ||
2070 | entry->opcode = IB_WC_RECV; | ||
2071 | break; | ||
2072 | } | ||
2073 | |||
2074 | entry->vendor_err = | ||
2075 | cq_poll_info.major_err << 16 | cq_poll_info.minor_err; | ||
2076 | entry->ex.imm_data = 0; | ||
2077 | qp = (struct i40iw_sc_qp *)cq_poll_info.qp_handle; | ||
2078 | entry->qp = (struct ib_qp *)qp->back_qp; | ||
2079 | entry->src_qp = cq_poll_info.qp_id; | ||
2080 | entry->byte_len = cq_poll_info.bytes_xfered; | ||
2081 | entry++; | ||
2082 | cqe_count++; | ||
2083 | } | ||
2084 | spin_unlock_irqrestore(&iwcq->lock, flags); | ||
2085 | return cqe_count; | ||
2086 | } | ||
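i40iw_poll_cq() fills at most num_entries completions and returns the count, so callers drain it with the usual ib_poll_cq() loop, typically after re-arming through ib_req_notify_cq(), which lands in i40iw_req_notify_cq() below. A sketch:

#include <linux/printk.h>
#include <rdma/ib_verbs.h>

/* Sketch: drain a CQ after a completion event. */
static void sketch_drain_cq(struct ib_cq *cq)
{
	struct ib_wc wc[8];
	int n, i;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);	/* re-arm first to avoid races */
	while ((n = ib_poll_cq(cq, 8, wc)) > 0) {
		for (i = 0; i < n; i++) {
			if (wc[i].status != IB_WC_SUCCESS)
				pr_err("wr_id %llu failed: %d\n",
				       wc[i].wr_id, wc[i].status);
		}
	}
}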
2087 | |||
2088 | /** | ||
2089 | * i40iw_req_notify_cq - arm cq for kernel application | ||
2090 | * @ibcq: cq to arm | ||
2091 | * @notify_flags: notification flags | ||
2092 | */ | ||
2093 | static int i40iw_req_notify_cq(struct ib_cq *ibcq, | ||
2094 | enum ib_cq_notify_flags notify_flags) | ||
2095 | { | ||
2096 | struct i40iw_cq *iwcq; | ||
2097 | struct i40iw_cq_uk *ukcq; | ||
2098 | enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_SOLICITED; | ||
2099 | |||
2100 | iwcq = (struct i40iw_cq *)ibcq; | ||
2101 | ukcq = &iwcq->sc_cq.cq_uk; | ||
2102 | if (notify_flags == IB_CQ_NEXT_COMP) | ||
2103 | cq_notify = IW_CQ_COMPL_EVENT; | ||
2104 | ukcq->ops.iw_cq_request_notification(ukcq, cq_notify); | ||
2105 | return 0; | ||
2106 | } | ||
2107 | |||
2108 | /** | ||
2109 | * i40iw_port_immutable - return port's immutable data | ||
2110 | * @ibdev: ib dev struct | ||
2111 | * @port_num: port number | ||
2112 | * @immutable: immutable data for the port return | ||
2113 | */ | ||
2114 | static int i40iw_port_immutable(struct ib_device *ibdev, u8 port_num, | ||
2115 | struct ib_port_immutable *immutable) | ||
2116 | { | ||
2117 | struct ib_port_attr attr; | ||
2118 | int err; | ||
2119 | |||
2120 | err = i40iw_query_port(ibdev, port_num, &attr); | ||
2121 | |||
2122 | if (err) | ||
2123 | return err; | ||
2124 | |||
2125 | immutable->pkey_tbl_len = attr.pkey_tbl_len; | ||
2126 | immutable->gid_tbl_len = attr.gid_tbl_len; | ||
2127 | immutable->core_cap_flags = RDMA_CORE_PORT_IWARP; | ||
2128 | |||
2129 | return 0; | ||
2130 | } | ||
2131 | |||
2132 | /** | ||
2133 | * i40iw_get_protocol_stats - Populates the rdma_stats structure | ||
2134 | * @ibdev: ib dev struct | ||
2135 | * @stats: iw protocol stats struct | ||
2136 | */ | ||
2137 | static int i40iw_get_protocol_stats(struct ib_device *ibdev, | ||
2138 | union rdma_protocol_stats *stats) | ||
2139 | { | ||
2140 | struct i40iw_device *iwdev = to_iwdev(ibdev); | ||
2141 | struct i40iw_sc_dev *dev = &iwdev->sc_dev; | ||
2142 | struct i40iw_dev_pestat *devstat = &dev->dev_pestat; | ||
2143 | struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats; | ||
2144 | struct timespec curr_time; | ||
2145 | static struct timespec last_rd_time = {0, 0}; | ||
2146 | enum i40iw_status_code status = 0; | ||
2147 | unsigned long flags; | ||
2148 | |||
2149 | curr_time = current_kernel_time(); | ||
2150 | memset(stats, 0, sizeof(*stats)); | ||
2151 | |||
2152 | if (dev->is_pf) { | ||
2153 | spin_lock_irqsave(&devstat->stats_lock, flags); | ||
2154 | devstat->ops.iw_hw_stat_read_all(devstat, | ||
2155 | &devstat->hw_stats); | ||
2156 | spin_unlock_irqrestore(&devstat->stats_lock, flags); | ||
2157 | } else { | ||
2158 | if (((u64)curr_time.tv_sec - (u64)last_rd_time.tv_sec) > 1) | ||
2159 | status = i40iw_vchnl_vf_get_pe_stats(dev, | ||
2160 | &devstat->hw_stats); | ||
2161 | |||
2162 | if (status) | ||
2163 | return -ENOSYS; | ||
2164 | } | ||
2165 | |||
2166 | stats->iw.ipInReceives = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] + | ||
2167 | hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6RXPKTS]; | ||
2168 | stats->iw.ipInTruncatedPkts = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] + | ||
2169 | hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC]; | ||
2170 | stats->iw.ipInDiscards = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] + | ||
2171 | hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD]; | ||
2172 | stats->iw.ipOutNoRoutes = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] + | ||
2173 | hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE]; | ||
2174 | stats->iw.ipReasmReqds = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] + | ||
2175 | hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS]; | ||
2176 | stats->iw.ipFragCreates = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] + | ||
2177 | hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS]; | ||
2178 | stats->iw.ipInMcastPkts = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] + | ||
2179 | hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS]; | ||
2180 | stats->iw.ipOutMcastPkts = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] + | ||
2181 | hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6TXMCPKTS]; | ||
2182 | stats->iw.tcpOutSegs = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_TCPTXSEG]; | ||
2183 | stats->iw.tcpInSegs = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_TCPRXSEGS]; | ||
2184 | stats->iw.tcpRetransSegs = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_TCPRTXSEG]; | ||
2185 | |||
2186 | last_rd_time = curr_time; | ||
2187 | return 0; | ||
2188 | } | ||
2189 | |||
2190 | /** | ||
2191 | * i40iw_query_gid - Query port GID | ||
2192 | * @ibdev: device pointer from stack | ||
2193 | * @port: port number | ||
2194 | * @index: Entry index | ||
2195 | * @gid: Global ID | ||
2196 | */ | ||
2197 | static int i40iw_query_gid(struct ib_device *ibdev, | ||
2198 | u8 port, | ||
2199 | int index, | ||
2200 | union ib_gid *gid) | ||
2201 | { | ||
2202 | struct i40iw_device *iwdev = to_iwdev(ibdev); | ||
2203 | |||
2204 | memset(gid->raw, 0, sizeof(gid->raw)); | ||
2205 | ether_addr_copy(gid->raw, iwdev->netdev->dev_addr); | ||
2206 | return 0; | ||
2207 | } | ||
2208 | |||
2209 | /** | ||
2210 | * i40iw_modify_port - Modify port properties | ||
2211 | * @ibdev: device pointer from stack | ||
2212 | * @port: port number | ||
2213 | * @port_modify_mask: mask for port modifications | ||
2214 | * @props: port properties | ||
2215 | */ | ||
2216 | static int i40iw_modify_port(struct ib_device *ibdev, | ||
2217 | u8 port, | ||
2218 | int port_modify_mask, | ||
2219 | struct ib_port_modify *props) | ||
2220 | { | ||
2221 | return 0; | ||
2222 | } | ||
2223 | |||
2224 | /** | ||
2225 | * i40iw_query_pkey - Query partition key | ||
2226 | * @ibdev: device pointer from stack | ||
2227 | * @port: port number | ||
2228 | * @index: index of pkey | ||
2229 | * @pkey: pointer to store the pkey | ||
2230 | */ | ||
2231 | static int i40iw_query_pkey(struct ib_device *ibdev, | ||
2232 | u8 port, | ||
2233 | u16 index, | ||
2234 | u16 *pkey) | ||
2235 | { | ||
2236 | *pkey = 0; | ||
2237 | return 0; | ||
2238 | } | ||
2239 | |||
2240 | /** | ||
2241 | * i40iw_create_ah - create address handle | ||
2242 | * @ibpd: ptr of pd | ||
2243 | * @attr: address handle attributes | ||
2244 | */ | ||
2245 | static struct ib_ah *i40iw_create_ah(struct ib_pd *ibpd, | ||
2246 | struct ib_ah_attr *attr) | ||
2247 | { | ||
2248 | return ERR_PTR(-ENOSYS); | ||
2249 | } | ||
2250 | |||
2251 | /** | ||
2252 | * i40iw_destroy_ah - Destroy address handle | ||
2253 | * @ah: pointer to address handle | ||
2254 | */ | ||
2255 | static int i40iw_destroy_ah(struct ib_ah *ah) | ||
2256 | { | ||
2257 | return -ENOSYS; | ||
2258 | } | ||
2259 | |||
2260 | /** | ||
2261 | * i40iw_init_rdma_device - initialization of iwarp device | ||
2262 | * @iwdev: iwarp device | ||
2263 | */ | ||
2264 | static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev) | ||
2265 | { | ||
2266 | struct i40iw_ib_device *iwibdev; | ||
2267 | struct net_device *netdev = iwdev->netdev; | ||
2268 | struct pci_dev *pcidev = (struct pci_dev *)iwdev->hw.dev_context; | ||
2269 | |||
2270 | iwibdev = (struct i40iw_ib_device *)ib_alloc_device(sizeof(*iwibdev)); | ||
2271 | if (!iwibdev) { | ||
2272 | i40iw_pr_err("iwdev == NULL\n"); | ||
2273 | return NULL; | ||
2274 | } | ||
2275 | strlcpy(iwibdev->ibdev.name, "i40iw%d", IB_DEVICE_NAME_MAX); | ||
2276 | iwibdev->ibdev.owner = THIS_MODULE; | ||
2277 | iwdev->iwibdev = iwibdev; | ||
2278 | iwibdev->iwdev = iwdev; | ||
2279 | |||
2280 | iwibdev->ibdev.node_type = RDMA_NODE_RNIC; | ||
2281 | ether_addr_copy((u8 *)&iwibdev->ibdev.node_guid, netdev->dev_addr); | ||
2282 | |||
2283 | iwibdev->ibdev.uverbs_cmd_mask = | ||
2284 | (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | | ||
2285 | (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | | ||
2286 | (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | | ||
2287 | (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | | ||
2288 | (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | | ||
2289 | (1ull << IB_USER_VERBS_CMD_REG_MR) | | ||
2290 | (1ull << IB_USER_VERBS_CMD_DEREG_MR) | | ||
2291 | (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | | ||
2292 | (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | | ||
2293 | (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | | ||
2294 | (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) | | ||
2295 | (1ull << IB_USER_VERBS_CMD_CREATE_QP) | | ||
2296 | (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | | ||
2297 | (1ull << IB_USER_VERBS_CMD_QUERY_QP) | | ||
2298 | (1ull << IB_USER_VERBS_CMD_POLL_CQ) | | ||
2299 | (1ull << IB_USER_VERBS_CMD_CREATE_AH) | | ||
2300 | (1ull << IB_USER_VERBS_CMD_DESTROY_AH) | | ||
2301 | (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | | ||
2302 | (1ull << IB_USER_VERBS_CMD_POST_RECV) | | ||
2303 | (1ull << IB_USER_VERBS_CMD_POST_SEND); | ||
2304 | iwibdev->ibdev.phys_port_cnt = 1; | ||
2305 | iwibdev->ibdev.num_comp_vectors = 1; | ||
2306 | iwibdev->ibdev.dma_device = &pcidev->dev; | ||
2307 | iwibdev->ibdev.dev.parent = &pcidev->dev; | ||
2308 | iwibdev->ibdev.query_port = i40iw_query_port; | ||
2309 | iwibdev->ibdev.modify_port = i40iw_modify_port; | ||
2310 | iwibdev->ibdev.query_pkey = i40iw_query_pkey; | ||
2311 | iwibdev->ibdev.query_gid = i40iw_query_gid; | ||
2312 | iwibdev->ibdev.alloc_ucontext = i40iw_alloc_ucontext; | ||
2313 | iwibdev->ibdev.dealloc_ucontext = i40iw_dealloc_ucontext; | ||
2314 | iwibdev->ibdev.mmap = i40iw_mmap; | ||
2315 | iwibdev->ibdev.alloc_pd = i40iw_alloc_pd; | ||
2316 | iwibdev->ibdev.dealloc_pd = i40iw_dealloc_pd; | ||
2317 | iwibdev->ibdev.create_qp = i40iw_create_qp; | ||
2318 | iwibdev->ibdev.modify_qp = i40iw_modify_qp; | ||
2319 | iwibdev->ibdev.query_qp = i40iw_query_qp; | ||
2320 | iwibdev->ibdev.destroy_qp = i40iw_destroy_qp; | ||
2321 | iwibdev->ibdev.create_cq = i40iw_create_cq; | ||
2322 | iwibdev->ibdev.destroy_cq = i40iw_destroy_cq; | ||
2323 | iwibdev->ibdev.get_dma_mr = i40iw_get_dma_mr; | ||
2324 | iwibdev->ibdev.reg_user_mr = i40iw_reg_user_mr; | ||
2325 | iwibdev->ibdev.dereg_mr = i40iw_dereg_mr; | ||
2326 | iwibdev->ibdev.get_protocol_stats = i40iw_get_protocol_stats; | ||
2327 | iwibdev->ibdev.query_device = i40iw_query_device; | ||
2328 | iwibdev->ibdev.create_ah = i40iw_create_ah; | ||
2329 | iwibdev->ibdev.destroy_ah = i40iw_destroy_ah; | ||
2330 | iwibdev->ibdev.iwcm = kzalloc(sizeof(*iwibdev->ibdev.iwcm), GFP_KERNEL); | ||
2331 | if (!iwibdev->ibdev.iwcm) { | ||
2332 | ib_dealloc_device(&iwibdev->ibdev); | ||
2333 | i40iw_pr_err("iwcm == NULL\n"); | ||
2334 | return NULL; | ||
2335 | } | ||
2336 | |||
2337 | iwibdev->ibdev.iwcm->add_ref = i40iw_add_ref; | ||
2338 | iwibdev->ibdev.iwcm->rem_ref = i40iw_rem_ref; | ||
2339 | iwibdev->ibdev.iwcm->get_qp = i40iw_get_qp; | ||
2340 | iwibdev->ibdev.iwcm->connect = i40iw_connect; | ||
2341 | iwibdev->ibdev.iwcm->accept = i40iw_accept; | ||
2342 | iwibdev->ibdev.iwcm->reject = i40iw_reject; | ||
2343 | iwibdev->ibdev.iwcm->create_listen = i40iw_create_listen; | ||
2344 | iwibdev->ibdev.iwcm->destroy_listen = i40iw_destroy_listen; | ||
2345 | iwibdev->ibdev.get_port_immutable = i40iw_port_immutable; | ||
2346 | iwibdev->ibdev.poll_cq = i40iw_poll_cq; | ||
2347 | iwibdev->ibdev.req_notify_cq = i40iw_req_notify_cq; | ||
2348 | iwibdev->ibdev.post_send = i40iw_post_send; | ||
2349 | iwibdev->ibdev.post_recv = i40iw_post_recv; | ||
2350 | return iwibdev; | ||
2351 | } | ||
2352 | |||
2353 | /** | ||
2354 | * i40iw_port_ibevent - indicate port event | ||
2355 | * @iwdev: iwarp device | ||
2356 | */ | ||
2357 | void i40iw_port_ibevent(struct i40iw_device *iwdev) | ||
2358 | { | ||
2359 | struct i40iw_ib_device *iwibdev = iwdev->iwibdev; | ||
2360 | struct ib_event event; | ||
2361 | |||
2362 | event.device = &iwibdev->ibdev; | ||
2363 | event.element.port_num = 1; | ||
2364 | event.event = iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; | ||
2365 | ib_dispatch_event(&event); | ||
2366 | } | ||
2367 | |||
2368 | /** | ||
2369 | * i40iw_unregister_rdma_device - unregister iwarp device from IB | ||
2370 | * @iwibdev: rdma device ptr | ||
2371 | */ | ||
2372 | static void i40iw_unregister_rdma_device(struct i40iw_ib_device *iwibdev) | ||
2373 | { | ||
2374 | int i; | ||
2375 | |||
2376 | for (i = 0; i < ARRAY_SIZE(i40iw_dev_attributes); ++i) | ||
2377 | device_remove_file(&iwibdev->ibdev.dev, | ||
2378 | i40iw_dev_attributes[i]); | ||
2379 | ib_unregister_device(&iwibdev->ibdev); | ||
2380 | } | ||
2381 | |||
2382 | /** | ||
2383 | * i40iw_destroy_rdma_device - destroy rdma device and free resources | ||
2384 | * @iwibdev: IB device ptr | ||
2385 | */ | ||
2386 | void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev) | ||
2387 | { | ||
2388 | if (!iwibdev) | ||
2389 | return; | ||
2390 | |||
2391 | i40iw_unregister_rdma_device(iwibdev); | ||
2392 | kfree(iwibdev->ibdev.iwcm); | ||
2393 | iwibdev->ibdev.iwcm = NULL; | ||
2394 | ib_dealloc_device(&iwibdev->ibdev); | ||
2395 | } | ||
2396 | |||
2397 | /** | ||
2398 | * i40iw_register_rdma_device - register iwarp device to IB | ||
2399 | * @iwdev: iwarp device | ||
2400 | */ | ||
2401 | int i40iw_register_rdma_device(struct i40iw_device *iwdev) | ||
2402 | { | ||
2403 | int i, ret; | ||
2404 | struct i40iw_ib_device *iwibdev; | ||
2405 | |||
2406 | iwdev->iwibdev = i40iw_init_rdma_device(iwdev); | ||
2407 | if (!iwdev->iwibdev) | ||
2408 | return -ENOSYS; | ||
2409 | iwibdev = iwdev->iwibdev; | ||
2410 | |||
2411 | ret = ib_register_device(&iwibdev->ibdev, NULL); | ||
2412 | if (ret) | ||
2413 | goto error; | ||
2414 | |||
2415 | for (i = 0; i < ARRAY_SIZE(i40iw_dev_attributes); ++i) { | ||
2416 | ret = | ||
2417 | device_create_file(&iwibdev->ibdev.dev, | ||
2418 | i40iw_dev_attributes[i]); | ||
2419 | if (ret) { | ||
2420 | while (i > 0) { | ||
2421 | i--; | ||
2422 | device_remove_file(&iwibdev->ibdev.dev, i40iw_dev_attributes[i]); | ||
2423 | } | ||
2424 | ib_unregister_device(&iwibdev->ibdev); | ||
2425 | goto error; | ||
2426 | } | ||
2427 | } | ||
2428 | return 0; | ||
2429 | error: | ||
2430 | kfree(iwdev->iwibdev->ibdev.iwcm); | ||
2431 | iwdev->iwibdev->ibdev.iwcm = NULL; | ||
2432 | ib_dealloc_device(&iwdev->iwibdev->ibdev); | ||
2433 | return -ENOSYS; | ||
2434 | } | ||
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h new file mode 100644 index 000000000000..1101f77080e6 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h | |||
@@ -0,0 +1,173 @@ | |||
1 | /******************************************************************************* | ||
2 | * | ||
3 | * Copyright (c) 2015-2016 Intel Corporation. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenFabrics.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | * | ||
33 | *******************************************************************************/ | ||
34 | |||
35 | #ifndef I40IW_VERBS_H | ||
36 | #define I40IW_VERBS_H | ||
37 | |||
38 | struct i40iw_ucontext { | ||
39 | struct ib_ucontext ibucontext; | ||
40 | struct i40iw_device *iwdev; | ||
41 | struct list_head cq_reg_mem_list; | ||
42 | spinlock_t cq_reg_mem_list_lock; /* memory list for cq's */ | ||
43 | struct list_head qp_reg_mem_list; | ||
44 | spinlock_t qp_reg_mem_list_lock; /* memory list for qp's */ | ||
45 | }; | ||
46 | |||
47 | struct i40iw_pd { | ||
48 | struct ib_pd ibpd; | ||
49 | struct i40iw_sc_pd sc_pd; | ||
50 | atomic_t usecount; | ||
51 | }; | ||
52 | |||
53 | struct i40iw_hmc_pble { | ||
54 | union { | ||
55 | u32 idx; | ||
56 | dma_addr_t addr; | ||
57 | }; | ||
58 | }; | ||
59 | |||
60 | struct i40iw_cq_mr { | ||
61 | struct i40iw_hmc_pble cq_pbl; | ||
62 | dma_addr_t shadow; | ||
63 | }; | ||
64 | |||
65 | struct i40iw_qp_mr { | ||
66 | struct i40iw_hmc_pble sq_pbl; | ||
67 | struct i40iw_hmc_pble rq_pbl; | ||
68 | dma_addr_t shadow; | ||
69 | struct page *sq_page; | ||
70 | }; | ||
71 | |||
72 | struct i40iw_pbl { | ||
73 | struct list_head list; | ||
74 | union { | ||
75 | struct i40iw_qp_mr qp_mr; | ||
76 | struct i40iw_cq_mr cq_mr; | ||
77 | }; | ||
78 | |||
79 | bool pbl_allocated; | ||
80 | u64 user_base; | ||
81 | struct i40iw_pble_alloc pble_alloc; | ||
82 | struct i40iw_mr *iwmr; | ||
83 | }; | ||
84 | |||
85 | #define MAX_SAVE_PAGE_ADDRS 4 | ||
86 | struct i40iw_mr { | ||
87 | union { | ||
88 | struct ib_mr ibmr; | ||
89 | struct ib_mw ibmw; | ||
90 | struct ib_fmr ibfmr; | ||
91 | }; | ||
92 | struct ib_umem *region; | ||
93 | u16 type; | ||
94 | u32 page_cnt; | ||
95 | u32 stag; | ||
96 | u64 length; | ||
97 | u64 pgaddrmem[MAX_SAVE_PAGE_ADDRS]; | ||
98 | struct i40iw_pbl iwpbl; | ||
99 | }; | ||
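The to_iwmr()/to_iwpd()/to_iwdev() conversions used throughout i40iw_verbs.c rely on the layout above: the ib_* object is embedded in the driver-private struct, so container_of() recovers the wrapper from the core's pointer. A one-function sketch of the idiom (the driver defines the real helpers elsewhere):

/* Sketch of the container_of idiom behind to_iwmr(); assumes
 * <rdma/ib_verbs.h> and this header are in scope. */
static inline struct i40iw_mr *sketch_to_iwmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct i40iw_mr, ibmr);
}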
100 | |||
101 | struct i40iw_cq { | ||
102 | struct ib_cq ibcq; | ||
103 | struct i40iw_sc_cq sc_cq; | ||
104 | u16 cq_head; | ||
105 | u16 cq_size; | ||
106 | u16 cq_number; | ||
107 | bool user_mode; | ||
108 | u32 polled_completions; | ||
109 | u32 cq_mem_size; | ||
110 | struct i40iw_dma_mem kmem; | ||
111 | spinlock_t lock; /* for poll cq */ | ||
112 | struct i40iw_pbl *iwpbl; | ||
113 | }; | ||
114 | |||
115 | struct disconn_work { | ||
116 | struct work_struct work; | ||
117 | struct i40iw_qp *iwqp; | ||
118 | }; | ||
119 | |||
120 | struct iw_cm_id; | ||
121 | struct ietf_mpa_frame; | ||
122 | struct i40iw_ud_file; | ||
123 | |||
124 | struct i40iw_qp_kmode { | ||
125 | struct i40iw_dma_mem dma_mem; | ||
126 | u64 *wrid_mem; | ||
127 | }; | ||
128 | |||
129 | struct i40iw_qp { | ||
130 | struct ib_qp ibqp; | ||
131 | struct i40iw_sc_qp sc_qp; | ||
132 | struct i40iw_device *iwdev; | ||
133 | struct i40iw_cq *iwscq; | ||
134 | struct i40iw_cq *iwrcq; | ||
135 | struct i40iw_pd *iwpd; | ||
136 | struct i40iw_qp_host_ctx_info ctx_info; | ||
137 | struct i40iwarp_offload_info iwarp_info; | ||
138 | void *allocated_buffer; | ||
139 | atomic_t refcount; | ||
140 | struct iw_cm_id *cm_id; | ||
141 | void *cm_node; | ||
142 | struct ib_mr *lsmm_mr; | ||
143 | struct work_struct work; | ||
144 | enum ib_qp_state ibqp_state; | ||
145 | u32 iwarp_state; | ||
146 | u32 qp_mem_size; | ||
147 | u32 last_aeq; | ||
148 | atomic_t close_timer_started; | ||
149 | spinlock_t lock; /* for post work requests */ | ||
150 | struct i40iw_qp_context *iwqp_context; | ||
151 | void *pbl_vbase; | ||
152 | dma_addr_t pbl_pbase; | ||
153 | struct page *page; | ||
154 | u8 active_conn:1; | ||
155 | u8 user_mode:1; | ||
156 | u8 hte_added:1; | ||
157 | u8 flush_issued:1; | ||
158 | u8 destroyed:1; | ||
159 | u8 sig_all:1; | ||
160 | u8 pau_mode:1; | ||
161 | u8 rsvd:1; | ||
162 | u16 term_sq_flush_code; | ||
163 | u16 term_rq_flush_code; | ||
164 | u8 hw_iwarp_state; | ||
165 | u8 hw_tcp_state; | ||
166 | struct i40iw_qp_kmode kqp; | ||
167 | struct i40iw_dma_mem host_ctx; | ||
168 | struct timer_list terminate_timer; | ||
169 | struct i40iw_pbl *iwpbl; | ||
170 | struct i40iw_dma_mem q2_ctx_mem; | ||
171 | struct i40iw_dma_mem ietf_mem; | ||
172 | }; | ||
173 | #endif | ||