aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--drivers/infiniband/hw/qedr/main.c10
-rw-r--r--drivers/infiniband/hw/qedr/qedr.h40
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c365
-rw-r--r--drivers/infiniband/hw/qedr/verbs.h11
4 files changed, 425 insertions, 1 deletions
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 13ba47b7b99f..bfc287c7d72e 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -100,7 +100,9 @@ static int qedr_register_device(struct qedr_dev *dev)
100 QEDR_UVERBS(CREATE_QP) | 100 QEDR_UVERBS(CREATE_QP) |
101 QEDR_UVERBS(MODIFY_QP) | 101 QEDR_UVERBS(MODIFY_QP) |
102 QEDR_UVERBS(QUERY_QP) | 102 QEDR_UVERBS(QUERY_QP) |
103 QEDR_UVERBS(DESTROY_QP); 103 QEDR_UVERBS(DESTROY_QP) |
104 QEDR_UVERBS(REG_MR) |
105 QEDR_UVERBS(DEREG_MR);
104 106
105 dev->ibdev.phys_port_cnt = 1; 107 dev->ibdev.phys_port_cnt = 1;
106 dev->ibdev.num_comp_vectors = dev->num_cnq; 108 dev->ibdev.num_comp_vectors = dev->num_cnq;
@@ -133,6 +135,12 @@ static int qedr_register_device(struct qedr_dev *dev)
133 135
134 dev->ibdev.query_pkey = qedr_query_pkey; 136 dev->ibdev.query_pkey = qedr_query_pkey;
135 137
138 dev->ibdev.get_dma_mr = qedr_get_dma_mr;
139 dev->ibdev.dereg_mr = qedr_dereg_mr;
140 dev->ibdev.reg_user_mr = qedr_reg_user_mr;
141 dev->ibdev.alloc_mr = qedr_alloc_mr;
142 dev->ibdev.map_mr_sg = qedr_map_mr_sg;
143
136 dev->ibdev.dma_device = &dev->pdev->dev; 144 dev->ibdev.dma_device = &dev->pdev->dev;
137 145
138 dev->ibdev.get_link_layer = qedr_link_layer; 146 dev->ibdev.get_link_layer = qedr_link_layer;
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index e9fe941c48ad..655955697995 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -375,6 +375,41 @@ struct qedr_qp {
375 struct qedr_userq urq; 375 struct qedr_userq urq;
376}; 376};
377 377
/* Address handle: wraps the core ib_ah together with the attributes it was
 * created with.
 */
struct qedr_ah {
	struct ib_ah ibah;
	struct ib_ah_attr attr;
};

/* Kind of memory region; selects which resources exist and must be torn
 * down on de-registration.
 */
enum qedr_mr_type {
	QEDR_MR_USER,	/* registered over pinned user pages (ib_umem) */
	QEDR_MR_KERNEL,	/* kernel-registered region */
	QEDR_MR_DMA,	/* whole-memory DMA MR; has no PBL */
	QEDR_MR_FRMR,	/* fast-registration MR, mapped later via WR */
};

/* Book-keeping for the page buffer lists (PBLs) backing an MR. */
struct mr_info {
	struct qedr_pbl *pbl_table;		/* primary PBL table */
	struct qedr_pbl_info pbl_info;		/* PBL geometry (size, layering, num_pbes) */
	struct list_head free_pbl_list;		/* spare PBLs ready for reuse */
	struct list_head inuse_pbl_list;	/* PBLs waiting for invalidate completion */
	u32 completed;				/* FMR completions observed */
	u32 completed_handled;			/* completions already recycled */
};

struct qedr_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;		/* pinned user pages; user MRs only */

	struct qed_rdma_register_tid_in_params hw_mr;	/* HW TID registration params */
	enum qedr_mr_type type;

	struct qedr_dev *dev;
	struct mr_info info;

	u64 *pages;			/* FRMR page address array */
	u32 npages;			/* PBEs populated so far (see qedr_set_page) */
};
412
378static inline int qedr_get_dmac(struct qedr_dev *dev, 413static inline int qedr_get_dmac(struct qedr_dev *dev,
379 struct ib_ah_attr *ah_attr, u8 *mac_addr) 414 struct ib_ah_attr *ah_attr, u8 *mac_addr)
380{ 415{
@@ -418,4 +453,9 @@ static inline struct qedr_qp *get_qedr_qp(struct ib_qp *ibqp)
418{ 453{
419 return container_of(ibqp, struct qedr_qp, ibqp); 454 return container_of(ibqp, struct qedr_qp, ibqp);
420} 455}
456
/* Convert a core ib_mr pointer to the driver's enclosing qedr_mr. */
static inline struct qedr_mr *get_qedr_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct qedr_mr, ibmr);
}
421#endif 461#endif
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index a0d1c5fffb63..e70e808e2b9b 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -2082,3 +2082,368 @@ int qedr_destroy_qp(struct ib_qp *ibqp)
2082 2082
2083 return rc; 2083 return rc;
2084} 2084}
2085
2086static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
2087{
2088 struct qedr_pbl *pbl, *tmp;
2089
2090 if (info->pbl_table)
2091 list_add_tail(&info->pbl_table->list_entry,
2092 &info->free_pbl_list);
2093
2094 if (!list_empty(&info->inuse_pbl_list))
2095 list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
2096
2097 list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
2098 list_del(&pbl->list_entry);
2099 qedr_free_pbl(dev, &info->pbl_info, pbl);
2100 }
2101}
2102
2103static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
2104 size_t page_list_len, bool two_layered)
2105{
2106 struct qedr_pbl *tmp;
2107 int rc;
2108
2109 INIT_LIST_HEAD(&info->free_pbl_list);
2110 INIT_LIST_HEAD(&info->inuse_pbl_list);
2111
2112 rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
2113 page_list_len, two_layered);
2114 if (rc)
2115 goto done;
2116
2117 info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2118 if (!info->pbl_table) {
2119 rc = -ENOMEM;
2120 goto done;
2121 }
2122
2123 DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
2124 &info->pbl_table->pa);
2125
2126 /* in usual case we use 2 PBLs, so we add one to free
2127 * list and allocating another one
2128 */
2129 tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2130 if (!tmp) {
2131 DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
2132 goto done;
2133 }
2134
2135 list_add_tail(&tmp->list_entry, &info->free_pbl_list);
2136
2137 DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
2138
2139done:
2140 if (rc)
2141 free_mr_info(dev, info);
2142
2143 return rc;
2144}
2145
2146struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
2147 u64 usr_addr, int acc, struct ib_udata *udata)
2148{
2149 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2150 struct qedr_mr *mr;
2151 struct qedr_pd *pd;
2152 int rc = -ENOMEM;
2153
2154 pd = get_qedr_pd(ibpd);
2155 DP_DEBUG(dev, QEDR_MSG_MR,
2156 "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
2157 pd->pd_id, start, len, usr_addr, acc);
2158
2159 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
2160 return ERR_PTR(-EINVAL);
2161
2162 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2163 if (!mr)
2164 return ERR_PTR(rc);
2165
2166 mr->type = QEDR_MR_USER;
2167
2168 mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
2169 if (IS_ERR(mr->umem)) {
2170 rc = -EFAULT;
2171 goto err0;
2172 }
2173
2174 rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
2175 if (rc)
2176 goto err1;
2177
2178 qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
2179 &mr->info.pbl_info);
2180
2181 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2182 if (rc) {
2183 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2184 goto err1;
2185 }
2186
2187 /* Index only, 18 bit long, lkey = itid << 8 | key */
2188 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2189 mr->hw_mr.key = 0;
2190 mr->hw_mr.pd = pd->pd_id;
2191 mr->hw_mr.local_read = 1;
2192 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2193 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2194 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2195 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2196 mr->hw_mr.mw_bind = false;
2197 mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
2198 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2199 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2200 mr->hw_mr.page_size_log = ilog2(mr->umem->page_size);
2201 mr->hw_mr.fbo = ib_umem_offset(mr->umem);
2202 mr->hw_mr.length = len;
2203 mr->hw_mr.vaddr = usr_addr;
2204 mr->hw_mr.zbva = false;
2205 mr->hw_mr.phy_mr = false;
2206 mr->hw_mr.dma_mr = false;
2207
2208 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2209 if (rc) {
2210 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2211 goto err2;
2212 }
2213
2214 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2215 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2216 mr->hw_mr.remote_atomic)
2217 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2218
2219 DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
2220 mr->ibmr.lkey);
2221 return &mr->ibmr;
2222
2223err2:
2224 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2225err1:
2226 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2227err0:
2228 kfree(mr);
2229 return ERR_PTR(rc);
2230}
2231
2232int qedr_dereg_mr(struct ib_mr *ib_mr)
2233{
2234 struct qedr_mr *mr = get_qedr_mr(ib_mr);
2235 struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
2236 int rc = 0;
2237
2238 rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
2239 if (rc)
2240 return rc;
2241
2242 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2243
2244 if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
2245 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2246
2247 /* it could be user registered memory. */
2248 if (mr->umem)
2249 ib_umem_release(mr->umem);
2250
2251 kfree(mr);
2252
2253 return rc;
2254}
2255
2256struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len)
2257{
2258 struct qedr_pd *pd = get_qedr_pd(ibpd);
2259 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2260 struct qedr_mr *mr;
2261 int rc = -ENOMEM;
2262
2263 DP_DEBUG(dev, QEDR_MSG_MR,
2264 "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
2265 max_page_list_len);
2266
2267 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2268 if (!mr)
2269 return ERR_PTR(rc);
2270
2271 mr->dev = dev;
2272 mr->type = QEDR_MR_FRMR;
2273
2274 rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
2275 if (rc)
2276 goto err0;
2277
2278 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2279 if (rc) {
2280 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2281 goto err0;
2282 }
2283
2284 /* Index only, 18 bit long, lkey = itid << 8 | key */
2285 mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
2286 mr->hw_mr.key = 0;
2287 mr->hw_mr.pd = pd->pd_id;
2288 mr->hw_mr.local_read = 1;
2289 mr->hw_mr.local_write = 0;
2290 mr->hw_mr.remote_read = 0;
2291 mr->hw_mr.remote_write = 0;
2292 mr->hw_mr.remote_atomic = 0;
2293 mr->hw_mr.mw_bind = false;
2294 mr->hw_mr.pbl_ptr = 0;
2295 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2296 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2297 mr->hw_mr.fbo = 0;
2298 mr->hw_mr.length = 0;
2299 mr->hw_mr.vaddr = 0;
2300 mr->hw_mr.zbva = false;
2301 mr->hw_mr.phy_mr = true;
2302 mr->hw_mr.dma_mr = false;
2303
2304 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2305 if (rc) {
2306 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2307 goto err1;
2308 }
2309
2310 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2311 mr->ibmr.rkey = mr->ibmr.lkey;
2312
2313 DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
2314 return mr;
2315
2316err1:
2317 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2318err0:
2319 kfree(mr);
2320 return ERR_PTR(rc);
2321}
2322
2323struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
2324 enum ib_mr_type mr_type, u32 max_num_sg)
2325{
2326 struct qedr_dev *dev;
2327 struct qedr_mr *mr;
2328
2329 if (mr_type != IB_MR_TYPE_MEM_REG)
2330 return ERR_PTR(-EINVAL);
2331
2332 mr = __qedr_alloc_mr(ibpd, max_num_sg);
2333
2334 if (IS_ERR(mr))
2335 return ERR_PTR(-EINVAL);
2336
2337 dev = mr->dev;
2338
2339 return &mr->ibmr;
2340}
2341
2342static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
2343{
2344 struct qedr_mr *mr = get_qedr_mr(ibmr);
2345 struct qedr_pbl *pbl_table;
2346 struct regpair *pbe;
2347 u32 pbes_in_page;
2348
2349 if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
2350 DP_ERR(mr->dev, "qedr_set_page failes when %d\n", mr->npages);
2351 return -ENOMEM;
2352 }
2353
2354 DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
2355 mr->npages, addr);
2356
2357 pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
2358 pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
2359 pbe = (struct regpair *)pbl_table->va;
2360 pbe += mr->npages % pbes_in_page;
2361 pbe->lo = cpu_to_le32((u32)addr);
2362 pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
2363
2364 mr->npages++;
2365
2366 return 0;
2367}
2368
2369static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
2370{
2371 int work = info->completed - info->completed_handled - 1;
2372
2373 DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
2374 while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
2375 struct qedr_pbl *pbl;
2376
2377 /* Free all the page list that are possible to be freed
2378 * (all the ones that were invalidated), under the assumption
2379 * that if an FMR was completed successfully that means that
2380 * if there was an invalidate operation before it also ended
2381 */
2382 pbl = list_first_entry(&info->inuse_pbl_list,
2383 struct qedr_pbl, list_entry);
2384 list_del(&pbl->list_entry);
2385 list_add_tail(&pbl->list_entry, &info->free_pbl_list);
2386 info->completed_handled++;
2387 }
2388}
2389
2390int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
2391 int sg_nents, unsigned int *sg_offset)
2392{
2393 struct qedr_mr *mr = get_qedr_mr(ibmr);
2394
2395 mr->npages = 0;
2396
2397 handle_completed_mrs(mr->dev, &mr->info);
2398 return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
2399}
2400
2401struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
2402{
2403 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2404 struct qedr_pd *pd = get_qedr_pd(ibpd);
2405 struct qedr_mr *mr;
2406 int rc;
2407
2408 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2409 if (!mr)
2410 return ERR_PTR(-ENOMEM);
2411
2412 mr->type = QEDR_MR_DMA;
2413
2414 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2415 if (rc) {
2416 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2417 goto err1;
2418 }
2419
2420 /* index only, 18 bit long, lkey = itid << 8 | key */
2421 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2422 mr->hw_mr.pd = pd->pd_id;
2423 mr->hw_mr.local_read = 1;
2424 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2425 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2426 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2427 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2428 mr->hw_mr.dma_mr = true;
2429
2430 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2431 if (rc) {
2432 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2433 goto err2;
2434 }
2435
2436 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2437 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2438 mr->hw_mr.remote_atomic)
2439 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2440
2441 DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
2442 return &mr->ibmr;
2443
2444err2:
2445 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2446err1:
2447 kfree(mr);
2448 return ERR_PTR(rc);
2449}
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
index 056d6cb31fa2..4853f4af9983 100644
--- a/drivers/infiniband/hw/qedr/verbs.h
+++ b/drivers/infiniband/hw/qedr/verbs.h
@@ -70,4 +70,15 @@ int qedr_query_qp(struct ib_qp *, struct ib_qp_attr *qp_attr,
70 int qp_attr_mask, struct ib_qp_init_attr *); 70 int qp_attr_mask, struct ib_qp_init_attr *);
71int qedr_destroy_qp(struct ib_qp *ibqp); 71int qedr_destroy_qp(struct ib_qp *ibqp);
72 72
73int qedr_dereg_mr(struct ib_mr *);
74struct ib_mr *qedr_get_dma_mr(struct ib_pd *, int acc);
75
76struct ib_mr *qedr_reg_user_mr(struct ib_pd *, u64 start, u64 length,
77 u64 virt, int acc, struct ib_udata *);
78
79int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
80 int sg_nents, unsigned int *sg_offset);
81
82struct ib_mr *qedr_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
83 u32 max_num_sg);
73#endif 84#endif