86 files changed, 4609 insertions, 1410 deletions
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 0601b9daf840..c3239170d8b7 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
| @@ -349,23 +349,6 @@ static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc, | |||
| 349 | grh, &av->ah_attr); | 349 | grh, &av->ah_attr); |
| 350 | } | 350 | } |
| 351 | 351 | ||
| 352 | int ib_update_cm_av(struct ib_cm_id *id, const u8 *smac, const u8 *alt_smac) | ||
| 353 | { | ||
| 354 | struct cm_id_private *cm_id_priv; | ||
| 355 | |||
| 356 | cm_id_priv = container_of(id, struct cm_id_private, id); | ||
| 357 | |||
| 358 | if (smac != NULL) | ||
| 359 | memcpy(cm_id_priv->av.smac, smac, sizeof(cm_id_priv->av.smac)); | ||
| 360 | |||
| 361 | if (alt_smac != NULL) | ||
| 362 | memcpy(cm_id_priv->alt_av.smac, alt_smac, | ||
| 363 | sizeof(cm_id_priv->alt_av.smac)); | ||
| 364 | |||
| 365 | return 0; | ||
| 366 | } | ||
| 367 | EXPORT_SYMBOL(ib_update_cm_av); | ||
| 368 | |||
| 369 | static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av) | 352 | static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av) |
| 370 | { | 353 | { |
| 371 | struct cm_device *cm_dev; | 354 | struct cm_device *cm_dev; |
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 199958d9ddc8..42c3058e6e9c 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
| @@ -1284,15 +1284,6 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) | |||
| 1284 | struct rdma_id_private *listen_id, *conn_id; | 1284 | struct rdma_id_private *listen_id, *conn_id; |
| 1285 | struct rdma_cm_event event; | 1285 | struct rdma_cm_event event; |
| 1286 | int offset, ret; | 1286 | int offset, ret; |
| 1287 | u8 smac[ETH_ALEN]; | ||
| 1288 | u8 alt_smac[ETH_ALEN]; | ||
| 1289 | u8 *psmac = smac; | ||
| 1290 | u8 *palt_smac = alt_smac; | ||
| 1291 | int is_iboe = ((rdma_node_get_transport(cm_id->device->node_type) == | ||
| 1292 | RDMA_TRANSPORT_IB) && | ||
| 1293 | (rdma_port_get_link_layer(cm_id->device, | ||
| 1294 | ib_event->param.req_rcvd.port) == | ||
| 1295 | IB_LINK_LAYER_ETHERNET)); | ||
| 1296 | 1287 | ||
| 1297 | listen_id = cm_id->context; | 1288 | listen_id = cm_id->context; |
| 1298 | if (!cma_check_req_qp_type(&listen_id->id, ib_event)) | 1289 | if (!cma_check_req_qp_type(&listen_id->id, ib_event)) |
| @@ -1336,28 +1327,11 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) | |||
| 1336 | ret = conn_id->id.event_handler(&conn_id->id, &event); | 1327 | ret = conn_id->id.event_handler(&conn_id->id, &event); |
| 1337 | if (ret) | 1328 | if (ret) |
| 1338 | goto err3; | 1329 | goto err3; |
| 1339 | |||
| 1340 | if (is_iboe) { | ||
| 1341 | if (ib_event->param.req_rcvd.primary_path != NULL) | ||
| 1342 | rdma_addr_find_smac_by_sgid( | ||
| 1343 | &ib_event->param.req_rcvd.primary_path->sgid, | ||
| 1344 | psmac, NULL); | ||
| 1345 | else | ||
| 1346 | psmac = NULL; | ||
| 1347 | if (ib_event->param.req_rcvd.alternate_path != NULL) | ||
| 1348 | rdma_addr_find_smac_by_sgid( | ||
| 1349 | &ib_event->param.req_rcvd.alternate_path->sgid, | ||
| 1350 | palt_smac, NULL); | ||
| 1351 | else | ||
| 1352 | palt_smac = NULL; | ||
| 1353 | } | ||
| 1354 | /* | 1330 | /* |
| 1355 | * Acquire mutex to prevent user executing rdma_destroy_id() | 1331 | * Acquire mutex to prevent user executing rdma_destroy_id() |
| 1356 | * while we're accessing the cm_id. | 1332 | * while we're accessing the cm_id. |
| 1357 | */ | 1333 | */ |
| 1358 | mutex_lock(&lock); | 1334 | mutex_lock(&lock); |
| 1359 | if (is_iboe) | ||
| 1360 | ib_update_cm_av(cm_id, psmac, palt_smac); | ||
| 1361 | if (cma_comp(conn_id, RDMA_CM_CONNECT) && | 1335 | if (cma_comp(conn_id, RDMA_CM_CONNECT) && |
| 1362 | (conn_id->id.qp_type != IB_QPT_UD)) | 1336 | (conn_id->id.qp_type != IB_QPT_UD)) |
| 1363 | ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); | 1337 | ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); |
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 4c837e66516b..ab31f136d04b 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
| @@ -1022,12 +1022,21 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr) | |||
| 1022 | mad_send_wr->send_buf.mad, | 1022 | mad_send_wr->send_buf.mad, |
| 1023 | sge[0].length, | 1023 | sge[0].length, |
| 1024 | DMA_TO_DEVICE); | 1024 | DMA_TO_DEVICE); |
| 1025 | if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr))) | ||
| 1026 | return -ENOMEM; | ||
| 1027 | |||
| 1025 | mad_send_wr->header_mapping = sge[0].addr; | 1028 | mad_send_wr->header_mapping = sge[0].addr; |
| 1026 | 1029 | ||
| 1027 | sge[1].addr = ib_dma_map_single(mad_agent->device, | 1030 | sge[1].addr = ib_dma_map_single(mad_agent->device, |
| 1028 | ib_get_payload(mad_send_wr), | 1031 | ib_get_payload(mad_send_wr), |
| 1029 | sge[1].length, | 1032 | sge[1].length, |
| 1030 | DMA_TO_DEVICE); | 1033 | DMA_TO_DEVICE); |
| 1034 | if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) { | ||
| 1035 | ib_dma_unmap_single(mad_agent->device, | ||
| 1036 | mad_send_wr->header_mapping, | ||
| 1037 | sge[0].length, DMA_TO_DEVICE); | ||
| 1038 | return -ENOMEM; | ||
| 1039 | } | ||
| 1031 | mad_send_wr->payload_mapping = sge[1].addr; | 1040 | mad_send_wr->payload_mapping = sge[1].addr; |
| 1032 | 1041 | ||
| 1033 | spin_lock_irqsave(&qp_info->send_queue.lock, flags); | 1042 | spin_lock_irqsave(&qp_info->send_queue.lock, flags); |
| @@ -2590,6 +2599,11 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, | |||
| 2590 | sizeof *mad_priv - | 2599 | sizeof *mad_priv - |
| 2591 | sizeof mad_priv->header, | 2600 | sizeof mad_priv->header, |
| 2592 | DMA_FROM_DEVICE); | 2601 | DMA_FROM_DEVICE); |
| 2602 | if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device, | ||
| 2603 | sg_list.addr))) { | ||
| 2604 | ret = -ENOMEM; | ||
| 2605 | break; | ||
| 2606 | } | ||
| 2593 | mad_priv->header.mapping = sg_list.addr; | 2607 | mad_priv->header.mapping = sg_list.addr; |
| 2594 | recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list; | 2608 | recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list; |
| 2595 | mad_priv->header.mad_list.mad_queue = recv_queue; | 2609 | mad_priv->header.mad_list.mad_queue = recv_queue; |
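Note: the mad.c hunks above add the usual map-then-check pattern: each ib_dma_map_single() is now followed by ib_dma_mapping_error(), and a failure on the second buffer unwinds the first mapping before returning -ENOMEM. A minimal sketch of the same pattern outside mad.c follows; only the ib_dma_* helpers are taken from the hunk, the function and parameter names are illustrative.

	/* Sketch only: map two send buffers and unwind on failure. */
	static int map_two_buffers(struct ib_device *dev,
				   void *hdr, size_t hdr_len, u64 *hdr_dma,
				   void *data, size_t data_len, u64 *data_dma)
	{
		*hdr_dma = ib_dma_map_single(dev, hdr, hdr_len, DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(dev, *hdr_dma)))
			return -ENOMEM;

		*data_dma = ib_dma_map_single(dev, data, data_len, DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(dev, *data_dma))) {
			/* undo the first mapping before bailing out */
			ib_dma_unmap_single(dev, *hdr_dma, hdr_len, DMA_TO_DEVICE);
			return -ENOMEM;
		}
		return 0;
	}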
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index a84112322071..a3a2e9c1639b 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
| @@ -42,29 +42,29 @@ | |||
| 42 | 42 | ||
| 43 | #include "uverbs.h" | 43 | #include "uverbs.h" |
| 44 | 44 | ||
| 45 | #define IB_UMEM_MAX_PAGE_CHUNK \ | ||
| 46 | ((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) / \ | ||
| 47 | ((void *) &((struct ib_umem_chunk *) 0)->page_list[1] - \ | ||
| 48 | (void *) &((struct ib_umem_chunk *) 0)->page_list[0])) | ||
| 49 | 45 | ||
| 50 | static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty) | 46 | static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty) |
| 51 | { | 47 | { |
| 52 | struct ib_umem_chunk *chunk, *tmp; | 48 | struct scatterlist *sg; |
| 49 | struct page *page; | ||
| 53 | int i; | 50 | int i; |
| 54 | 51 | ||
| 55 | list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) { | 52 | if (umem->nmap > 0) |
| 56 | ib_dma_unmap_sg(dev, chunk->page_list, | 53 | ib_dma_unmap_sg(dev, umem->sg_head.sgl, |
| 57 | chunk->nents, DMA_BIDIRECTIONAL); | 54 | umem->nmap, |
| 58 | for (i = 0; i < chunk->nents; ++i) { | 55 | DMA_BIDIRECTIONAL); |
| 59 | struct page *page = sg_page(&chunk->page_list[i]); | ||
| 60 | 56 | ||
| 61 | if (umem->writable && dirty) | 57 | for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) { |
| 62 | set_page_dirty_lock(page); | ||
| 63 | put_page(page); | ||
| 64 | } | ||
| 65 | 58 | ||
| 66 | kfree(chunk); | 59 | page = sg_page(sg); |
| 60 | if (umem->writable && dirty) | ||
| 61 | set_page_dirty_lock(page); | ||
| 62 | put_page(page); | ||
| 67 | } | 63 | } |
| 64 | |||
| 65 | sg_free_table(&umem->sg_head); | ||
| 66 | return; | ||
| 67 | |||
| 68 | } | 68 | } |
| 69 | 69 | ||
| 70 | /** | 70 | /** |
| @@ -81,15 +81,15 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, | |||
| 81 | struct ib_umem *umem; | 81 | struct ib_umem *umem; |
| 82 | struct page **page_list; | 82 | struct page **page_list; |
| 83 | struct vm_area_struct **vma_list; | 83 | struct vm_area_struct **vma_list; |
| 84 | struct ib_umem_chunk *chunk; | ||
| 85 | unsigned long locked; | 84 | unsigned long locked; |
| 86 | unsigned long lock_limit; | 85 | unsigned long lock_limit; |
| 87 | unsigned long cur_base; | 86 | unsigned long cur_base; |
| 88 | unsigned long npages; | 87 | unsigned long npages; |
| 89 | int ret; | 88 | int ret; |
| 90 | int off; | ||
| 91 | int i; | 89 | int i; |
| 92 | DEFINE_DMA_ATTRS(attrs); | 90 | DEFINE_DMA_ATTRS(attrs); |
| 91 | struct scatterlist *sg, *sg_list_start; | ||
| 92 | int need_release = 0; | ||
| 93 | 93 | ||
| 94 | if (dmasync) | 94 | if (dmasync) |
| 95 | dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs); | 95 | dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs); |
| @@ -97,7 +97,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, | |||
| 97 | if (!can_do_mlock()) | 97 | if (!can_do_mlock()) |
| 98 | return ERR_PTR(-EPERM); | 98 | return ERR_PTR(-EPERM); |
| 99 | 99 | ||
| 100 | umem = kmalloc(sizeof *umem, GFP_KERNEL); | 100 | umem = kzalloc(sizeof *umem, GFP_KERNEL); |
| 101 | if (!umem) | 101 | if (!umem) |
| 102 | return ERR_PTR(-ENOMEM); | 102 | return ERR_PTR(-ENOMEM); |
| 103 | 103 | ||
| @@ -117,8 +117,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, | |||
| 117 | /* We assume the memory is from hugetlb until proved otherwise */ | 117 | /* We assume the memory is from hugetlb until proved otherwise */ |
| 118 | umem->hugetlb = 1; | 118 | umem->hugetlb = 1; |
| 119 | 119 | ||
| 120 | INIT_LIST_HEAD(&umem->chunk_list); | ||
| 121 | |||
| 122 | page_list = (struct page **) __get_free_page(GFP_KERNEL); | 120 | page_list = (struct page **) __get_free_page(GFP_KERNEL); |
| 123 | if (!page_list) { | 121 | if (!page_list) { |
| 124 | kfree(umem); | 122 | kfree(umem); |
| @@ -147,7 +145,18 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, | |||
| 147 | 145 | ||
| 148 | cur_base = addr & PAGE_MASK; | 146 | cur_base = addr & PAGE_MASK; |
| 149 | 147 | ||
| 150 | ret = 0; | 148 | if (npages == 0) { |
| 149 | ret = -EINVAL; | ||
| 150 | goto out; | ||
| 151 | } | ||
| 152 | |||
| 153 | ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL); | ||
| 154 | if (ret) | ||
| 155 | goto out; | ||
| 156 | |||
| 157 | need_release = 1; | ||
| 158 | sg_list_start = umem->sg_head.sgl; | ||
| 159 | |||
| 151 | while (npages) { | 160 | while (npages) { |
| 152 | ret = get_user_pages(current, current->mm, cur_base, | 161 | ret = get_user_pages(current, current->mm, cur_base, |
| 153 | min_t(unsigned long, npages, | 162 | min_t(unsigned long, npages, |
| @@ -157,54 +166,38 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, | |||
| 157 | if (ret < 0) | 166 | if (ret < 0) |
| 158 | goto out; | 167 | goto out; |
| 159 | 168 | ||
| 169 | umem->npages += ret; | ||
| 160 | cur_base += ret * PAGE_SIZE; | 170 | cur_base += ret * PAGE_SIZE; |
| 161 | npages -= ret; | 171 | npages -= ret; |
| 162 | 172 | ||
| 163 | off = 0; | 173 | for_each_sg(sg_list_start, sg, ret, i) { |
| 164 | 174 | if (vma_list && !is_vm_hugetlb_page(vma_list[i])) | |
| 165 | while (ret) { | 175 | umem->hugetlb = 0; |
| 166 | chunk = kmalloc(sizeof *chunk + sizeof (struct scatterlist) * | 176 | |
| 167 | min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK), | 177 | sg_set_page(sg, page_list[i], PAGE_SIZE, 0); |
| 168 | GFP_KERNEL); | ||
| 169 | if (!chunk) { | ||
| 170 | ret = -ENOMEM; | ||
| 171 | goto out; | ||
| 172 | } | ||
| 173 | |||
| 174 | chunk->nents = min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK); | ||
| 175 | sg_init_table(chunk->page_list, chunk->nents); | ||
| 176 | for (i = 0; i < chunk->nents; ++i) { | ||
| 177 | if (vma_list && | ||
| 178 | !is_vm_hugetlb_page(vma_list[i + off])) | ||
| 179 | umem->hugetlb = 0; | ||
| 180 | sg_set_page(&chunk->page_list[i], page_list[i + off], PAGE_SIZE, 0); | ||
| 181 | } | ||
| 182 | |||
| 183 | chunk->nmap = ib_dma_map_sg_attrs(context->device, | ||
| 184 | &chunk->page_list[0], | ||
| 185 | chunk->nents, | ||
| 186 | DMA_BIDIRECTIONAL, | ||
| 187 | &attrs); | ||
| 188 | if (chunk->nmap <= 0) { | ||
| 189 | for (i = 0; i < chunk->nents; ++i) | ||
| 190 | put_page(sg_page(&chunk->page_list[i])); | ||
| 191 | kfree(chunk); | ||
| 192 | |||
| 193 | ret = -ENOMEM; | ||
| 194 | goto out; | ||
| 195 | } | ||
| 196 | |||
| 197 | ret -= chunk->nents; | ||
| 198 | off += chunk->nents; | ||
| 199 | list_add_tail(&chunk->list, &umem->chunk_list); | ||
| 200 | } | 178 | } |
| 201 | 179 | ||
| 202 | ret = 0; | 180 | /* preparing for next loop */ |
| 181 | sg_list_start = sg; | ||
| 203 | } | 182 | } |
| 204 | 183 | ||
| 184 | umem->nmap = ib_dma_map_sg_attrs(context->device, | ||
| 185 | umem->sg_head.sgl, | ||
| 186 | umem->npages, | ||
| 187 | DMA_BIDIRECTIONAL, | ||
| 188 | &attrs); | ||
| 189 | |||
| 190 | if (umem->nmap <= 0) { | ||
| 191 | ret = -ENOMEM; | ||
| 192 | goto out; | ||
| 193 | } | ||
| 194 | |||
| 195 | ret = 0; | ||
| 196 | |||
| 205 | out: | 197 | out: |
| 206 | if (ret < 0) { | 198 | if (ret < 0) { |
| 207 | __ib_umem_release(context->device, umem, 0); | 199 | if (need_release) |
| 200 | __ib_umem_release(context->device, umem, 0); | ||
| 208 | kfree(umem); | 201 | kfree(umem); |
| 209 | } else | 202 | } else |
| 210 | current->mm->pinned_vm = locked; | 203 | current->mm->pinned_vm = locked; |
| @@ -278,17 +271,16 @@ EXPORT_SYMBOL(ib_umem_release); | |||
| 278 | 271 | ||
| 279 | int ib_umem_page_count(struct ib_umem *umem) | 272 | int ib_umem_page_count(struct ib_umem *umem) |
| 280 | { | 273 | { |
| 281 | struct ib_umem_chunk *chunk; | ||
| 282 | int shift; | 274 | int shift; |
| 283 | int i; | 275 | int i; |
| 284 | int n; | 276 | int n; |
| 277 | struct scatterlist *sg; | ||
| 285 | 278 | ||
| 286 | shift = ilog2(umem->page_size); | 279 | shift = ilog2(umem->page_size); |
| 287 | 280 | ||
| 288 | n = 0; | 281 | n = 0; |
| 289 | list_for_each_entry(chunk, &umem->chunk_list, list) | 282 | for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) |
| 290 | for (i = 0; i < chunk->nmap; ++i) | 283 | n += sg_dma_len(sg) >> shift; |
| 291 | n += sg_dma_len(&chunk->page_list[i]) >> shift; | ||
| 292 | 284 | ||
| 293 | return n; | 285 | return n; |
| 294 | } | 286 | } |
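Note: the umem.c change drops the chunk_list/IB_UMEM_MAX_PAGE_CHUNK machinery and keeps all pinned pages in a single sg_table: umem->sg_head holds the scatterlist, umem->npages the page count and umem->nmap the number of DMA-mapped entries. The driver hunks below all convert their registration loops from list_for_each_entry() over chunk_list to for_each_sg(). A hedged sketch of the resulting walk (the visit callback is hypothetical; the umem fields are the ones added above):

	/* Sketch: visit the DMA address of every page in a mapped umem. */
	static void umem_for_each_dma_page(struct ib_umem *umem,
					   void (*visit)(u64 dma_addr, void *ctx),
					   void *ctx)
	{
		struct scatterlist *sg;
		int shift = ilog2(umem->page_size);
		int entry, k, len;

		for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
			len = sg_dma_len(sg) >> shift;
			for (k = 0; k < len; ++k)
				visit(sg_dma_address(sg) + ((u64)k << shift), ctx);
		}
	}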
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 3ac795115438..92525f855d82 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
| @@ -1169,6 +1169,45 @@ int ib_dereg_mr(struct ib_mr *mr) | |||
| 1169 | } | 1169 | } |
| 1170 | EXPORT_SYMBOL(ib_dereg_mr); | 1170 | EXPORT_SYMBOL(ib_dereg_mr); |
| 1171 | 1171 | ||
| 1172 | struct ib_mr *ib_create_mr(struct ib_pd *pd, | ||
| 1173 | struct ib_mr_init_attr *mr_init_attr) | ||
| 1174 | { | ||
| 1175 | struct ib_mr *mr; | ||
| 1176 | |||
| 1177 | if (!pd->device->create_mr) | ||
| 1178 | return ERR_PTR(-ENOSYS); | ||
| 1179 | |||
| 1180 | mr = pd->device->create_mr(pd, mr_init_attr); | ||
| 1181 | |||
| 1182 | if (!IS_ERR(mr)) { | ||
| 1183 | mr->device = pd->device; | ||
| 1184 | mr->pd = pd; | ||
| 1185 | mr->uobject = NULL; | ||
| 1186 | atomic_inc(&pd->usecnt); | ||
| 1187 | atomic_set(&mr->usecnt, 0); | ||
| 1188 | } | ||
| 1189 | |||
| 1190 | return mr; | ||
| 1191 | } | ||
| 1192 | EXPORT_SYMBOL(ib_create_mr); | ||
| 1193 | |||
| 1194 | int ib_destroy_mr(struct ib_mr *mr) | ||
| 1195 | { | ||
| 1196 | struct ib_pd *pd; | ||
| 1197 | int ret; | ||
| 1198 | |||
| 1199 | if (atomic_read(&mr->usecnt)) | ||
| 1200 | return -EBUSY; | ||
| 1201 | |||
| 1202 | pd = mr->pd; | ||
| 1203 | ret = mr->device->destroy_mr(mr); | ||
| 1204 | if (!ret) | ||
| 1205 | atomic_dec(&pd->usecnt); | ||
| 1206 | |||
| 1207 | return ret; | ||
| 1208 | } | ||
| 1209 | EXPORT_SYMBOL(ib_destroy_mr); | ||
| 1210 | |||
| 1172 | struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len) | 1211 | struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len) |
| 1173 | { | 1212 | { |
| 1174 | struct ib_mr *mr; | 1213 | struct ib_mr *mr; |
| @@ -1398,3 +1437,11 @@ int ib_destroy_flow(struct ib_flow *flow_id) | |||
| 1398 | return err; | 1437 | return err; |
| 1399 | } | 1438 | } |
| 1400 | EXPORT_SYMBOL(ib_destroy_flow); | 1439 | EXPORT_SYMBOL(ib_destroy_flow); |
| 1440 | |||
| 1441 | int ib_check_mr_status(struct ib_mr *mr, u32 check_mask, | ||
| 1442 | struct ib_mr_status *mr_status) | ||
| 1443 | { | ||
| 1444 | return mr->device->check_mr_status ? | ||
| 1445 | mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS; | ||
| 1446 | } | ||
| 1447 | EXPORT_SYMBOL(ib_check_mr_status); | ||
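Note: verbs.c gains three entry points: ib_create_mr() (backed by the optional create_mr device method), ib_destroy_mr() and ib_check_mr_status(); each returns -ENOSYS when the provider does not implement the hook. A rough usage sketch follows; the ib_mr_init_attr contents and the IB_MR_CHECK_SIG_STATUS mask are assumptions about the rest of this series, not something shown in the hunk above.

	/* Sketch only: allocate, query and free an MR through the new verbs. */
	static int mr_status_example(struct ib_pd *pd)
	{
		struct ib_mr_init_attr attr = {};	/* real users fill in signature fields here */
		struct ib_mr_status status;
		struct ib_mr *mr;
		int ret;

		mr = ib_create_mr(pd, &attr);
		if (IS_ERR(mr))
			return PTR_ERR(mr);	/* -ENOSYS if ->create_mr is missing */

		/* ... post work requests that reference the MR ... */

		ret = ib_check_mr_status(mr, IB_MR_CHECK_SIG_STATUS, &status);	/* mask is an assumption */
		if (ret)
			pr_warn("check_mr_status failed: %d\n", ret);

		return ib_destroy_mr(mr);	/* -EBUSY while mr->usecnt != 0 */
	}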
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index 07eb3a8067d8..8af33cf1fc4e 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
| @@ -431,9 +431,9 @@ static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
| 431 | u64 *pages; | 431 | u64 *pages; |
| 432 | u64 kva = 0; | 432 | u64 kva = 0; |
| 433 | int shift, n, len; | 433 | int shift, n, len; |
| 434 | int i, j, k; | 434 | int i, k, entry; |
| 435 | int err = 0; | 435 | int err = 0; |
| 436 | struct ib_umem_chunk *chunk; | 436 | struct scatterlist *sg; |
| 437 | struct c2_pd *c2pd = to_c2pd(pd); | 437 | struct c2_pd *c2pd = to_c2pd(pd); |
| 438 | struct c2_mr *c2mr; | 438 | struct c2_mr *c2mr; |
| 439 | 439 | ||
| @@ -452,10 +452,7 @@ static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
| 452 | } | 452 | } |
| 453 | 453 | ||
| 454 | shift = ffs(c2mr->umem->page_size) - 1; | 454 | shift = ffs(c2mr->umem->page_size) - 1; |
| 455 | 455 | n = c2mr->umem->nmap; | |
| 456 | n = 0; | ||
| 457 | list_for_each_entry(chunk, &c2mr->umem->chunk_list, list) | ||
| 458 | n += chunk->nents; | ||
| 459 | 456 | ||
| 460 | pages = kmalloc(n * sizeof(u64), GFP_KERNEL); | 457 | pages = kmalloc(n * sizeof(u64), GFP_KERNEL); |
| 461 | if (!pages) { | 458 | if (!pages) { |
| @@ -464,14 +461,12 @@ static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
| 464 | } | 461 | } |
| 465 | 462 | ||
| 466 | i = 0; | 463 | i = 0; |
| 467 | list_for_each_entry(chunk, &c2mr->umem->chunk_list, list) { | 464 | for_each_sg(c2mr->umem->sg_head.sgl, sg, c2mr->umem->nmap, entry) { |
| 468 | for (j = 0; j < chunk->nmap; ++j) { | 465 | len = sg_dma_len(sg) >> shift; |
| 469 | len = sg_dma_len(&chunk->page_list[j]) >> shift; | 466 | for (k = 0; k < len; ++k) { |
| 470 | for (k = 0; k < len; ++k) { | 467 | pages[i++] = |
| 471 | pages[i++] = | 468 | sg_dma_address(sg) + |
| 472 | sg_dma_address(&chunk->page_list[j]) + | 469 | (c2mr->umem->page_size * k); |
| 473 | (c2mr->umem->page_size * k); | ||
| 474 | } | ||
| 475 | } | 470 | } |
| 476 | } | 471 | } |
| 477 | 472 | ||
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index d2283837d451..811b24a539c0 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
| @@ -618,14 +618,13 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
| 618 | { | 618 | { |
| 619 | __be64 *pages; | 619 | __be64 *pages; |
| 620 | int shift, n, len; | 620 | int shift, n, len; |
| 621 | int i, j, k; | 621 | int i, k, entry; |
| 622 | int err = 0; | 622 | int err = 0; |
| 623 | struct ib_umem_chunk *chunk; | ||
| 624 | struct iwch_dev *rhp; | 623 | struct iwch_dev *rhp; |
| 625 | struct iwch_pd *php; | 624 | struct iwch_pd *php; |
| 626 | struct iwch_mr *mhp; | 625 | struct iwch_mr *mhp; |
| 627 | struct iwch_reg_user_mr_resp uresp; | 626 | struct iwch_reg_user_mr_resp uresp; |
| 628 | 627 | struct scatterlist *sg; | |
| 629 | PDBG("%s ib_pd %p\n", __func__, pd); | 628 | PDBG("%s ib_pd %p\n", __func__, pd); |
| 630 | 629 | ||
| 631 | php = to_iwch_pd(pd); | 630 | php = to_iwch_pd(pd); |
| @@ -645,9 +644,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
| 645 | 644 | ||
| 646 | shift = ffs(mhp->umem->page_size) - 1; | 645 | shift = ffs(mhp->umem->page_size) - 1; |
| 647 | 646 | ||
| 648 | n = 0; | 647 | n = mhp->umem->nmap; |
| 649 | list_for_each_entry(chunk, &mhp->umem->chunk_list, list) | ||
| 650 | n += chunk->nents; | ||
| 651 | 648 | ||
| 652 | err = iwch_alloc_pbl(mhp, n); | 649 | err = iwch_alloc_pbl(mhp, n); |
| 653 | if (err) | 650 | if (err) |
| @@ -661,12 +658,10 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
| 661 | 658 | ||
| 662 | i = n = 0; | 659 | i = n = 0; |
| 663 | 660 | ||
| 664 | list_for_each_entry(chunk, &mhp->umem->chunk_list, list) | 661 | for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) { |
| 665 | for (j = 0; j < chunk->nmap; ++j) { | 662 | len = sg_dma_len(sg) >> shift; |
| 666 | len = sg_dma_len(&chunk->page_list[j]) >> shift; | ||
| 667 | for (k = 0; k < len; ++k) { | 663 | for (k = 0; k < len; ++k) { |
| 668 | pages[i++] = cpu_to_be64(sg_dma_address( | 664 | pages[i++] = cpu_to_be64(sg_dma_address(sg) + |
| 669 | &chunk->page_list[j]) + | ||
| 670 | mhp->umem->page_size * k); | 665 | mhp->umem->page_size * k); |
| 671 | if (i == PAGE_SIZE / sizeof *pages) { | 666 | if (i == PAGE_SIZE / sizeof *pages) { |
| 672 | err = iwch_write_pbl(mhp, pages, i, n); | 667 | err = iwch_write_pbl(mhp, pages, i, n); |
| @@ -676,7 +671,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
| 676 | i = 0; | 671 | i = 0; |
| 677 | } | 672 | } |
| 678 | } | 673 | } |
| 679 | } | 674 | } |
| 680 | 675 | ||
| 681 | if (i) | 676 | if (i) |
| 682 | err = iwch_write_pbl(mhp, pages, i, n); | 677 | err = iwch_write_pbl(mhp, pages, i, n); |
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index d286bdebe2ab..26046c23334c 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
| @@ -98,9 +98,9 @@ int c4iw_debug; | |||
| 98 | module_param(c4iw_debug, int, 0644); | 98 | module_param(c4iw_debug, int, 0644); |
| 99 | MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)"); | 99 | MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)"); |
| 100 | 100 | ||
| 101 | static int peer2peer; | 101 | static int peer2peer = 1; |
| 102 | module_param(peer2peer, int, 0644); | 102 | module_param(peer2peer, int, 0644); |
| 103 | MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)"); | 103 | MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=1)"); |
| 104 | 104 | ||
| 105 | static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ; | 105 | static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ; |
| 106 | module_param(p2p_type, int, 0644); | 106 | module_param(p2p_type, int, 0644); |
| @@ -400,7 +400,8 @@ static struct dst_entry *find_route(struct c4iw_dev *dev, __be32 local_ip, | |||
| 400 | n = dst_neigh_lookup(&rt->dst, &peer_ip); | 400 | n = dst_neigh_lookup(&rt->dst, &peer_ip); |
| 401 | if (!n) | 401 | if (!n) |
| 402 | return NULL; | 402 | return NULL; |
| 403 | if (!our_interface(dev, n->dev)) { | 403 | if (!our_interface(dev, n->dev) && |
| 404 | !(n->dev->flags & IFF_LOOPBACK)) { | ||
| 404 | dst_release(&rt->dst); | 405 | dst_release(&rt->dst); |
| 405 | return NULL; | 406 | return NULL; |
| 406 | } | 407 | } |
| @@ -759,8 +760,9 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb, | |||
| 759 | ep->mpa_skb = skb; | 760 | ep->mpa_skb = skb; |
| 760 | c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); | 761 | c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); |
| 761 | start_ep_timer(ep); | 762 | start_ep_timer(ep); |
| 762 | state_set(&ep->com, MPA_REQ_SENT); | 763 | __state_set(&ep->com, MPA_REQ_SENT); |
| 763 | ep->mpa_attr.initiator = 1; | 764 | ep->mpa_attr.initiator = 1; |
| 765 | ep->snd_seq += mpalen; | ||
| 764 | return; | 766 | return; |
| 765 | } | 767 | } |
| 766 | 768 | ||
| @@ -840,6 +842,7 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen) | |||
| 840 | t4_set_arp_err_handler(skb, NULL, arp_failure_discard); | 842 | t4_set_arp_err_handler(skb, NULL, arp_failure_discard); |
| 841 | BUG_ON(ep->mpa_skb); | 843 | BUG_ON(ep->mpa_skb); |
| 842 | ep->mpa_skb = skb; | 844 | ep->mpa_skb = skb; |
| 845 | ep->snd_seq += mpalen; | ||
| 843 | return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); | 846 | return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); |
| 844 | } | 847 | } |
| 845 | 848 | ||
| @@ -923,7 +926,8 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen) | |||
| 923 | skb_get(skb); | 926 | skb_get(skb); |
| 924 | t4_set_arp_err_handler(skb, NULL, arp_failure_discard); | 927 | t4_set_arp_err_handler(skb, NULL, arp_failure_discard); |
| 925 | ep->mpa_skb = skb; | 928 | ep->mpa_skb = skb; |
| 926 | state_set(&ep->com, MPA_REP_SENT); | 929 | __state_set(&ep->com, MPA_REP_SENT); |
| 930 | ep->snd_seq += mpalen; | ||
| 927 | return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); | 931 | return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); |
| 928 | } | 932 | } |
| 929 | 933 | ||
| @@ -940,6 +944,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 940 | PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid, | 944 | PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid, |
| 941 | be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn)); | 945 | be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn)); |
| 942 | 946 | ||
| 947 | mutex_lock(&ep->com.mutex); | ||
| 943 | dst_confirm(ep->dst); | 948 | dst_confirm(ep->dst); |
| 944 | 949 | ||
| 945 | /* setup the hwtid for this connection */ | 950 | /* setup the hwtid for this connection */ |
| @@ -963,17 +968,18 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 963 | send_mpa_req(ep, skb, 1); | 968 | send_mpa_req(ep, skb, 1); |
| 964 | else | 969 | else |
| 965 | send_mpa_req(ep, skb, mpa_rev); | 970 | send_mpa_req(ep, skb, mpa_rev); |
| 966 | 971 | mutex_unlock(&ep->com.mutex); | |
| 967 | return 0; | 972 | return 0; |
| 968 | } | 973 | } |
| 969 | 974 | ||
| 970 | static void close_complete_upcall(struct c4iw_ep *ep) | 975 | static void close_complete_upcall(struct c4iw_ep *ep, int status) |
| 971 | { | 976 | { |
| 972 | struct iw_cm_event event; | 977 | struct iw_cm_event event; |
| 973 | 978 | ||
| 974 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | 979 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); |
| 975 | memset(&event, 0, sizeof(event)); | 980 | memset(&event, 0, sizeof(event)); |
| 976 | event.event = IW_CM_EVENT_CLOSE; | 981 | event.event = IW_CM_EVENT_CLOSE; |
| 982 | event.status = status; | ||
| 977 | if (ep->com.cm_id) { | 983 | if (ep->com.cm_id) { |
| 978 | PDBG("close complete delivered ep %p cm_id %p tid %u\n", | 984 | PDBG("close complete delivered ep %p cm_id %p tid %u\n", |
| 979 | ep, ep->com.cm_id, ep->hwtid); | 985 | ep, ep->com.cm_id, ep->hwtid); |
| @@ -987,7 +993,6 @@ static void close_complete_upcall(struct c4iw_ep *ep) | |||
| 987 | static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) | 993 | static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) |
| 988 | { | 994 | { |
| 989 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | 995 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); |
| 990 | close_complete_upcall(ep); | ||
| 991 | state_set(&ep->com, ABORTING); | 996 | state_set(&ep->com, ABORTING); |
| 992 | set_bit(ABORT_CONN, &ep->com.history); | 997 | set_bit(ABORT_CONN, &ep->com.history); |
| 993 | return send_abort(ep, skb, gfp); | 998 | return send_abort(ep, skb, gfp); |
| @@ -1066,9 +1071,10 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status) | |||
| 1066 | } | 1071 | } |
| 1067 | } | 1072 | } |
| 1068 | 1073 | ||
| 1069 | static void connect_request_upcall(struct c4iw_ep *ep) | 1074 | static int connect_request_upcall(struct c4iw_ep *ep) |
| 1070 | { | 1075 | { |
| 1071 | struct iw_cm_event event; | 1076 | struct iw_cm_event event; |
| 1077 | int ret; | ||
| 1072 | 1078 | ||
| 1073 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | 1079 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); |
| 1074 | memset(&event, 0, sizeof(event)); | 1080 | memset(&event, 0, sizeof(event)); |
| @@ -1093,15 +1099,14 @@ static void connect_request_upcall(struct c4iw_ep *ep) | |||
| 1093 | event.private_data_len = ep->plen; | 1099 | event.private_data_len = ep->plen; |
| 1094 | event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); | 1100 | event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); |
| 1095 | } | 1101 | } |
| 1096 | if (state_read(&ep->parent_ep->com) != DEAD) { | 1102 | c4iw_get_ep(&ep->com); |
| 1097 | c4iw_get_ep(&ep->com); | 1103 | ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id, |
| 1098 | ep->parent_ep->com.cm_id->event_handler( | 1104 | &event); |
| 1099 | ep->parent_ep->com.cm_id, | 1105 | if (ret) |
| 1100 | &event); | 1106 | c4iw_put_ep(&ep->com); |
| 1101 | } | ||
| 1102 | set_bit(CONNREQ_UPCALL, &ep->com.history); | 1107 | set_bit(CONNREQ_UPCALL, &ep->com.history); |
| 1103 | c4iw_put_ep(&ep->parent_ep->com); | 1108 | c4iw_put_ep(&ep->parent_ep->com); |
| 1104 | ep->parent_ep = NULL; | 1109 | return ret; |
| 1105 | } | 1110 | } |
| 1106 | 1111 | ||
| 1107 | static void established_upcall(struct c4iw_ep *ep) | 1112 | static void established_upcall(struct c4iw_ep *ep) |
| @@ -1165,7 +1170,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) | |||
| 1165 | * the connection. | 1170 | * the connection. |
| 1166 | */ | 1171 | */ |
| 1167 | stop_ep_timer(ep); | 1172 | stop_ep_timer(ep); |
| 1168 | if (state_read(&ep->com) != MPA_REQ_SENT) | 1173 | if (ep->com.state != MPA_REQ_SENT) |
| 1169 | return; | 1174 | return; |
| 1170 | 1175 | ||
| 1171 | /* | 1176 | /* |
| @@ -1240,7 +1245,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) | |||
| 1240 | * start reply message including private data. And | 1245 | * start reply message including private data. And |
| 1241 | * the MPA header is valid. | 1246 | * the MPA header is valid. |
| 1242 | */ | 1247 | */ |
| 1243 | state_set(&ep->com, FPDU_MODE); | 1248 | __state_set(&ep->com, FPDU_MODE); |
| 1244 | ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; | 1249 | ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; |
| 1245 | ep->mpa_attr.recv_marker_enabled = markers_enabled; | 1250 | ep->mpa_attr.recv_marker_enabled = markers_enabled; |
| 1246 | ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; | 1251 | ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; |
| @@ -1355,7 +1360,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) | |||
| 1355 | } | 1360 | } |
| 1356 | goto out; | 1361 | goto out; |
| 1357 | err: | 1362 | err: |
| 1358 | state_set(&ep->com, ABORTING); | 1363 | __state_set(&ep->com, ABORTING); |
| 1359 | send_abort(ep, skb, GFP_KERNEL); | 1364 | send_abort(ep, skb, GFP_KERNEL); |
| 1360 | out: | 1365 | out: |
| 1361 | connect_reply_upcall(ep, err); | 1366 | connect_reply_upcall(ep, err); |
| @@ -1370,7 +1375,7 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) | |||
| 1370 | 1375 | ||
| 1371 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | 1376 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); |
| 1372 | 1377 | ||
| 1373 | if (state_read(&ep->com) != MPA_REQ_WAIT) | 1378 | if (ep->com.state != MPA_REQ_WAIT) |
| 1374 | return; | 1379 | return; |
| 1375 | 1380 | ||
| 1376 | /* | 1381 | /* |
| @@ -1400,7 +1405,6 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) | |||
| 1400 | return; | 1405 | return; |
| 1401 | 1406 | ||
| 1402 | PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__); | 1407 | PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__); |
| 1403 | stop_ep_timer(ep); | ||
| 1404 | mpa = (struct mpa_message *) ep->mpa_pkt; | 1408 | mpa = (struct mpa_message *) ep->mpa_pkt; |
| 1405 | 1409 | ||
| 1406 | /* | 1410 | /* |
| @@ -1492,10 +1496,18 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) | |||
| 1492 | ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, | 1496 | ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, |
| 1493 | ep->mpa_attr.p2p_type); | 1497 | ep->mpa_attr.p2p_type); |
| 1494 | 1498 | ||
| 1495 | state_set(&ep->com, MPA_REQ_RCVD); | 1499 | __state_set(&ep->com, MPA_REQ_RCVD); |
| 1500 | stop_ep_timer(ep); | ||
| 1496 | 1501 | ||
| 1497 | /* drive upcall */ | 1502 | /* drive upcall */ |
| 1498 | connect_request_upcall(ep); | 1503 | mutex_lock(&ep->parent_ep->com.mutex); |
| 1504 | if (ep->parent_ep->com.state != DEAD) { | ||
| 1505 | if (connect_request_upcall(ep)) | ||
| 1506 | abort_connection(ep, skb, GFP_KERNEL); | ||
| 1507 | } else { | ||
| 1508 | abort_connection(ep, skb, GFP_KERNEL); | ||
| 1509 | } | ||
| 1510 | mutex_unlock(&ep->parent_ep->com.mutex); | ||
| 1499 | return; | 1511 | return; |
| 1500 | } | 1512 | } |
| 1501 | 1513 | ||
| @@ -1509,14 +1521,17 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 1509 | __u8 status = hdr->status; | 1521 | __u8 status = hdr->status; |
| 1510 | 1522 | ||
| 1511 | ep = lookup_tid(t, tid); | 1523 | ep = lookup_tid(t, tid); |
| 1524 | if (!ep) | ||
| 1525 | return 0; | ||
| 1512 | PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen); | 1526 | PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen); |
| 1513 | skb_pull(skb, sizeof(*hdr)); | 1527 | skb_pull(skb, sizeof(*hdr)); |
| 1514 | skb_trim(skb, dlen); | 1528 | skb_trim(skb, dlen); |
| 1529 | mutex_lock(&ep->com.mutex); | ||
| 1515 | 1530 | ||
| 1516 | /* update RX credits */ | 1531 | /* update RX credits */ |
| 1517 | update_rx_credits(ep, dlen); | 1532 | update_rx_credits(ep, dlen); |
| 1518 | 1533 | ||
| 1519 | switch (state_read(&ep->com)) { | 1534 | switch (ep->com.state) { |
| 1520 | case MPA_REQ_SENT: | 1535 | case MPA_REQ_SENT: |
| 1521 | ep->rcv_seq += dlen; | 1536 | ep->rcv_seq += dlen; |
| 1522 | process_mpa_reply(ep, skb); | 1537 | process_mpa_reply(ep, skb); |
| @@ -1532,7 +1547,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 1532 | pr_err("%s Unexpected streaming data." \ | 1547 | pr_err("%s Unexpected streaming data." \ |
| 1533 | " qpid %u ep %p state %d tid %u status %d\n", | 1548 | " qpid %u ep %p state %d tid %u status %d\n", |
| 1534 | __func__, ep->com.qp->wq.sq.qid, ep, | 1549 | __func__, ep->com.qp->wq.sq.qid, ep, |
| 1535 | state_read(&ep->com), ep->hwtid, status); | 1550 | ep->com.state, ep->hwtid, status); |
| 1536 | attrs.next_state = C4IW_QP_STATE_TERMINATE; | 1551 | attrs.next_state = C4IW_QP_STATE_TERMINATE; |
| 1537 | c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, | 1552 | c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, |
| 1538 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); | 1553 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); |
| @@ -1541,6 +1556,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 1541 | default: | 1556 | default: |
| 1542 | break; | 1557 | break; |
| 1543 | } | 1558 | } |
| 1559 | mutex_unlock(&ep->com.mutex); | ||
| 1544 | return 0; | 1560 | return 0; |
| 1545 | } | 1561 | } |
| 1546 | 1562 | ||
| @@ -2246,7 +2262,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 2246 | c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, | 2262 | c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, |
| 2247 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); | 2263 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); |
| 2248 | } | 2264 | } |
| 2249 | close_complete_upcall(ep); | 2265 | close_complete_upcall(ep, 0); |
| 2250 | __state_set(&ep->com, DEAD); | 2266 | __state_set(&ep->com, DEAD); |
| 2251 | release = 1; | 2267 | release = 1; |
| 2252 | disconnect = 0; | 2268 | disconnect = 0; |
| @@ -2425,7 +2441,7 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 2425 | C4IW_QP_ATTR_NEXT_STATE, | 2441 | C4IW_QP_ATTR_NEXT_STATE, |
| 2426 | &attrs, 1); | 2442 | &attrs, 1); |
| 2427 | } | 2443 | } |
| 2428 | close_complete_upcall(ep); | 2444 | close_complete_upcall(ep, 0); |
| 2429 | __state_set(&ep->com, DEAD); | 2445 | __state_set(&ep->com, DEAD); |
| 2430 | release = 1; | 2446 | release = 1; |
| 2431 | break; | 2447 | break; |
| @@ -2500,22 +2516,28 @@ static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 2500 | 2516 | ||
| 2501 | int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) | 2517 | int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) |
| 2502 | { | 2518 | { |
| 2503 | int err; | 2519 | int err = 0; |
| 2520 | int disconnect = 0; | ||
| 2504 | struct c4iw_ep *ep = to_ep(cm_id); | 2521 | struct c4iw_ep *ep = to_ep(cm_id); |
| 2505 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | 2522 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); |
| 2506 | 2523 | ||
| 2507 | if (state_read(&ep->com) == DEAD) { | 2524 | mutex_lock(&ep->com.mutex); |
| 2525 | if (ep->com.state == DEAD) { | ||
| 2526 | mutex_unlock(&ep->com.mutex); | ||
| 2508 | c4iw_put_ep(&ep->com); | 2527 | c4iw_put_ep(&ep->com); |
| 2509 | return -ECONNRESET; | 2528 | return -ECONNRESET; |
| 2510 | } | 2529 | } |
| 2511 | set_bit(ULP_REJECT, &ep->com.history); | 2530 | set_bit(ULP_REJECT, &ep->com.history); |
| 2512 | BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); | 2531 | BUG_ON(ep->com.state != MPA_REQ_RCVD); |
| 2513 | if (mpa_rev == 0) | 2532 | if (mpa_rev == 0) |
| 2514 | abort_connection(ep, NULL, GFP_KERNEL); | 2533 | abort_connection(ep, NULL, GFP_KERNEL); |
| 2515 | else { | 2534 | else { |
| 2516 | err = send_mpa_reject(ep, pdata, pdata_len); | 2535 | err = send_mpa_reject(ep, pdata, pdata_len); |
| 2517 | err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL); | 2536 | disconnect = 1; |
| 2518 | } | 2537 | } |
| 2538 | mutex_unlock(&ep->com.mutex); | ||
| 2539 | if (disconnect) | ||
| 2540 | err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL); | ||
| 2519 | c4iw_put_ep(&ep->com); | 2541 | c4iw_put_ep(&ep->com); |
| 2520 | return 0; | 2542 | return 0; |
| 2521 | } | 2543 | } |
| @@ -2530,12 +2552,14 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
| 2530 | struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); | 2552 | struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); |
| 2531 | 2553 | ||
| 2532 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | 2554 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); |
| 2533 | if (state_read(&ep->com) == DEAD) { | 2555 | |
| 2556 | mutex_lock(&ep->com.mutex); | ||
| 2557 | if (ep->com.state == DEAD) { | ||
| 2534 | err = -ECONNRESET; | 2558 | err = -ECONNRESET; |
| 2535 | goto err; | 2559 | goto err; |
| 2536 | } | 2560 | } |
| 2537 | 2561 | ||
| 2538 | BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); | 2562 | BUG_ON(ep->com.state != MPA_REQ_RCVD); |
| 2539 | BUG_ON(!qp); | 2563 | BUG_ON(!qp); |
| 2540 | 2564 | ||
| 2541 | set_bit(ULP_ACCEPT, &ep->com.history); | 2565 | set_bit(ULP_ACCEPT, &ep->com.history); |
| @@ -2604,14 +2628,16 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
| 2604 | if (err) | 2628 | if (err) |
| 2605 | goto err1; | 2629 | goto err1; |
| 2606 | 2630 | ||
| 2607 | state_set(&ep->com, FPDU_MODE); | 2631 | __state_set(&ep->com, FPDU_MODE); |
| 2608 | established_upcall(ep); | 2632 | established_upcall(ep); |
| 2633 | mutex_unlock(&ep->com.mutex); | ||
| 2609 | c4iw_put_ep(&ep->com); | 2634 | c4iw_put_ep(&ep->com); |
| 2610 | return 0; | 2635 | return 0; |
| 2611 | err1: | 2636 | err1: |
| 2612 | ep->com.cm_id = NULL; | 2637 | ep->com.cm_id = NULL; |
| 2613 | cm_id->rem_ref(cm_id); | 2638 | cm_id->rem_ref(cm_id); |
| 2614 | err: | 2639 | err: |
| 2640 | mutex_unlock(&ep->com.mutex); | ||
| 2615 | c4iw_put_ep(&ep->com); | 2641 | c4iw_put_ep(&ep->com); |
| 2616 | return err; | 2642 | return err; |
| 2617 | } | 2643 | } |
| @@ -2980,7 +3006,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) | |||
| 2980 | rdev = &ep->com.dev->rdev; | 3006 | rdev = &ep->com.dev->rdev; |
| 2981 | if (c4iw_fatal_error(rdev)) { | 3007 | if (c4iw_fatal_error(rdev)) { |
| 2982 | fatal = 1; | 3008 | fatal = 1; |
| 2983 | close_complete_upcall(ep); | 3009 | close_complete_upcall(ep, -EIO); |
| 2984 | ep->com.state = DEAD; | 3010 | ep->com.state = DEAD; |
| 2985 | } | 3011 | } |
| 2986 | switch (ep->com.state) { | 3012 | switch (ep->com.state) { |
| @@ -3022,7 +3048,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) | |||
| 3022 | if (close) { | 3048 | if (close) { |
| 3023 | if (abrupt) { | 3049 | if (abrupt) { |
| 3024 | set_bit(EP_DISC_ABORT, &ep->com.history); | 3050 | set_bit(EP_DISC_ABORT, &ep->com.history); |
| 3025 | close_complete_upcall(ep); | 3051 | close_complete_upcall(ep, -ECONNRESET); |
| 3026 | ret = send_abort(ep, NULL, gfp); | 3052 | ret = send_abort(ep, NULL, gfp); |
| 3027 | } else { | 3053 | } else { |
| 3028 | set_bit(EP_DISC_CLOSE, &ep->com.history); | 3054 | set_bit(EP_DISC_CLOSE, &ep->com.history); |
| @@ -3203,6 +3229,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb, | |||
| 3203 | struct sk_buff *req_skb; | 3229 | struct sk_buff *req_skb; |
| 3204 | struct fw_ofld_connection_wr *req; | 3230 | struct fw_ofld_connection_wr *req; |
| 3205 | struct cpl_pass_accept_req *cpl = cplhdr(skb); | 3231 | struct cpl_pass_accept_req *cpl = cplhdr(skb); |
| 3232 | int ret; | ||
| 3206 | 3233 | ||
| 3207 | req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL); | 3234 | req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL); |
| 3208 | req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req)); | 3235 | req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req)); |
| @@ -3239,7 +3266,13 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb, | |||
| 3239 | req->cookie = (unsigned long)skb; | 3266 | req->cookie = (unsigned long)skb; |
| 3240 | 3267 | ||
| 3241 | set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id); | 3268 | set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id); |
| 3242 | cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb); | 3269 | ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb); |
| 3270 | if (ret < 0) { | ||
| 3271 | pr_err("%s - cxgb4_ofld_send error %d - dropping\n", __func__, | ||
| 3272 | ret); | ||
| 3273 | kfree_skb(skb); | ||
| 3274 | kfree_skb(req_skb); | ||
| 3275 | } | ||
| 3243 | } | 3276 | } |
| 3244 | 3277 | ||
| 3245 | /* | 3278 | /* |
| @@ -3346,13 +3379,13 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 3346 | pi = (struct port_info *)netdev_priv(pdev); | 3379 | pi = (struct port_info *)netdev_priv(pdev); |
| 3347 | tx_chan = cxgb4_port_chan(pdev); | 3380 | tx_chan = cxgb4_port_chan(pdev); |
| 3348 | } | 3381 | } |
| 3382 | neigh_release(neigh); | ||
| 3349 | if (!e) { | 3383 | if (!e) { |
| 3350 | pr_err("%s - failed to allocate l2t entry!\n", | 3384 | pr_err("%s - failed to allocate l2t entry!\n", |
| 3351 | __func__); | 3385 | __func__); |
| 3352 | goto free_dst; | 3386 | goto free_dst; |
| 3353 | } | 3387 | } |
| 3354 | 3388 | ||
| 3355 | neigh_release(neigh); | ||
| 3356 | step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; | 3389 | step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; |
| 3357 | rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step]; | 3390 | rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step]; |
| 3358 | window = (__force u16) htons((__force u16)tcph->window); | 3391 | window = (__force u16) htons((__force u16)tcph->window); |
| @@ -3427,6 +3460,7 @@ static void process_timeout(struct c4iw_ep *ep) | |||
| 3427 | &attrs, 1); | 3460 | &attrs, 1); |
| 3428 | } | 3461 | } |
| 3429 | __state_set(&ep->com, ABORTING); | 3462 | __state_set(&ep->com, ABORTING); |
| 3463 | close_complete_upcall(ep, -ETIMEDOUT); | ||
| 3430 | break; | 3464 | break; |
| 3431 | default: | 3465 | default: |
| 3432 | WARN(1, "%s unexpected state ep %p tid %u state %u\n", | 3466 | WARN(1, "%s unexpected state ep %p tid %u state %u\n", |
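Note: a recurring change in the cxgb4/cm.c hunks above is that paths which used state_read()/state_set() now take ep->com.mutex, inspect ep->com.state directly and use the lock-held __state_set(), so state checks and transitions are serialized against concurrent CPL handlers and ULP calls (reject/accept/disconnect). A condensed sketch of that pattern, using only names that appear in the diff; the handler body itself is illustrative.

	/* Sketch: check and change endpoint state under ep->com.mutex. */
	static int example_cpl_handler(struct c4iw_ep *ep, struct sk_buff *skb)
	{
		int disconnect = 0;

		mutex_lock(&ep->com.mutex);
		switch (ep->com.state) {
		case MPA_REQ_SENT:
			__state_set(&ep->com, FPDU_MODE);
			break;
		case DEAD:
			break;
		default:
			disconnect = 1;
			break;
		}
		mutex_unlock(&ep->com.mutex);

		/* heavier work runs after the lock is dropped */
		if (disconnect)
			c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
		return 0;
	}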
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 88de3aa9c5b0..ce468e542428 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
| @@ -365,8 +365,14 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp) | |||
| 365 | 365 | ||
| 366 | if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) { | 366 | if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) { |
| 367 | 367 | ||
| 368 | /* | 368 | /* If we have reached here because of async |
| 369 | * drop peer2peer RTR reads. | 369 | * event or other error, and have egress error |
| 370 | * then drop | ||
| 371 | */ | ||
| 372 | if (CQE_TYPE(hw_cqe) == 1) | ||
| 373 | goto next_cqe; | ||
| 374 | |||
| 375 | /* drop peer2peer RTR reads. | ||
| 370 | */ | 376 | */ |
| 371 | if (CQE_WRID_STAG(hw_cqe) == 1) | 377 | if (CQE_WRID_STAG(hw_cqe) == 1) |
| 372 | goto next_cqe; | 378 | goto next_cqe; |
| @@ -511,8 +517,18 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe, | |||
| 511 | */ | 517 | */ |
| 512 | if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) { | 518 | if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) { |
| 513 | 519 | ||
| 514 | /* | 520 | /* If we have reached here because of async |
| 515 | * If this is an unsolicited read response, then the read | 521 | * event or other error, and have egress error |
| 522 | * then drop | ||
| 523 | */ | ||
| 524 | if (CQE_TYPE(hw_cqe) == 1) { | ||
| 525 | if (CQE_STATUS(hw_cqe)) | ||
| 526 | t4_set_wq_in_error(wq); | ||
| 527 | ret = -EAGAIN; | ||
| 528 | goto skip_cqe; | ||
| 529 | } | ||
| 530 | |||
| 531 | /* If this is an unsolicited read response, then the read | ||
| 516 | * was generated by the kernel driver as part of peer-2-peer | 532 | * was generated by the kernel driver as part of peer-2-peer |
| 517 | * connection setup. So ignore the completion. | 533 | * connection setup. So ignore the completion. |
| 518 | */ | 534 | */ |
| @@ -603,7 +619,7 @@ proc_cqe: | |||
| 603 | */ | 619 | */ |
| 604 | if (SQ_TYPE(hw_cqe)) { | 620 | if (SQ_TYPE(hw_cqe)) { |
| 605 | int idx = CQE_WRID_SQ_IDX(hw_cqe); | 621 | int idx = CQE_WRID_SQ_IDX(hw_cqe); |
| 606 | BUG_ON(idx > wq->sq.size); | 622 | BUG_ON(idx >= wq->sq.size); |
| 607 | 623 | ||
| 608 | /* | 624 | /* |
| 609 | * Account for any unsignaled completions completed by | 625 | * Account for any unsignaled completions completed by |
| @@ -617,7 +633,7 @@ proc_cqe: | |||
| 617 | wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx; | 633 | wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx; |
| 618 | else | 634 | else |
| 619 | wq->sq.in_use -= idx - wq->sq.cidx; | 635 | wq->sq.in_use -= idx - wq->sq.cidx; |
| 620 | BUG_ON(wq->sq.in_use < 0 && wq->sq.in_use < wq->sq.size); | 636 | BUG_ON(wq->sq.in_use <= 0 && wq->sq.in_use >= wq->sq.size); |
| 621 | 637 | ||
| 622 | wq->sq.cidx = (uint16_t)idx; | 638 | wq->sq.cidx = (uint16_t)idx; |
| 623 | PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx); | 639 | PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx); |
| @@ -881,7 +897,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries, | |||
| 881 | /* | 897 | /* |
| 882 | * Make actual HW queue 2x to avoid cdix_inc overflows. | 898 | * Make actual HW queue 2x to avoid cdix_inc overflows. |
| 883 | */ | 899 | */ |
| 884 | hwentries = entries * 2; | 900 | hwentries = min(entries * 2, T4_MAX_IQ_SIZE); |
| 885 | 901 | ||
| 886 | /* | 902 | /* |
| 887 | * Make HW queue at least 64 entries so GTS updates aren't too | 903 | * Make HW queue at least 64 entries so GTS updates aren't too |
| @@ -930,6 +946,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries, | |||
| 930 | if (!mm2) | 946 | if (!mm2) |
| 931 | goto err4; | 947 | goto err4; |
| 932 | 948 | ||
| 949 | memset(&uresp, 0, sizeof(uresp)); | ||
| 933 | uresp.qid_mask = rhp->rdev.cqmask; | 950 | uresp.qid_mask = rhp->rdev.cqmask; |
| 934 | uresp.cqid = chp->cq.cqid; | 951 | uresp.cqid = chp->cq.cqid; |
| 935 | uresp.size = chp->cq.size; | 952 | uresp.size = chp->cq.size; |
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 4a033853312e..982f81586f90 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
| @@ -897,11 +897,13 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp, | |||
| 897 | } | 897 | } |
| 898 | 898 | ||
| 899 | opcode = *(u8 *)rsp; | 899 | opcode = *(u8 *)rsp; |
| 900 | if (c4iw_handlers[opcode]) | 900 | if (c4iw_handlers[opcode]) { |
| 901 | c4iw_handlers[opcode](dev, skb); | 901 | c4iw_handlers[opcode](dev, skb); |
| 902 | else | 902 | } else { |
| 903 | pr_info("%s no handler opcode 0x%x...\n", __func__, | 903 | pr_info("%s no handler opcode 0x%x...\n", __func__, |
| 904 | opcode); | 904 | opcode); |
| 905 | kfree_skb(skb); | ||
| 906 | } | ||
| 905 | 907 | ||
| 906 | return 0; | 908 | return 0; |
| 907 | nomem: | 909 | nomem: |
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 23eaeabab93b..a1e8f1333b79 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
| @@ -369,6 +369,7 @@ struct c4iw_fr_page_list { | |||
| 369 | DEFINE_DMA_UNMAP_ADDR(mapping); | 369 | DEFINE_DMA_UNMAP_ADDR(mapping); |
| 370 | dma_addr_t dma_addr; | 370 | dma_addr_t dma_addr; |
| 371 | struct c4iw_dev *dev; | 371 | struct c4iw_dev *dev; |
| 372 | int pll_len; | ||
| 372 | }; | 373 | }; |
| 373 | 374 | ||
| 374 | static inline struct c4iw_fr_page_list *to_c4iw_fr_page_list( | 375 | static inline struct c4iw_fr_page_list *to_c4iw_fr_page_list( |
| @@ -441,6 +442,7 @@ struct c4iw_qp { | |||
| 441 | atomic_t refcnt; | 442 | atomic_t refcnt; |
| 442 | wait_queue_head_t wait; | 443 | wait_queue_head_t wait; |
| 443 | struct timer_list timer; | 444 | struct timer_list timer; |
| 445 | int sq_sig_all; | ||
| 444 | }; | 446 | }; |
| 445 | 447 | ||
| 446 | static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp) | 448 | static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp) |
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 41b11951a30a..f9ca072a99ed 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
| @@ -37,9 +37,9 @@ | |||
| 37 | 37 | ||
| 38 | #include "iw_cxgb4.h" | 38 | #include "iw_cxgb4.h" |
| 39 | 39 | ||
| 40 | int use_dsgl = 1; | 40 | int use_dsgl = 0; |
| 41 | module_param(use_dsgl, int, 0644); | 41 | module_param(use_dsgl, int, 0644); |
| 42 | MODULE_PARM_DESC(use_dsgl, "Use DSGL for PBL/FastReg (default=1)"); | 42 | MODULE_PARM_DESC(use_dsgl, "Use DSGL for PBL/FastReg (default=0)"); |
| 43 | 43 | ||
| 44 | #define T4_ULPTX_MIN_IO 32 | 44 | #define T4_ULPTX_MIN_IO 32 |
| 45 | #define C4IW_MAX_INLINE_SIZE 96 | 45 | #define C4IW_MAX_INLINE_SIZE 96 |
| @@ -678,9 +678,9 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
| 678 | { | 678 | { |
| 679 | __be64 *pages; | 679 | __be64 *pages; |
| 680 | int shift, n, len; | 680 | int shift, n, len; |
| 681 | int i, j, k; | 681 | int i, k, entry; |
| 682 | int err = 0; | 682 | int err = 0; |
| 683 | struct ib_umem_chunk *chunk; | 683 | struct scatterlist *sg; |
| 684 | struct c4iw_dev *rhp; | 684 | struct c4iw_dev *rhp; |
| 685 | struct c4iw_pd *php; | 685 | struct c4iw_pd *php; |
| 686 | struct c4iw_mr *mhp; | 686 | struct c4iw_mr *mhp; |
| @@ -710,10 +710,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
| 710 | 710 | ||
| 711 | shift = ffs(mhp->umem->page_size) - 1; | 711 | shift = ffs(mhp->umem->page_size) - 1; |
| 712 | 712 | ||
| 713 | n = 0; | 713 | n = mhp->umem->nmap; |
| 714 | list_for_each_entry(chunk, &mhp->umem->chunk_list, list) | ||
| 715 | n += chunk->nents; | ||
| 716 | |||
| 717 | err = alloc_pbl(mhp, n); | 714 | err = alloc_pbl(mhp, n); |
| 718 | if (err) | 715 | if (err) |
| 719 | goto err; | 716 | goto err; |
| @@ -726,24 +723,22 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
| 726 | 723 | ||
| 727 | i = n = 0; | 724 | i = n = 0; |
| 728 | 725 | ||
| 729 | list_for_each_entry(chunk, &mhp->umem->chunk_list, list) | 726 | for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) { |
| 730 | for (j = 0; j < chunk->nmap; ++j) { | 727 | len = sg_dma_len(sg) >> shift; |
| 731 | len = sg_dma_len(&chunk->page_list[j]) >> shift; | 728 | for (k = 0; k < len; ++k) { |
| 732 | for (k = 0; k < len; ++k) { | 729 | pages[i++] = cpu_to_be64(sg_dma_address(sg) + |
| 733 | pages[i++] = cpu_to_be64(sg_dma_address( | 730 | mhp->umem->page_size * k); |
| 734 | &chunk->page_list[j]) + | 731 | if (i == PAGE_SIZE / sizeof *pages) { |
| 735 | mhp->umem->page_size * k); | 732 | err = write_pbl(&mhp->rhp->rdev, |
| 736 | if (i == PAGE_SIZE / sizeof *pages) { | 733 | pages, |
| 737 | err = write_pbl(&mhp->rhp->rdev, | 734 | mhp->attr.pbl_addr + (n << 3), i); |
| 738 | pages, | 735 | if (err) |
| 739 | mhp->attr.pbl_addr + (n << 3), i); | 736 | goto pbl_done; |
| 740 | if (err) | 737 | n += i; |
| 741 | goto pbl_done; | 738 | i = 0; |
| 742 | n += i; | ||
| 743 | i = 0; | ||
| 744 | } | ||
| 745 | } | 739 | } |
| 746 | } | 740 | } |
| 741 | } | ||
| 747 | 742 | ||
| 748 | if (i) | 743 | if (i) |
| 749 | err = write_pbl(&mhp->rhp->rdev, pages, | 744 | err = write_pbl(&mhp->rhp->rdev, pages, |
| @@ -903,7 +898,11 @@ struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device, | |||
| 903 | dma_unmap_addr_set(c4pl, mapping, dma_addr); | 898 | dma_unmap_addr_set(c4pl, mapping, dma_addr); |
| 904 | c4pl->dma_addr = dma_addr; | 899 | c4pl->dma_addr = dma_addr; |
| 905 | c4pl->dev = dev; | 900 | c4pl->dev = dev; |
| 906 | c4pl->ibpl.max_page_list_len = pll_len; | 901 | c4pl->pll_len = pll_len; |
| 902 | |||
| 903 | PDBG("%s c4pl %p pll_len %u page_list %p dma_addr %pad\n", | ||
| 904 | __func__, c4pl, c4pl->pll_len, c4pl->ibpl.page_list, | ||
| 905 | &c4pl->dma_addr); | ||
| 907 | 906 | ||
| 908 | return &c4pl->ibpl; | 907 | return &c4pl->ibpl; |
| 909 | } | 908 | } |
| @@ -912,8 +911,12 @@ void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl) | |||
| 912 | { | 911 | { |
| 913 | struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl); | 912 | struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl); |
| 914 | 913 | ||
| 914 | PDBG("%s c4pl %p pll_len %u page_list %p dma_addr %pad\n", | ||
| 915 | __func__, c4pl, c4pl->pll_len, c4pl->ibpl.page_list, | ||
| 916 | &c4pl->dma_addr); | ||
| 917 | |||
| 915 | dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev, | 918 | dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev, |
| 916 | c4pl->ibpl.max_page_list_len, | 919 | c4pl->pll_len, |
| 917 | c4pl->ibpl.page_list, dma_unmap_addr(c4pl, mapping)); | 920 | c4pl->ibpl.page_list, dma_unmap_addr(c4pl, mapping)); |
| 918 | kfree(c4pl); | 921 | kfree(c4pl); |
| 919 | } | 922 | } |
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 582936708e6e..723ad290bd9d 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
| @@ -675,7 +675,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
| 675 | fw_flags = 0; | 675 | fw_flags = 0; |
| 676 | if (wr->send_flags & IB_SEND_SOLICITED) | 676 | if (wr->send_flags & IB_SEND_SOLICITED) |
| 677 | fw_flags |= FW_RI_SOLICITED_EVENT_FLAG; | 677 | fw_flags |= FW_RI_SOLICITED_EVENT_FLAG; |
| 678 | if (wr->send_flags & IB_SEND_SIGNALED) | 678 | if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all) |
| 679 | fw_flags |= FW_RI_COMPLETION_FLAG; | 679 | fw_flags |= FW_RI_COMPLETION_FLAG; |
| 680 | swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx]; | 680 | swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx]; |
| 681 | switch (wr->opcode) { | 681 | switch (wr->opcode) { |
| @@ -736,7 +736,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
| 736 | } | 736 | } |
| 737 | swsqe->idx = qhp->wq.sq.pidx; | 737 | swsqe->idx = qhp->wq.sq.pidx; |
| 738 | swsqe->complete = 0; | 738 | swsqe->complete = 0; |
| 739 | swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED); | 739 | swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) || |
| 740 | qhp->sq_sig_all; | ||
| 740 | swsqe->flushed = 0; | 741 | swsqe->flushed = 0; |
| 741 | swsqe->wr_id = wr->wr_id; | 742 | swsqe->wr_id = wr->wr_id; |
| 742 | 743 | ||
| @@ -1533,7 +1534,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, | |||
| 1533 | struct c4iw_cq *schp; | 1534 | struct c4iw_cq *schp; |
| 1534 | struct c4iw_cq *rchp; | 1535 | struct c4iw_cq *rchp; |
| 1535 | struct c4iw_create_qp_resp uresp; | 1536 | struct c4iw_create_qp_resp uresp; |
| 1536 | int sqsize, rqsize; | 1537 | unsigned int sqsize, rqsize; |
| 1537 | struct c4iw_ucontext *ucontext; | 1538 | struct c4iw_ucontext *ucontext; |
| 1538 | int ret; | 1539 | int ret; |
| 1539 | struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4, *mm5 = NULL; | 1540 | struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4, *mm5 = NULL; |
| @@ -1605,6 +1606,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, | |||
| 1605 | qhp->attr.enable_bind = 1; | 1606 | qhp->attr.enable_bind = 1; |
| 1606 | qhp->attr.max_ord = 1; | 1607 | qhp->attr.max_ord = 1; |
| 1607 | qhp->attr.max_ird = 1; | 1608 | qhp->attr.max_ird = 1; |
| 1609 | qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR; | ||
| 1608 | spin_lock_init(&qhp->lock); | 1610 | spin_lock_init(&qhp->lock); |
| 1609 | mutex_init(&qhp->mutex); | 1611 | mutex_init(&qhp->mutex); |
| 1610 | init_waitqueue_head(&qhp->wait); | 1612 | init_waitqueue_head(&qhp->wait); |
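The qp.c hunk above makes the send path honour IB_SIGNAL_ALL_WR: the per-QP setting is cached in qhp->sq_sig_all at create time and then ORed into the per-WR decision. A hedged sketch of the resulting predicate (not the cxgb4 code itself):

#include <linux/types.h>
#include <rdma/ib_verbs.h>

/*
 * Sketch: a send WR produces a completion either because the caller
 * set IB_SEND_SIGNALED on this particular WR, or because the QP was
 * created with sq_sig_type == IB_SIGNAL_ALL_WR (cached as sq_sig_all).
 */
static inline bool wr_is_signaled(const struct ib_send_wr *wr, bool sq_sig_all)
{
	return (wr->send_flags & IB_SEND_SIGNALED) || sq_sig_all;
}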
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h index f08f6eaf3fa8..bd45e0f3923f 100644 --- a/drivers/infiniband/hw/ehca/ehca_classes.h +++ b/drivers/infiniband/hw/ehca/ehca_classes.h | |||
| @@ -322,7 +322,7 @@ struct ehca_mr_pginfo { | |||
| 322 | } phy; | 322 | } phy; |
| 323 | struct { /* type EHCA_MR_PGI_USER section */ | 323 | struct { /* type EHCA_MR_PGI_USER section */ |
| 324 | struct ib_umem *region; | 324 | struct ib_umem *region; |
| 325 | struct ib_umem_chunk *next_chunk; | 325 | struct scatterlist *next_sg; |
| 326 | u64 next_nmap; | 326 | u64 next_nmap; |
| 327 | } usr; | 327 | } usr; |
| 328 | struct { /* type EHCA_MR_PGI_FMR section */ | 328 | struct { /* type EHCA_MR_PGI_FMR section */ |
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c index 212150c25ea0..8cc837537768 100644 --- a/drivers/infiniband/hw/ehca/ehca_cq.c +++ b/drivers/infiniband/hw/ehca/ehca_cq.c | |||
| @@ -283,6 +283,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector, | |||
| 283 | (my_cq->galpas.user.fw_handle & (PAGE_SIZE - 1)); | 283 | (my_cq->galpas.user.fw_handle & (PAGE_SIZE - 1)); |
| 284 | if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { | 284 | if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { |
| 285 | ehca_err(device, "Copy to udata failed."); | 285 | ehca_err(device, "Copy to udata failed."); |
| 286 | cq = ERR_PTR(-EFAULT); | ||
| 286 | goto create_cq_exit4; | 287 | goto create_cq_exit4; |
| 287 | } | 288 | } |
| 288 | } | 289 | } |
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c index 65873eeca9b0..3488e8c9fcb4 100644 --- a/drivers/infiniband/hw/ehca/ehca_mrmw.c +++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c | |||
| @@ -400,10 +400,7 @@ reg_user_mr_fallback: | |||
| 400 | pginfo.num_hwpages = num_hwpages; | 400 | pginfo.num_hwpages = num_hwpages; |
| 401 | pginfo.u.usr.region = e_mr->umem; | 401 | pginfo.u.usr.region = e_mr->umem; |
| 402 | pginfo.next_hwpage = e_mr->umem->offset / hwpage_size; | 402 | pginfo.next_hwpage = e_mr->umem->offset / hwpage_size; |
| 403 | pginfo.u.usr.next_chunk = list_prepare_entry(pginfo.u.usr.next_chunk, | 403 | pginfo.u.usr.next_sg = pginfo.u.usr.region->sg_head.sgl; |
| 404 | (&e_mr->umem->chunk_list), | ||
| 405 | list); | ||
| 406 | |||
| 407 | ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags, | 404 | ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags, |
| 408 | e_pd, &pginfo, &e_mr->ib.ib_mr.lkey, | 405 | e_pd, &pginfo, &e_mr->ib.ib_mr.lkey, |
| 409 | &e_mr->ib.ib_mr.rkey, EHCA_REG_MR); | 406 | &e_mr->ib.ib_mr.rkey, EHCA_REG_MR); |
| @@ -1858,61 +1855,39 @@ static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo, | |||
| 1858 | u64 *kpage) | 1855 | u64 *kpage) |
| 1859 | { | 1856 | { |
| 1860 | int ret = 0; | 1857 | int ret = 0; |
| 1861 | struct ib_umem_chunk *prev_chunk; | ||
| 1862 | struct ib_umem_chunk *chunk; | ||
| 1863 | u64 pgaddr; | 1858 | u64 pgaddr; |
| 1864 | u32 i = 0; | ||
| 1865 | u32 j = 0; | 1859 | u32 j = 0; |
| 1866 | int hwpages_per_kpage = PAGE_SIZE / pginfo->hwpage_size; | 1860 | int hwpages_per_kpage = PAGE_SIZE / pginfo->hwpage_size; |
| 1867 | 1861 | struct scatterlist **sg = &pginfo->u.usr.next_sg; | |
| 1868 | /* loop over desired chunk entries */ | 1862 | |
| 1869 | chunk = pginfo->u.usr.next_chunk; | 1863 | while (*sg != NULL) { |
| 1870 | prev_chunk = pginfo->u.usr.next_chunk; | 1864 | pgaddr = page_to_pfn(sg_page(*sg)) |
| 1871 | list_for_each_entry_continue( | 1865 | << PAGE_SHIFT; |
| 1872 | chunk, (&(pginfo->u.usr.region->chunk_list)), list) { | 1866 | *kpage = pgaddr + (pginfo->next_hwpage * |
| 1873 | for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) { | 1867 | pginfo->hwpage_size); |
| 1874 | pgaddr = page_to_pfn(sg_page(&chunk->page_list[i])) | 1868 | if (!(*kpage)) { |
| 1875 | << PAGE_SHIFT ; | 1869 | ehca_gen_err("pgaddr=%llx " |
| 1876 | *kpage = pgaddr + (pginfo->next_hwpage * | 1870 | "sg_dma_address=%llx " |
| 1877 | pginfo->hwpage_size); | 1871 | "entry=%llx next_hwpage=%llx", |
| 1878 | if ( !(*kpage) ) { | 1872 | pgaddr, (u64)sg_dma_address(*sg), |
| 1879 | ehca_gen_err("pgaddr=%llx " | 1873 | pginfo->u.usr.next_nmap, |
| 1880 | "chunk->page_list[i]=%llx " | 1874 | pginfo->next_hwpage); |
| 1881 | "i=%x next_hwpage=%llx", | 1875 | return -EFAULT; |
| 1882 | pgaddr, (u64)sg_dma_address( | ||
| 1883 | &chunk->page_list[i]), | ||
| 1884 | i, pginfo->next_hwpage); | ||
| 1885 | return -EFAULT; | ||
| 1886 | } | ||
| 1887 | (pginfo->hwpage_cnt)++; | ||
| 1888 | (pginfo->next_hwpage)++; | ||
| 1889 | kpage++; | ||
| 1890 | if (pginfo->next_hwpage % hwpages_per_kpage == 0) { | ||
| 1891 | (pginfo->kpage_cnt)++; | ||
| 1892 | (pginfo->u.usr.next_nmap)++; | ||
| 1893 | pginfo->next_hwpage = 0; | ||
| 1894 | i++; | ||
| 1895 | } | ||
| 1896 | j++; | ||
| 1897 | if (j >= number) break; | ||
| 1898 | } | 1876 | } |
| 1899 | if ((pginfo->u.usr.next_nmap >= chunk->nmap) && | 1877 | (pginfo->hwpage_cnt)++; |
| 1900 | (j >= number)) { | 1878 | (pginfo->next_hwpage)++; |
| 1901 | pginfo->u.usr.next_nmap = 0; | 1879 | kpage++; |
| 1902 | prev_chunk = chunk; | 1880 | if (pginfo->next_hwpage % hwpages_per_kpage == 0) { |
| 1903 | break; | 1881 | (pginfo->kpage_cnt)++; |
| 1904 | } else if (pginfo->u.usr.next_nmap >= chunk->nmap) { | 1882 | (pginfo->u.usr.next_nmap)++; |
| 1905 | pginfo->u.usr.next_nmap = 0; | 1883 | pginfo->next_hwpage = 0; |
| 1906 | prev_chunk = chunk; | 1884 | *sg = sg_next(*sg); |
| 1907 | } else if (j >= number) | 1885 | } |
| 1886 | j++; | ||
| 1887 | if (j >= number) | ||
| 1908 | break; | 1888 | break; |
| 1909 | else | ||
| 1910 | prev_chunk = chunk; | ||
| 1911 | } | 1889 | } |
| 1912 | pginfo->u.usr.next_chunk = | 1890 | |
| 1913 | list_prepare_entry(prev_chunk, | ||
| 1914 | (&(pginfo->u.usr.region->chunk_list)), | ||
| 1915 | list); | ||
| 1916 | return ret; | 1891 | return ret; |
| 1917 | } | 1892 | } |
| 1918 | 1893 | ||
| @@ -1920,20 +1895,19 @@ static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo, | |||
| 1920 | * check given pages for contiguous layout | 1895 | * check given pages for contiguous layout |
| 1921 | * last page addr is returned in prev_pgaddr for further check | 1896 | * last page addr is returned in prev_pgaddr for further check |
| 1922 | */ | 1897 | */ |
| 1923 | static int ehca_check_kpages_per_ate(struct scatterlist *page_list, | 1898 | static int ehca_check_kpages_per_ate(struct scatterlist **sg, |
| 1924 | int start_idx, int end_idx, | 1899 | int num_pages, |
| 1925 | u64 *prev_pgaddr) | 1900 | u64 *prev_pgaddr) |
| 1926 | { | 1901 | { |
| 1927 | int t; | 1902 | for (; *sg && num_pages > 0; *sg = sg_next(*sg), num_pages--) { |
| 1928 | for (t = start_idx; t <= end_idx; t++) { | 1903 | u64 pgaddr = page_to_pfn(sg_page(*sg)) << PAGE_SHIFT; |
| 1929 | u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT; | ||
| 1930 | if (ehca_debug_level >= 3) | 1904 | if (ehca_debug_level >= 3) |
| 1931 | ehca_gen_dbg("chunk_page=%llx value=%016llx", pgaddr, | 1905 | ehca_gen_dbg("chunk_page=%llx value=%016llx", pgaddr, |
| 1932 | *(u64 *)__va(pgaddr)); | 1906 | *(u64 *)__va(pgaddr)); |
| 1933 | if (pgaddr - PAGE_SIZE != *prev_pgaddr) { | 1907 | if (pgaddr - PAGE_SIZE != *prev_pgaddr) { |
| 1934 | ehca_gen_err("uncontiguous page found pgaddr=%llx " | 1908 | ehca_gen_err("uncontiguous page found pgaddr=%llx " |
| 1935 | "prev_pgaddr=%llx page_list_i=%x", | 1909 | "prev_pgaddr=%llx entries_left_in_hwpage=%x", |
| 1936 | pgaddr, *prev_pgaddr, t); | 1910 | pgaddr, *prev_pgaddr, num_pages); |
| 1937 | return -EINVAL; | 1911 | return -EINVAL; |
| 1938 | } | 1912 | } |
| 1939 | *prev_pgaddr = pgaddr; | 1913 | *prev_pgaddr = pgaddr; |
| @@ -1947,111 +1921,80 @@ static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo, | |||
| 1947 | u64 *kpage) | 1921 | u64 *kpage) |
| 1948 | { | 1922 | { |
| 1949 | int ret = 0; | 1923 | int ret = 0; |
| 1950 | struct ib_umem_chunk *prev_chunk; | ||
| 1951 | struct ib_umem_chunk *chunk; | ||
| 1952 | u64 pgaddr, prev_pgaddr; | 1924 | u64 pgaddr, prev_pgaddr; |
| 1953 | u32 i = 0; | ||
| 1954 | u32 j = 0; | 1925 | u32 j = 0; |
| 1955 | int kpages_per_hwpage = pginfo->hwpage_size / PAGE_SIZE; | 1926 | int kpages_per_hwpage = pginfo->hwpage_size / PAGE_SIZE; |
| 1956 | int nr_kpages = kpages_per_hwpage; | 1927 | int nr_kpages = kpages_per_hwpage; |
| 1928 | struct scatterlist **sg = &pginfo->u.usr.next_sg; | ||
| 1929 | |||
| 1930 | while (*sg != NULL) { | ||
| 1957 | 1931 | ||
| 1958 | /* loop over desired chunk entries */ | 1932 | if (nr_kpages == kpages_per_hwpage) { |
| 1959 | chunk = pginfo->u.usr.next_chunk; | 1933 | pgaddr = (page_to_pfn(sg_page(*sg)) |
| 1960 | prev_chunk = pginfo->u.usr.next_chunk; | 1934 | << PAGE_SHIFT); |
| 1961 | list_for_each_entry_continue( | 1935 | *kpage = pgaddr; |
| 1962 | chunk, (&(pginfo->u.usr.region->chunk_list)), list) { | 1936 | if (!(*kpage)) { |
| 1963 | for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) { | 1937 | ehca_gen_err("pgaddr=%llx entry=%llx", |
| 1964 | if (nr_kpages == kpages_per_hwpage) { | 1938 | pgaddr, pginfo->u.usr.next_nmap); |
| 1965 | pgaddr = ( page_to_pfn(sg_page(&chunk->page_list[i])) | 1939 | ret = -EFAULT; |
| 1966 | << PAGE_SHIFT ); | 1940 | return ret; |
| 1967 | *kpage = pgaddr; | 1941 | } |
| 1968 | if ( !(*kpage) ) { | 1942 | /* |
| 1969 | ehca_gen_err("pgaddr=%llx i=%x", | 1943 | * The first page in a hwpage must be aligned; |
| 1970 | pgaddr, i); | 1944 | * the first MR page is exempt from this rule. |
| 1945 | */ | ||
| 1946 | if (pgaddr & (pginfo->hwpage_size - 1)) { | ||
| 1947 | if (pginfo->hwpage_cnt) { | ||
| 1948 | ehca_gen_err( | ||
| 1949 | "invalid alignment " | ||
| 1950 | "pgaddr=%llx entry=%llx " | ||
| 1951 | "mr_pgsize=%llx", | ||
| 1952 | pgaddr, pginfo->u.usr.next_nmap, | ||
| 1953 | pginfo->hwpage_size); | ||
| 1971 | ret = -EFAULT; | 1954 | ret = -EFAULT; |
| 1972 | return ret; | 1955 | return ret; |
| 1973 | } | 1956 | } |
| 1974 | /* | 1957 | /* first MR page */ |
| 1975 | * The first page in a hwpage must be aligned; | 1958 | pginfo->kpage_cnt = |
| 1976 | * the first MR page is exempt from this rule. | 1959 | (pgaddr & |
| 1977 | */ | 1960 | (pginfo->hwpage_size - 1)) >> |
| 1978 | if (pgaddr & (pginfo->hwpage_size - 1)) { | 1961 | PAGE_SHIFT; |
| 1979 | if (pginfo->hwpage_cnt) { | 1962 | nr_kpages -= pginfo->kpage_cnt; |
| 1980 | ehca_gen_err( | 1963 | *kpage = pgaddr & |
| 1981 | "invalid alignment " | 1964 | ~(pginfo->hwpage_size - 1); |
| 1982 | "pgaddr=%llx i=%x " | ||
| 1983 | "mr_pgsize=%llx", | ||
| 1984 | pgaddr, i, | ||
| 1985 | pginfo->hwpage_size); | ||
| 1986 | ret = -EFAULT; | ||
| 1987 | return ret; | ||
| 1988 | } | ||
| 1989 | /* first MR page */ | ||
| 1990 | pginfo->kpage_cnt = | ||
| 1991 | (pgaddr & | ||
| 1992 | (pginfo->hwpage_size - 1)) >> | ||
| 1993 | PAGE_SHIFT; | ||
| 1994 | nr_kpages -= pginfo->kpage_cnt; | ||
| 1995 | *kpage = pgaddr & | ||
| 1996 | ~(pginfo->hwpage_size - 1); | ||
| 1997 | } | ||
| 1998 | if (ehca_debug_level >= 3) { | ||
| 1999 | u64 val = *(u64 *)__va(pgaddr); | ||
| 2000 | ehca_gen_dbg("kpage=%llx chunk_page=%llx " | ||
| 2001 | "value=%016llx", | ||
| 2002 | *kpage, pgaddr, val); | ||
| 2003 | } | ||
| 2004 | prev_pgaddr = pgaddr; | ||
| 2005 | i++; | ||
| 2006 | pginfo->kpage_cnt++; | ||
| 2007 | pginfo->u.usr.next_nmap++; | ||
| 2008 | nr_kpages--; | ||
| 2009 | if (!nr_kpages) | ||
| 2010 | goto next_kpage; | ||
| 2011 | continue; | ||
| 2012 | } | 1965 | } |
| 2013 | if (i + nr_kpages > chunk->nmap) { | 1966 | if (ehca_debug_level >= 3) { |
| 2014 | ret = ehca_check_kpages_per_ate( | 1967 | u64 val = *(u64 *)__va(pgaddr); |
| 2015 | chunk->page_list, i, | 1968 | ehca_gen_dbg("kpage=%llx page=%llx " |
| 2016 | chunk->nmap - 1, &prev_pgaddr); | 1969 | "value=%016llx", |
| 2017 | if (ret) return ret; | 1970 | *kpage, pgaddr, val); |
| 2018 | pginfo->kpage_cnt += chunk->nmap - i; | ||
| 2019 | pginfo->u.usr.next_nmap += chunk->nmap - i; | ||
| 2020 | nr_kpages -= chunk->nmap - i; | ||
| 2021 | break; | ||
| 2022 | } | 1971 | } |
| 1972 | prev_pgaddr = pgaddr; | ||
| 1973 | *sg = sg_next(*sg); | ||
| 1974 | pginfo->kpage_cnt++; | ||
| 1975 | pginfo->u.usr.next_nmap++; | ||
| 1976 | nr_kpages--; | ||
| 1977 | if (!nr_kpages) | ||
| 1978 | goto next_kpage; | ||
| 1979 | continue; | ||
| 1980 | } | ||
| 1981 | |||
| 1982 | ret = ehca_check_kpages_per_ate(sg, nr_kpages, | ||
| 1983 | &prev_pgaddr); | ||
| 1984 | if (ret) | ||
| 1985 | return ret; | ||
| 1986 | pginfo->kpage_cnt += nr_kpages; | ||
| 1987 | pginfo->u.usr.next_nmap += nr_kpages; | ||
| 2023 | 1988 | ||
| 2024 | ret = ehca_check_kpages_per_ate(chunk->page_list, i, | ||
| 2025 | i + nr_kpages - 1, | ||
| 2026 | &prev_pgaddr); | ||
| 2027 | if (ret) return ret; | ||
| 2028 | i += nr_kpages; | ||
| 2029 | pginfo->kpage_cnt += nr_kpages; | ||
| 2030 | pginfo->u.usr.next_nmap += nr_kpages; | ||
| 2031 | next_kpage: | 1989 | next_kpage: |
| 2032 | nr_kpages = kpages_per_hwpage; | 1990 | nr_kpages = kpages_per_hwpage; |
| 2033 | (pginfo->hwpage_cnt)++; | 1991 | (pginfo->hwpage_cnt)++; |
| 2034 | kpage++; | 1992 | kpage++; |
| 2035 | j++; | 1993 | j++; |
| 2036 | if (j >= number) break; | 1994 | if (j >= number) |
| 2037 | } | ||
| 2038 | if ((pginfo->u.usr.next_nmap >= chunk->nmap) && | ||
| 2039 | (j >= number)) { | ||
| 2040 | pginfo->u.usr.next_nmap = 0; | ||
| 2041 | prev_chunk = chunk; | ||
| 2042 | break; | ||
| 2043 | } else if (pginfo->u.usr.next_nmap >= chunk->nmap) { | ||
| 2044 | pginfo->u.usr.next_nmap = 0; | ||
| 2045 | prev_chunk = chunk; | ||
| 2046 | } else if (j >= number) | ||
| 2047 | break; | 1995 | break; |
| 2048 | else | ||
| 2049 | prev_chunk = chunk; | ||
| 2050 | } | 1996 | } |
| 2051 | pginfo->u.usr.next_chunk = | 1997 | |
| 2052 | list_prepare_entry(prev_chunk, | ||
| 2053 | (&(pginfo->u.usr.region->chunk_list)), | ||
| 2054 | list); | ||
| 2055 | return ret; | 1998 | return ret; |
| 2056 | } | 1999 | } |
| 2057 | 2000 | ||
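The rewritten ehca page-buffer helpers keep a scatterlist cursor in pginfo->u.usr.next_sg and advance it with sg_next(), so a later call resumes where the previous one stopped. A simplified sketch of that cursor pattern, assuming one hardware page per kernel page (the real code also tracks next_hwpage and hwpage_size):

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

/*
 * Sketch: consume up to 'number' pages starting at the saved cursor,
 * writing their physical addresses into 'kpage'. The cursor is
 * advanced in place so the caller can continue later.
 */
static int fill_kpages(struct scatterlist **cursor, u64 *kpage, int number)
{
	int filled = 0;

	while (*cursor && filled < number) {
		*kpage++ = page_to_pfn(sg_page(*cursor)) << PAGE_SHIFT;
		*cursor = sg_next(*cursor);
		filled++;
	}
	return filled;
}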
diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c index 714293b78518..e2f9a51f4a38 100644 --- a/drivers/infiniband/hw/ipath/ipath_diag.c +++ b/drivers/infiniband/hw/ipath/ipath_diag.c | |||
| @@ -326,7 +326,7 @@ static ssize_t ipath_diagpkt_write(struct file *fp, | |||
| 326 | size_t count, loff_t *off) | 326 | size_t count, loff_t *off) |
| 327 | { | 327 | { |
| 328 | u32 __iomem *piobuf; | 328 | u32 __iomem *piobuf; |
| 329 | u32 plen, clen, pbufn; | 329 | u32 plen, pbufn, maxlen_reserve; |
| 330 | struct ipath_diag_pkt odp; | 330 | struct ipath_diag_pkt odp; |
| 331 | struct ipath_diag_xpkt dp; | 331 | struct ipath_diag_xpkt dp; |
| 332 | u32 *tmpbuf = NULL; | 332 | u32 *tmpbuf = NULL; |
| @@ -335,51 +335,29 @@ static ssize_t ipath_diagpkt_write(struct file *fp, | |||
| 335 | u64 val; | 335 | u64 val; |
| 336 | u32 l_state, lt_state; /* LinkState, LinkTrainingState */ | 336 | u32 l_state, lt_state; /* LinkState, LinkTrainingState */ |
| 337 | 337 | ||
| 338 | if (count < sizeof(odp)) { | ||
| 339 | ret = -EINVAL; | ||
| 340 | goto bail; | ||
| 341 | } | ||
| 342 | 338 | ||
| 343 | if (count == sizeof(dp)) { | 339 | if (count == sizeof(dp)) { |
| 344 | if (copy_from_user(&dp, data, sizeof(dp))) { | 340 | if (copy_from_user(&dp, data, sizeof(dp))) { |
| 345 | ret = -EFAULT; | 341 | ret = -EFAULT; |
| 346 | goto bail; | 342 | goto bail; |
| 347 | } | 343 | } |
| 348 | } else if (copy_from_user(&odp, data, sizeof(odp))) { | 344 | } else if (count == sizeof(odp)) { |
| 349 | ret = -EFAULT; | 345 | if (copy_from_user(&odp, data, sizeof(odp))) { |
| 346 | ret = -EFAULT; | ||
| 347 | goto bail; | ||
| 348 | } | ||
| 349 | } else { | ||
| 350 | ret = -EINVAL; | ||
| 350 | goto bail; | 351 | goto bail; |
| 351 | } | 352 | } |
| 352 | 353 | ||
| 353 | /* | ||
| 354 | * Due to padding/alignment issues (lessened with new struct) | ||
| 355 | * the old and new structs are the same length. We need to | ||
| 356 | * disambiguate them, which we can do because odp.len has never | ||
| 357 | * been less than the total of LRH+BTH+DETH so far, while | ||
| 358 | * dp.unit (same offset) unit is unlikely to get that high. | ||
| 359 | * Similarly, dp.data, the pointer to user at the same offset | ||
| 360 | * as odp.unit, is almost certainly at least one (512byte)page | ||
| 361 | * "above" NULL. The if-block below can be omitted if compatibility | ||
| 362 | * between a new driver and older diagnostic code is unimportant. | ||
| 363 | * compatibility the other direction (new diags, old driver) is | ||
| 364 | * handled in the diagnostic code, with a warning. | ||
| 365 | */ | ||
| 366 | if (dp.unit >= 20 && dp.data < 512) { | ||
| 367 | /* very probable version mismatch. Fix it up */ | ||
| 368 | memcpy(&odp, &dp, sizeof(odp)); | ||
| 369 | /* We got a legacy dp, copy elements to dp */ | ||
| 370 | dp.unit = odp.unit; | ||
| 371 | dp.data = odp.data; | ||
| 372 | dp.len = odp.len; | ||
| 373 | dp.pbc_wd = 0; /* Indicate we need to compute PBC wd */ | ||
| 374 | } | ||
| 375 | |||
| 376 | /* send count must be an exact number of dwords */ | 354 | /* send count must be an exact number of dwords */ |
| 377 | if (dp.len & 3) { | 355 | if (dp.len & 3) { |
| 378 | ret = -EINVAL; | 356 | ret = -EINVAL; |
| 379 | goto bail; | 357 | goto bail; |
| 380 | } | 358 | } |
| 381 | 359 | ||
| 382 | clen = dp.len >> 2; | 360 | plen = dp.len >> 2; |
| 383 | 361 | ||
| 384 | dd = ipath_lookup(dp.unit); | 362 | dd = ipath_lookup(dp.unit); |
| 385 | if (!dd || !(dd->ipath_flags & IPATH_PRESENT) || | 363 | if (!dd || !(dd->ipath_flags & IPATH_PRESENT) || |
| @@ -422,16 +400,22 @@ static ssize_t ipath_diagpkt_write(struct file *fp, | |||
| 422 | goto bail; | 400 | goto bail; |
| 423 | } | 401 | } |
| 424 | 402 | ||
| 425 | /* need total length before first word written */ | 403 | /* |
| 426 | /* +1 word is for the qword padding */ | 404 | * need total length before first word written, plus 2 Dwords. One Dword |
| 427 | plen = sizeof(u32) + dp.len; | 405 | * is for padding so we get the full user data when not aligned on |
| 428 | 406 | * a word boundary. The other Dword is to make sure we have room for the | |
| 429 | if ((plen + 4) > dd->ipath_ibmaxlen) { | 407 | * ICRC which gets tacked on later. |
| 408 | */ | ||
| 409 | maxlen_reserve = 2 * sizeof(u32); | ||
| 410 | if (dp.len > dd->ipath_ibmaxlen - maxlen_reserve) { | ||
| 430 | ipath_dbg("Pkt len 0x%x > ibmaxlen %x\n", | 411 | ipath_dbg("Pkt len 0x%x > ibmaxlen %x\n", |
| 431 | plen - 4, dd->ipath_ibmaxlen); | 412 | dp.len, dd->ipath_ibmaxlen); |
| 432 | ret = -EINVAL; | 413 | ret = -EINVAL; |
| 433 | goto bail; /* before writing pbc */ | 414 | goto bail; |
| 434 | } | 415 | } |
| 416 | |||
| 417 | plen = sizeof(u32) + dp.len; | ||
| 418 | |||
| 435 | tmpbuf = vmalloc(plen); | 419 | tmpbuf = vmalloc(plen); |
| 436 | if (!tmpbuf) { | 420 | if (!tmpbuf) { |
| 437 | dev_info(&dd->pcidev->dev, "Unable to allocate tmp buffer, " | 421 | dev_info(&dd->pcidev->dev, "Unable to allocate tmp buffer, " |
| @@ -473,11 +457,11 @@ static ssize_t ipath_diagpkt_write(struct file *fp, | |||
| 473 | */ | 457 | */ |
| 474 | if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) { | 458 | if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) { |
| 475 | ipath_flush_wc(); | 459 | ipath_flush_wc(); |
| 476 | __iowrite32_copy(piobuf + 2, tmpbuf, clen - 1); | 460 | __iowrite32_copy(piobuf + 2, tmpbuf, plen - 1); |
| 477 | ipath_flush_wc(); | 461 | ipath_flush_wc(); |
| 478 | __raw_writel(tmpbuf[clen - 1], piobuf + clen + 1); | 462 | __raw_writel(tmpbuf[plen - 1], piobuf + plen + 1); |
| 479 | } else | 463 | } else |
| 480 | __iowrite32_copy(piobuf + 2, tmpbuf, clen); | 464 | __iowrite32_copy(piobuf + 2, tmpbuf, plen); |
| 481 | 465 | ||
| 482 | ipath_flush_wc(); | 466 | ipath_flush_wc(); |
| 483 | 467 | ||
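The ipath_diag.c hunk tightens the packet-length validation: the length must be an exact number of dwords and must leave two dwords of headroom (qword padding plus the ICRC appended later) within ipath_ibmaxlen. A small sketch of that check in isolation:

#include <linux/types.h>

/*
 * Sketch: mirror the validation done above. Returns true when 'len'
 * is dword aligned and fits below 'ibmaxlen' once two reserved dwords
 * are accounted for.
 */
static bool diag_pkt_len_ok(u32 len, u32 ibmaxlen)
{
	const u32 maxlen_reserve = 2 * sizeof(u32);

	if (len & 3)
		return false;
	return len <= ibmaxlen - maxlen_reserve;
}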
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c index e346d3890a0e..5e61e9bff697 100644 --- a/drivers/infiniband/hw/ipath/ipath_mr.c +++ b/drivers/infiniband/hw/ipath/ipath_mr.c | |||
| @@ -188,8 +188,8 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
| 188 | { | 188 | { |
| 189 | struct ipath_mr *mr; | 189 | struct ipath_mr *mr; |
| 190 | struct ib_umem *umem; | 190 | struct ib_umem *umem; |
| 191 | struct ib_umem_chunk *chunk; | 191 | int n, m, entry; |
| 192 | int n, m, i; | 192 | struct scatterlist *sg; |
| 193 | struct ib_mr *ret; | 193 | struct ib_mr *ret; |
| 194 | 194 | ||
| 195 | if (length == 0) { | 195 | if (length == 0) { |
| @@ -202,10 +202,7 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
| 202 | if (IS_ERR(umem)) | 202 | if (IS_ERR(umem)) |
| 203 | return (void *) umem; | 203 | return (void *) umem; |
| 204 | 204 | ||
| 205 | n = 0; | 205 | n = umem->nmap; |
| 206 | list_for_each_entry(chunk, &umem->chunk_list, list) | ||
| 207 | n += chunk->nents; | ||
| 208 | |||
| 209 | mr = alloc_mr(n, &to_idev(pd->device)->lk_table); | 206 | mr = alloc_mr(n, &to_idev(pd->device)->lk_table); |
| 210 | if (!mr) { | 207 | if (!mr) { |
| 211 | ret = ERR_PTR(-ENOMEM); | 208 | ret = ERR_PTR(-ENOMEM); |
| @@ -224,22 +221,20 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
| 224 | 221 | ||
| 225 | m = 0; | 222 | m = 0; |
| 226 | n = 0; | 223 | n = 0; |
| 227 | list_for_each_entry(chunk, &umem->chunk_list, list) { | 224 | for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { |
| 228 | for (i = 0; i < chunk->nents; i++) { | 225 | void *vaddr; |
| 229 | void *vaddr; | 226 | |
| 230 | 227 | vaddr = page_address(sg_page(sg)); | |
| 231 | vaddr = page_address(sg_page(&chunk->page_list[i])); | 228 | if (!vaddr) { |
| 232 | if (!vaddr) { | 229 | ret = ERR_PTR(-EINVAL); |
| 233 | ret = ERR_PTR(-EINVAL); | 230 | goto bail; |
| 234 | goto bail; | 231 | } |
| 235 | } | 232 | mr->mr.map[m]->segs[n].vaddr = vaddr; |
| 236 | mr->mr.map[m]->segs[n].vaddr = vaddr; | 233 | mr->mr.map[m]->segs[n].length = umem->page_size; |
| 237 | mr->mr.map[m]->segs[n].length = umem->page_size; | 234 | n++; |
| 238 | n++; | 235 | if (n == IPATH_SEGSZ) { |
| 239 | if (n == IPATH_SEGSZ) { | 236 | m++; |
| 240 | m++; | 237 | n = 0; |
| 241 | n = 0; | ||
| 242 | } | ||
| 243 | } | 238 | } |
| 244 | } | 239 | } |
| 245 | ret = &mr->ibmr; | 240 | ret = &mr->ibmr; |
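With the flattened umem layout, the number of DMA-mapped entries is available directly as umem->nmap, so ipath no longer has to sum chunk->nents over a chunk list before sizing the MR. A trivial sketch of the replacement:

#include <rdma/ib_umem.h>

/* Sketch: the old "count the chunk entries" loop collapses to a field read. */
static int umem_num_mapped(const struct ib_umem *umem)
{
	return umem->nmap;
}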
diff --git a/drivers/infiniband/hw/mlx4/doorbell.c b/drivers/infiniband/hw/mlx4/doorbell.c index 8aee4233b388..c51740986367 100644 --- a/drivers/infiniband/hw/mlx4/doorbell.c +++ b/drivers/infiniband/hw/mlx4/doorbell.c | |||
| @@ -45,7 +45,6 @@ int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt, | |||
| 45 | struct mlx4_db *db) | 45 | struct mlx4_db *db) |
| 46 | { | 46 | { |
| 47 | struct mlx4_ib_user_db_page *page; | 47 | struct mlx4_ib_user_db_page *page; |
| 48 | struct ib_umem_chunk *chunk; | ||
| 49 | int err = 0; | 48 | int err = 0; |
| 50 | 49 | ||
| 51 | mutex_lock(&context->db_page_mutex); | 50 | mutex_lock(&context->db_page_mutex); |
| @@ -73,8 +72,7 @@ int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt, | |||
| 73 | list_add(&page->list, &context->db_page_list); | 72 | list_add(&page->list, &context->db_page_list); |
| 74 | 73 | ||
| 75 | found: | 74 | found: |
| 76 | chunk = list_entry(page->umem->chunk_list.next, struct ib_umem_chunk, list); | 75 | db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK); |
| 77 | db->dma = sg_dma_address(chunk->page_list) + (virt & ~PAGE_MASK); | ||
| 78 | db->u.user_page = page; | 76 | db->u.user_page = page; |
| 79 | ++page->refcnt; | 77 | ++page->refcnt; |
| 80 | 78 | ||
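The doorbell hunk relies on a user doorbell page being a single-page umem, so its bus address is simply the first scatterlist entry plus the offset of 'virt' within the page. A hedged sketch of that computation:

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

/*
 * Sketch: compute the DMA address of a user doorbell, given the
 * umem's first scatterlist entry and the user virtual address.
 */
static dma_addr_t user_db_dma(struct scatterlist *sgl, unsigned long virt)
{
	return sg_dma_address(sgl) + (virt & ~PAGE_MASK);
}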
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index e81c5547e647..20b4d7a2d3d7 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
| @@ -1803,7 +1803,7 @@ static void init_pkeys(struct mlx4_ib_dev *ibdev) | |||
| 1803 | 1803 | ||
| 1804 | static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) | 1804 | static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) |
| 1805 | { | 1805 | { |
| 1806 | char name[32]; | 1806 | char name[80]; |
| 1807 | int eq_per_port = 0; | 1807 | int eq_per_port = 0; |
| 1808 | int added_eqs = 0; | 1808 | int added_eqs = 0; |
| 1809 | int total_eqs = 0; | 1809 | int total_eqs = 0; |
| @@ -1833,8 +1833,8 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) | |||
| 1833 | eq = 0; | 1833 | eq = 0; |
| 1834 | mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) { | 1834 | mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) { |
| 1835 | for (j = 0; j < eq_per_port; j++) { | 1835 | for (j = 0; j < eq_per_port; j++) { |
| 1836 | sprintf(name, "mlx4-ib-%d-%d@%s", | 1836 | snprintf(name, sizeof(name), "mlx4-ib-%d-%d@%s", |
| 1837 | i, j, dev->pdev->bus->name); | 1837 | i, j, dev->pdev->bus->name); |
| 1838 | /* Set IRQ for specific name (per ring) */ | 1838 | /* Set IRQ for specific name (per ring) */ |
| 1839 | if (mlx4_assign_eq(dev, name, NULL, | 1839 | if (mlx4_assign_eq(dev, name, NULL, |
| 1840 | &ibdev->eq_table[eq])) { | 1840 | &ibdev->eq_table[eq])) { |
| @@ -2056,8 +2056,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
| 2056 | err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]); | 2056 | err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]); |
| 2057 | if (err) | 2057 | if (err) |
| 2058 | ibdev->counters[i] = -1; | 2058 | ibdev->counters[i] = -1; |
| 2059 | } else | 2059 | } else { |
| 2060 | ibdev->counters[i] = -1; | 2060 | ibdev->counters[i] = -1; |
| 2061 | } | ||
| 2061 | } | 2062 | } |
| 2062 | 2063 | ||
| 2063 | mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) | 2064 | mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) |
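The main.c hunk grows the EQ name buffer and switches to snprintf() so a long PCI bus name cannot overflow it; snprintf() always NUL-terminates and truncates instead of writing past the end. A minimal illustration (the helper name is made up):

#include <linux/kernel.h>

/* Sketch: bounded formatting of a per-ring IRQ name. */
static void format_eq_name(char *buf, size_t len, int port, int ring,
			   const char *bus_name)
{
	snprintf(buf, len, "mlx4-ib-%d-%d@%s", port, ring, bus_name);
}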
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index e471f089ff00..cb2a8727f3fb 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c | |||
| @@ -90,11 +90,11 @@ int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt, | |||
| 90 | struct ib_umem *umem) | 90 | struct ib_umem *umem) |
| 91 | { | 91 | { |
| 92 | u64 *pages; | 92 | u64 *pages; |
| 93 | struct ib_umem_chunk *chunk; | 93 | int i, k, entry; |
| 94 | int i, j, k; | ||
| 95 | int n; | 94 | int n; |
| 96 | int len; | 95 | int len; |
| 97 | int err = 0; | 96 | int err = 0; |
| 97 | struct scatterlist *sg; | ||
| 98 | 98 | ||
| 99 | pages = (u64 *) __get_free_page(GFP_KERNEL); | 99 | pages = (u64 *) __get_free_page(GFP_KERNEL); |
| 100 | if (!pages) | 100 | if (!pages) |
| @@ -102,26 +102,25 @@ int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt, | |||
| 102 | 102 | ||
| 103 | i = n = 0; | 103 | i = n = 0; |
| 104 | 104 | ||
| 105 | list_for_each_entry(chunk, &umem->chunk_list, list) | 105 | for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { |
| 106 | for (j = 0; j < chunk->nmap; ++j) { | 106 | len = sg_dma_len(sg) >> mtt->page_shift; |
| 107 | len = sg_dma_len(&chunk->page_list[j]) >> mtt->page_shift; | 107 | for (k = 0; k < len; ++k) { |
| 108 | for (k = 0; k < len; ++k) { | 108 | pages[i++] = sg_dma_address(sg) + |
| 109 | pages[i++] = sg_dma_address(&chunk->page_list[j]) + | 109 | umem->page_size * k; |
| 110 | umem->page_size * k; | 110 | /* |
| 111 | /* | 111 | * Be friendly to mlx4_write_mtt() and |
| 112 | * Be friendly to mlx4_write_mtt() and | 112 | * pass it chunks of appropriate size. |
| 113 | * pass it chunks of appropriate size. | 113 | */ |
| 114 | */ | 114 | if (i == PAGE_SIZE / sizeof (u64)) { |
| 115 | if (i == PAGE_SIZE / sizeof (u64)) { | 115 | err = mlx4_write_mtt(dev->dev, mtt, n, |
| 116 | err = mlx4_write_mtt(dev->dev, mtt, n, | 116 | i, pages); |
| 117 | i, pages); | 117 | if (err) |
| 118 | if (err) | 118 | goto out; |
| 119 | goto out; | 119 | n += i; |
| 120 | n += i; | 120 | i = 0; |
| 121 | i = 0; | ||
| 122 | } | ||
| 123 | } | 121 | } |
| 124 | } | 122 | } |
| 123 | } | ||
| 125 | 124 | ||
| 126 | if (i) | 125 | if (i) |
| 127 | err = mlx4_write_mtt(dev->dev, mtt, n, i, pages); | 126 | err = mlx4_write_mtt(dev->dev, mtt, n, i, pages); |
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index d8f4d1fe8494..74993250523e 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c | |||
| @@ -1882,7 +1882,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, | |||
| 1882 | return err; | 1882 | return err; |
| 1883 | } | 1883 | } |
| 1884 | 1884 | ||
| 1885 | if (ah->av.eth.vlan != 0xffff) { | 1885 | if (ah->av.eth.vlan != cpu_to_be16(0xffff)) { |
| 1886 | vlan = be16_to_cpu(ah->av.eth.vlan) & 0x0fff; | 1886 | vlan = be16_to_cpu(ah->av.eth.vlan) & 0x0fff; |
| 1887 | is_vlan = 1; | 1887 | is_vlan = 1; |
| 1888 | } | 1888 | } |
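The one-line qp.c fix compares the address-handle VLAN field against the "no VLAN" sentinel in the byte order the field is actually stored in (big endian on the wire), rather than against the host-order constant. A sketch of the corrected predicate:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Sketch: 0xffff in the VLAN field means "no VLAN tag present". */
static bool eth_av_has_vlan(__be16 vlan)
{
	return vlan != cpu_to_be16(0xffff);
}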
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index b1705ce6eb88..62bb6b49dc1d 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c | |||
| @@ -366,6 +366,38 @@ static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf) | |||
| 366 | mlx5_buf_free(&dev->mdev, &buf->buf); | 366 | mlx5_buf_free(&dev->mdev, &buf->buf); |
| 367 | } | 367 | } |
| 368 | 368 | ||
| 369 | static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe, | ||
| 370 | struct ib_sig_err *item) | ||
| 371 | { | ||
| 372 | u16 syndrome = be16_to_cpu(cqe->syndrome); | ||
| 373 | |||
| 374 | #define GUARD_ERR (1 << 13) | ||
| 375 | #define APPTAG_ERR (1 << 12) | ||
| 376 | #define REFTAG_ERR (1 << 11) | ||
| 377 | |||
| 378 | if (syndrome & GUARD_ERR) { | ||
| 379 | item->err_type = IB_SIG_BAD_GUARD; | ||
| 380 | item->expected = be32_to_cpu(cqe->expected_trans_sig) >> 16; | ||
| 381 | item->actual = be32_to_cpu(cqe->actual_trans_sig) >> 16; | ||
| 382 | } else | ||
| 383 | if (syndrome & REFTAG_ERR) { | ||
| 384 | item->err_type = IB_SIG_BAD_REFTAG; | ||
| 385 | item->expected = be32_to_cpu(cqe->expected_reftag); | ||
| 386 | item->actual = be32_to_cpu(cqe->actual_reftag); | ||
| 387 | } else | ||
| 388 | if (syndrome & APPTAG_ERR) { | ||
| 389 | item->err_type = IB_SIG_BAD_APPTAG; | ||
| 390 | item->expected = be32_to_cpu(cqe->expected_trans_sig) & 0xffff; | ||
| 391 | item->actual = be32_to_cpu(cqe->actual_trans_sig) & 0xffff; | ||
| 392 | } else { | ||
| 393 | pr_err("Got signature completion error with bad syndrome %04x\n", | ||
| 394 | syndrome); | ||
| 395 | } | ||
| 396 | |||
| 397 | item->sig_err_offset = be64_to_cpu(cqe->err_offset); | ||
| 398 | item->key = be32_to_cpu(cqe->mkey); | ||
| 399 | } | ||
| 400 | |||
| 369 | static int mlx5_poll_one(struct mlx5_ib_cq *cq, | 401 | static int mlx5_poll_one(struct mlx5_ib_cq *cq, |
| 370 | struct mlx5_ib_qp **cur_qp, | 402 | struct mlx5_ib_qp **cur_qp, |
| 371 | struct ib_wc *wc) | 403 | struct ib_wc *wc) |
| @@ -375,6 +407,9 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq, | |||
| 375 | struct mlx5_cqe64 *cqe64; | 407 | struct mlx5_cqe64 *cqe64; |
| 376 | struct mlx5_core_qp *mqp; | 408 | struct mlx5_core_qp *mqp; |
| 377 | struct mlx5_ib_wq *wq; | 409 | struct mlx5_ib_wq *wq; |
| 410 | struct mlx5_sig_err_cqe *sig_err_cqe; | ||
| 411 | struct mlx5_core_mr *mmr; | ||
| 412 | struct mlx5_ib_mr *mr; | ||
| 378 | uint8_t opcode; | 413 | uint8_t opcode; |
| 379 | uint32_t qpn; | 414 | uint32_t qpn; |
| 380 | u16 wqe_ctr; | 415 | u16 wqe_ctr; |
| @@ -475,6 +510,33 @@ repoll: | |||
| 475 | } | 510 | } |
| 476 | } | 511 | } |
| 477 | break; | 512 | break; |
| 513 | case MLX5_CQE_SIG_ERR: | ||
| 514 | sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64; | ||
| 515 | |||
| 516 | read_lock(&dev->mdev.priv.mr_table.lock); | ||
| 517 | mmr = __mlx5_mr_lookup(&dev->mdev, | ||
| 518 | mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey))); | ||
| 519 | if (unlikely(!mmr)) { | ||
| 520 | read_unlock(&dev->mdev.priv.mr_table.lock); | ||
| 521 | mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n", | ||
| 522 | cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey)); | ||
| 523 | return -EINVAL; | ||
| 524 | } | ||
| 525 | |||
| 526 | mr = to_mibmr(mmr); | ||
| 527 | get_sig_err_item(sig_err_cqe, &mr->sig->err_item); | ||
| 528 | mr->sig->sig_err_exists = true; | ||
| 529 | mr->sig->sigerr_count++; | ||
| 530 | |||
| 531 | mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR on key: 0x%x err_type %x err_offset %llx expected %x actual %x\n", | ||
| 532 | cq->mcq.cqn, mr->sig->err_item.key, | ||
| 533 | mr->sig->err_item.err_type, | ||
| 534 | mr->sig->err_item.sig_err_offset, | ||
| 535 | mr->sig->err_item.expected, | ||
| 536 | mr->sig->err_item.actual); | ||
| 537 | |||
| 538 | read_unlock(&dev->mdev.priv.mr_table.lock); | ||
| 539 | goto repoll; | ||
| 478 | } | 540 | } |
| 479 | 541 | ||
| 480 | return 0; | 542 | return 0; |
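The new MLX5_CQE_SIG_ERR handling decodes the syndrome word of a signature-error CQE: the guard, reftag and apptag bits select which expected/actual pair is reported. A reduced sketch of just the syndrome-to-type mapping, using the same bit definitions as get_sig_err_item() above (the IB_SIG_BAD_* constants come from the signature verbs API this series introduces):

#include <linux/types.h>
#include <linux/errno.h>
#include <rdma/ib_verbs.h>

#define GUARD_ERR  (1 << 13)
#define APPTAG_ERR (1 << 12)
#define REFTAG_ERR (1 << 11)

/*
 * Sketch: classify a signature-error syndrome. Guard errors win over
 * reftag errors, which win over apptag errors; anything else is
 * treated as an unexpected syndrome.
 */
static int sig_syndrome_to_type(u16 syndrome)
{
	if (syndrome & GUARD_ERR)
		return IB_SIG_BAD_GUARD;
	if (syndrome & REFTAG_ERR)
		return IB_SIG_BAD_REFTAG;
	if (syndrome & APPTAG_ERR)
		return IB_SIG_BAD_APPTAG;
	return -EINVAL;
}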
diff --git a/drivers/infiniband/hw/mlx5/doorbell.c b/drivers/infiniband/hw/mlx5/doorbell.c index 256a23344f28..ece028fc47d6 100644 --- a/drivers/infiniband/hw/mlx5/doorbell.c +++ b/drivers/infiniband/hw/mlx5/doorbell.c | |||
| @@ -47,7 +47,6 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt, | |||
| 47 | struct mlx5_db *db) | 47 | struct mlx5_db *db) |
| 48 | { | 48 | { |
| 49 | struct mlx5_ib_user_db_page *page; | 49 | struct mlx5_ib_user_db_page *page; |
| 50 | struct ib_umem_chunk *chunk; | ||
| 51 | int err = 0; | 50 | int err = 0; |
| 52 | 51 | ||
| 53 | mutex_lock(&context->db_page_mutex); | 52 | mutex_lock(&context->db_page_mutex); |
| @@ -75,8 +74,7 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt, | |||
| 75 | list_add(&page->list, &context->db_page_list); | 74 | list_add(&page->list, &context->db_page_list); |
| 76 | 75 | ||
| 77 | found: | 76 | found: |
| 78 | chunk = list_entry(page->umem->chunk_list.next, struct ib_umem_chunk, list); | 77 | db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK); |
| 79 | db->dma = sg_dma_address(chunk->page_list) + (virt & ~PAGE_MASK); | ||
| 80 | db->u.user_page = page; | 78 | db->u.user_page = page; |
| 81 | ++page->refcnt; | 79 | ++page->refcnt; |
| 82 | 80 | ||
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index aa03e732b6a8..7b9c0782105e 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
| @@ -273,6 +273,15 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, | |||
| 273 | if (flags & MLX5_DEV_CAP_FLAG_XRC) | 273 | if (flags & MLX5_DEV_CAP_FLAG_XRC) |
| 274 | props->device_cap_flags |= IB_DEVICE_XRC; | 274 | props->device_cap_flags |= IB_DEVICE_XRC; |
| 275 | props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; | 275 | props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; |
| 276 | if (flags & MLX5_DEV_CAP_FLAG_SIG_HAND_OVER) { | ||
| 277 | props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER; | ||
| 278 | /* At this stage no support for signature handover */ | ||
| 279 | props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 | | ||
| 280 | IB_PROT_T10DIF_TYPE_2 | | ||
| 281 | IB_PROT_T10DIF_TYPE_3; | ||
| 282 | props->sig_guard_cap = IB_GUARD_T10DIF_CRC | | ||
| 283 | IB_GUARD_T10DIF_CSUM; | ||
| 284 | } | ||
| 276 | 285 | ||
| 277 | props->vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) & | 286 | props->vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) & |
| 278 | 0xffffff; | 287 | 0xffffff; |
| @@ -1423,12 +1432,15 @@ static int init_one(struct pci_dev *pdev, | |||
| 1423 | dev->ib_dev.get_dma_mr = mlx5_ib_get_dma_mr; | 1432 | dev->ib_dev.get_dma_mr = mlx5_ib_get_dma_mr; |
| 1424 | dev->ib_dev.reg_user_mr = mlx5_ib_reg_user_mr; | 1433 | dev->ib_dev.reg_user_mr = mlx5_ib_reg_user_mr; |
| 1425 | dev->ib_dev.dereg_mr = mlx5_ib_dereg_mr; | 1434 | dev->ib_dev.dereg_mr = mlx5_ib_dereg_mr; |
| 1435 | dev->ib_dev.destroy_mr = mlx5_ib_destroy_mr; | ||
| 1426 | dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach; | 1436 | dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach; |
| 1427 | dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach; | 1437 | dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach; |
| 1428 | dev->ib_dev.process_mad = mlx5_ib_process_mad; | 1438 | dev->ib_dev.process_mad = mlx5_ib_process_mad; |
| 1439 | dev->ib_dev.create_mr = mlx5_ib_create_mr; | ||
| 1429 | dev->ib_dev.alloc_fast_reg_mr = mlx5_ib_alloc_fast_reg_mr; | 1440 | dev->ib_dev.alloc_fast_reg_mr = mlx5_ib_alloc_fast_reg_mr; |
| 1430 | dev->ib_dev.alloc_fast_reg_page_list = mlx5_ib_alloc_fast_reg_page_list; | 1441 | dev->ib_dev.alloc_fast_reg_page_list = mlx5_ib_alloc_fast_reg_page_list; |
| 1431 | dev->ib_dev.free_fast_reg_page_list = mlx5_ib_free_fast_reg_page_list; | 1442 | dev->ib_dev.free_fast_reg_page_list = mlx5_ib_free_fast_reg_page_list; |
| 1443 | dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status; | ||
| 1432 | 1444 | ||
| 1433 | if (mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC) { | 1445 | if (mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC) { |
| 1434 | dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd; | 1446 | dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd; |
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c index 3a5322870b96..8499aec94db6 100644 --- a/drivers/infiniband/hw/mlx5/mem.c +++ b/drivers/infiniband/hw/mlx5/mem.c | |||
| @@ -44,16 +44,17 @@ | |||
| 44 | void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift, | 44 | void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift, |
| 45 | int *ncont, int *order) | 45 | int *ncont, int *order) |
| 46 | { | 46 | { |
| 47 | struct ib_umem_chunk *chunk; | ||
| 48 | unsigned long tmp; | 47 | unsigned long tmp; |
| 49 | unsigned long m; | 48 | unsigned long m; |
| 50 | int i, j, k; | 49 | int i, k; |
| 51 | u64 base = 0; | 50 | u64 base = 0; |
| 52 | int p = 0; | 51 | int p = 0; |
| 53 | int skip; | 52 | int skip; |
| 54 | int mask; | 53 | int mask; |
| 55 | u64 len; | 54 | u64 len; |
| 56 | u64 pfn; | 55 | u64 pfn; |
| 56 | struct scatterlist *sg; | ||
| 57 | int entry; | ||
| 57 | 58 | ||
| 58 | addr = addr >> PAGE_SHIFT; | 59 | addr = addr >> PAGE_SHIFT; |
| 59 | tmp = (unsigned long)addr; | 60 | tmp = (unsigned long)addr; |
| @@ -61,32 +62,31 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift, | |||
| 61 | skip = 1 << m; | 62 | skip = 1 << m; |
| 62 | mask = skip - 1; | 63 | mask = skip - 1; |
| 63 | i = 0; | 64 | i = 0; |
| 64 | list_for_each_entry(chunk, &umem->chunk_list, list) | 65 | for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { |
| 65 | for (j = 0; j < chunk->nmap; j++) { | 66 | len = sg_dma_len(sg) >> PAGE_SHIFT; |
| 66 | len = sg_dma_len(&chunk->page_list[j]) >> PAGE_SHIFT; | 67 | pfn = sg_dma_address(sg) >> PAGE_SHIFT; |
| 67 | pfn = sg_dma_address(&chunk->page_list[j]) >> PAGE_SHIFT; | 68 | for (k = 0; k < len; k++) { |
| 68 | for (k = 0; k < len; k++) { | 69 | if (!(i & mask)) { |
| 69 | if (!(i & mask)) { | 70 | tmp = (unsigned long)pfn; |
| 70 | tmp = (unsigned long)pfn; | 71 | m = min(m, find_first_bit(&tmp, sizeof(tmp))); |
| 71 | m = min(m, find_first_bit(&tmp, sizeof(tmp))); | 72 | skip = 1 << m; |
| 73 | mask = skip - 1; | ||
| 74 | base = pfn; | ||
| 75 | p = 0; | ||
| 76 | } else { | ||
| 77 | if (base + p != pfn) { | ||
| 78 | tmp = (unsigned long)p; | ||
| 79 | m = find_first_bit(&tmp, sizeof(tmp)); | ||
| 72 | skip = 1 << m; | 80 | skip = 1 << m; |
| 73 | mask = skip - 1; | 81 | mask = skip - 1; |
| 74 | base = pfn; | 82 | base = pfn; |
| 75 | p = 0; | 83 | p = 0; |
| 76 | } else { | ||
| 77 | if (base + p != pfn) { | ||
| 78 | tmp = (unsigned long)p; | ||
| 79 | m = find_first_bit(&tmp, sizeof(tmp)); | ||
| 80 | skip = 1 << m; | ||
| 81 | mask = skip - 1; | ||
| 82 | base = pfn; | ||
| 83 | p = 0; | ||
| 84 | } | ||
| 85 | } | 84 | } |
| 86 | p++; | ||
| 87 | i++; | ||
| 88 | } | 85 | } |
| 86 | p++; | ||
| 87 | i++; | ||
| 89 | } | 88 | } |
| 89 | } | ||
| 90 | 90 | ||
| 91 | if (i) { | 91 | if (i) { |
| 92 | m = min_t(unsigned long, ilog2(roundup_pow_of_two(i)), m); | 92 | m = min_t(unsigned long, ilog2(roundup_pow_of_two(i)), m); |
| @@ -112,32 +112,32 @@ void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem, | |||
| 112 | { | 112 | { |
| 113 | int shift = page_shift - PAGE_SHIFT; | 113 | int shift = page_shift - PAGE_SHIFT; |
| 114 | int mask = (1 << shift) - 1; | 114 | int mask = (1 << shift) - 1; |
| 115 | struct ib_umem_chunk *chunk; | 115 | int i, k; |
| 116 | int i, j, k; | ||
| 117 | u64 cur = 0; | 116 | u64 cur = 0; |
| 118 | u64 base; | 117 | u64 base; |
| 119 | int len; | 118 | int len; |
| 119 | struct scatterlist *sg; | ||
| 120 | int entry; | ||
| 120 | 121 | ||
| 121 | i = 0; | 122 | i = 0; |
| 122 | list_for_each_entry(chunk, &umem->chunk_list, list) | 123 | for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { |
| 123 | for (j = 0; j < chunk->nmap; j++) { | 124 | len = sg_dma_len(sg) >> PAGE_SHIFT; |
| 124 | len = sg_dma_len(&chunk->page_list[j]) >> PAGE_SHIFT; | 125 | base = sg_dma_address(sg); |
| 125 | base = sg_dma_address(&chunk->page_list[j]); | 126 | for (k = 0; k < len; k++) { |
| 126 | for (k = 0; k < len; k++) { | 127 | if (!(i & mask)) { |
| 127 | if (!(i & mask)) { | 128 | cur = base + (k << PAGE_SHIFT); |
| 128 | cur = base + (k << PAGE_SHIFT); | 129 | if (umr) |
| 129 | if (umr) | 130 | cur |= 3; |
| 130 | cur |= 3; | ||
| 131 | 131 | ||
| 132 | pas[i >> shift] = cpu_to_be64(cur); | 132 | pas[i >> shift] = cpu_to_be64(cur); |
| 133 | mlx5_ib_dbg(dev, "pas[%d] 0x%llx\n", | 133 | mlx5_ib_dbg(dev, "pas[%d] 0x%llx\n", |
| 134 | i >> shift, be64_to_cpu(pas[i >> shift])); | 134 | i >> shift, be64_to_cpu(pas[i >> shift])); |
| 135 | } else | 135 | } else |
| 136 | mlx5_ib_dbg(dev, "=====> 0x%llx\n", | 136 | mlx5_ib_dbg(dev, "=====> 0x%llx\n", |
| 137 | base + (k << PAGE_SHIFT)); | 137 | base + (k << PAGE_SHIFT)); |
| 138 | i++; | 138 | i++; |
| 139 | } | ||
| 140 | } | 139 | } |
| 140 | } | ||
| 141 | } | 141 | } |
| 142 | 142 | ||
| 143 | int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset) | 143 | int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset) |
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 389e31965773..50541586e0a6 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h | |||
| @@ -189,6 +189,9 @@ struct mlx5_ib_qp { | |||
| 189 | 189 | ||
| 190 | int create_type; | 190 | int create_type; |
| 191 | u32 pa_lkey; | 191 | u32 pa_lkey; |
| 192 | |||
| 193 | /* Store signature errors */ | ||
| 194 | bool signature_en; | ||
| 192 | }; | 195 | }; |
| 193 | 196 | ||
| 194 | struct mlx5_ib_cq_buf { | 197 | struct mlx5_ib_cq_buf { |
| @@ -265,6 +268,7 @@ struct mlx5_ib_mr { | |||
| 265 | enum ib_wc_status status; | 268 | enum ib_wc_status status; |
| 266 | struct mlx5_ib_dev *dev; | 269 | struct mlx5_ib_dev *dev; |
| 267 | struct mlx5_create_mkey_mbox_out out; | 270 | struct mlx5_create_mkey_mbox_out out; |
| 271 | struct mlx5_core_sig_ctx *sig; | ||
| 268 | }; | 272 | }; |
| 269 | 273 | ||
| 270 | struct mlx5_ib_fast_reg_page_list { | 274 | struct mlx5_ib_fast_reg_page_list { |
| @@ -396,6 +400,11 @@ static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp) | |||
| 396 | return container_of(mqp, struct mlx5_ib_qp, mqp); | 400 | return container_of(mqp, struct mlx5_ib_qp, mqp); |
| 397 | } | 401 | } |
| 398 | 402 | ||
| 403 | static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mr *mmr) | ||
| 404 | { | ||
| 405 | return container_of(mmr, struct mlx5_ib_mr, mmr); | ||
| 406 | } | ||
| 407 | |||
| 399 | static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd) | 408 | static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd) |
| 400 | { | 409 | { |
| 401 | return container_of(ibpd, struct mlx5_ib_pd, ibpd); | 410 | return container_of(ibpd, struct mlx5_ib_pd, ibpd); |
| @@ -495,6 +504,9 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
| 495 | u64 virt_addr, int access_flags, | 504 | u64 virt_addr, int access_flags, |
| 496 | struct ib_udata *udata); | 505 | struct ib_udata *udata); |
| 497 | int mlx5_ib_dereg_mr(struct ib_mr *ibmr); | 506 | int mlx5_ib_dereg_mr(struct ib_mr *ibmr); |
| 507 | int mlx5_ib_destroy_mr(struct ib_mr *ibmr); | ||
| 508 | struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd, | ||
| 509 | struct ib_mr_init_attr *mr_init_attr); | ||
| 498 | struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd, | 510 | struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd, |
| 499 | int max_page_list_len); | 511 | int max_page_list_len); |
| 500 | struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev, | 512 | struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev, |
| @@ -530,6 +542,8 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev); | |||
| 530 | int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev); | 542 | int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev); |
| 531 | int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift); | 543 | int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift); |
| 532 | void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context); | 544 | void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context); |
| 545 | int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask, | ||
| 546 | struct ib_mr_status *mr_status); | ||
| 533 | 547 | ||
| 534 | static inline void init_query_mad(struct ib_smp *mad) | 548 | static inline void init_query_mad(struct ib_smp *mad) |
| 535 | { | 549 | { |
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 7c95ca1f0c25..81392b26d078 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c | |||
| @@ -992,6 +992,122 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr) | |||
| 992 | return 0; | 992 | return 0; |
| 993 | } | 993 | } |
| 994 | 994 | ||
| 995 | struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd, | ||
| 996 | struct ib_mr_init_attr *mr_init_attr) | ||
| 997 | { | ||
| 998 | struct mlx5_ib_dev *dev = to_mdev(pd->device); | ||
| 999 | struct mlx5_create_mkey_mbox_in *in; | ||
| 1000 | struct mlx5_ib_mr *mr; | ||
| 1001 | int access_mode, err; | ||
| 1002 | int ndescs = roundup(mr_init_attr->max_reg_descriptors, 4); | ||
| 1003 | |||
| 1004 | mr = kzalloc(sizeof(*mr), GFP_KERNEL); | ||
| 1005 | if (!mr) | ||
| 1006 | return ERR_PTR(-ENOMEM); | ||
| 1007 | |||
| 1008 | in = kzalloc(sizeof(*in), GFP_KERNEL); | ||
| 1009 | if (!in) { | ||
| 1010 | err = -ENOMEM; | ||
| 1011 | goto err_free; | ||
| 1012 | } | ||
| 1013 | |||
| 1014 | in->seg.status = 1 << 6; /* free */ | ||
| 1015 | in->seg.xlt_oct_size = cpu_to_be32(ndescs); | ||
| 1016 | in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); | ||
| 1017 | in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn); | ||
| 1018 | access_mode = MLX5_ACCESS_MODE_MTT; | ||
| 1019 | |||
| 1020 | if (mr_init_attr->flags & IB_MR_SIGNATURE_EN) { | ||
| 1021 | u32 psv_index[2]; | ||
| 1022 | |||
| 1023 | in->seg.flags_pd = cpu_to_be32(be32_to_cpu(in->seg.flags_pd) | | ||
| 1024 | MLX5_MKEY_BSF_EN); | ||
| 1025 | in->seg.bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE); | ||
| 1026 | mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL); | ||
| 1027 | if (!mr->sig) { | ||
| 1028 | err = -ENOMEM; | ||
| 1029 | goto err_free_in; | ||
| 1030 | } | ||
| 1031 | |||
| 1032 | /* create mem & wire PSVs */ | ||
| 1033 | err = mlx5_core_create_psv(&dev->mdev, to_mpd(pd)->pdn, | ||
| 1034 | 2, psv_index); | ||
| 1035 | if (err) | ||
| 1036 | goto err_free_sig; | ||
| 1037 | |||
| 1038 | access_mode = MLX5_ACCESS_MODE_KLM; | ||
| 1039 | mr->sig->psv_memory.psv_idx = psv_index[0]; | ||
| 1040 | mr->sig->psv_wire.psv_idx = psv_index[1]; | ||
| 1041 | |||
| 1042 | mr->sig->sig_status_checked = true; | ||
| 1043 | mr->sig->sig_err_exists = false; | ||
| 1044 | /* Next UMR, Arm SIGERR */ | ||
| 1045 | ++mr->sig->sigerr_count; | ||
| 1046 | } | ||
| 1047 | |||
| 1048 | in->seg.flags = MLX5_PERM_UMR_EN | access_mode; | ||
| 1049 | err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in), | ||
| 1050 | NULL, NULL, NULL); | ||
| 1051 | if (err) | ||
| 1052 | goto err_destroy_psv; | ||
| 1053 | |||
| 1054 | mr->ibmr.lkey = mr->mmr.key; | ||
| 1055 | mr->ibmr.rkey = mr->mmr.key; | ||
| 1056 | mr->umem = NULL; | ||
| 1057 | kfree(in); | ||
| 1058 | |||
| 1059 | return &mr->ibmr; | ||
| 1060 | |||
| 1061 | err_destroy_psv: | ||
| 1062 | if (mr->sig) { | ||
| 1063 | if (mlx5_core_destroy_psv(&dev->mdev, | ||
| 1064 | mr->sig->psv_memory.psv_idx)) | ||
| 1065 | mlx5_ib_warn(dev, "failed to destroy mem psv %d\n", | ||
| 1066 | mr->sig->psv_memory.psv_idx); | ||
| 1067 | if (mlx5_core_destroy_psv(&dev->mdev, | ||
| 1068 | mr->sig->psv_wire.psv_idx)) | ||
| 1069 | mlx5_ib_warn(dev, "failed to destroy wire psv %d\n", | ||
| 1070 | mr->sig->psv_wire.psv_idx); | ||
| 1071 | } | ||
| 1072 | err_free_sig: | ||
| 1073 | kfree(mr->sig); | ||
| 1074 | err_free_in: | ||
| 1075 | kfree(in); | ||
| 1076 | err_free: | ||
| 1077 | kfree(mr); | ||
| 1078 | return ERR_PTR(err); | ||
| 1079 | } | ||
| 1080 | |||
| 1081 | int mlx5_ib_destroy_mr(struct ib_mr *ibmr) | ||
| 1082 | { | ||
| 1083 | struct mlx5_ib_dev *dev = to_mdev(ibmr->device); | ||
| 1084 | struct mlx5_ib_mr *mr = to_mmr(ibmr); | ||
| 1085 | int err; | ||
| 1086 | |||
| 1087 | if (mr->sig) { | ||
| 1088 | if (mlx5_core_destroy_psv(&dev->mdev, | ||
| 1089 | mr->sig->psv_memory.psv_idx)) | ||
| 1090 | mlx5_ib_warn(dev, "failed to destroy mem psv %d\n", | ||
| 1091 | mr->sig->psv_memory.psv_idx); | ||
| 1092 | if (mlx5_core_destroy_psv(&dev->mdev, | ||
| 1093 | mr->sig->psv_wire.psv_idx)) | ||
| 1094 | mlx5_ib_warn(dev, "failed to destroy wire psv %d\n", | ||
| 1095 | mr->sig->psv_wire.psv_idx); | ||
| 1096 | kfree(mr->sig); | ||
| 1097 | } | ||
| 1098 | |||
| 1099 | err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr); | ||
| 1100 | if (err) { | ||
| 1101 | mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n", | ||
| 1102 | mr->mmr.key, err); | ||
| 1103 | return err; | ||
| 1104 | } | ||
| 1105 | |||
| 1106 | kfree(mr); | ||
| 1107 | |||
| 1108 | return err; | ||
| 1109 | } | ||
| 1110 | |||
| 995 | struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd, | 1111 | struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd, |
| 996 | int max_page_list_len) | 1112 | int max_page_list_len) |
| 997 | { | 1113 | { |
| @@ -1077,3 +1193,44 @@ void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list) | |||
| 1077 | kfree(mfrpl->ibfrpl.page_list); | 1193 | kfree(mfrpl->ibfrpl.page_list); |
| 1078 | kfree(mfrpl); | 1194 | kfree(mfrpl); |
| 1079 | } | 1195 | } |
| 1196 | |||
| 1197 | int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask, | ||
| 1198 | struct ib_mr_status *mr_status) | ||
| 1199 | { | ||
| 1200 | struct mlx5_ib_mr *mmr = to_mmr(ibmr); | ||
| 1201 | int ret = 0; | ||
| 1202 | |||
| 1203 | if (check_mask & ~IB_MR_CHECK_SIG_STATUS) { | ||
| 1204 | pr_err("Invalid status check mask\n"); | ||
| 1205 | ret = -EINVAL; | ||
| 1206 | goto done; | ||
| 1207 | } | ||
| 1208 | |||
| 1209 | mr_status->fail_status = 0; | ||
| 1210 | if (check_mask & IB_MR_CHECK_SIG_STATUS) { | ||
| 1211 | if (!mmr->sig) { | ||
| 1212 | ret = -EINVAL; | ||
| 1213 | pr_err("signature status check requested on a non-signature enabled MR\n"); | ||
| 1214 | goto done; | ||
| 1215 | } | ||
| 1216 | |||
| 1217 | mmr->sig->sig_status_checked = true; | ||
| 1218 | if (!mmr->sig->sig_err_exists) | ||
| 1219 | goto done; | ||
| 1220 | |||
| 1221 | if (ibmr->lkey == mmr->sig->err_item.key) | ||
| 1222 | memcpy(&mr_status->sig_err, &mmr->sig->err_item, | ||
| 1223 | sizeof(mr_status->sig_err)); | ||
| 1224 | else { | ||
| 1225 | mr_status->sig_err.err_type = IB_SIG_BAD_GUARD; | ||
| 1226 | mr_status->sig_err.sig_err_offset = 0; | ||
| 1227 | mr_status->sig_err.key = mmr->sig->err_item.key; | ||
| 1228 | } | ||
| 1229 | |||
| 1230 | mmr->sig->sig_err_exists = false; | ||
| 1231 | mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS; | ||
| 1232 | } | ||
| 1233 | |||
| 1234 | done: | ||
| 1235 | return ret; | ||
| 1236 | } | ||
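check_mr_status is meant to be called by an upper-layer protocol after I/O on a signature-enabled MR completes; it reports whether a signature error was latched and clears the pending flag. A hedged usage sketch, assuming the ib_check_mr_status() core wrapper this series adds drives the ->check_mr_status() method registered above:

#include <linux/printk.h>
#include <rdma/ib_verbs.h>

/*
 * Sketch: query signature status on an MR and log any latched error.
 * Field names follow the ib_mr_status/ib_sig_err usage visible above.
 */
static int check_sig_mr(struct ib_mr *mr)
{
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret)
		return ret;

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS)
		pr_err("signature error: type %d key 0x%x offset %llu\n",
		       mr_status.sig_err.err_type, mr_status.sig_err.key,
		       (unsigned long long)mr_status.sig_err.sig_err_offset);
	return 0;
}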
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 7dfe8a1c84cf..ae788d27b93f 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c | |||
| @@ -256,8 +256,11 @@ static int calc_send_wqe(struct ib_qp_init_attr *attr) | |||
| 256 | } | 256 | } |
| 257 | 257 | ||
| 258 | size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg); | 258 | size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg); |
| 259 | 259 | if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN && | |
| 260 | return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB); | 260 | ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE) |
| 261 | return MLX5_SIG_WQE_SIZE; | ||
| 262 | else | ||
| 263 | return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB); | ||
| 261 | } | 264 | } |
| 262 | 265 | ||
| 263 | static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr, | 266 | static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr, |
| @@ -284,6 +287,9 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr, | |||
| 284 | sizeof(struct mlx5_wqe_inline_seg); | 287 | sizeof(struct mlx5_wqe_inline_seg); |
| 285 | attr->cap.max_inline_data = qp->max_inline_data; | 288 | attr->cap.max_inline_data = qp->max_inline_data; |
| 286 | 289 | ||
| 290 | if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN) | ||
| 291 | qp->signature_en = true; | ||
| 292 | |||
| 287 | wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size); | 293 | wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size); |
| 288 | qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB; | 294 | qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB; |
| 289 | if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) { | 295 | if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) { |
| @@ -665,7 +671,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, | |||
| 665 | int err; | 671 | int err; |
| 666 | 672 | ||
| 667 | uuari = &dev->mdev.priv.uuari; | 673 | uuari = &dev->mdev.priv.uuari; |
| 668 | if (init_attr->create_flags) | 674 | if (init_attr->create_flags & ~IB_QP_CREATE_SIGNATURE_EN) |
| 669 | return -EINVAL; | 675 | return -EINVAL; |
| 670 | 676 | ||
| 671 | if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR) | 677 | if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR) |
| @@ -1771,6 +1777,27 @@ static __be64 frwr_mkey_mask(void) | |||
| 1771 | return cpu_to_be64(result); | 1777 | return cpu_to_be64(result); |
| 1772 | } | 1778 | } |
| 1773 | 1779 | ||
| 1780 | static __be64 sig_mkey_mask(void) | ||
| 1781 | { | ||
| 1782 | u64 result; | ||
| 1783 | |||
| 1784 | result = MLX5_MKEY_MASK_LEN | | ||
| 1785 | MLX5_MKEY_MASK_PAGE_SIZE | | ||
| 1786 | MLX5_MKEY_MASK_START_ADDR | | ||
| 1787 | MLX5_MKEY_MASK_EN_SIGERR | | ||
| 1788 | MLX5_MKEY_MASK_EN_RINVAL | | ||
| 1789 | MLX5_MKEY_MASK_KEY | | ||
| 1790 | MLX5_MKEY_MASK_LR | | ||
| 1791 | MLX5_MKEY_MASK_LW | | ||
| 1792 | MLX5_MKEY_MASK_RR | | ||
| 1793 | MLX5_MKEY_MASK_RW | | ||
| 1794 | MLX5_MKEY_MASK_SMALL_FENCE | | ||
| 1795 | MLX5_MKEY_MASK_FREE | | ||
| 1796 | MLX5_MKEY_MASK_BSF_EN; | ||
| 1797 | |||
| 1798 | return cpu_to_be64(result); | ||
| 1799 | } | ||
| 1800 | |||
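The mask above enumerates which memory-key fields the signature UMR is allowed to rewrite. A minimal sketch of the same idea; only the flag names mirror the patch, the bit positions are invented for illustration.

/* mkey_mask.c - composing a UMR mkey mask and its big-endian wire form. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define MASK_LEN        (1ULL << 0)   /* assumed bit positions */
#define MASK_START_ADDR (1ULL << 6)
#define MASK_BSF_EN     (1ULL << 12)
#define MASK_EN_SIGERR  (1ULL << 26)
#define MASK_FREE       (1ULL << 29)

int main(void)
{
    /* each set bit lets the UMR overwrite the corresponding mkey field */
    uint64_t mask = MASK_LEN | MASK_START_ADDR | MASK_BSF_EN |
                    MASK_EN_SIGERR | MASK_FREE;
    uint64_t wire = htobe64(mask);    /* the WQE carries the mask big-endian */

    printf("host mask = 0x%016llx, round trip = 0x%016llx\n",
           (unsigned long long)mask, (unsigned long long)be64toh(wire));
    return 0;
}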
| 1774 | static void set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, | 1801 | static void set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, |
| 1775 | struct ib_send_wr *wr, int li) | 1802 | struct ib_send_wr *wr, int li) |
| 1776 | { | 1803 | { |
| @@ -1826,7 +1853,7 @@ static u8 get_umr_flags(int acc) | |||
| 1826 | (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) | | 1853 | (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) | |
| 1827 | (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) | | 1854 | (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) | |
| 1828 | (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) | | 1855 | (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) | |
| 1829 | MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN | MLX5_ACCESS_MODE_MTT; | 1856 | MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN; |
| 1830 | } | 1857 | } |
| 1831 | 1858 | ||
| 1832 | static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr, | 1859 | static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr, |
| @@ -1838,7 +1865,8 @@ static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr, | |||
| 1838 | return; | 1865 | return; |
| 1839 | } | 1866 | } |
| 1840 | 1867 | ||
| 1841 | seg->flags = get_umr_flags(wr->wr.fast_reg.access_flags); | 1868 | seg->flags = get_umr_flags(wr->wr.fast_reg.access_flags) | |
| 1869 | MLX5_ACCESS_MODE_MTT; | ||
| 1842 | *writ = seg->flags & (MLX5_PERM_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE); | 1870 | *writ = seg->flags & (MLX5_PERM_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE); |
| 1843 | seg->qpn_mkey7_0 = cpu_to_be32((wr->wr.fast_reg.rkey & 0xff) | 0xffffff00); | 1871 | seg->qpn_mkey7_0 = cpu_to_be32((wr->wr.fast_reg.rkey & 0xff) | 0xffffff00); |
| 1844 | seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL); | 1872 | seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL); |
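A sketch of why the access mode moved out of get_umr_flags(): permission bits are still derived from the IB access flags, and each caller now ORs in its own translation mode, MTT for fast-reg MRs and KLM for signature MRs. All numeric values here are illustrative, not the real mlx5 defines.

/* umr_flags.c - permission bits plus a caller-chosen access mode. */
#include <stdio.h>

#define IB_ACCESS_LOCAL_WRITE   (1 << 0)
#define IB_ACCESS_REMOTE_WRITE  (1 << 1)
#define IB_ACCESS_REMOTE_READ   (1 << 2)

#define PERM_LOCAL_READ   0x04
#define PERM_LOCAL_WRITE  0x08
#define PERM_REMOTE_READ  0x10
#define PERM_REMOTE_WRITE 0x20
#define PERM_UMR_EN       0x80

#define ACCESS_MODE_MTT   0x01
#define ACCESS_MODE_KLM   0x02

static unsigned char umr_flags(int acc)
{
    return (acc & IB_ACCESS_REMOTE_WRITE ? PERM_REMOTE_WRITE : 0) |
           (acc & IB_ACCESS_REMOTE_READ  ? PERM_REMOTE_READ  : 0) |
           (acc & IB_ACCESS_LOCAL_WRITE  ? PERM_LOCAL_WRITE  : 0) |
           PERM_LOCAL_READ | PERM_UMR_EN;           /* no access mode here */
}

int main(void)
{
    int acc = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;

    unsigned char frwr = umr_flags(acc) | ACCESS_MODE_MTT; /* fast-reg MR */
    unsigned char sig  = umr_flags(acc) | ACCESS_MODE_KLM; /* signature MR */
    printf("frwr=0x%02x sig=0x%02x\n", frwr, sig);
    return 0;
}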
| @@ -1954,6 +1982,342 @@ static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr, | |||
| 1954 | return 0; | 1982 | return 0; |
| 1955 | } | 1983 | } |
| 1956 | 1984 | ||
| 1985 | static u16 prot_field_size(enum ib_signature_type type) | ||
| 1986 | { | ||
| 1987 | switch (type) { | ||
| 1988 | case IB_SIG_TYPE_T10_DIF: | ||
| 1989 | return MLX5_DIF_SIZE; | ||
| 1990 | default: | ||
| 1991 | return 0; | ||
| 1992 | } | ||
| 1993 | } | ||
| 1994 | |||
| 1995 | static u8 bs_selector(int block_size) | ||
| 1996 | { | ||
| 1997 | switch (block_size) { | ||
| 1998 | case 512: return 0x1; | ||
| 1999 | case 520: return 0x2; | ||
| 2000 | case 4096: return 0x3; | ||
| 2001 | case 4160: return 0x4; | ||
| 2002 | case 1073741824: return 0x5; | ||
| 2003 | default: return 0; | ||
| 2004 | } | ||
| 2005 | } | ||
| 2006 | |||
| 2007 | static int format_selector(struct ib_sig_attrs *attr, | ||
| 2008 | struct ib_sig_domain *domain, | ||
| 2009 | int *selector) | ||
| 2010 | { | ||
| 2011 | |||
| 2012 | #define FORMAT_DIF_NONE 0 | ||
| 2013 | #define FORMAT_DIF_CRC_INC 8 | ||
| 2014 | #define FORMAT_DIF_CRC_NO_INC 12 | ||
| 2015 | #define FORMAT_DIF_CSUM_INC 13 | ||
| 2016 | #define FORMAT_DIF_CSUM_NO_INC 14 | ||
| 2017 | |||
| 2018 | switch (domain->sig.dif.type) { | ||
| 2019 | case IB_T10DIF_NONE: | ||
| 2020 | /* No DIF */ | ||
| 2021 | *selector = FORMAT_DIF_NONE; | ||
| 2022 | break; | ||
| 2023 | case IB_T10DIF_TYPE1: /* Fall through */ | ||
| 2024 | case IB_T10DIF_TYPE2: | ||
| 2025 | switch (domain->sig.dif.bg_type) { | ||
| 2026 | case IB_T10DIF_CRC: | ||
| 2027 | *selector = FORMAT_DIF_CRC_INC; | ||
| 2028 | break; | ||
| 2029 | case IB_T10DIF_CSUM: | ||
| 2030 | *selector = FORMAT_DIF_CSUM_INC; | ||
| 2031 | break; | ||
| 2032 | default: | ||
| 2033 | return 1; | ||
| 2034 | } | ||
| 2035 | break; | ||
| 2036 | case IB_T10DIF_TYPE3: | ||
| 2037 | switch (domain->sig.dif.bg_type) { | ||
| 2038 | case IB_T10DIF_CRC: | ||
| 2039 | *selector = domain->sig.dif.type3_inc_reftag ? | ||
| 2040 | FORMAT_DIF_CRC_INC : | ||
| 2041 | FORMAT_DIF_CRC_NO_INC; | ||
| 2042 | break; | ||
| 2043 | case IB_T10DIF_CSUM: | ||
| 2044 | *selector = domain->sig.dif.type3_inc_reftag ? | ||
| 2045 | FORMAT_DIF_CSUM_INC : | ||
| 2046 | FORMAT_DIF_CSUM_NO_INC; | ||
| 2047 | break; | ||
| 2048 | default: | ||
| 2049 | return 1; | ||
| 2050 | } | ||
| 2051 | break; | ||
| 2052 | default: | ||
| 2053 | return 1; | ||
| 2054 | } | ||
| 2055 | |||
| 2056 | return 0; | ||
| 2057 | } | ||
| 2058 | |||
| 2059 | static int mlx5_set_bsf(struct ib_mr *sig_mr, | ||
| 2060 | struct ib_sig_attrs *sig_attrs, | ||
| 2061 | struct mlx5_bsf *bsf, u32 data_size) | ||
| 2062 | { | ||
| 2063 | struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig; | ||
| 2064 | struct mlx5_bsf_basic *basic = &bsf->basic; | ||
| 2065 | struct ib_sig_domain *mem = &sig_attrs->mem; | ||
| 2066 | struct ib_sig_domain *wire = &sig_attrs->wire; | ||
| 2067 | int ret, selector; | ||
| 2068 | |||
| 2069 | switch (sig_attrs->mem.sig_type) { | ||
| 2070 | case IB_SIG_TYPE_T10_DIF: | ||
| 2071 | if (sig_attrs->wire.sig_type != IB_SIG_TYPE_T10_DIF) | ||
| 2072 | return -EINVAL; | ||
| 2073 | |||
| 2074 | /* Input domain check byte mask */ | ||
| 2075 | basic->check_byte_mask = sig_attrs->check_mask; | ||
| 2076 | if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval && | ||
| 2077 | mem->sig.dif.type == wire->sig.dif.type) { | ||
| 2078 | /* Same block structure */ | ||
| 2079 | basic->bsf_size_sbs = 1 << 4; | ||
| 2080 | if (mem->sig.dif.bg_type == wire->sig.dif.bg_type) | ||
| 2081 | basic->wire.copy_byte_mask = 0xff; | ||
| 2082 | else | ||
| 2083 | basic->wire.copy_byte_mask = 0x3f; | ||
| 2084 | } else | ||
| 2085 | basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval); | ||
| 2086 | |||
| 2087 | basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval); | ||
| 2088 | basic->raw_data_size = cpu_to_be32(data_size); | ||
| 2089 | |||
| 2090 | ret = format_selector(sig_attrs, mem, &selector); | ||
| 2091 | if (ret) | ||
| 2092 | return -EINVAL; | ||
| 2093 | basic->m_bfs_psv = cpu_to_be32(selector << 24 | | ||
| 2094 | msig->psv_memory.psv_idx); | ||
| 2095 | |||
| 2096 | ret = format_selector(sig_attrs, wire, &selector); | ||
| 2097 | if (ret) | ||
| 2098 | return -EINVAL; | ||
| 2099 | basic->w_bfs_psv = cpu_to_be32(selector << 24 | | ||
| 2100 | msig->psv_wire.psv_idx); | ||
| 2101 | break; | ||
| 2102 | |||
| 2103 | default: | ||
| 2104 | return -EINVAL; | ||
| 2105 | } | ||
| 2106 | |||
| 2107 | return 0; | ||
| 2108 | } | ||
| 2109 | |||
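mlx5_set_bsf() packs an 8-bit DIF format selector and a PSV index into one big-endian word per domain. A self-contained sketch of that packing, using the FORMAT_DIF_CRC_INC value from the defines above and an arbitrary example PSV index.

/* bsf_pack.c - selector in the top byte, PSV index in the low 24 bits. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define FORMAT_DIF_CRC_INC 8

static uint32_t pack_bfs_psv(uint8_t selector, uint32_t psv_idx)
{
    return htobe32((uint32_t)selector << 24 | (psv_idx & 0x00ffffff));
}

int main(void)
{
    uint32_t wire = pack_bfs_psv(FORMAT_DIF_CRC_INC, 0x1234);
    printf("host view: 0x%08x\n", be32toh(wire)); /* 0x08001234 */
    return 0;
}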
| 2110 | static int set_sig_data_segment(struct ib_send_wr *wr, struct mlx5_ib_qp *qp, | ||
| 2111 | void **seg, int *size) | ||
| 2112 | { | ||
| 2113 | struct ib_sig_attrs *sig_attrs = wr->wr.sig_handover.sig_attrs; | ||
| 2114 | struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr; | ||
| 2115 | struct mlx5_bsf *bsf; | ||
| 2116 | u32 data_len = wr->sg_list->length; | ||
| 2117 | u32 data_key = wr->sg_list->lkey; | ||
| 2118 | u64 data_va = wr->sg_list->addr; | ||
| 2119 | int ret; | ||
| 2120 | int wqe_size; | ||
| 2121 | |||
| 2122 | if (!wr->wr.sig_handover.prot) { | ||
| 2123 | /** | ||
| 2124 | * Source domain doesn't contain signature information | ||
| 2125 | * So need construct: | ||
| 2125 | * So we need to construct: | ||
| 2126 | * ------------------ | ||
| 2127 | * | data_klm | | ||
| 2128 | * ------------------ | ||
| 2129 | * | BSF | | ||
| 2130 | * ------------------ | ||
| 2131 | **/ | ||
| 2132 | struct mlx5_klm *data_klm = *seg; | ||
| 2133 | |||
| 2134 | data_klm->bcount = cpu_to_be32(data_len); | ||
| 2135 | data_klm->key = cpu_to_be32(data_key); | ||
| 2136 | data_klm->va = cpu_to_be64(data_va); | ||
| 2137 | wqe_size = ALIGN(sizeof(*data_klm), 64); | ||
| 2138 | } else { | ||
| 2139 | /** | ||
| 2140 | * Source domain contains signature information | ||
| 2141 | * So need construct a strided block format: | ||
| 2141 | * So we need to construct a strided block format: | ||
| 2142 | * --------------------------- | ||
| 2143 | * | stride_block_ctrl | | ||
| 2144 | * --------------------------- | ||
| 2145 | * | data_klm | | ||
| 2146 | * --------------------------- | ||
| 2147 | * | prot_klm | | ||
| 2148 | * --------------------------- | ||
| 2149 | * | BSF | | ||
| 2150 | * --------------------------- | ||
| 2151 | **/ | ||
| 2152 | struct mlx5_stride_block_ctrl_seg *sblock_ctrl; | ||
| 2153 | struct mlx5_stride_block_entry *data_sentry; | ||
| 2154 | struct mlx5_stride_block_entry *prot_sentry; | ||
| 2155 | u32 prot_key = wr->wr.sig_handover.prot->lkey; | ||
| 2156 | u64 prot_va = wr->wr.sig_handover.prot->addr; | ||
| 2157 | u16 block_size = sig_attrs->mem.sig.dif.pi_interval; | ||
| 2158 | int prot_size; | ||
| 2159 | |||
| 2160 | sblock_ctrl = *seg; | ||
| 2161 | data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl); | ||
| 2162 | prot_sentry = (void *)data_sentry + sizeof(*data_sentry); | ||
| 2163 | |||
| 2164 | prot_size = prot_field_size(sig_attrs->mem.sig_type); | ||
| 2165 | if (!prot_size) { | ||
| 2166 | pr_err("Bad block size given: %u\n", block_size); | ||
| 2167 | return -EINVAL; | ||
| 2168 | } | ||
| 2169 | sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size + | ||
| 2170 | prot_size); | ||
| 2171 | sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP); | ||
| 2172 | sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size); | ||
| 2173 | sblock_ctrl->num_entries = cpu_to_be16(2); | ||
| 2174 | |||
| 2175 | data_sentry->bcount = cpu_to_be16(block_size); | ||
| 2176 | data_sentry->key = cpu_to_be32(data_key); | ||
| 2177 | data_sentry->va = cpu_to_be64(data_va); | ||
| 2178 | prot_sentry->bcount = cpu_to_be16(prot_size); | ||
| 2179 | prot_sentry->key = cpu_to_be32(prot_key); | ||
| 2180 | |||
| 2181 | if (prot_key == data_key && prot_va == data_va) { | ||
| 2182 | /** | ||
| 2183 | * The data and protection are interleaved | ||
| 2184 | * in a single memory region | ||
| 2185 | **/ | ||
| 2186 | prot_sentry->va = cpu_to_be64(data_va + block_size); | ||
| 2187 | prot_sentry->stride = cpu_to_be16(block_size + prot_size); | ||
| 2188 | data_sentry->stride = prot_sentry->stride; | ||
| 2189 | } else { | ||
| 2190 | /* The data and protection are two different buffers */ | ||
| 2191 | prot_sentry->va = cpu_to_be64(prot_va); | ||
| 2192 | data_sentry->stride = cpu_to_be16(block_size); | ||
| 2193 | prot_sentry->stride = cpu_to_be16(prot_size); | ||
| 2194 | } | ||
| 2195 | wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) + | ||
| 2196 | sizeof(*prot_sentry), 64); | ||
| 2197 | } | ||
| 2198 | |||
| 2199 | *seg += wqe_size; | ||
| 2200 | *size += wqe_size / 16; | ||
| 2201 | if (unlikely((*seg == qp->sq.qend))) | ||
| 2202 | *seg = mlx5_get_send_wqe(qp, 0); | ||
| 2203 | |||
| 2204 | bsf = *seg; | ||
| 2205 | ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len); | ||
| 2206 | if (ret) | ||
| 2207 | return -EINVAL; | ||
| 2208 | |||
| 2209 | *seg += sizeof(*bsf); | ||
| 2210 | *size += sizeof(*bsf) / 16; | ||
| 2211 | if (unlikely((*seg == qp->sq.qend))) | ||
| 2212 | *seg = mlx5_get_send_wqe(qp, 0); | ||
| 2213 | |||
| 2214 | return 0; | ||
| 2215 | } | ||
| 2216 | |||
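A sketch of the strided-block geometry set_sig_data_segment() programs when the memory domain already carries protection: one repeat cycle spans a data block plus its DIF tuple, and each entry's stride depends on whether data and protection are interleaved in one buffer or split across two. The 512/8192 sizes are examples, and an 8-byte MLX5_DIF_SIZE is an assumption consistent with T10-DIF.

/* strided_block.c - models the stride/repeat arithmetic above. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t block_size = 512;   /* pi_interval of the memory domain */
    uint32_t prot_size  = 8;     /* assumed DIF tuple size */
    uint32_t data_len   = 8192;  /* bytes described by the data SGE */

    uint32_t bcount_per_cycle = block_size + prot_size;
    uint32_t repeat_count     = data_len / block_size;

    /* Interleaved: protection sits block_size bytes after each block,
     * and both entries stride over a full cycle. */
    uint32_t interleaved_stride = block_size + prot_size;

    /* Split buffers: each entry strides only over its own element size. */
    uint32_t data_stride = block_size, prot_stride = prot_size;

    printf("cycle=%u repeats=%u interleaved=%u split=%u/%u\n",
           bcount_per_cycle, repeat_count, interleaved_stride,
           data_stride, prot_stride);
    return 0;
}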
| 2217 | static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg, | ||
| 2218 | struct ib_send_wr *wr, u32 nelements, | ||
| 2219 | u32 length, u32 pdn) | ||
| 2220 | { | ||
| 2221 | struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr; | ||
| 2222 | u32 sig_key = sig_mr->rkey; | ||
| 2223 | u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1; | ||
| 2224 | |||
| 2225 | memset(seg, 0, sizeof(*seg)); | ||
| 2226 | |||
| 2227 | seg->flags = get_umr_flags(wr->wr.sig_handover.access_flags) | | ||
| 2228 | MLX5_ACCESS_MODE_KLM; | ||
| 2229 | seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00); | ||
| 2230 | seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 | | ||
| 2231 | MLX5_MKEY_BSF_EN | pdn); | ||
| 2232 | seg->len = cpu_to_be64(length); | ||
| 2233 | seg->xlt_oct_size = cpu_to_be32(be16_to_cpu(get_klm_octo(nelements))); | ||
| 2234 | seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE); | ||
| 2235 | } | ||
| 2236 | |||
| 2237 | static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, | ||
| 2238 | struct ib_send_wr *wr, u32 nelements) | ||
| 2239 | { | ||
| 2240 | memset(umr, 0, sizeof(*umr)); | ||
| 2241 | |||
| 2242 | umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE; | ||
| 2243 | umr->klm_octowords = get_klm_octo(nelements); | ||
| 2244 | umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE); | ||
| 2245 | umr->mkey_mask = sig_mkey_mask(); | ||
| 2246 | } | ||
| 2247 | |||
| 2248 | |||
| 2249 | static int set_sig_umr_wr(struct ib_send_wr *wr, struct mlx5_ib_qp *qp, | ||
| 2250 | void **seg, int *size) | ||
| 2251 | { | ||
| 2252 | struct mlx5_ib_mr *sig_mr = to_mmr(wr->wr.sig_handover.sig_mr); | ||
| 2253 | u32 pdn = get_pd(qp)->pdn; | ||
| 2254 | u32 klm_oct_size; | ||
| 2255 | int region_len, ret; | ||
| 2256 | |||
| 2257 | if (unlikely(wr->num_sge != 1) || | ||
| 2258 | unlikely(wr->wr.sig_handover.access_flags & | ||
| 2259 | IB_ACCESS_REMOTE_ATOMIC) || | ||
| 2260 | unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) || | ||
| 2261 | unlikely(!sig_mr->sig->sig_status_checked)) | ||
| 2262 | return -EINVAL; | ||
| 2263 | |||
| 2264 | /* length of the protected region, data + protection */ | ||
| 2265 | region_len = wr->sg_list->length; | ||
| 2266 | if (wr->wr.sig_handover.prot) | ||
| 2267 | region_len += wr->wr.sig_handover.prot->length; | ||
| 2268 | |||
| 2269 | /** | ||
| 2270 | * KLM octoword size - if protection was provided | ||
| 2271 | * then we use strided block format (3 octowords), | ||
| 2272 | * else we use single KLM (1 octoword) | ||
| 2273 | **/ | ||
| 2274 | klm_oct_size = wr->wr.sig_handover.prot ? 3 : 1; | ||
| 2275 | |||
| 2276 | set_sig_umr_segment(*seg, wr, klm_oct_size); | ||
| 2277 | *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); | ||
| 2278 | *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; | ||
| 2279 | if (unlikely((*seg == qp->sq.qend))) | ||
| 2280 | *seg = mlx5_get_send_wqe(qp, 0); | ||
| 2281 | |||
| 2282 | set_sig_mkey_segment(*seg, wr, klm_oct_size, region_len, pdn); | ||
| 2283 | *seg += sizeof(struct mlx5_mkey_seg); | ||
| 2284 | *size += sizeof(struct mlx5_mkey_seg) / 16; | ||
| 2285 | if (unlikely((*seg == qp->sq.qend))) | ||
| 2286 | *seg = mlx5_get_send_wqe(qp, 0); | ||
| 2287 | |||
| 2288 | ret = set_sig_data_segment(wr, qp, seg, size); | ||
| 2289 | if (ret) | ||
| 2290 | return ret; | ||
| 2291 | |||
| 2292 | sig_mr->sig->sig_status_checked = false; | ||
| 2293 | return 0; | ||
| 2294 | } | ||
| 2295 | |||
| 2296 | static int set_psv_wr(struct ib_sig_domain *domain, | ||
| 2297 | u32 psv_idx, void **seg, int *size) | ||
| 2298 | { | ||
| 2299 | struct mlx5_seg_set_psv *psv_seg = *seg; | ||
| 2300 | |||
| 2301 | memset(psv_seg, 0, sizeof(*psv_seg)); | ||
| 2302 | psv_seg->psv_num = cpu_to_be32(psv_idx); | ||
| 2303 | switch (domain->sig_type) { | ||
| 2304 | case IB_SIG_TYPE_T10_DIF: | ||
| 2305 | psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 | | ||
| 2306 | domain->sig.dif.app_tag); | ||
| 2307 | psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag); | ||
| 2308 | |||
| 2309 | *seg += sizeof(*psv_seg); | ||
| 2310 | *size += sizeof(*psv_seg) / 16; | ||
| 2311 | break; | ||
| 2312 | |||
| 2313 | default: | ||
| 2314 | pr_err("Bad signature type given.\n"); | ||
| 2315 | return 1; | ||
| 2316 | } | ||
| 2317 | |||
| 2318 | return 0; | ||
| 2319 | } | ||
| 2320 | |||
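set_psv_wr() seeds each PSV with the initial T10-DIF tags: the block guard and application tag share one 32-bit word, the reference tag takes the next. A sketch with arbitrary example tag values.

/* psv_seed.c - models the transient_sig/ref_tag packing above. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t bg = 0xabcd, app_tag = 0x1234;   /* example tags */
    uint32_t ref_tag = 0xdeadbeef;

    uint32_t transient_sig = htobe32((uint32_t)bg << 16 | app_tag);
    uint32_t ref = htobe32(ref_tag);

    printf("transient_sig=0x%08x ref_tag=0x%08x\n",
           be32toh(transient_sig), be32toh(ref));
    return 0;
}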
| 1957 | static int set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size, | 2321 | static int set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size, |
| 1958 | struct mlx5_core_dev *mdev, struct mlx5_ib_pd *pd, struct mlx5_ib_qp *qp) | 2322 | struct mlx5_core_dev *mdev, struct mlx5_ib_pd *pd, struct mlx5_ib_qp *qp) |
| 1959 | { | 2323 | { |
| @@ -2041,6 +2405,59 @@ static u8 get_fence(u8 fence, struct ib_send_wr *wr) | |||
| 2041 | } | 2405 | } |
| 2042 | } | 2406 | } |
| 2043 | 2407 | ||
| 2408 | static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, | ||
| 2409 | struct mlx5_wqe_ctrl_seg **ctrl, | ||
| 2410 | struct ib_send_wr *wr, int *idx, | ||
| 2411 | int *size, int nreq) | ||
| 2412 | { | ||
| 2413 | int err = 0; | ||
| 2414 | |||
| 2415 | if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) { | ||
| 2416 | err = -ENOMEM; | ||
| 2417 | return err; | ||
| 2418 | } | ||
| 2419 | |||
| 2420 | *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1); | ||
| 2421 | *seg = mlx5_get_send_wqe(qp, *idx); | ||
| 2422 | *ctrl = *seg; | ||
| 2423 | *(uint32_t *)(*seg + 8) = 0; | ||
| 2424 | (*ctrl)->imm = send_ieth(wr); | ||
| 2425 | (*ctrl)->fm_ce_se = qp->sq_signal_bits | | ||
| 2426 | (wr->send_flags & IB_SEND_SIGNALED ? | ||
| 2427 | MLX5_WQE_CTRL_CQ_UPDATE : 0) | | ||
| 2428 | (wr->send_flags & IB_SEND_SOLICITED ? | ||
| 2429 | MLX5_WQE_CTRL_SOLICITED : 0); | ||
| 2430 | |||
| 2431 | *seg += sizeof(**ctrl); | ||
| 2432 | *size = sizeof(**ctrl) / 16; | ||
| 2433 | |||
| 2434 | return err; | ||
| 2435 | } | ||
| 2436 | |||
| 2437 | static void finish_wqe(struct mlx5_ib_qp *qp, | ||
| 2438 | struct mlx5_wqe_ctrl_seg *ctrl, | ||
| 2439 | u8 size, unsigned idx, u64 wr_id, | ||
| 2440 | int nreq, u8 fence, u8 next_fence, | ||
| 2441 | u32 mlx5_opcode) | ||
| 2442 | { | ||
| 2443 | u8 opmod = 0; | ||
| 2444 | |||
| 2445 | ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) | | ||
| 2446 | mlx5_opcode | ((u32)opmod << 24)); | ||
| 2447 | ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8)); | ||
| 2448 | ctrl->fm_ce_se |= fence; | ||
| 2449 | qp->fm_cache = next_fence; | ||
| 2450 | if (unlikely(qp->wq_sig)) | ||
| 2451 | ctrl->signature = wq_sig(ctrl); | ||
| 2452 | |||
| 2453 | qp->sq.wrid[idx] = wr_id; | ||
| 2454 | qp->sq.w_list[idx].opcode = mlx5_opcode; | ||
| 2455 | qp->sq.wqe_head[idx] = qp->sq.head + nreq; | ||
| 2456 | qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB); | ||
| 2457 | qp->sq.w_list[idx].next = qp->sq.cur_post; | ||
| 2458 | } | ||
| 2459 | |||
| 2460 | |||
| 2044 | int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | 2461 | int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, |
| 2045 | struct ib_send_wr **bad_wr) | 2462 | struct ib_send_wr **bad_wr) |
| 2046 | { | 2463 | { |
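A user-space model of the producer bookkeeping that begin_wqe()/finish_wqe() factor out of the post-send loop: the slot index is the post counter masked by the power-of-two queue depth, and the counter then advances by however many 64-byte basic blocks the WQE consumed. Queue depth and WQE size below are illustrative.

/* post_ring.c - send-queue slot selection and post-counter advance. */
#include <stdio.h>

#define SEND_WQE_BB 64

static unsigned cur_post;

static unsigned begin_wqe(unsigned wqe_cnt)
{
    return cur_post & (wqe_cnt - 1);      /* cheap modulo: wqe_cnt is 2^n */
}

static void finish_wqe(int size_in_16b)
{
    /* size is tracked in 16-byte units; convert to basic blocks */
    cur_post += (size_in_16b * 16 + SEND_WQE_BB - 1) / SEND_WQE_BB;
}

int main(void)
{
    unsigned wqe_cnt = 64;
    for (int i = 0; i < 4; i++) {
        unsigned idx = begin_wqe(wqe_cnt);
        printf("wqe %d lands in slot %u\n", i, idx);
        finish_wqe(12);                   /* a 192-byte WQE = 3 blocks */
    }
    return 0;
}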
| @@ -2048,13 +2465,13 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
| 2048 | struct mlx5_ib_dev *dev = to_mdev(ibqp->device); | 2465 | struct mlx5_ib_dev *dev = to_mdev(ibqp->device); |
| 2049 | struct mlx5_core_dev *mdev = &dev->mdev; | 2466 | struct mlx5_core_dev *mdev = &dev->mdev; |
| 2050 | struct mlx5_ib_qp *qp = to_mqp(ibqp); | 2467 | struct mlx5_ib_qp *qp = to_mqp(ibqp); |
| 2468 | struct mlx5_ib_mr *mr; | ||
| 2051 | struct mlx5_wqe_data_seg *dpseg; | 2469 | struct mlx5_wqe_data_seg *dpseg; |
| 2052 | struct mlx5_wqe_xrc_seg *xrc; | 2470 | struct mlx5_wqe_xrc_seg *xrc; |
| 2053 | struct mlx5_bf *bf = qp->bf; | 2471 | struct mlx5_bf *bf = qp->bf; |
| 2054 | int uninitialized_var(size); | 2472 | int uninitialized_var(size); |
| 2055 | void *qend = qp->sq.qend; | 2473 | void *qend = qp->sq.qend; |
| 2056 | unsigned long flags; | 2474 | unsigned long flags; |
| 2057 | u32 mlx5_opcode; | ||
| 2058 | unsigned idx; | 2475 | unsigned idx; |
| 2059 | int err = 0; | 2476 | int err = 0; |
| 2060 | int inl = 0; | 2477 | int inl = 0; |
| @@ -2063,7 +2480,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
| 2063 | int nreq; | 2480 | int nreq; |
| 2064 | int i; | 2481 | int i; |
| 2065 | u8 next_fence = 0; | 2482 | u8 next_fence = 0; |
| 2066 | u8 opmod = 0; | ||
| 2067 | u8 fence; | 2483 | u8 fence; |
| 2068 | 2484 | ||
| 2069 | spin_lock_irqsave(&qp->sq.lock, flags); | 2485 | spin_lock_irqsave(&qp->sq.lock, flags); |
| @@ -2076,36 +2492,23 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
| 2076 | goto out; | 2492 | goto out; |
| 2077 | } | 2493 | } |
| 2078 | 2494 | ||
| 2079 | if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) { | 2495 | fence = qp->fm_cache; |
| 2496 | num_sge = wr->num_sge; | ||
| 2497 | if (unlikely(num_sge > qp->sq.max_gs)) { | ||
| 2080 | mlx5_ib_warn(dev, "\n"); | 2498 | mlx5_ib_warn(dev, "\n"); |
| 2081 | err = -ENOMEM; | 2499 | err = -ENOMEM; |
| 2082 | *bad_wr = wr; | 2500 | *bad_wr = wr; |
| 2083 | goto out; | 2501 | goto out; |
| 2084 | } | 2502 | } |
| 2085 | 2503 | ||
| 2086 | fence = qp->fm_cache; | 2504 | err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq); |
| 2087 | num_sge = wr->num_sge; | 2505 | if (err) { |
| 2088 | if (unlikely(num_sge > qp->sq.max_gs)) { | ||
| 2089 | mlx5_ib_warn(dev, "\n"); | 2506 | mlx5_ib_warn(dev, "\n"); |
| 2090 | err = -ENOMEM; | 2507 | err = -ENOMEM; |
| 2091 | *bad_wr = wr; | 2508 | *bad_wr = wr; |
| 2092 | goto out; | 2509 | goto out; |
| 2093 | } | 2510 | } |
| 2094 | 2511 | ||
| 2095 | idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1); | ||
| 2096 | seg = mlx5_get_send_wqe(qp, idx); | ||
| 2097 | ctrl = seg; | ||
| 2098 | *(uint32_t *)(seg + 8) = 0; | ||
| 2099 | ctrl->imm = send_ieth(wr); | ||
| 2100 | ctrl->fm_ce_se = qp->sq_signal_bits | | ||
| 2101 | (wr->send_flags & IB_SEND_SIGNALED ? | ||
| 2102 | MLX5_WQE_CTRL_CQ_UPDATE : 0) | | ||
| 2103 | (wr->send_flags & IB_SEND_SOLICITED ? | ||
| 2104 | MLX5_WQE_CTRL_SOLICITED : 0); | ||
| 2105 | |||
| 2106 | seg += sizeof(*ctrl); | ||
| 2107 | size = sizeof(*ctrl) / 16; | ||
| 2108 | |||
| 2109 | switch (ibqp->qp_type) { | 2512 | switch (ibqp->qp_type) { |
| 2110 | case IB_QPT_XRC_INI: | 2513 | case IB_QPT_XRC_INI: |
| 2111 | xrc = seg; | 2514 | xrc = seg; |
| @@ -2158,6 +2561,73 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
| 2158 | num_sge = 0; | 2561 | num_sge = 0; |
| 2159 | break; | 2562 | break; |
| 2160 | 2563 | ||
| 2564 | case IB_WR_REG_SIG_MR: | ||
| 2565 | qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR; | ||
| 2566 | mr = to_mmr(wr->wr.sig_handover.sig_mr); | ||
| 2567 | |||
| 2568 | ctrl->imm = cpu_to_be32(mr->ibmr.rkey); | ||
| 2569 | err = set_sig_umr_wr(wr, qp, &seg, &size); | ||
| 2570 | if (err) { | ||
| 2571 | mlx5_ib_warn(dev, "\n"); | ||
| 2572 | *bad_wr = wr; | ||
| 2573 | goto out; | ||
| 2574 | } | ||
| 2575 | |||
| 2576 | finish_wqe(qp, ctrl, size, idx, wr->wr_id, | ||
| 2577 | nreq, get_fence(fence, wr), | ||
| 2578 | next_fence, MLX5_OPCODE_UMR); | ||
| 2579 | /* | ||
| 2580 | * SET_PSV WQEs are not signaled and solicited | ||
| 2581 | * on error | ||
| 2582 | */ | ||
| 2583 | wr->send_flags &= ~IB_SEND_SIGNALED; | ||
| 2584 | wr->send_flags |= IB_SEND_SOLICITED; | ||
| 2585 | err = begin_wqe(qp, &seg, &ctrl, wr, | ||
| 2586 | &idx, &size, nreq); | ||
| 2587 | if (err) { | ||
| 2588 | mlx5_ib_warn(dev, "\n"); | ||
| 2589 | err = -ENOMEM; | ||
| 2590 | *bad_wr = wr; | ||
| 2591 | goto out; | ||
| 2592 | } | ||
| 2593 | |||
| 2594 | err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->mem, | ||
| 2595 | mr->sig->psv_memory.psv_idx, &seg, | ||
| 2596 | &size); | ||
| 2597 | if (err) { | ||
| 2598 | mlx5_ib_warn(dev, "\n"); | ||
| 2599 | *bad_wr = wr; | ||
| 2600 | goto out; | ||
| 2601 | } | ||
| 2602 | |||
| 2603 | finish_wqe(qp, ctrl, size, idx, wr->wr_id, | ||
| 2604 | nreq, get_fence(fence, wr), | ||
| 2605 | next_fence, MLX5_OPCODE_SET_PSV); | ||
| 2606 | err = begin_wqe(qp, &seg, &ctrl, wr, | ||
| 2607 | &idx, &size, nreq); | ||
| 2608 | if (err) { | ||
| 2609 | mlx5_ib_warn(dev, "\n"); | ||
| 2610 | err = -ENOMEM; | ||
| 2611 | *bad_wr = wr; | ||
| 2612 | goto out; | ||
| 2613 | } | ||
| 2614 | |||
| 2615 | next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; | ||
| 2616 | err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->wire, | ||
| 2617 | mr->sig->psv_wire.psv_idx, &seg, | ||
| 2618 | &size); | ||
| 2619 | if (err) { | ||
| 2620 | mlx5_ib_warn(dev, "\n"); | ||
| 2621 | *bad_wr = wr; | ||
| 2622 | goto out; | ||
| 2623 | } | ||
| 2624 | |||
| 2625 | finish_wqe(qp, ctrl, size, idx, wr->wr_id, | ||
| 2626 | nreq, get_fence(fence, wr), | ||
| 2627 | next_fence, MLX5_OPCODE_SET_PSV); | ||
| 2628 | num_sge = 0; | ||
| 2629 | goto skip_psv; | ||
| 2630 | |||
| 2161 | default: | 2631 | default: |
| 2162 | break; | 2632 | break; |
| 2163 | } | 2633 | } |
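The IB_WR_REG_SIG_MR case above turns one work request into three back-to-back WQEs: the UMR that configures the signature mkey, then a SET_PSV for the memory domain and one for the wire domain. Only the UMR keeps the caller's completion flags; the PSV WQEs are stripped of SIGNALED and marked SOLICITED, and a small initiator fence is recorded so later work orders behind the hand-over. A toy model of that expansion; opcode and flag values are invented.

/* sig_post_seq.c - the three-WQE expansion of a signature hand-over WR. */
#include <stdio.h>

enum { OP_UMR, OP_SET_PSV };
enum { F_SIGNALED = 1, F_SOLICITED = 2 };

struct wqe { int opcode; int flags; const char *psv; };

int main(void)
{
    int user_flags = F_SIGNALED;          /* what the caller asked for */
    struct wqe seq[3];

    seq[0] = (struct wqe){ OP_UMR, user_flags, "-" };
    /* the PSV WQEs drop SIGNALED and gain SOLICITED, as in the driver */
    seq[1] = (struct wqe){ OP_SET_PSV, F_SOLICITED, "memory" };
    seq[2] = (struct wqe){ OP_SET_PSV, F_SOLICITED, "wire" };

    for (int i = 0; i < 3; i++)
        printf("wqe[%d]: op=%d flags=%d psv=%s\n",
               i, seq[i].opcode, seq[i].flags, seq[i].psv);
    return 0;
}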
| @@ -2238,22 +2708,10 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
| 2238 | } | 2708 | } |
| 2239 | } | 2709 | } |
| 2240 | 2710 | ||
| 2241 | mlx5_opcode = mlx5_ib_opcode[wr->opcode]; | 2711 | finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, |
| 2242 | ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) | | 2712 | get_fence(fence, wr), next_fence, |
| 2243 | mlx5_opcode | | 2713 | mlx5_ib_opcode[wr->opcode]); |
| 2244 | ((u32)opmod << 24)); | 2714 | skip_psv: |
| 2245 | ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8)); | ||
| 2246 | ctrl->fm_ce_se |= get_fence(fence, wr); | ||
| 2247 | qp->fm_cache = next_fence; | ||
| 2248 | if (unlikely(qp->wq_sig)) | ||
| 2249 | ctrl->signature = wq_sig(ctrl); | ||
| 2250 | |||
| 2251 | qp->sq.wrid[idx] = wr->wr_id; | ||
| 2252 | qp->sq.w_list[idx].opcode = mlx5_opcode; | ||
| 2253 | qp->sq.wqe_head[idx] = qp->sq.head + nreq; | ||
| 2254 | qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB); | ||
| 2255 | qp->sq.w_list[idx].next = qp->sq.cur_post; | ||
| 2256 | |||
| 2257 | if (0) | 2715 | if (0) |
| 2258 | dump_wqe(qp, idx, size); | 2716 | dump_wqe(qp, idx, size); |
| 2259 | } | 2717 | } |
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 5b71d43bd89c..415f8e1a54db 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c | |||
| @@ -695,6 +695,7 @@ static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries, | |||
| 695 | 695 | ||
| 696 | if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) { | 696 | if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) { |
| 697 | mthca_free_cq(to_mdev(ibdev), cq); | 697 | mthca_free_cq(to_mdev(ibdev), cq); |
| 698 | err = -EFAULT; | ||
| 698 | goto err_free; | 699 | goto err_free; |
| 699 | } | 700 | } |
| 700 | 701 | ||
| @@ -976,12 +977,12 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
| 976 | u64 virt, int acc, struct ib_udata *udata) | 977 | u64 virt, int acc, struct ib_udata *udata) |
| 977 | { | 978 | { |
| 978 | struct mthca_dev *dev = to_mdev(pd->device); | 979 | struct mthca_dev *dev = to_mdev(pd->device); |
| 979 | struct ib_umem_chunk *chunk; | 980 | struct scatterlist *sg; |
| 980 | struct mthca_mr *mr; | 981 | struct mthca_mr *mr; |
| 981 | struct mthca_reg_mr ucmd; | 982 | struct mthca_reg_mr ucmd; |
| 982 | u64 *pages; | 983 | u64 *pages; |
| 983 | int shift, n, len; | 984 | int shift, n, len; |
| 984 | int i, j, k; | 985 | int i, k, entry; |
| 985 | int err = 0; | 986 | int err = 0; |
| 986 | int write_mtt_size; | 987 | int write_mtt_size; |
| 987 | 988 | ||
| @@ -1009,10 +1010,7 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
| 1009 | } | 1010 | } |
| 1010 | 1011 | ||
| 1011 | shift = ffs(mr->umem->page_size) - 1; | 1012 | shift = ffs(mr->umem->page_size) - 1; |
| 1012 | 1013 | n = mr->umem->nmap; | |
| 1013 | n = 0; | ||
| 1014 | list_for_each_entry(chunk, &mr->umem->chunk_list, list) | ||
| 1015 | n += chunk->nents; | ||
| 1016 | 1014 | ||
| 1017 | mr->mtt = mthca_alloc_mtt(dev, n); | 1015 | mr->mtt = mthca_alloc_mtt(dev, n); |
| 1018 | if (IS_ERR(mr->mtt)) { | 1016 | if (IS_ERR(mr->mtt)) { |
| @@ -1030,25 +1028,24 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
| 1030 | 1028 | ||
| 1031 | write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages)); | 1029 | write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages)); |
| 1032 | 1030 | ||
| 1033 | list_for_each_entry(chunk, &mr->umem->chunk_list, list) | 1031 | for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) { |
| 1034 | for (j = 0; j < chunk->nmap; ++j) { | 1032 | len = sg_dma_len(sg) >> shift; |
| 1035 | len = sg_dma_len(&chunk->page_list[j]) >> shift; | 1033 | for (k = 0; k < len; ++k) { |
| 1036 | for (k = 0; k < len; ++k) { | 1034 | pages[i++] = sg_dma_address(sg) + |
| 1037 | pages[i++] = sg_dma_address(&chunk->page_list[j]) + | 1035 | mr->umem->page_size * k; |
| 1038 | mr->umem->page_size * k; | 1036 | /* |
| 1039 | /* | 1037 | * Be friendly to write_mtt and pass it chunks |
| 1040 | * Be friendly to write_mtt and pass it chunks | 1038 | * of appropriate size. |
| 1041 | * of appropriate size. | 1039 | */ |
| 1042 | */ | 1040 | if (i == write_mtt_size) { |
| 1043 | if (i == write_mtt_size) { | 1041 | err = mthca_write_mtt(dev, mr->mtt, n, pages, i); |
| 1044 | err = mthca_write_mtt(dev, mr->mtt, n, pages, i); | 1042 | if (err) |
| 1045 | if (err) | 1043 | goto mtt_done; |
| 1046 | goto mtt_done; | 1044 | n += i; |
| 1047 | n += i; | 1045 | i = 0; |
| 1048 | i = 0; | ||
| 1049 | } | ||
| 1050 | } | 1046 | } |
| 1051 | } | 1047 | } |
| 1048 | } | ||
| 1052 | 1049 | ||
| 1053 | if (i) | 1050 | if (i) |
| 1054 | err = mthca_write_mtt(dev, mr->mtt, n, pages, i); | 1051 | err = mthca_write_mtt(dev, mr->mtt, n, pages, i); |
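With ib_umem_chunk gone, the registration path walks the umem scatterlist directly. A stand-alone model of that loop: each DMA segment expands into page addresses that are handed to write_mtt() in bounded batches. The segment table, page size, and batch size are made-up examples.

/* sg_pages.c - models the for_each_sg() page walk above. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096
#define BATCH     4            /* stands in for write_mtt_size */

struct seg { uint64_t dma_addr; uint32_t dma_len; };

static void write_mtt(int start, const uint64_t *pages, int n)
{
    printf("write_mtt: %d entries from index %d, first page 0x%llx\n",
           n, start, (unsigned long long)pages[0]);
}

int main(void)
{
    struct seg sgl[] = { { 0x100000, 3 * PAGE_SIZE },
                         { 0x200000, 2 * PAGE_SIZE } };
    uint64_t pages[BATCH];
    int i = 0, n = 0;

    for (unsigned s = 0; s < sizeof(sgl) / sizeof(sgl[0]); s++) {
        uint32_t len = sgl[s].dma_len / PAGE_SIZE;
        for (uint32_t k = 0; k < len; k++) {
            pages[i++] = sgl[s].dma_addr + (uint64_t)PAGE_SIZE * k;
            if (i == BATCH) {             /* keep write_mtt chunks bounded */
                write_mtt(n, pages, i);
                n += i;
                i = 0;
            }
        }
    }
    if (i)
        write_mtt(n, pages, i);           /* flush the tail */
    return 0;
}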
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index 9c9f2f57e960..dfa9df484505 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c | |||
| @@ -128,6 +128,7 @@ static void build_mpa_v1(struct nes_cm_node *, void *, u8); | |||
| 128 | static void build_rdma0_msg(struct nes_cm_node *, struct nes_qp **); | 128 | static void build_rdma0_msg(struct nes_cm_node *, struct nes_qp **); |
| 129 | 129 | ||
| 130 | static void print_core(struct nes_cm_core *core); | 130 | static void print_core(struct nes_cm_core *core); |
| 131 | static void record_ird_ord(struct nes_cm_node *, u16, u16); | ||
| 131 | 132 | ||
| 132 | /* External CM API Interface */ | 133 | /* External CM API Interface */ |
| 133 | /* instance of function pointers for client API */ | 134 | /* instance of function pointers for client API */ |
| @@ -317,7 +318,6 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type, | |||
| 317 | } | 318 | } |
| 318 | } | 319 | } |
| 319 | 320 | ||
| 320 | |||
| 321 | if (priv_data_len + mpa_hdr_len != len) { | 321 | if (priv_data_len + mpa_hdr_len != len) { |
| 322 | nes_debug(NES_DBG_CM, "The received ietf buffer was not right" | 322 | nes_debug(NES_DBG_CM, "The received ietf buffer was not right" |
| 323 | " complete (%x + %x != %x)\n", | 323 | " complete (%x + %x != %x)\n", |
| @@ -356,25 +356,57 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type, | |||
| 356 | /* send reset */ | 356 | /* send reset */ |
| 357 | return -EINVAL; | 357 | return -EINVAL; |
| 358 | } | 358 | } |
| 359 | if (ird_size == IETF_NO_IRD_ORD || ord_size == IETF_NO_IRD_ORD) | ||
| 360 | cm_node->mpav2_ird_ord = IETF_NO_IRD_ORD; | ||
| 359 | 361 | ||
| 360 | if (cm_node->state != NES_CM_STATE_MPAREQ_SENT) { | 362 | if (cm_node->mpav2_ird_ord != IETF_NO_IRD_ORD) { |
| 361 | /* responder */ | 363 | /* responder */ |
| 362 | if (cm_node->ord_size > ird_size) | 364 | if (cm_node->state != NES_CM_STATE_MPAREQ_SENT) { |
| 363 | cm_node->ord_size = ird_size; | 365 | /* we are still negotiating */ |
| 364 | } else { | 366 | if (ord_size > NES_MAX_IRD) { |
| 365 | /* initiator */ | 367 | cm_node->ird_size = NES_MAX_IRD; |
| 366 | if (cm_node->ord_size > ird_size) | 368 | } else { |
| 367 | cm_node->ord_size = ird_size; | 369 | cm_node->ird_size = ord_size; |
| 368 | 370 | if (ord_size == 0 && | |
| 369 | if (cm_node->ird_size < ord_size) { | 371 | (rtr_ctrl_ord & IETF_RDMA0_READ)) { |
| 370 | /* no resources available */ | 372 | cm_node->ird_size = 1; |
| 371 | /* send terminate message */ | 373 | nes_debug(NES_DBG_CM, |
| 372 | return -EINVAL; | 374 | "%s: Remote peer doesn't support RDMA0_READ (ord=%u)\n", |
| 375 | __func__, ord_size); | ||
| 376 | } | ||
| 377 | } | ||
| 378 | if (ird_size > NES_MAX_ORD) | ||
| 379 | cm_node->ord_size = NES_MAX_ORD; | ||
| 380 | else | ||
| 381 | cm_node->ord_size = ird_size; | ||
| 382 | } else { /* initiator */ | ||
| 383 | if (ord_size > NES_MAX_IRD) { | ||
| 384 | nes_debug(NES_DBG_CM, | ||
| 385 | "%s: Unable to support the requested (ord =%u)\n", | ||
| 386 | __func__, ord_size); | ||
| 387 | return -EINVAL; | ||
| 388 | } | ||
| 389 | cm_node->ird_size = ord_size; | ||
| 390 | |||
| 391 | if (ird_size > NES_MAX_ORD) { | ||
| 392 | cm_node->ord_size = NES_MAX_ORD; | ||
| 393 | } else { | ||
| 394 | if (ird_size == 0 && | ||
| 395 | (rtr_ctrl_ord & IETF_RDMA0_READ)) { | ||
| 396 | nes_debug(NES_DBG_CM, | ||
| 397 | "%s: Remote peer doesn't support RDMA0_READ (ird=%u)\n", | ||
| 398 | __func__, ird_size); | ||
| 399 | return -EINVAL; | ||
| 400 | } else { | ||
| 401 | cm_node->ord_size = ird_size; | ||
| 402 | } | ||
| 403 | } | ||
| 373 | } | 404 | } |
| 374 | } | 405 | } |
| 375 | 406 | ||
| 376 | if (rtr_ctrl_ord & IETF_RDMA0_READ) { | 407 | if (rtr_ctrl_ord & IETF_RDMA0_READ) { |
| 377 | cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO; | 408 | cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO; |
| 409 | |||
| 378 | } else if (rtr_ctrl_ord & IETF_RDMA0_WRITE) { | 410 | } else if (rtr_ctrl_ord & IETF_RDMA0_WRITE) { |
| 379 | cm_node->send_rdma0_op = SEND_RDMA_WRITE_ZERO; | 411 | cm_node->send_rdma0_op = SEND_RDMA_WRITE_ZERO; |
| 380 | } else { /* Not supported RDMA0 operation */ | 412 | } else { /* Not supported RDMA0 operation */ |
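A compact model of the responder-side arithmetic in the hunk above: the local IRD is taken from the peer's advertised ORD and clamped to NES_MAX_IRD (with a floor of 1 when the peer advertises 0 but still wants the RDMA0 READ), and the local ORD is taken from the peer's IRD and clamped to NES_MAX_ORD. The limits come from the nes_cm.h hunk later in this patch.

/* ird_ord.c - responder-side MPA v2 IRD/ORD sizing. */
#include <stdio.h>

#define NES_MAX_IRD 0x40
#define NES_MAX_ORD 0x7F

static void negotiate(unsigned peer_ird, unsigned peer_ord, int rdma0_read,
                      unsigned *ird, unsigned *ord)
{
    *ird = peer_ord > NES_MAX_IRD ? NES_MAX_IRD : peer_ord;
    if (*ird == 0 && rdma0_read)
        *ird = 1;                         /* room for the zero-length read */
    *ord = peer_ird > NES_MAX_ORD ? NES_MAX_ORD : peer_ird;
}

int main(void)
{
    unsigned ird, ord;

    negotiate(0x200, 0, 1, &ird, &ord);
    printf("ird=%u ord=%u\n", ird, ord);  /* ird=1 ord=127 */
    return 0;
}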
| @@ -514,6 +546,19 @@ static void print_core(struct nes_cm_core *core) | |||
| 514 | nes_debug(NES_DBG_CM, "-------------- end core ---------------\n"); | 546 | nes_debug(NES_DBG_CM, "-------------- end core ---------------\n"); |
| 515 | } | 547 | } |
| 516 | 548 | ||
| 549 | static void record_ird_ord(struct nes_cm_node *cm_node, | ||
| 550 | u16 conn_ird, u16 conn_ord) | ||
| 551 | { | ||
| 552 | if (conn_ird > NES_MAX_IRD) | ||
| 553 | conn_ird = NES_MAX_IRD; | ||
| 554 | |||
| 555 | if (conn_ord > NES_MAX_ORD) | ||
| 556 | conn_ord = NES_MAX_ORD; | ||
| 557 | |||
| 558 | cm_node->ird_size = conn_ird; | ||
| 559 | cm_node->ord_size = conn_ord; | ||
| 560 | } | ||
| 561 | |||
| 517 | /** | 562 | /** |
| 518 | * cm_build_mpa_frame - build a MPA V1 frame or MPA V2 frame | 563 | * cm_build_mpa_frame - build a MPA V1 frame or MPA V2 frame |
| 519 | */ | 564 | */ |
| @@ -557,11 +602,13 @@ static void build_mpa_v2(struct nes_cm_node *cm_node, | |||
| 557 | mpa_frame->priv_data_len += htons(IETF_RTR_MSG_SIZE); | 602 | mpa_frame->priv_data_len += htons(IETF_RTR_MSG_SIZE); |
| 558 | 603 | ||
| 559 | /* initialize RTR msg */ | 604 | /* initialize RTR msg */ |
| 560 | ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ? | 605 | if (cm_node->mpav2_ird_ord == IETF_NO_IRD_ORD) { |
| 561 | IETF_NO_IRD_ORD : cm_node->ird_size; | 606 | ctrl_ird = IETF_NO_IRD_ORD; |
| 562 | ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ? | 607 | ctrl_ord = IETF_NO_IRD_ORD; |
| 563 | IETF_NO_IRD_ORD : cm_node->ord_size; | 608 | } else { |
| 564 | 609 | ctrl_ird = cm_node->ird_size & IETF_NO_IRD_ORD; | |
| 610 | ctrl_ord = cm_node->ord_size & IETF_NO_IRD_ORD; | ||
| 611 | } | ||
| 565 | ctrl_ird |= IETF_PEER_TO_PEER; | 612 | ctrl_ird |= IETF_PEER_TO_PEER; |
| 566 | ctrl_ird |= IETF_FLPDU_ZERO_LEN; | 613 | ctrl_ird |= IETF_FLPDU_ZERO_LEN; |
| 567 | 614 | ||
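build_mpa_v2() now either advertises the negotiated sizes in the low 14 bits of each RTR control word or sends the IETF_NO_IRD_ORD marker when the peer opted out. A sketch of that encoding; IETF_NO_IRD_ORD is the value from the nes_cm.h hunk, while the two flag values are assumptions used only for illustration.

/* rtr_ctrl.c - encoding IRD/ORD into the MPA v2 RTR control words. */
#include <stdint.h>
#include <stdio.h>

#define IETF_NO_IRD_ORD     0x3FFF
#define IETF_PEER_TO_PEER   0x8000   /* assumed flag value */
#define IETF_FLPDU_ZERO_LEN 0x4000   /* assumed flag value */

int main(void)
{
    uint16_t ird_size = 0x40, ord_size = 0x7F;
    int no_ird_ord = 0;               /* peer did not opt out of IRD/ORD */

    uint16_t ctrl_ird = no_ird_ord ? IETF_NO_IRD_ORD
                                   : (ird_size & IETF_NO_IRD_ORD);
    uint16_t ctrl_ord = no_ird_ord ? IETF_NO_IRD_ORD
                                   : (ord_size & IETF_NO_IRD_ORD);

    ctrl_ird |= IETF_PEER_TO_PEER | IETF_FLPDU_ZERO_LEN;

    printf("ctrl_ird=0x%04x ctrl_ord=0x%04x\n", ctrl_ird, ctrl_ord);
    return 0;
}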
| @@ -610,7 +657,7 @@ static void build_rdma0_msg(struct nes_cm_node *cm_node, struct nes_qp **nesqp_a | |||
| 610 | struct nes_qp *nesqp = *nesqp_addr; | 657 | struct nes_qp *nesqp = *nesqp_addr; |
| 611 | struct nes_hw_qp_wqe *wqe = &nesqp->hwqp.sq_vbase[0]; | 658 | struct nes_hw_qp_wqe *wqe = &nesqp->hwqp.sq_vbase[0]; |
| 612 | 659 | ||
| 613 | u64temp = (unsigned long)nesqp; | 660 | u64temp = (unsigned long)nesqp->nesuqp_addr; |
| 614 | u64temp |= NES_SW_CONTEXT_ALIGN >> 1; | 661 | u64temp |= NES_SW_CONTEXT_ALIGN >> 1; |
| 615 | set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, u64temp); | 662 | set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, u64temp); |
| 616 | 663 | ||
| @@ -1409,8 +1456,9 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core, | |||
| 1409 | 1456 | ||
| 1410 | cm_node->mpa_frame_rev = mpa_version; | 1457 | cm_node->mpa_frame_rev = mpa_version; |
| 1411 | cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO; | 1458 | cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO; |
| 1412 | cm_node->ird_size = IETF_NO_IRD_ORD; | 1459 | cm_node->mpav2_ird_ord = 0; |
| 1413 | cm_node->ord_size = IETF_NO_IRD_ORD; | 1460 | cm_node->ird_size = 0; |
| 1461 | cm_node->ord_size = 0; | ||
| 1414 | 1462 | ||
| 1415 | nes_debug(NES_DBG_CM, "Make node addresses : loc = %pI4:%x, rem = %pI4:%x\n", | 1463 | nes_debug(NES_DBG_CM, "Make node addresses : loc = %pI4:%x, rem = %pI4:%x\n", |
| 1416 | &cm_node->loc_addr, cm_node->loc_port, | 1464 | &cm_node->loc_addr, cm_node->loc_port, |
| @@ -3027,11 +3075,11 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
| 3027 | rem_ref_cm_node(cm_node->cm_core, cm_node); | 3075 | rem_ref_cm_node(cm_node->cm_core, cm_node); |
| 3028 | return -ECONNRESET; | 3076 | return -ECONNRESET; |
| 3029 | } | 3077 | } |
| 3030 | |||
| 3031 | /* associate the node with the QP */ | 3078 | /* associate the node with the QP */ |
| 3032 | nesqp->cm_node = (void *)cm_node; | 3079 | nesqp->cm_node = (void *)cm_node; |
| 3033 | cm_node->nesqp = nesqp; | 3080 | cm_node->nesqp = nesqp; |
| 3034 | 3081 | ||
| 3082 | |||
| 3035 | nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n", | 3083 | nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n", |
| 3036 | nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener); | 3084 | nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener); |
| 3037 | atomic_inc(&cm_accepts); | 3085 | atomic_inc(&cm_accepts); |
| @@ -3054,6 +3102,11 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
| 3054 | if (cm_node->mpa_frame_rev == IETF_MPA_V1) | 3102 | if (cm_node->mpa_frame_rev == IETF_MPA_V1) |
| 3055 | mpa_frame_offset = 4; | 3103 | mpa_frame_offset = 4; |
| 3056 | 3104 | ||
| 3105 | if (cm_node->mpa_frame_rev == IETF_MPA_V1 || | ||
| 3106 | cm_node->mpav2_ird_ord == IETF_NO_IRD_ORD) { | ||
| 3107 | record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord); | ||
| 3108 | } | ||
| 3109 | |||
| 3057 | memcpy(mpa_v2_frame->priv_data, conn_param->private_data, | 3110 | memcpy(mpa_v2_frame->priv_data, conn_param->private_data, |
| 3058 | conn_param->private_data_len); | 3111 | conn_param->private_data_len); |
| 3059 | 3112 | ||
| @@ -3117,7 +3170,6 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
| 3117 | } | 3170 | } |
| 3118 | nesqp->skip_lsmm = 1; | 3171 | nesqp->skip_lsmm = 1; |
| 3119 | 3172 | ||
| 3120 | |||
| 3121 | /* Cache the cm_id in the qp */ | 3173 | /* Cache the cm_id in the qp */ |
| 3122 | nesqp->cm_id = cm_id; | 3174 | nesqp->cm_id = cm_id; |
| 3123 | cm_node->cm_id = cm_id; | 3175 | cm_node->cm_id = cm_id; |
| @@ -3154,7 +3206,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
| 3154 | nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32( | 3206 | nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32( |
| 3155 | ((u32)1 << NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT)); | 3207 | ((u32)1 << NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT)); |
| 3156 | nesqp->nesqp_context->ird_ord_sizes |= | 3208 | nesqp->nesqp_context->ird_ord_sizes |= |
| 3157 | cpu_to_le32((u32)conn_param->ord); | 3209 | cpu_to_le32((u32)cm_node->ord_size); |
| 3158 | 3210 | ||
| 3159 | memset(&nes_quad, 0, sizeof(nes_quad)); | 3211 | memset(&nes_quad, 0, sizeof(nes_quad)); |
| 3160 | nes_quad.DstIpAdrIndex = | 3212 | nes_quad.DstIpAdrIndex = |
| @@ -3194,6 +3246,9 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
| 3194 | cm_event.remote_addr = cm_id->remote_addr; | 3246 | cm_event.remote_addr = cm_id->remote_addr; |
| 3195 | cm_event.private_data = NULL; | 3247 | cm_event.private_data = NULL; |
| 3196 | cm_event.private_data_len = 0; | 3248 | cm_event.private_data_len = 0; |
| 3249 | cm_event.ird = cm_node->ird_size; | ||
| 3250 | cm_event.ord = cm_node->ord_size; | ||
| 3251 | |||
| 3197 | ret = cm_id->event_handler(cm_id, &cm_event); | 3252 | ret = cm_id->event_handler(cm_id, &cm_event); |
| 3198 | attr.qp_state = IB_QPS_RTS; | 3253 | attr.qp_state = IB_QPS_RTS; |
| 3199 | nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL); | 3254 | nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL); |
| @@ -3290,14 +3345,8 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
| 3290 | 3345 | ||
| 3291 | /* cache the cm_id in the qp */ | 3346 | /* cache the cm_id in the qp */ |
| 3292 | nesqp->cm_id = cm_id; | 3347 | nesqp->cm_id = cm_id; |
| 3293 | |||
| 3294 | cm_id->provider_data = nesqp; | 3348 | cm_id->provider_data = nesqp; |
| 3295 | |||
| 3296 | nesqp->private_data_len = conn_param->private_data_len; | 3349 | nesqp->private_data_len = conn_param->private_data_len; |
| 3297 | nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32((u32)conn_param->ord); | ||
| 3298 | /* space for rdma0 read msg */ | ||
| 3299 | if (conn_param->ord == 0) | ||
| 3300 | nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32(1); | ||
| 3301 | 3350 | ||
| 3302 | nes_debug(NES_DBG_CM, "requested ord = 0x%08X.\n", (u32)conn_param->ord); | 3351 | nes_debug(NES_DBG_CM, "requested ord = 0x%08X.\n", (u32)conn_param->ord); |
| 3303 | nes_debug(NES_DBG_CM, "mpa private data len =%u\n", | 3352 | nes_debug(NES_DBG_CM, "mpa private data len =%u\n", |
| @@ -3334,6 +3383,11 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
| 3334 | return -ENOMEM; | 3383 | return -ENOMEM; |
| 3335 | } | 3384 | } |
| 3336 | 3385 | ||
| 3386 | record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord); | ||
| 3387 | if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO && | ||
| 3388 | cm_node->ord_size == 0) | ||
| 3389 | cm_node->ord_size = 1; | ||
| 3390 | |||
| 3337 | cm_node->apbvt_set = apbvt_set; | 3391 | cm_node->apbvt_set = apbvt_set; |
| 3338 | nesqp->cm_node = cm_node; | 3392 | nesqp->cm_node = cm_node; |
| 3339 | cm_node->nesqp = nesqp; | 3393 | cm_node->nesqp = nesqp; |
| @@ -3530,6 +3584,8 @@ static void cm_event_connected(struct nes_cm_event *event) | |||
| 3530 | nesqp->nesqp_context->ird_ord_sizes |= | 3584 | nesqp->nesqp_context->ird_ord_sizes |= |
| 3531 | cpu_to_le32((u32)1 << | 3585 | cpu_to_le32((u32)1 << |
| 3532 | NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT); | 3586 | NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT); |
| 3587 | nesqp->nesqp_context->ird_ord_sizes |= | ||
| 3588 | cpu_to_le32((u32)cm_node->ord_size); | ||
| 3533 | 3589 | ||
| 3534 | /* Adjust tail for not having a LSMM */ | 3590 | /* Adjust tail for not having a LSMM */ |
| 3535 | /*nesqp->hwqp.sq_tail = 1;*/ | 3591 | /*nesqp->hwqp.sq_tail = 1;*/ |
| @@ -3742,8 +3798,13 @@ static void cm_event_mpa_req(struct nes_cm_event *event) | |||
| 3742 | cm_event_raddr->sin_addr.s_addr = htonl(event->cm_info.rem_addr); | 3798 | cm_event_raddr->sin_addr.s_addr = htonl(event->cm_info.rem_addr); |
| 3743 | cm_event.private_data = cm_node->mpa_frame_buf; | 3799 | cm_event.private_data = cm_node->mpa_frame_buf; |
| 3744 | cm_event.private_data_len = (u8)cm_node->mpa_frame_size; | 3800 | cm_event.private_data_len = (u8)cm_node->mpa_frame_size; |
| 3801 | if (cm_node->mpa_frame_rev == IETF_MPA_V1) { | ||
| 3802 | cm_event.ird = NES_MAX_IRD; | ||
| 3803 | cm_event.ord = NES_MAX_ORD; | ||
| 3804 | } else { | ||
| 3745 | cm_event.ird = cm_node->ird_size; | 3805 | cm_event.ird = cm_node->ird_size; |
| 3746 | cm_event.ord = cm_node->ord_size; | 3806 | cm_event.ord = cm_node->ord_size; |
| 3807 | } | ||
| 3747 | 3808 | ||
| 3748 | ret = cm_id->event_handler(cm_id, &cm_event); | 3809 | ret = cm_id->event_handler(cm_id, &cm_event); |
| 3749 | if (ret) | 3810 | if (ret) |
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h index 4646e6666087..522c99cd07c4 100644 --- a/drivers/infiniband/hw/nes/nes_cm.h +++ b/drivers/infiniband/hw/nes/nes_cm.h | |||
| @@ -58,6 +58,8 @@ | |||
| 58 | #define IETF_RDMA0_WRITE 0x8000 | 58 | #define IETF_RDMA0_WRITE 0x8000 |
| 59 | #define IETF_RDMA0_READ 0x4000 | 59 | #define IETF_RDMA0_READ 0x4000 |
| 60 | #define IETF_NO_IRD_ORD 0x3FFF | 60 | #define IETF_NO_IRD_ORD 0x3FFF |
| 61 | #define NES_MAX_IRD 0x40 | ||
| 62 | #define NES_MAX_ORD 0x7F | ||
| 61 | 63 | ||
| 62 | enum ietf_mpa_flags { | 64 | enum ietf_mpa_flags { |
| 63 | IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */ | 65 | IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */ |
| @@ -333,6 +335,7 @@ struct nes_cm_node { | |||
| 333 | enum mpa_frame_version mpa_frame_rev; | 335 | enum mpa_frame_version mpa_frame_rev; |
| 334 | u16 ird_size; | 336 | u16 ird_size; |
| 335 | u16 ord_size; | 337 | u16 ord_size; |
| 338 | u16 mpav2_ird_ord; | ||
| 336 | 339 | ||
| 337 | u16 mpa_frame_size; | 340 | u16 mpa_frame_size; |
| 338 | struct iw_cm_id *cm_id; | 341 | struct iw_cm_id *cm_id; |
diff --git a/drivers/infiniband/hw/nes/nes_user.h b/drivers/infiniband/hw/nes/nes_user.h index 4926de744488..529c421bb15c 100644 --- a/drivers/infiniband/hw/nes/nes_user.h +++ b/drivers/infiniband/hw/nes/nes_user.h | |||
| @@ -39,8 +39,8 @@ | |||
| 39 | 39 | ||
| 40 | #include <linux/types.h> | 40 | #include <linux/types.h> |
| 41 | 41 | ||
| 42 | #define NES_ABI_USERSPACE_VER 1 | 42 | #define NES_ABI_USERSPACE_VER 2 |
| 43 | #define NES_ABI_KERNEL_VER 1 | 43 | #define NES_ABI_KERNEL_VER 2 |
| 44 | 44 | ||
| 45 | /* | 45 | /* |
| 46 | * Make sure that all structs defined in this file remain laid out so | 46 | * Make sure that all structs defined in this file remain laid out so |
| @@ -78,6 +78,7 @@ struct nes_create_cq_req { | |||
| 78 | 78 | ||
| 79 | struct nes_create_qp_req { | 79 | struct nes_create_qp_req { |
| 80 | __u64 user_wqe_buffers; | 80 | __u64 user_wqe_buffers; |
| 81 | __u64 user_qp_buffer; | ||
| 81 | }; | 82 | }; |
| 82 | 83 | ||
| 83 | enum iwnes_memreg_type { | 84 | enum iwnes_memreg_type { |
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 8308e3634767..218dd3574285 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c | |||
| @@ -1186,11 +1186,13 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd, | |||
| 1186 | nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num); | 1186 | nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num); |
| 1187 | kfree(nesqp->allocated_buffer); | 1187 | kfree(nesqp->allocated_buffer); |
| 1188 | nes_debug(NES_DBG_QP, "ib_copy_from_udata() Failed \n"); | 1188 | nes_debug(NES_DBG_QP, "ib_copy_from_udata() Failed \n"); |
| 1189 | return NULL; | 1189 | return ERR_PTR(-EFAULT); |
| 1190 | } | 1190 | } |
| 1191 | if (req.user_wqe_buffers) { | 1191 | if (req.user_wqe_buffers) { |
| 1192 | virt_wqs = 1; | 1192 | virt_wqs = 1; |
| 1193 | } | 1193 | } |
| 1194 | if (req.user_qp_buffer) | ||
| 1195 | nesqp->nesuqp_addr = req.user_qp_buffer; | ||
| 1194 | if ((ibpd->uobject) && (ibpd->uobject->context)) { | 1196 | if ((ibpd->uobject) && (ibpd->uobject->context)) { |
| 1195 | nesqp->user_mode = 1; | 1197 | nesqp->user_mode = 1; |
| 1196 | nes_ucontext = to_nesucontext(ibpd->uobject->context); | 1198 | nes_ucontext = to_nesucontext(ibpd->uobject->context); |
| @@ -2307,7 +2309,7 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
| 2307 | struct nes_device *nesdev = nesvnic->nesdev; | 2309 | struct nes_device *nesdev = nesvnic->nesdev; |
| 2308 | struct nes_adapter *nesadapter = nesdev->nesadapter; | 2310 | struct nes_adapter *nesadapter = nesdev->nesadapter; |
| 2309 | struct ib_mr *ibmr = ERR_PTR(-EINVAL); | 2311 | struct ib_mr *ibmr = ERR_PTR(-EINVAL); |
| 2310 | struct ib_umem_chunk *chunk; | 2312 | struct scatterlist *sg; |
| 2311 | struct nes_ucontext *nes_ucontext; | 2313 | struct nes_ucontext *nes_ucontext; |
| 2312 | struct nes_pbl *nespbl; | 2314 | struct nes_pbl *nespbl; |
| 2313 | struct nes_mr *nesmr; | 2315 | struct nes_mr *nesmr; |
| @@ -2315,7 +2317,7 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
| 2315 | struct nes_mem_reg_req req; | 2317 | struct nes_mem_reg_req req; |
| 2316 | struct nes_vpbl vpbl; | 2318 | struct nes_vpbl vpbl; |
| 2317 | struct nes_root_vpbl root_vpbl; | 2319 | struct nes_root_vpbl root_vpbl; |
| 2318 | int nmap_index, page_index; | 2320 | int entry, page_index; |
| 2319 | int page_count = 0; | 2321 | int page_count = 0; |
| 2320 | int err, pbl_depth = 0; | 2322 | int err, pbl_depth = 0; |
| 2321 | int chunk_pages; | 2323 | int chunk_pages; |
| @@ -2330,6 +2332,7 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
| 2330 | u16 pbl_count; | 2332 | u16 pbl_count; |
| 2331 | u8 single_page = 1; | 2333 | u8 single_page = 1; |
| 2332 | u8 stag_key; | 2334 | u8 stag_key; |
| 2335 | int first_page = 1; | ||
| 2333 | 2336 | ||
| 2334 | region = ib_umem_get(pd->uobject->context, start, length, acc, 0); | 2337 | region = ib_umem_get(pd->uobject->context, start, length, acc, 0); |
| 2335 | if (IS_ERR(region)) { | 2338 | if (IS_ERR(region)) { |
| @@ -2380,128 +2383,125 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
| 2380 | } | 2383 | } |
| 2381 | nesmr->region = region; | 2384 | nesmr->region = region; |
| 2382 | 2385 | ||
| 2383 | list_for_each_entry(chunk, ®ion->chunk_list, list) { | 2386 | for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) { |
| 2384 | nes_debug(NES_DBG_MR, "Chunk: nents = %u, nmap = %u .\n", | 2387 | if (sg_dma_address(sg) & ~PAGE_MASK) { |
| 2385 | chunk->nents, chunk->nmap); | 2388 | ib_umem_release(region); |
| 2386 | for (nmap_index = 0; nmap_index < chunk->nmap; ++nmap_index) { | 2389 | nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index); |
| 2387 | if (sg_dma_address(&chunk->page_list[nmap_index]) & ~PAGE_MASK) { | 2390 | nes_debug(NES_DBG_MR, "Unaligned Memory Buffer: 0x%x\n", |
| 2388 | ib_umem_release(region); | 2391 | (unsigned int) sg_dma_address(sg)); |
| 2389 | nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index); | 2392 | ibmr = ERR_PTR(-EINVAL); |
| 2390 | nes_debug(NES_DBG_MR, "Unaligned Memory Buffer: 0x%x\n", | 2393 | kfree(nesmr); |
| 2391 | (unsigned int) sg_dma_address(&chunk->page_list[nmap_index])); | 2394 | goto reg_user_mr_err; |
| 2392 | ibmr = ERR_PTR(-EINVAL); | 2395 | } |
| 2393 | kfree(nesmr); | ||
| 2394 | goto reg_user_mr_err; | ||
| 2395 | } | ||
| 2396 | 2396 | ||
| 2397 | if (!sg_dma_len(&chunk->page_list[nmap_index])) { | 2397 | if (!sg_dma_len(sg)) { |
| 2398 | ib_umem_release(region); | 2398 | ib_umem_release(region); |
| 2399 | nes_free_resource(nesadapter, nesadapter->allocated_mrs, | 2399 | nes_free_resource(nesadapter, nesadapter->allocated_mrs, |
| 2400 | stag_index); | 2400 | stag_index); |
| 2401 | nes_debug(NES_DBG_MR, "Invalid Buffer Size\n"); | 2401 | nes_debug(NES_DBG_MR, "Invalid Buffer Size\n"); |
| 2402 | ibmr = ERR_PTR(-EINVAL); | 2402 | ibmr = ERR_PTR(-EINVAL); |
| 2403 | kfree(nesmr); | 2403 | kfree(nesmr); |
| 2404 | goto reg_user_mr_err; | 2404 | goto reg_user_mr_err; |
| 2405 | } | 2405 | } |
| 2406 | 2406 | ||
| 2407 | region_length += sg_dma_len(&chunk->page_list[nmap_index]); | 2407 | region_length += sg_dma_len(sg); |
| 2408 | chunk_pages = sg_dma_len(&chunk->page_list[nmap_index]) >> 12; | 2408 | chunk_pages = sg_dma_len(sg) >> 12; |
| 2409 | region_length -= skip_pages << 12; | 2409 | region_length -= skip_pages << 12; |
| 2410 | for (page_index=skip_pages; page_index < chunk_pages; page_index++) { | 2410 | for (page_index = skip_pages; page_index < chunk_pages; page_index++) { |
| 2411 | skip_pages = 0; | 2411 | skip_pages = 0; |
| 2412 | if ((page_count!=0)&&(page_count<<12)-(region->offset&(4096-1))>=region->length) | 2412 | if ((page_count != 0) && (page_count<<12)-(region->offset&(4096-1)) >= region->length) |
| 2413 | goto enough_pages; | 2413 | goto enough_pages; |
| 2414 | if ((page_count&0x01FF) == 0) { | 2414 | if ((page_count&0x01FF) == 0) { |
| 2415 | if (page_count >= 1024 * 512) { | 2415 | if (page_count >= 1024 * 512) { |
| 2416 | ib_umem_release(region); | ||
| 2417 | nes_free_resource(nesadapter, | ||
| 2418 | nesadapter->allocated_mrs, stag_index); | ||
| 2419 | kfree(nesmr); | ||
| 2420 | ibmr = ERR_PTR(-E2BIG); | ||
| 2421 | goto reg_user_mr_err; | ||
| 2422 | } | ||
| 2423 | if (root_pbl_index == 1) { | ||
| 2424 | root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, | ||
| 2425 | 8192, &root_vpbl.pbl_pbase); | ||
| 2426 | nes_debug(NES_DBG_MR, "Allocating root PBL, va = %p, pa = 0x%08X\n", | ||
| 2427 | root_vpbl.pbl_vbase, (unsigned int)root_vpbl.pbl_pbase); | ||
| 2428 | if (!root_vpbl.pbl_vbase) { | ||
| 2416 | ib_umem_release(region); | 2429 | ib_umem_release(region); |
| 2417 | nes_free_resource(nesadapter, | 2430 | pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase, |
| 2418 | nesadapter->allocated_mrs, stag_index); | 2431 | vpbl.pbl_pbase); |
| 2432 | nes_free_resource(nesadapter, nesadapter->allocated_mrs, | ||
| 2433 | stag_index); | ||
| 2419 | kfree(nesmr); | 2434 | kfree(nesmr); |
| 2420 | ibmr = ERR_PTR(-E2BIG); | 2435 | ibmr = ERR_PTR(-ENOMEM); |
| 2421 | goto reg_user_mr_err; | 2436 | goto reg_user_mr_err; |
| 2422 | } | 2437 | } |
| 2423 | if (root_pbl_index == 1) { | 2438 | root_vpbl.leaf_vpbl = kzalloc(sizeof(*root_vpbl.leaf_vpbl)*1024, |
| 2424 | root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, | 2439 | GFP_KERNEL); |
| 2425 | 8192, &root_vpbl.pbl_pbase); | 2440 | if (!root_vpbl.leaf_vpbl) { |
| 2426 | nes_debug(NES_DBG_MR, "Allocating root PBL, va = %p, pa = 0x%08X\n", | ||
| 2427 | root_vpbl.pbl_vbase, (unsigned int)root_vpbl.pbl_pbase); | ||
| 2428 | if (!root_vpbl.pbl_vbase) { | ||
| 2429 | ib_umem_release(region); | ||
| 2430 | pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase, | ||
| 2431 | vpbl.pbl_pbase); | ||
| 2432 | nes_free_resource(nesadapter, nesadapter->allocated_mrs, | ||
| 2433 | stag_index); | ||
| 2434 | kfree(nesmr); | ||
| 2435 | ibmr = ERR_PTR(-ENOMEM); | ||
| 2436 | goto reg_user_mr_err; | ||
| 2437 | } | ||
| 2438 | root_vpbl.leaf_vpbl = kzalloc(sizeof(*root_vpbl.leaf_vpbl)*1024, | ||
| 2439 | GFP_KERNEL); | ||
| 2440 | if (!root_vpbl.leaf_vpbl) { | ||
| 2441 | ib_umem_release(region); | ||
| 2442 | pci_free_consistent(nesdev->pcidev, 8192, root_vpbl.pbl_vbase, | ||
| 2443 | root_vpbl.pbl_pbase); | ||
| 2444 | pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase, | ||
| 2445 | vpbl.pbl_pbase); | ||
| 2446 | nes_free_resource(nesadapter, nesadapter->allocated_mrs, | ||
| 2447 | stag_index); | ||
| 2448 | kfree(nesmr); | ||
| 2449 | ibmr = ERR_PTR(-ENOMEM); | ||
| 2450 | goto reg_user_mr_err; | ||
| 2451 | } | ||
| 2452 | root_vpbl.pbl_vbase[0].pa_low = | ||
| 2453 | cpu_to_le32((u32)vpbl.pbl_pbase); | ||
| 2454 | root_vpbl.pbl_vbase[0].pa_high = | ||
| 2455 | cpu_to_le32((u32)((((u64)vpbl.pbl_pbase) >> 32))); | ||
| 2456 | root_vpbl.leaf_vpbl[0] = vpbl; | ||
| 2457 | } | ||
| 2458 | vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096, | ||
| 2459 | &vpbl.pbl_pbase); | ||
| 2460 | nes_debug(NES_DBG_MR, "Allocating leaf PBL, va = %p, pa = 0x%08X\n", | ||
| 2461 | vpbl.pbl_vbase, (unsigned int)vpbl.pbl_pbase); | ||
| 2462 | if (!vpbl.pbl_vbase) { | ||
| 2463 | ib_umem_release(region); | 2441 | ib_umem_release(region); |
| 2464 | nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index); | 2442 | pci_free_consistent(nesdev->pcidev, 8192, root_vpbl.pbl_vbase, |
| 2465 | ibmr = ERR_PTR(-ENOMEM); | 2443 | root_vpbl.pbl_pbase); |
| 2444 | pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase, | ||
| 2445 | vpbl.pbl_pbase); | ||
| 2446 | nes_free_resource(nesadapter, nesadapter->allocated_mrs, | ||
| 2447 | stag_index); | ||
| 2466 | kfree(nesmr); | 2448 | kfree(nesmr); |
| 2449 | ibmr = ERR_PTR(-ENOMEM); | ||
| 2467 | goto reg_user_mr_err; | 2450 | goto reg_user_mr_err; |
| 2468 | } | 2451 | } |
| 2469 | if (1 <= root_pbl_index) { | 2452 | root_vpbl.pbl_vbase[0].pa_low = |
| 2470 | root_vpbl.pbl_vbase[root_pbl_index].pa_low = | 2453 | cpu_to_le32((u32)vpbl.pbl_pbase); |
| 2471 | cpu_to_le32((u32)vpbl.pbl_pbase); | 2454 | root_vpbl.pbl_vbase[0].pa_high = |
| 2472 | root_vpbl.pbl_vbase[root_pbl_index].pa_high = | 2455 | cpu_to_le32((u32)((((u64)vpbl.pbl_pbase) >> 32))); |
| 2473 | cpu_to_le32((u32)((((u64)vpbl.pbl_pbase)>>32))); | 2456 | root_vpbl.leaf_vpbl[0] = vpbl; |
| 2474 | root_vpbl.leaf_vpbl[root_pbl_index] = vpbl; | ||
| 2475 | } | ||
| 2476 | root_pbl_index++; | ||
| 2477 | cur_pbl_index = 0; | ||
| 2478 | } | 2457 | } |
| 2479 | if (single_page) { | 2458 | vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096, |
| 2480 | if (page_count != 0) { | 2459 | &vpbl.pbl_pbase); |
| 2481 | if ((last_dma_addr+4096) != | 2460 | nes_debug(NES_DBG_MR, "Allocating leaf PBL, va = %p, pa = 0x%08X\n", |
| 2482 | (sg_dma_address(&chunk->page_list[nmap_index])+ | 2461 | vpbl.pbl_vbase, (unsigned int)vpbl.pbl_pbase); |
| 2483 | (page_index*4096))) | 2462 | if (!vpbl.pbl_vbase) { |
| 2484 | single_page = 0; | 2463 | ib_umem_release(region); |
| 2485 | last_dma_addr = sg_dma_address(&chunk->page_list[nmap_index])+ | 2464 | nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index); |
| 2486 | (page_index*4096); | 2465 | ibmr = ERR_PTR(-ENOMEM); |
| 2487 | } else { | 2466 | kfree(nesmr); |
| 2488 | first_dma_addr = sg_dma_address(&chunk->page_list[nmap_index])+ | 2467 | goto reg_user_mr_err; |
| 2489 | (page_index*4096); | 2468 | } |
| 2490 | last_dma_addr = first_dma_addr; | 2469 | if (1 <= root_pbl_index) { |
| 2491 | } | 2470 | root_vpbl.pbl_vbase[root_pbl_index].pa_low = |
| 2471 | cpu_to_le32((u32)vpbl.pbl_pbase); | ||
| 2472 | root_vpbl.pbl_vbase[root_pbl_index].pa_high = | ||
| 2473 | cpu_to_le32((u32)((((u64)vpbl.pbl_pbase)>>32))); | ||
| 2474 | root_vpbl.leaf_vpbl[root_pbl_index] = vpbl; | ||
| 2475 | } | ||
| 2476 | root_pbl_index++; | ||
| 2477 | cur_pbl_index = 0; | ||
| 2478 | } | ||
| 2479 | if (single_page) { | ||
| 2480 | if (page_count != 0) { | ||
| 2481 | if ((last_dma_addr+4096) != | ||
| 2482 | (sg_dma_address(sg)+ | ||
| 2483 | (page_index*4096))) | ||
| 2484 | single_page = 0; | ||
| 2485 | last_dma_addr = sg_dma_address(sg)+ | ||
| 2486 | (page_index*4096); | ||
| 2487 | } else { | ||
| 2488 | first_dma_addr = sg_dma_address(sg)+ | ||
| 2489 | (page_index*4096); | ||
| 2490 | last_dma_addr = first_dma_addr; | ||
| 2492 | } | 2491 | } |
| 2493 | |||
| 2494 | vpbl.pbl_vbase[cur_pbl_index].pa_low = | ||
| 2495 | cpu_to_le32((u32)(sg_dma_address(&chunk->page_list[nmap_index])+ | ||
| 2496 | (page_index*4096))); | ||
| 2497 | vpbl.pbl_vbase[cur_pbl_index].pa_high = | ||
| 2498 | cpu_to_le32((u32)((((u64)(sg_dma_address(&chunk->page_list[nmap_index])+ | ||
| 2499 | (page_index*4096))) >> 32))); | ||
| 2500 | cur_pbl_index++; | ||
| 2501 | page_count++; | ||
| 2502 | } | 2492 | } |
| 2493 | |||
| 2494 | vpbl.pbl_vbase[cur_pbl_index].pa_low = | ||
| 2495 | cpu_to_le32((u32)(sg_dma_address(sg)+ | ||
| 2496 | (page_index*4096))); | ||
| 2497 | vpbl.pbl_vbase[cur_pbl_index].pa_high = | ||
| 2498 | cpu_to_le32((u32)((((u64)(sg_dma_address(sg)+ | ||
| 2499 | (page_index*4096))) >> 32))); | ||
| 2500 | cur_pbl_index++; | ||
| 2501 | page_count++; | ||
| 2503 | } | 2502 | } |
| 2504 | } | 2503 | } |
| 2504 | |||
| 2505 | enough_pages: | 2505 | enough_pages: |
| 2506 | nes_debug(NES_DBG_MR, "calculating stag, stag_index=0x%08x, driver_key=0x%08x," | 2506 | nes_debug(NES_DBG_MR, "calculating stag, stag_index=0x%08x, driver_key=0x%08x," |
| 2507 | " stag_key=0x%08x\n", | 2507 | " stag_key=0x%08x\n", |
| @@ -2613,25 +2613,28 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
| 2613 | nespbl->pbl_size, (unsigned long) nespbl->pbl_pbase, | 2613 | nespbl->pbl_size, (unsigned long) nespbl->pbl_pbase, |
| 2614 | (void *) nespbl->pbl_vbase, nespbl->user_base); | 2614 | (void *) nespbl->pbl_vbase, nespbl->user_base); |
| 2615 | 2615 | ||
| 2616 | list_for_each_entry(chunk, ®ion->chunk_list, list) { | 2616 | for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) { |
| 2617 | for (nmap_index = 0; nmap_index < chunk->nmap; ++nmap_index) { | 2617 | chunk_pages = sg_dma_len(sg) >> 12; |
| 2618 | chunk_pages = sg_dma_len(&chunk->page_list[nmap_index]) >> 12; | 2618 | chunk_pages += (sg_dma_len(sg) & (4096-1)) ? 1 : 0; |
| 2619 | chunk_pages += (sg_dma_len(&chunk->page_list[nmap_index]) & (4096-1)) ? 1 : 0; | 2619 | if (first_page) { |
| 2620 | nespbl->page = sg_page(&chunk->page_list[0]); | 2620 | nespbl->page = sg_page(sg); |
| 2621 | for (page_index=0; page_index<chunk_pages; page_index++) { | 2621 | first_page = 0; |
| 2622 | ((__le32 *)pbl)[0] = cpu_to_le32((u32) | 2622 | } |
| 2623 | (sg_dma_address(&chunk->page_list[nmap_index])+ | 2623 | |
| 2624 | (page_index*4096))); | 2624 | for (page_index = 0; page_index < chunk_pages; page_index++) { |
| 2625 | ((__le32 *)pbl)[1] = cpu_to_le32(((u64) | 2625 | ((__le32 *)pbl)[0] = cpu_to_le32((u32) |
| 2626 | (sg_dma_address(&chunk->page_list[nmap_index])+ | 2626 | (sg_dma_address(sg)+ |
| 2627 | (page_index*4096)))>>32); | 2627 | (page_index*4096))); |
| 2628 | nes_debug(NES_DBG_MR, "pbl=%p, *pbl=0x%016llx, 0x%08x%08x\n", pbl, | 2628 | ((__le32 *)pbl)[1] = cpu_to_le32(((u64) |
| 2629 | (unsigned long long)*pbl, | 2629 | (sg_dma_address(sg)+ |
| 2630 | le32_to_cpu(((__le32 *)pbl)[1]), le32_to_cpu(((__le32 *)pbl)[0])); | 2630 | (page_index*4096)))>>32); |
| 2631 | pbl++; | 2631 | nes_debug(NES_DBG_MR, "pbl=%p, *pbl=0x%016llx, 0x%08x%08x\n", pbl, |
| 2632 | } | 2632 | (unsigned long long)*pbl, |
| 2633 | le32_to_cpu(((__le32 *)pbl)[1]), le32_to_cpu(((__le32 *)pbl)[0])); | ||
| 2634 | pbl++; | ||
| 2633 | } | 2635 | } |
| 2634 | } | 2636 | } |
| 2637 | |||
| 2635 | if (req.reg_type == IWNES_MEMREG_TYPE_QP) { | 2638 | if (req.reg_type == IWNES_MEMREG_TYPE_QP) { |
| 2636 | list_add_tail(&nespbl->list, &nes_ucontext->qp_reg_mem_list); | 2639 | list_add_tail(&nespbl->list, &nes_ucontext->qp_reg_mem_list); |
| 2637 | } else { | 2640 | } else { |
| @@ -3134,9 +3137,7 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
| 3134 | " original_last_aeq = 0x%04X. last_aeq = 0x%04X.\n", | 3137 | " original_last_aeq = 0x%04X. last_aeq = 0x%04X.\n", |
| 3135 | nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount), | 3138 | nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount), |
| 3136 | original_last_aeq, nesqp->last_aeq); | 3139 | original_last_aeq, nesqp->last_aeq); |
| 3137 | if ((!ret) || | 3140 | if (!ret || original_last_aeq != NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) { |
| 3138 | ((original_last_aeq != NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) && | ||
| 3139 | (ret))) { | ||
| 3140 | if (dont_wait) { | 3141 | if (dont_wait) { |
| 3141 | if (nesqp->cm_id && nesqp->hw_tcp_state != 0) { | 3142 | if (nesqp->cm_id && nesqp->hw_tcp_state != 0) { |
| 3142 | nes_debug(NES_DBG_MOD_QP, "QP%u Queuing fake disconnect for QP refcount (%d)," | 3143 | nes_debug(NES_DBG_MOD_QP, "QP%u Queuing fake disconnect for QP refcount (%d)," |
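
Note on the nes_reg_user_mr() hunk above: it replaces the old ib_umem chunk/page_list walk with for_each_sg() over region->sg_head.sgl, but the per-page arithmetic stays the same — every DMA segment is cut into 4 KiB pages and each page address is written into a PBL entry as a low/high 32-bit pair (with a root PBL indexing the leaf PBLs once more than one leaf is needed). The following standalone, userspace-only sketch reproduces just that address-splitting arithmetic; the struct and function names are illustrative, not the driver's.

/* Illustrative sketch of the PBL fill arithmetic, not driver code. */
#include <stdint.h>
#include <stdio.h>

struct pbl_entry {               /* mirrors the pa_low/pa_high split above */
	uint32_t pa_low;
	uint32_t pa_high;
};

static void fill_pbl(struct pbl_entry *pbl, uint64_t dma_addr, uint64_t dma_len)
{
	uint64_t pages = (dma_len + 4095) / 4096;     /* round up to 4 KiB pages */

	for (uint64_t i = 0; i < pages; i++) {
		uint64_t pa = dma_addr + i * 4096;
		pbl[i].pa_low  = (uint32_t)pa;            /* low 32 bits  */
		pbl[i].pa_high = (uint32_t)(pa >> 32);    /* high 32 bits */
	}
}

int main(void)
{
	struct pbl_entry pbl[4];

	fill_pbl(pbl, 0x123456789000ULL, 4 * 4096);
	for (int i = 0; i < 4; i++)
		printf("entry %d: 0x%08x%08x\n", i,
		       (unsigned)pbl[i].pa_high, (unsigned)pbl[i].pa_low);
	return 0;
}
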
diff --git a/drivers/infiniband/hw/nes/nes_verbs.h b/drivers/infiniband/hw/nes/nes_verbs.h index 0eff7c44d76b..309b31c31ae1 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.h +++ b/drivers/infiniband/hw/nes/nes_verbs.h | |||
| @@ -184,5 +184,6 @@ struct nes_qp { | |||
| 184 | u8 pau_busy; | 184 | u8 pau_busy; |
| 185 | u8 pau_pending; | 185 | u8 pau_pending; |
| 186 | u8 pau_state; | 186 | u8 pau_state; |
| 187 | __u64 nesuqp_addr; | ||
| 187 | }; | 188 | }; |
| 188 | #endif /* NES_VERBS_H */ | 189 | #endif /* NES_VERBS_H */ |
diff --git a/drivers/infiniband/hw/ocrdma/Makefile b/drivers/infiniband/hw/ocrdma/Makefile index 06a5bed12e43..d1bfd4f4cdde 100644 --- a/drivers/infiniband/hw/ocrdma/Makefile +++ b/drivers/infiniband/hw/ocrdma/Makefile | |||
| @@ -2,4 +2,4 @@ ccflags-y := -Idrivers/net/ethernet/emulex/benet | |||
| 2 | 2 | ||
| 3 | obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma.o | 3 | obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma.o |
| 4 | 4 | ||
| 5 | ocrdma-y := ocrdma_main.o ocrdma_verbs.o ocrdma_hw.o ocrdma_ah.o | 5 | ocrdma-y := ocrdma_main.o ocrdma_verbs.o ocrdma_hw.o ocrdma_ah.o ocrdma_stats.o |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h index 7c001b97b23f..19011dbb930f 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma.h | |||
| @@ -35,17 +35,27 @@ | |||
| 35 | 35 | ||
| 36 | #include <rdma/ib_verbs.h> | 36 | #include <rdma/ib_verbs.h> |
| 37 | #include <rdma/ib_user_verbs.h> | 37 | #include <rdma/ib_user_verbs.h> |
| 38 | #include <rdma/ib_addr.h> | ||
| 38 | 39 | ||
| 39 | #include <be_roce.h> | 40 | #include <be_roce.h> |
| 40 | #include "ocrdma_sli.h" | 41 | #include "ocrdma_sli.h" |
| 41 | 42 | ||
| 42 | #define OCRDMA_ROCE_DEV_VERSION "1.0.0" | 43 | #define OCRDMA_ROCE_DRV_VERSION "10.2.145.0u" |
| 44 | |||
| 45 | #define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver" | ||
| 43 | #define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA" | 46 | #define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA" |
| 44 | 47 | ||
| 48 | #define OC_NAME_SH OCRDMA_NODE_DESC "(Skyhawk)" | ||
| 49 | #define OC_NAME_UNKNOWN OCRDMA_NODE_DESC "(Unknown)" | ||
| 50 | |||
| 51 | #define OC_SKH_DEVICE_PF 0x720 | ||
| 52 | #define OC_SKH_DEVICE_VF 0x728 | ||
| 45 | #define OCRDMA_MAX_AH 512 | 53 | #define OCRDMA_MAX_AH 512 |
| 46 | 54 | ||
| 47 | #define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME) | 55 | #define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME) |
| 48 | 56 | ||
| 57 | #define convert_to_64bit(lo, hi) ((u64)hi << 32 | (u64)lo) | ||
| 58 | |||
| 49 | struct ocrdma_dev_attr { | 59 | struct ocrdma_dev_attr { |
| 50 | u8 fw_ver[32]; | 60 | u8 fw_ver[32]; |
| 51 | u32 vendor_id; | 61 | u32 vendor_id; |
| @@ -65,6 +75,7 @@ struct ocrdma_dev_attr { | |||
| 65 | int max_mr; | 75 | int max_mr; |
| 66 | u64 max_mr_size; | 76 | u64 max_mr_size; |
| 67 | u32 max_num_mr_pbl; | 77 | u32 max_num_mr_pbl; |
| 78 | int max_mw; | ||
| 68 | int max_fmr; | 79 | int max_fmr; |
| 69 | int max_map_per_fmr; | 80 | int max_map_per_fmr; |
| 70 | int max_pages_per_frmr; | 81 | int max_pages_per_frmr; |
| @@ -83,6 +94,12 @@ struct ocrdma_dev_attr { | |||
| 83 | u8 num_ird_pages; | 94 | u8 num_ird_pages; |
| 84 | }; | 95 | }; |
| 85 | 96 | ||
| 97 | struct ocrdma_dma_mem { | ||
| 98 | void *va; | ||
| 99 | dma_addr_t pa; | ||
| 100 | u32 size; | ||
| 101 | }; | ||
| 102 | |||
| 86 | struct ocrdma_pbl { | 103 | struct ocrdma_pbl { |
| 87 | void *va; | 104 | void *va; |
| 88 | dma_addr_t pa; | 105 | dma_addr_t pa; |
| @@ -148,6 +165,26 @@ struct ocrdma_mr { | |||
| 148 | struct ocrdma_hw_mr hwmr; | 165 | struct ocrdma_hw_mr hwmr; |
| 149 | }; | 166 | }; |
| 150 | 167 | ||
| 168 | struct ocrdma_stats { | ||
| 169 | u8 type; | ||
| 170 | struct ocrdma_dev *dev; | ||
| 171 | }; | ||
| 172 | |||
| 173 | struct stats_mem { | ||
| 174 | struct ocrdma_mqe mqe; | ||
| 175 | void *va; | ||
| 176 | dma_addr_t pa; | ||
| 177 | u32 size; | ||
| 178 | char *debugfs_mem; | ||
| 179 | }; | ||
| 180 | |||
| 181 | struct phy_info { | ||
| 182 | u16 auto_speeds_supported; | ||
| 183 | u16 fixed_speeds_supported; | ||
| 184 | u16 phy_type; | ||
| 185 | u16 interface_type; | ||
| 186 | }; | ||
| 187 | |||
| 151 | struct ocrdma_dev { | 188 | struct ocrdma_dev { |
| 152 | struct ib_device ibdev; | 189 | struct ib_device ibdev; |
| 153 | struct ocrdma_dev_attr attr; | 190 | struct ocrdma_dev_attr attr; |
| @@ -191,12 +228,30 @@ struct ocrdma_dev { | |||
| 191 | struct mqe_ctx mqe_ctx; | 228 | struct mqe_ctx mqe_ctx; |
| 192 | 229 | ||
| 193 | struct be_dev_info nic_info; | 230 | struct be_dev_info nic_info; |
| 231 | struct phy_info phy; | ||
| 232 | char model_number[32]; | ||
| 233 | u32 hba_port_num; | ||
| 194 | 234 | ||
| 195 | struct list_head entry; | 235 | struct list_head entry; |
| 196 | struct rcu_head rcu; | 236 | struct rcu_head rcu; |
| 197 | int id; | 237 | int id; |
| 198 | struct ocrdma_mr *stag_arr[OCRDMA_MAX_STAG]; | 238 | u64 stag_arr[OCRDMA_MAX_STAG]; |
| 199 | u16 pvid; | 239 | u16 pvid; |
| 240 | u32 asic_id; | ||
| 241 | |||
| 242 | ulong last_stats_time; | ||
| 243 | struct mutex stats_lock; /* provide synch for debugfs operations */ | ||
| 244 | struct stats_mem stats_mem; | ||
| 245 | struct ocrdma_stats rsrc_stats; | ||
| 246 | struct ocrdma_stats rx_stats; | ||
| 247 | struct ocrdma_stats wqe_stats; | ||
| 248 | struct ocrdma_stats tx_stats; | ||
| 249 | struct ocrdma_stats db_err_stats; | ||
| 250 | struct ocrdma_stats tx_qp_err_stats; | ||
| 251 | struct ocrdma_stats rx_qp_err_stats; | ||
| 252 | struct ocrdma_stats tx_dbg_stats; | ||
| 253 | struct ocrdma_stats rx_dbg_stats; | ||
| 254 | struct dentry *dir; | ||
| 200 | }; | 255 | }; |
| 201 | 256 | ||
| 202 | struct ocrdma_cq { | 257 | struct ocrdma_cq { |
| @@ -209,8 +264,8 @@ struct ocrdma_cq { | |||
| 209 | */ | 264 | */ |
| 210 | u32 max_hw_cqe; | 265 | u32 max_hw_cqe; |
| 211 | bool phase_change; | 266 | bool phase_change; |
| 212 | bool armed, solicited; | 267 | bool deferred_arm, deferred_sol; |
| 213 | bool arm_needed; | 268 | bool first_arm; |
| 214 | 269 | ||
| 215 | spinlock_t cq_lock ____cacheline_aligned; /* provide synchronization | 270 | spinlock_t cq_lock ____cacheline_aligned; /* provide synchronization |
| 216 | * to cq polling | 271 | * to cq polling |
| @@ -223,6 +278,7 @@ struct ocrdma_cq { | |||
| 223 | struct ocrdma_ucontext *ucontext; | 278 | struct ocrdma_ucontext *ucontext; |
| 224 | dma_addr_t pa; | 279 | dma_addr_t pa; |
| 225 | u32 len; | 280 | u32 len; |
| 281 | u32 cqe_cnt; | ||
| 226 | 282 | ||
| 227 | /* head of all qp's sq and rq for which cqes need to be flushed | 283 | /* head of all qp's sq and rq for which cqes need to be flushed |
| 228 | * by the software. | 284 | * by the software. |
| @@ -232,7 +288,6 @@ struct ocrdma_cq { | |||
| 232 | 288 | ||
| 233 | struct ocrdma_pd { | 289 | struct ocrdma_pd { |
| 234 | struct ib_pd ibpd; | 290 | struct ib_pd ibpd; |
| 235 | struct ocrdma_dev *dev; | ||
| 236 | struct ocrdma_ucontext *uctx; | 291 | struct ocrdma_ucontext *uctx; |
| 237 | u32 id; | 292 | u32 id; |
| 238 | int num_dpp_qp; | 293 | int num_dpp_qp; |
| @@ -317,10 +372,8 @@ struct ocrdma_qp { | |||
| 317 | bool dpp_enabled; | 372 | bool dpp_enabled; |
| 318 | u8 *ird_q_va; | 373 | u8 *ird_q_va; |
| 319 | bool signaled; | 374 | bool signaled; |
| 320 | u16 db_cache; | ||
| 321 | }; | 375 | }; |
| 322 | 376 | ||
| 323 | |||
| 324 | struct ocrdma_ucontext { | 377 | struct ocrdma_ucontext { |
| 325 | struct ib_ucontext ibucontext; | 378 | struct ib_ucontext ibucontext; |
| 326 | 379 | ||
| @@ -385,13 +438,6 @@ static inline struct ocrdma_srq *get_ocrdma_srq(struct ib_srq *ibsrq) | |||
| 385 | return container_of(ibsrq, struct ocrdma_srq, ibsrq); | 438 | return container_of(ibsrq, struct ocrdma_srq, ibsrq); |
| 386 | } | 439 | } |
| 387 | 440 | ||
| 388 | |||
| 389 | static inline int ocrdma_get_num_posted_shift(struct ocrdma_qp *qp) | ||
| 390 | { | ||
| 391 | return ((qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY && | ||
| 392 | qp->id < 128) ? 24 : 16); | ||
| 393 | } | ||
| 394 | |||
| 395 | static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe) | 441 | static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe) |
| 396 | { | 442 | { |
| 397 | int cqe_valid; | 443 | int cqe_valid; |
| @@ -436,4 +482,40 @@ static inline int ocrdma_resolve_dmac(struct ocrdma_dev *dev, | |||
| 436 | return 0; | 482 | return 0; |
| 437 | } | 483 | } |
| 438 | 484 | ||
| 485 | static inline char *hca_name(struct ocrdma_dev *dev) | ||
| 486 | { | ||
| 487 | switch (dev->nic_info.pdev->device) { | ||
| 488 | case OC_SKH_DEVICE_PF: | ||
| 489 | case OC_SKH_DEVICE_VF: | ||
| 490 | return OC_NAME_SH; | ||
| 491 | default: | ||
| 492 | return OC_NAME_UNKNOWN; | ||
| 493 | } | ||
| 494 | } | ||
| 495 | |||
| 496 | static inline int ocrdma_get_eq_table_index(struct ocrdma_dev *dev, | ||
| 497 | int eqid) | ||
| 498 | { | ||
| 499 | int indx; | ||
| 500 | |||
| 501 | for (indx = 0; indx < dev->eq_cnt; indx++) { | ||
| 502 | if (dev->eq_tbl[indx].q.id == eqid) | ||
| 503 | return indx; | ||
| 504 | } | ||
| 505 | |||
| 506 | return -EINVAL; | ||
| 507 | } | ||
| 508 | |||
| 509 | static inline u8 ocrdma_get_asic_type(struct ocrdma_dev *dev) | ||
| 510 | { | ||
| 511 | if (dev->nic_info.dev_family == 0xF && !dev->asic_id) { | ||
| 512 | pci_read_config_dword( | ||
| 513 | dev->nic_info.pdev, | ||
| 514 | OCRDMA_SLI_ASIC_ID_OFFSET, &dev->asic_id); | ||
| 515 | } | ||
| 516 | |||
| 517 | return (dev->asic_id & OCRDMA_SLI_ASIC_GEN_NUM_MASK) >> | ||
| 518 | OCRDMA_SLI_ASIC_GEN_NUM_SHIFT; | ||
| 519 | } | ||
| 520 | |||
| 439 | #endif | 521 | #endif |
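
The new inline helpers added to ocrdma.h above follow two small patterns: convert_to_64bit() stitches two 32-bit halves into a u64, and ocrdma_get_asic_type() reads the asic_id register once, caches it, and extracts the generation as a masked, shifted bit-field. Below is a minimal standalone sketch of both patterns; the MASK/SHIFT values are placeholders, since the real OCRDMA_SLI_ASIC_ID_OFFSET and OCRDMA_SLI_ASIC_GEN_NUM_MASK/SHIFT definitions are not part of this hunk.

/* Userspace sketch only; mask/shift values below are assumed for illustration. */
#include <stdint.h>
#include <stdio.h>

#define convert_to_64bit(lo, hi) ((uint64_t)(hi) << 32 | (uint64_t)(lo))

#define ASIC_GEN_NUM_MASK  0x000000F0u   /* assumed layout */
#define ASIC_GEN_NUM_SHIFT 4             /* assumed layout */

static unsigned asic_gen(uint32_t asic_id)
{
	return (asic_id & ASIC_GEN_NUM_MASK) >> ASIC_GEN_NUM_SHIFT;
}

int main(void)
{
	printf("combined: 0x%llx\n",
	       (unsigned long long)convert_to_64bit(0xdeadbeefu, 0x12345678u));
	printf("gen: %u\n", asic_gen(0x40));  /* 0x04 == OCRDMA_ASIC_GEN_SKH_R */
	return 0;
}
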
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h index fbac8eb44036..1554cca5712a 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h | |||
| @@ -28,7 +28,8 @@ | |||
| 28 | #ifndef __OCRDMA_ABI_H__ | 28 | #ifndef __OCRDMA_ABI_H__ |
| 29 | #define __OCRDMA_ABI_H__ | 29 | #define __OCRDMA_ABI_H__ |
| 30 | 30 | ||
| 31 | #define OCRDMA_ABI_VERSION 1 | 31 | #define OCRDMA_ABI_VERSION 2 |
| 32 | #define OCRDMA_BE_ROCE_ABI_VERSION 1 | ||
| 32 | /* user kernel communication data structures. */ | 33 | /* user kernel communication data structures. */ |
| 33 | 34 | ||
| 34 | struct ocrdma_alloc_ucontext_resp { | 35 | struct ocrdma_alloc_ucontext_resp { |
| @@ -107,9 +108,7 @@ struct ocrdma_create_qp_uresp { | |||
| 107 | u32 db_sq_offset; | 108 | u32 db_sq_offset; |
| 108 | u32 db_rq_offset; | 109 | u32 db_rq_offset; |
| 109 | u32 db_shift; | 110 | u32 db_shift; |
| 110 | u64 rsvd1; | 111 | u64 rsvd[11]; |
| 111 | u64 rsvd2; | ||
| 112 | u64 rsvd3; | ||
| 113 | } __packed; | 112 | } __packed; |
| 114 | 113 | ||
| 115 | struct ocrdma_create_srq_uresp { | 114 | struct ocrdma_create_srq_uresp { |
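
The ocrdma_abi.h hunk above bumps OCRDMA_ABI_VERSION to 2 and folds the three named u64 reserved words of ocrdma_create_qp_uresp into a single rsvd[11] array — the usual way a packed user/kernel response keeps room to grow without changing the size older userspace was compiled against. The sketch below is a generic illustration of that idea with made-up field names and sizes; it is not the actual ocrdma layout.

/* Generic illustration of reserved-field ABI padding, not the ocrdma structs. */
#include <stdint.h>
#include <stdio.h>

#define MY_ABI_VERSION 2

struct my_create_qp_uresp {
	uint32_t qp_id;
	uint32_t db_shift;
	uint64_t rsvd[11];     /* room for future fields; size stays fixed */
} __attribute__((packed));

_Static_assert(sizeof(struct my_create_qp_uresp) == 8 + 88,
	       "uresp size is part of the user/kernel ABI");

int main(void)
{
	printf("ABI v%d uresp size: %zu bytes\n",
	       MY_ABI_VERSION, sizeof(struct my_create_qp_uresp));
	return 0;
}
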
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c index 34071143006e..d4cc01f10c01 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c | |||
| @@ -100,7 +100,7 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr) | |||
| 100 | if (!(attr->ah_flags & IB_AH_GRH)) | 100 | if (!(attr->ah_flags & IB_AH_GRH)) |
| 101 | return ERR_PTR(-EINVAL); | 101 | return ERR_PTR(-EINVAL); |
| 102 | 102 | ||
| 103 | ah = kzalloc(sizeof *ah, GFP_ATOMIC); | 103 | ah = kzalloc(sizeof(*ah), GFP_ATOMIC); |
| 104 | if (!ah) | 104 | if (!ah) |
| 105 | return ERR_PTR(-ENOMEM); | 105 | return ERR_PTR(-ENOMEM); |
| 106 | 106 | ||
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c index 1664d648cbfc..3bbf2010a821 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c | |||
| @@ -32,7 +32,6 @@ | |||
| 32 | 32 | ||
| 33 | #include <rdma/ib_verbs.h> | 33 | #include <rdma/ib_verbs.h> |
| 34 | #include <rdma/ib_user_verbs.h> | 34 | #include <rdma/ib_user_verbs.h> |
| 35 | #include <rdma/ib_addr.h> | ||
| 36 | 35 | ||
| 37 | #include "ocrdma.h" | 36 | #include "ocrdma.h" |
| 38 | #include "ocrdma_hw.h" | 37 | #include "ocrdma_hw.h" |
| @@ -243,6 +242,23 @@ static int ocrdma_get_mbx_errno(u32 status) | |||
| 243 | return err_num; | 242 | return err_num; |
| 244 | } | 243 | } |
| 245 | 244 | ||
| 245 | char *port_speed_string(struct ocrdma_dev *dev) | ||
| 246 | { | ||
| 247 | char *str = ""; | ||
| 248 | u16 speeds_supported; | ||
| 249 | |||
| 250 | speeds_supported = dev->phy.fixed_speeds_supported | | ||
| 251 | dev->phy.auto_speeds_supported; | ||
| 252 | if (speeds_supported & OCRDMA_PHY_SPEED_40GBPS) | ||
| 253 | str = "40Gbps "; | ||
| 254 | else if (speeds_supported & OCRDMA_PHY_SPEED_10GBPS) | ||
| 255 | str = "10Gbps "; | ||
| 256 | else if (speeds_supported & OCRDMA_PHY_SPEED_1GBPS) | ||
| 257 | str = "1Gbps "; | ||
| 258 | |||
| 259 | return str; | ||
| 260 | } | ||
| 261 | |||
| 246 | static int ocrdma_get_mbx_cqe_errno(u16 cqe_status) | 262 | static int ocrdma_get_mbx_cqe_errno(u16 cqe_status) |
| 247 | { | 263 | { |
| 248 | int err_num = -EINVAL; | 264 | int err_num = -EINVAL; |
| @@ -332,6 +348,11 @@ static void *ocrdma_init_emb_mqe(u8 opcode, u32 cmd_len) | |||
| 332 | return mqe; | 348 | return mqe; |
| 333 | } | 349 | } |
| 334 | 350 | ||
| 351 | static void *ocrdma_alloc_mqe(void) | ||
| 352 | { | ||
| 353 | return kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL); | ||
| 354 | } | ||
| 355 | |||
| 335 | static void ocrdma_free_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q) | 356 | static void ocrdma_free_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q) |
| 336 | { | 357 | { |
| 337 | dma_free_coherent(&dev->nic_info.pdev->dev, q->size, q->va, q->dma); | 358 | dma_free_coherent(&dev->nic_info.pdev->dev, q->size, q->va, q->dma); |
| @@ -364,8 +385,8 @@ static void ocrdma_build_q_pages(struct ocrdma_pa *q_pa, int cnt, | |||
| 364 | } | 385 | } |
| 365 | } | 386 | } |
| 366 | 387 | ||
| 367 | static int ocrdma_mbx_delete_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q, | 388 | static int ocrdma_mbx_delete_q(struct ocrdma_dev *dev, |
| 368 | int queue_type) | 389 | struct ocrdma_queue_info *q, int queue_type) |
| 369 | { | 390 | { |
| 370 | u8 opcode = 0; | 391 | u8 opcode = 0; |
| 371 | int status; | 392 | int status; |
| @@ -444,7 +465,7 @@ mbx_err: | |||
| 444 | return status; | 465 | return status; |
| 445 | } | 466 | } |
| 446 | 467 | ||
| 447 | static int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq) | 468 | int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq) |
| 448 | { | 469 | { |
| 449 | int irq; | 470 | int irq; |
| 450 | 471 | ||
| @@ -574,6 +595,7 @@ static int ocrdma_create_mq(struct ocrdma_dev *dev) | |||
| 574 | if (status) | 595 | if (status) |
| 575 | goto alloc_err; | 596 | goto alloc_err; |
| 576 | 597 | ||
| 598 | dev->eq_tbl[0].cq_cnt++; | ||
| 577 | status = ocrdma_mbx_mq_cq_create(dev, &dev->mq.cq, &dev->eq_tbl[0].q); | 599 | status = ocrdma_mbx_mq_cq_create(dev, &dev->mq.cq, &dev->eq_tbl[0].q); |
| 578 | if (status) | 600 | if (status) |
| 579 | goto mbx_cq_free; | 601 | goto mbx_cq_free; |
| @@ -639,7 +661,7 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev, | |||
| 639 | { | 661 | { |
| 640 | struct ocrdma_qp *qp = NULL; | 662 | struct ocrdma_qp *qp = NULL; |
| 641 | struct ocrdma_cq *cq = NULL; | 663 | struct ocrdma_cq *cq = NULL; |
| 642 | struct ib_event ib_evt; | 664 | struct ib_event ib_evt = { 0 }; |
| 643 | int cq_event = 0; | 665 | int cq_event = 0; |
| 644 | int qp_event = 1; | 666 | int qp_event = 1; |
| 645 | int srq_event = 0; | 667 | int srq_event = 0; |
| @@ -664,6 +686,8 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev, | |||
| 664 | case OCRDMA_CQ_OVERRUN_ERROR: | 686 | case OCRDMA_CQ_OVERRUN_ERROR: |
| 665 | ib_evt.element.cq = &cq->ibcq; | 687 | ib_evt.element.cq = &cq->ibcq; |
| 666 | ib_evt.event = IB_EVENT_CQ_ERR; | 688 | ib_evt.event = IB_EVENT_CQ_ERR; |
| 689 | cq_event = 1; | ||
| 690 | qp_event = 0; | ||
| 667 | break; | 691 | break; |
| 668 | case OCRDMA_CQ_QPCAT_ERROR: | 692 | case OCRDMA_CQ_QPCAT_ERROR: |
| 669 | ib_evt.element.qp = &qp->ibqp; | 693 | ib_evt.element.qp = &qp->ibqp; |
| @@ -725,6 +749,7 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev, | |||
| 725 | qp->srq->ibsrq. | 749 | qp->srq->ibsrq. |
| 726 | srq_context); | 750 | srq_context); |
| 727 | } else if (dev_event) { | 751 | } else if (dev_event) { |
| 752 | pr_err("%s: Fatal event received\n", dev->ibdev.name); | ||
| 728 | ib_dispatch_event(&ib_evt); | 753 | ib_dispatch_event(&ib_evt); |
| 729 | } | 754 | } |
| 730 | 755 | ||
| @@ -752,7 +777,6 @@ static void ocrdma_process_grp5_aync(struct ocrdma_dev *dev, | |||
| 752 | } | 777 | } |
| 753 | } | 778 | } |
| 754 | 779 | ||
| 755 | |||
| 756 | static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe) | 780 | static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe) |
| 757 | { | 781 | { |
| 758 | /* async CQE processing */ | 782 | /* async CQE processing */ |
| @@ -799,8 +823,6 @@ static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id) | |||
| 799 | ocrdma_process_acqe(dev, cqe); | 823 | ocrdma_process_acqe(dev, cqe); |
| 800 | else if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_CMPL_MASK) | 824 | else if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_CMPL_MASK) |
| 801 | ocrdma_process_mcqe(dev, cqe); | 825 | ocrdma_process_mcqe(dev, cqe); |
| 802 | else | ||
| 803 | pr_err("%s() cqe->compl is not set.\n", __func__); | ||
| 804 | memset(cqe, 0, sizeof(struct ocrdma_mcqe)); | 826 | memset(cqe, 0, sizeof(struct ocrdma_mcqe)); |
| 805 | ocrdma_mcq_inc_tail(dev); | 827 | ocrdma_mcq_inc_tail(dev); |
| 806 | } | 828 | } |
| @@ -858,16 +880,8 @@ static void ocrdma_qp_cq_handler(struct ocrdma_dev *dev, u16 cq_idx) | |||
| 858 | BUG(); | 880 | BUG(); |
| 859 | 881 | ||
| 860 | cq = dev->cq_tbl[cq_idx]; | 882 | cq = dev->cq_tbl[cq_idx]; |
| 861 | if (cq == NULL) { | 883 | if (cq == NULL) |
| 862 | pr_err("%s%d invalid id=0x%x\n", __func__, dev->id, cq_idx); | ||
| 863 | return; | 884 | return; |
| 864 | } | ||
| 865 | spin_lock_irqsave(&cq->cq_lock, flags); | ||
| 866 | cq->armed = false; | ||
| 867 | cq->solicited = false; | ||
| 868 | spin_unlock_irqrestore(&cq->cq_lock, flags); | ||
| 869 | |||
| 870 | ocrdma_ring_cq_db(dev, cq->id, false, false, 0); | ||
| 871 | 885 | ||
| 872 | if (cq->ibcq.comp_handler) { | 886 | if (cq->ibcq.comp_handler) { |
| 873 | spin_lock_irqsave(&cq->comp_handler_lock, flags); | 887 | spin_lock_irqsave(&cq->comp_handler_lock, flags); |
| @@ -892,27 +906,35 @@ static irqreturn_t ocrdma_irq_handler(int irq, void *handle) | |||
| 892 | struct ocrdma_dev *dev = eq->dev; | 906 | struct ocrdma_dev *dev = eq->dev; |
| 893 | struct ocrdma_eqe eqe; | 907 | struct ocrdma_eqe eqe; |
| 894 | struct ocrdma_eqe *ptr; | 908 | struct ocrdma_eqe *ptr; |
| 895 | u16 eqe_popped = 0; | ||
| 896 | u16 cq_id; | 909 | u16 cq_id; |
| 897 | while (1) { | 910 | int budget = eq->cq_cnt; |
| 911 | |||
| 912 | do { | ||
| 898 | ptr = ocrdma_get_eqe(eq); | 913 | ptr = ocrdma_get_eqe(eq); |
| 899 | eqe = *ptr; | 914 | eqe = *ptr; |
| 900 | ocrdma_le32_to_cpu(&eqe, sizeof(eqe)); | 915 | ocrdma_le32_to_cpu(&eqe, sizeof(eqe)); |
| 901 | if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0) | 916 | if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0) |
| 902 | break; | 917 | break; |
| 903 | eqe_popped += 1; | 918 | |
| 904 | ptr->id_valid = 0; | 919 | ptr->id_valid = 0; |
| 920 | /* ring eq doorbell as soon as its consumed. */ | ||
| 921 | ocrdma_ring_eq_db(dev, eq->q.id, false, true, 1); | ||
| 905 | /* check whether its CQE or not. */ | 922 | /* check whether its CQE or not. */ |
| 906 | if ((eqe.id_valid & OCRDMA_EQE_FOR_CQE_MASK) == 0) { | 923 | if ((eqe.id_valid & OCRDMA_EQE_FOR_CQE_MASK) == 0) { |
| 907 | cq_id = eqe.id_valid >> OCRDMA_EQE_RESOURCE_ID_SHIFT; | 924 | cq_id = eqe.id_valid >> OCRDMA_EQE_RESOURCE_ID_SHIFT; |
| 908 | ocrdma_cq_handler(dev, cq_id); | 925 | ocrdma_cq_handler(dev, cq_id); |
| 909 | } | 926 | } |
| 910 | ocrdma_eq_inc_tail(eq); | 927 | ocrdma_eq_inc_tail(eq); |
| 911 | } | 928 | |
| 912 | ocrdma_ring_eq_db(dev, eq->q.id, true, true, eqe_popped); | 929 | /* There can be a stale EQE after the last bound CQ is |
| 913 | /* Ring EQ doorbell with num_popped to 0 to enable interrupts again. */ | 930 | * destroyed. EQE valid and budget == 0 implies this. |
| 914 | if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) | 931 | */ |
| 915 | ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0); | 932 | if (budget) |
| 933 | budget--; | ||
| 934 | |||
| 935 | } while (budget); | ||
| 936 | |||
| 937 | ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0); | ||
| 916 | return IRQ_HANDLED; | 938 | return IRQ_HANDLED; |
| 917 | } | 939 | } |
| 918 | 940 | ||
| @@ -949,7 +971,8 @@ static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe) | |||
| 949 | { | 971 | { |
| 950 | int status = 0; | 972 | int status = 0; |
| 951 | u16 cqe_status, ext_status; | 973 | u16 cqe_status, ext_status; |
| 952 | struct ocrdma_mqe *rsp; | 974 | struct ocrdma_mqe *rsp_mqe; |
| 975 | struct ocrdma_mbx_rsp *rsp = NULL; | ||
| 953 | 976 | ||
| 954 | mutex_lock(&dev->mqe_ctx.lock); | 977 | mutex_lock(&dev->mqe_ctx.lock); |
| 955 | ocrdma_post_mqe(dev, mqe); | 978 | ocrdma_post_mqe(dev, mqe); |
| @@ -958,23 +981,61 @@ static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe) | |||
| 958 | goto mbx_err; | 981 | goto mbx_err; |
| 959 | cqe_status = dev->mqe_ctx.cqe_status; | 982 | cqe_status = dev->mqe_ctx.cqe_status; |
| 960 | ext_status = dev->mqe_ctx.ext_status; | 983 | ext_status = dev->mqe_ctx.ext_status; |
| 961 | rsp = ocrdma_get_mqe_rsp(dev); | 984 | rsp_mqe = ocrdma_get_mqe_rsp(dev); |
| 962 | ocrdma_copy_le32_to_cpu(mqe, rsp, (sizeof(*mqe))); | 985 | ocrdma_copy_le32_to_cpu(mqe, rsp_mqe, (sizeof(*mqe))); |
| 986 | if ((mqe->hdr.spcl_sge_cnt_emb & OCRDMA_MQE_HDR_EMB_MASK) >> | ||
| 987 | OCRDMA_MQE_HDR_EMB_SHIFT) | ||
| 988 | rsp = &mqe->u.rsp; | ||
| 989 | |||
| 963 | if (cqe_status || ext_status) { | 990 | if (cqe_status || ext_status) { |
| 964 | pr_err("%s() opcode=0x%x, cqe_status=0x%x, ext_status=0x%x\n", | 991 | pr_err("%s() cqe_status=0x%x, ext_status=0x%x,", |
| 965 | __func__, | 992 | __func__, cqe_status, ext_status); |
| 966 | (rsp->u.rsp.subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >> | 993 | if (rsp) { |
| 967 | OCRDMA_MBX_RSP_OPCODE_SHIFT, cqe_status, ext_status); | 994 | /* This is for embedded cmds. */ |
| 995 | pr_err("opcode=0x%x, subsystem=0x%x\n", | ||
| 996 | (rsp->subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >> | ||
| 997 | OCRDMA_MBX_RSP_OPCODE_SHIFT, | ||
| 998 | (rsp->subsys_op & OCRDMA_MBX_RSP_SUBSYS_MASK) >> | ||
| 999 | OCRDMA_MBX_RSP_SUBSYS_SHIFT); | ||
| 1000 | } | ||
| 968 | status = ocrdma_get_mbx_cqe_errno(cqe_status); | 1001 | status = ocrdma_get_mbx_cqe_errno(cqe_status); |
| 969 | goto mbx_err; | 1002 | goto mbx_err; |
| 970 | } | 1003 | } |
| 971 | if (mqe->u.rsp.status & OCRDMA_MBX_RSP_STATUS_MASK) | 1004 | /* For non embedded, rsp errors are handled in ocrdma_nonemb_mbx_cmd */ |
| 1005 | if (rsp && (mqe->u.rsp.status & OCRDMA_MBX_RSP_STATUS_MASK)) | ||
| 972 | status = ocrdma_get_mbx_errno(mqe->u.rsp.status); | 1006 | status = ocrdma_get_mbx_errno(mqe->u.rsp.status); |
| 973 | mbx_err: | 1007 | mbx_err: |
| 974 | mutex_unlock(&dev->mqe_ctx.lock); | 1008 | mutex_unlock(&dev->mqe_ctx.lock); |
| 975 | return status; | 1009 | return status; |
| 976 | } | 1010 | } |
| 977 | 1011 | ||
| 1012 | static int ocrdma_nonemb_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe, | ||
| 1013 | void *payload_va) | ||
| 1014 | { | ||
| 1015 | int status = 0; | ||
| 1016 | struct ocrdma_mbx_rsp *rsp = payload_va; | ||
| 1017 | |||
| 1018 | if ((mqe->hdr.spcl_sge_cnt_emb & OCRDMA_MQE_HDR_EMB_MASK) >> | ||
| 1019 | OCRDMA_MQE_HDR_EMB_SHIFT) | ||
| 1020 | BUG(); | ||
| 1021 | |||
| 1022 | status = ocrdma_mbx_cmd(dev, mqe); | ||
| 1023 | if (!status) | ||
| 1024 | /* For non embedded, only CQE failures are handled in | ||
| 1025 | * ocrdma_mbx_cmd. We need to check for RSP errors. | ||
| 1026 | */ | ||
| 1027 | if (rsp->status & OCRDMA_MBX_RSP_STATUS_MASK) | ||
| 1028 | status = ocrdma_get_mbx_errno(rsp->status); | ||
| 1029 | |||
| 1030 | if (status) | ||
| 1031 | pr_err("opcode=0x%x, subsystem=0x%x\n", | ||
| 1032 | (rsp->subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >> | ||
| 1033 | OCRDMA_MBX_RSP_OPCODE_SHIFT, | ||
| 1034 | (rsp->subsys_op & OCRDMA_MBX_RSP_SUBSYS_MASK) >> | ||
| 1035 | OCRDMA_MBX_RSP_SUBSYS_SHIFT); | ||
| 1036 | return status; | ||
| 1037 | } | ||
| 1038 | |||
| 978 | static void ocrdma_get_attr(struct ocrdma_dev *dev, | 1039 | static void ocrdma_get_attr(struct ocrdma_dev *dev, |
| 979 | struct ocrdma_dev_attr *attr, | 1040 | struct ocrdma_dev_attr *attr, |
| 980 | struct ocrdma_mbx_query_config *rsp) | 1041 | struct ocrdma_mbx_query_config *rsp) |
| @@ -985,6 +1046,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev, | |||
| 985 | attr->max_qp = | 1046 | attr->max_qp = |
| 986 | (rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >> | 1047 | (rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >> |
| 987 | OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT; | 1048 | OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT; |
| 1049 | attr->max_srq = | ||
| 1050 | (rsp->max_srq_rpir_qps & OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK) >> | ||
| 1051 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET; | ||
| 988 | attr->max_send_sge = ((rsp->max_write_send_sge & | 1052 | attr->max_send_sge = ((rsp->max_write_send_sge & |
| 989 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >> | 1053 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >> |
| 990 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT); | 1054 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT); |
| @@ -1000,9 +1064,6 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev, | |||
| 1000 | attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp & | 1064 | attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp & |
| 1001 | OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >> | 1065 | OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >> |
| 1002 | OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT; | 1066 | OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT; |
| 1003 | attr->max_srq = | ||
| 1004 | (rsp->max_srq_rpir_qps & OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK) >> | ||
| 1005 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET; | ||
| 1006 | attr->max_ird_per_qp = (rsp->max_ird_ord_per_qp & | 1067 | attr->max_ird_per_qp = (rsp->max_ird_ord_per_qp & |
| 1007 | OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_MASK) >> | 1068 | OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_MASK) >> |
| 1008 | OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_SHIFT; | 1069 | OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_SHIFT; |
| @@ -1015,6 +1076,7 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev, | |||
| 1015 | attr->local_ca_ack_delay = (rsp->max_pd_ca_ack_delay & | 1076 | attr->local_ca_ack_delay = (rsp->max_pd_ca_ack_delay & |
| 1016 | OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_MASK) >> | 1077 | OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_MASK) >> |
| 1017 | OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT; | 1078 | OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT; |
| 1079 | attr->max_mw = rsp->max_mw; | ||
| 1018 | attr->max_mr = rsp->max_mr; | 1080 | attr->max_mr = rsp->max_mr; |
| 1019 | attr->max_mr_size = ~0ull; | 1081 | attr->max_mr_size = ~0ull; |
| 1020 | attr->max_fmr = 0; | 1082 | attr->max_fmr = 0; |
| @@ -1036,7 +1098,7 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev, | |||
| 1036 | attr->max_inline_data = | 1098 | attr->max_inline_data = |
| 1037 | attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) + | 1099 | attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) + |
| 1038 | sizeof(struct ocrdma_sge)); | 1100 | sizeof(struct ocrdma_sge)); |
| 1039 | if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { | 1101 | if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) { |
| 1040 | attr->ird = 1; | 1102 | attr->ird = 1; |
| 1041 | attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE; | 1103 | attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE; |
| 1042 | attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES; | 1104 | attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES; |
| @@ -1110,6 +1172,96 @@ mbx_err: | |||
| 1110 | return status; | 1172 | return status; |
| 1111 | } | 1173 | } |
| 1112 | 1174 | ||
| 1175 | int ocrdma_mbx_rdma_stats(struct ocrdma_dev *dev, bool reset) | ||
| 1176 | { | ||
| 1177 | struct ocrdma_rdma_stats_req *req = dev->stats_mem.va; | ||
| 1178 | struct ocrdma_mqe *mqe = &dev->stats_mem.mqe; | ||
| 1179 | struct ocrdma_rdma_stats_resp *old_stats = NULL; | ||
| 1180 | int status; | ||
| 1181 | |||
| 1182 | old_stats = kzalloc(sizeof(*old_stats), GFP_KERNEL); | ||
| 1183 | if (old_stats == NULL) | ||
| 1184 | return -ENOMEM; | ||
| 1185 | |||
| 1186 | memset(mqe, 0, sizeof(*mqe)); | ||
| 1187 | mqe->hdr.pyld_len = dev->stats_mem.size; | ||
| 1188 | mqe->hdr.spcl_sge_cnt_emb |= | ||
| 1189 | (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) & | ||
| 1190 | OCRDMA_MQE_HDR_SGE_CNT_MASK; | ||
| 1191 | mqe->u.nonemb_req.sge[0].pa_lo = (u32) (dev->stats_mem.pa & 0xffffffff); | ||
| 1192 | mqe->u.nonemb_req.sge[0].pa_hi = (u32) upper_32_bits(dev->stats_mem.pa); | ||
| 1193 | mqe->u.nonemb_req.sge[0].len = dev->stats_mem.size; | ||
| 1194 | |||
| 1195 | /* Cache the old stats */ | ||
| 1196 | memcpy(old_stats, req, sizeof(struct ocrdma_rdma_stats_resp)); | ||
| 1197 | memset(req, 0, dev->stats_mem.size); | ||
| 1198 | |||
| 1199 | ocrdma_init_mch((struct ocrdma_mbx_hdr *)req, | ||
| 1200 | OCRDMA_CMD_GET_RDMA_STATS, | ||
| 1201 | OCRDMA_SUBSYS_ROCE, | ||
| 1202 | dev->stats_mem.size); | ||
| 1203 | if (reset) | ||
| 1204 | req->reset_stats = reset; | ||
| 1205 | |||
| 1206 | status = ocrdma_nonemb_mbx_cmd(dev, mqe, dev->stats_mem.va); | ||
| 1207 | if (status) | ||
| 1208 | /* Copy from cache, if mbox fails */ | ||
| 1209 | memcpy(req, old_stats, sizeof(struct ocrdma_rdma_stats_resp)); | ||
| 1210 | else | ||
| 1211 | ocrdma_le32_to_cpu(req, dev->stats_mem.size); | ||
| 1212 | |||
| 1213 | kfree(old_stats); | ||
| 1214 | return status; | ||
| 1215 | } | ||
| 1216 | |||
| 1217 | static int ocrdma_mbx_get_ctrl_attribs(struct ocrdma_dev *dev) | ||
| 1218 | { | ||
| 1219 | int status = -ENOMEM; | ||
| 1220 | struct ocrdma_dma_mem dma; | ||
| 1221 | struct ocrdma_mqe *mqe; | ||
| 1222 | struct ocrdma_get_ctrl_attribs_rsp *ctrl_attr_rsp; | ||
| 1223 | struct mgmt_hba_attribs *hba_attribs; | ||
| 1224 | |||
| 1225 | mqe = ocrdma_alloc_mqe(); | ||
| 1226 | if (!mqe) | ||
| 1227 | return status; | ||
| 1228 | memset(mqe, 0, sizeof(*mqe)); | ||
| 1229 | |||
| 1230 | dma.size = sizeof(struct ocrdma_get_ctrl_attribs_rsp); | ||
| 1231 | dma.va = dma_alloc_coherent(&dev->nic_info.pdev->dev, | ||
| 1232 | dma.size, &dma.pa, GFP_KERNEL); | ||
| 1233 | if (!dma.va) | ||
| 1234 | goto free_mqe; | ||
| 1235 | |||
| 1236 | mqe->hdr.pyld_len = dma.size; | ||
| 1237 | mqe->hdr.spcl_sge_cnt_emb |= | ||
| 1238 | (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) & | ||
| 1239 | OCRDMA_MQE_HDR_SGE_CNT_MASK; | ||
| 1240 | mqe->u.nonemb_req.sge[0].pa_lo = (u32) (dma.pa & 0xffffffff); | ||
| 1241 | mqe->u.nonemb_req.sge[0].pa_hi = (u32) upper_32_bits(dma.pa); | ||
| 1242 | mqe->u.nonemb_req.sge[0].len = dma.size; | ||
| 1243 | |||
| 1244 | memset(dma.va, 0, dma.size); | ||
| 1245 | ocrdma_init_mch((struct ocrdma_mbx_hdr *)dma.va, | ||
| 1246 | OCRDMA_CMD_GET_CTRL_ATTRIBUTES, | ||
| 1247 | OCRDMA_SUBSYS_COMMON, | ||
| 1248 | dma.size); | ||
| 1249 | |||
| 1250 | status = ocrdma_nonemb_mbx_cmd(dev, mqe, dma.va); | ||
| 1251 | if (!status) { | ||
| 1252 | ctrl_attr_rsp = (struct ocrdma_get_ctrl_attribs_rsp *)dma.va; | ||
| 1253 | hba_attribs = &ctrl_attr_rsp->ctrl_attribs.hba_attribs; | ||
| 1254 | |||
| 1255 | dev->hba_port_num = hba_attribs->phy_port; | ||
| 1256 | strncpy(dev->model_number, | ||
| 1257 | hba_attribs->controller_model_number, 31); | ||
| 1258 | } | ||
| 1259 | dma_free_coherent(&dev->nic_info.pdev->dev, dma.size, dma.va, dma.pa); | ||
| 1260 | free_mqe: | ||
| 1261 | kfree(mqe); | ||
| 1262 | return status; | ||
| 1263 | } | ||
| 1264 | |||
| 1113 | static int ocrdma_mbx_query_dev(struct ocrdma_dev *dev) | 1265 | static int ocrdma_mbx_query_dev(struct ocrdma_dev *dev) |
| 1114 | { | 1266 | { |
| 1115 | int status = -ENOMEM; | 1267 | int status = -ENOMEM; |
| @@ -1157,6 +1309,35 @@ mbx_err: | |||
| 1157 | return status; | 1309 | return status; |
| 1158 | } | 1310 | } |
| 1159 | 1311 | ||
| 1312 | static int ocrdma_mbx_get_phy_info(struct ocrdma_dev *dev) | ||
| 1313 | { | ||
| 1314 | int status = -ENOMEM; | ||
| 1315 | struct ocrdma_mqe *cmd; | ||
| 1316 | struct ocrdma_get_phy_info_rsp *rsp; | ||
| 1317 | |||
| 1318 | cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_PHY_DETAILS, sizeof(*cmd)); | ||
| 1319 | if (!cmd) | ||
| 1320 | return status; | ||
| 1321 | |||
| 1322 | ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0], | ||
| 1323 | OCRDMA_CMD_PHY_DETAILS, OCRDMA_SUBSYS_COMMON, | ||
| 1324 | sizeof(*cmd)); | ||
| 1325 | |||
| 1326 | status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); | ||
| 1327 | if (status) | ||
| 1328 | goto mbx_err; | ||
| 1329 | |||
| 1330 | rsp = (struct ocrdma_get_phy_info_rsp *)cmd; | ||
| 1331 | dev->phy.phy_type = le16_to_cpu(rsp->phy_type); | ||
| 1332 | dev->phy.auto_speeds_supported = | ||
| 1333 | le16_to_cpu(rsp->auto_speeds_supported); | ||
| 1334 | dev->phy.fixed_speeds_supported = | ||
| 1335 | le16_to_cpu(rsp->fixed_speeds_supported); | ||
| 1336 | mbx_err: | ||
| 1337 | kfree(cmd); | ||
| 1338 | return status; | ||
| 1339 | } | ||
| 1340 | |||
| 1160 | int ocrdma_mbx_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd) | 1341 | int ocrdma_mbx_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd) |
| 1161 | { | 1342 | { |
| 1162 | int status = -ENOMEM; | 1343 | int status = -ENOMEM; |
| @@ -1226,7 +1407,7 @@ static int ocrdma_build_q_conf(u32 *num_entries, int entry_size, | |||
| 1226 | 1407 | ||
| 1227 | static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev) | 1408 | static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev) |
| 1228 | { | 1409 | { |
| 1229 | int i ; | 1410 | int i; |
| 1230 | int status = 0; | 1411 | int status = 0; |
| 1231 | int max_ah; | 1412 | int max_ah; |
| 1232 | struct ocrdma_create_ah_tbl *cmd; | 1413 | struct ocrdma_create_ah_tbl *cmd; |
| @@ -1357,12 +1538,10 @@ static void ocrdma_unbind_eq(struct ocrdma_dev *dev, u16 eq_id) | |||
| 1357 | int i; | 1538 | int i; |
| 1358 | 1539 | ||
| 1359 | mutex_lock(&dev->dev_lock); | 1540 | mutex_lock(&dev->dev_lock); |
| 1360 | for (i = 0; i < dev->eq_cnt; i++) { | 1541 | i = ocrdma_get_eq_table_index(dev, eq_id); |
| 1361 | if (dev->eq_tbl[i].q.id != eq_id) | 1542 | if (i == -EINVAL) |
| 1362 | continue; | 1543 | BUG(); |
| 1363 | dev->eq_tbl[i].cq_cnt -= 1; | 1544 | dev->eq_tbl[i].cq_cnt -= 1; |
| 1364 | break; | ||
| 1365 | } | ||
| 1366 | mutex_unlock(&dev->dev_lock); | 1545 | mutex_unlock(&dev->dev_lock); |
| 1367 | } | 1546 | } |
| 1368 | 1547 | ||
| @@ -1380,7 +1559,7 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq, | |||
| 1380 | __func__, dev->id, dev->attr.max_cqe, entries); | 1559 | __func__, dev->id, dev->attr.max_cqe, entries); |
| 1381 | return -EINVAL; | 1560 | return -EINVAL; |
| 1382 | } | 1561 | } |
| 1383 | if (dpp_cq && (dev->nic_info.dev_family != OCRDMA_GEN2_FAMILY)) | 1562 | if (dpp_cq && (ocrdma_get_asic_type(dev) != OCRDMA_ASIC_GEN_SKH_R)) |
| 1384 | return -EINVAL; | 1563 | return -EINVAL; |
| 1385 | 1564 | ||
| 1386 | if (dpp_cq) { | 1565 | if (dpp_cq) { |
| @@ -1417,6 +1596,7 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq, | |||
| 1417 | cq->eqn = ocrdma_bind_eq(dev); | 1596 | cq->eqn = ocrdma_bind_eq(dev); |
| 1418 | cmd->cmd.req.rsvd_version = OCRDMA_CREATE_CQ_VER3; | 1597 | cmd->cmd.req.rsvd_version = OCRDMA_CREATE_CQ_VER3; |
| 1419 | cqe_count = cq->len / cqe_size; | 1598 | cqe_count = cq->len / cqe_size; |
| 1599 | cq->cqe_cnt = cqe_count; | ||
| 1420 | if (cqe_count > 1024) { | 1600 | if (cqe_count > 1024) { |
| 1421 | /* Set cnt to 3 to indicate more than 1024 cq entries */ | 1601 | /* Set cnt to 3 to indicate more than 1024 cq entries */ |
| 1422 | cmd->cmd.ev_cnt_flags |= (0x3 << OCRDMA_CREATE_CQ_CNT_SHIFT); | 1602 | cmd->cmd.ev_cnt_flags |= (0x3 << OCRDMA_CREATE_CQ_CNT_SHIFT); |
| @@ -1439,7 +1619,7 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq, | |||
| 1439 | } | 1619 | } |
| 1440 | /* shared eq between all the consumer cqs. */ | 1620 | /* shared eq between all the consumer cqs. */ |
| 1441 | cmd->cmd.eqn = cq->eqn; | 1621 | cmd->cmd.eqn = cq->eqn; |
| 1442 | if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { | 1622 | if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) { |
| 1443 | if (dpp_cq) | 1623 | if (dpp_cq) |
| 1444 | cmd->cmd.pgsz_pgcnt |= OCRDMA_CREATE_CQ_DPP << | 1624 | cmd->cmd.pgsz_pgcnt |= OCRDMA_CREATE_CQ_DPP << |
| 1445 | OCRDMA_CREATE_CQ_TYPE_SHIFT; | 1625 | OCRDMA_CREATE_CQ_TYPE_SHIFT; |
| @@ -1484,12 +1664,9 @@ int ocrdma_mbx_destroy_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq) | |||
| 1484 | (cq->id << OCRDMA_DESTROY_CQ_QID_SHIFT) & | 1664 | (cq->id << OCRDMA_DESTROY_CQ_QID_SHIFT) & |
| 1485 | OCRDMA_DESTROY_CQ_QID_MASK; | 1665 | OCRDMA_DESTROY_CQ_QID_MASK; |
| 1486 | 1666 | ||
| 1487 | ocrdma_unbind_eq(dev, cq->eqn); | ||
| 1488 | status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); | 1667 | status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); |
| 1489 | if (status) | 1668 | ocrdma_unbind_eq(dev, cq->eqn); |
| 1490 | goto mbx_err; | ||
| 1491 | dma_free_coherent(&dev->nic_info.pdev->dev, cq->len, cq->va, cq->pa); | 1669 | dma_free_coherent(&dev->nic_info.pdev->dev, cq->len, cq->va, cq->pa); |
| 1492 | mbx_err: | ||
| 1493 | kfree(cmd); | 1670 | kfree(cmd); |
| 1494 | return status; | 1671 | return status; |
| 1495 | } | 1672 | } |
| @@ -2029,8 +2206,7 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs, | |||
| 2029 | OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK; | 2206 | OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK; |
| 2030 | qp->rq_cq = cq; | 2207 | qp->rq_cq = cq; |
| 2031 | 2208 | ||
| 2032 | if (pd->dpp_enabled && attrs->cap.max_inline_data && pd->num_dpp_qp && | 2209 | if (pd->dpp_enabled && pd->num_dpp_qp) { |
| 2033 | (attrs->cap.max_inline_data <= dev->attr.max_inline_data)) { | ||
| 2034 | ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq, | 2210 | ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq, |
| 2035 | dpp_cq_id); | 2211 | dpp_cq_id); |
| 2036 | } | 2212 | } |
| @@ -2099,7 +2275,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp, | |||
| 2099 | memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0], | 2275 | memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0], |
| 2100 | sizeof(cmd->params.dgid)); | 2276 | sizeof(cmd->params.dgid)); |
| 2101 | status = ocrdma_query_gid(&qp->dev->ibdev, 1, | 2277 | status = ocrdma_query_gid(&qp->dev->ibdev, 1, |
| 2102 | ah_attr->grh.sgid_index, &sgid); | 2278 | ah_attr->grh.sgid_index, &sgid); |
| 2103 | if (status) | 2279 | if (status) |
| 2104 | return status; | 2280 | return status; |
| 2105 | 2281 | ||
| @@ -2127,8 +2303,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp, | |||
| 2127 | 2303 | ||
| 2128 | static int ocrdma_set_qp_params(struct ocrdma_qp *qp, | 2304 | static int ocrdma_set_qp_params(struct ocrdma_qp *qp, |
| 2129 | struct ocrdma_modify_qp *cmd, | 2305 | struct ocrdma_modify_qp *cmd, |
| 2130 | struct ib_qp_attr *attrs, int attr_mask, | 2306 | struct ib_qp_attr *attrs, int attr_mask) |
| 2131 | enum ib_qp_state old_qps) | ||
| 2132 | { | 2307 | { |
| 2133 | int status = 0; | 2308 | int status = 0; |
| 2134 | 2309 | ||
| @@ -2233,8 +2408,7 @@ pmtu_err: | |||
| 2233 | } | 2408 | } |
| 2234 | 2409 | ||
| 2235 | int ocrdma_mbx_modify_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp, | 2410 | int ocrdma_mbx_modify_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp, |
| 2236 | struct ib_qp_attr *attrs, int attr_mask, | 2411 | struct ib_qp_attr *attrs, int attr_mask) |
| 2237 | enum ib_qp_state old_qps) | ||
| 2238 | { | 2412 | { |
| 2239 | int status = -ENOMEM; | 2413 | int status = -ENOMEM; |
| 2240 | struct ocrdma_modify_qp *cmd; | 2414 | struct ocrdma_modify_qp *cmd; |
| @@ -2257,7 +2431,7 @@ int ocrdma_mbx_modify_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp, | |||
| 2257 | OCRDMA_QP_PARAMS_STATE_MASK; | 2431 | OCRDMA_QP_PARAMS_STATE_MASK; |
| 2258 | } | 2432 | } |
| 2259 | 2433 | ||
| 2260 | status = ocrdma_set_qp_params(qp, cmd, attrs, attr_mask, old_qps); | 2434 | status = ocrdma_set_qp_params(qp, cmd, attrs, attr_mask); |
| 2261 | if (status) | 2435 | if (status) |
| 2262 | goto mbx_err; | 2436 | goto mbx_err; |
| 2263 | status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); | 2437 | status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); |
| @@ -2488,7 +2662,7 @@ static int ocrdma_create_eqs(struct ocrdma_dev *dev) | |||
| 2488 | 2662 | ||
| 2489 | for (i = 0; i < num_eq; i++) { | 2663 | for (i = 0; i < num_eq; i++) { |
| 2490 | status = ocrdma_create_eq(dev, &dev->eq_tbl[i], | 2664 | status = ocrdma_create_eq(dev, &dev->eq_tbl[i], |
| 2491 | OCRDMA_EQ_LEN); | 2665 | OCRDMA_EQ_LEN); |
| 2492 | if (status) { | 2666 | if (status) { |
| 2493 | status = -EINVAL; | 2667 | status = -EINVAL; |
| 2494 | break; | 2668 | break; |
| @@ -2533,6 +2707,13 @@ int ocrdma_init_hw(struct ocrdma_dev *dev) | |||
| 2533 | status = ocrdma_mbx_create_ah_tbl(dev); | 2707 | status = ocrdma_mbx_create_ah_tbl(dev); |
| 2534 | if (status) | 2708 | if (status) |
| 2535 | goto conf_err; | 2709 | goto conf_err; |
| 2710 | status = ocrdma_mbx_get_phy_info(dev); | ||
| 2711 | if (status) | ||
| 2712 | goto conf_err; | ||
| 2713 | status = ocrdma_mbx_get_ctrl_attribs(dev); | ||
| 2714 | if (status) | ||
| 2715 | goto conf_err; | ||
| 2716 | |||
| 2536 | return 0; | 2717 | return 0; |
| 2537 | 2718 | ||
| 2538 | conf_err: | 2719 | conf_err: |
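
The ocrdma_irq_handler() rework in the ocrdma_hw.c hunks above drops the eqe_popped accounting in favour of ringing the EQ doorbell as each entry is consumed and bounding the loop with a budget taken from eq->cq_cnt, so a stale EQE left behind after the last bound CQ is destroyed cannot keep the handler spinning. A simplified, userspace-only model of that budgeted polling loop follows; the ring and entry types are stand-ins, not driver structures.

/* Simplified model of a budgeted event-queue polling loop, not driver code. */
#include <stdbool.h>
#include <stdio.h>

struct fake_eqe { bool valid; int cq_id; };

static int poll_eq(struct fake_eqe *ring, int ring_len, int budget)
{
	int handled = 0;

	for (int i = 0; i < ring_len; i++) {
		if (!ring[i].valid)
			break;                    /* no more posted work */
		ring[i].valid = false;            /* consume and re-arm ("ring doorbell") */
		printf("dispatch CQ %d\n", ring[i].cq_id);
		handled++;
		if (budget && --budget == 0)
			break;                    /* budget exhausted: bail out */
	}
	return handled;
}

int main(void)
{
	struct fake_eqe ring[4] = { {true, 7}, {true, 9}, {true, 9}, {false, 0} };

	printf("handled %d entries\n", poll_eq(ring, 4, 2 /* two bound CQs */));
	return 0;
}
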
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h index 82fe332ae6c6..e513f7293142 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h | |||
| @@ -112,8 +112,7 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *, struct ib_qp_init_attr *attrs, | |||
| 112 | u8 enable_dpp_cq, u16 dpp_cq_id, u16 *dpp_offset, | 112 | u8 enable_dpp_cq, u16 dpp_cq_id, u16 *dpp_offset, |
| 113 | u16 *dpp_credit_lmt); | 113 | u16 *dpp_credit_lmt); |
| 114 | int ocrdma_mbx_modify_qp(struct ocrdma_dev *, struct ocrdma_qp *, | 114 | int ocrdma_mbx_modify_qp(struct ocrdma_dev *, struct ocrdma_qp *, |
| 115 | struct ib_qp_attr *attrs, int attr_mask, | 115 | struct ib_qp_attr *attrs, int attr_mask); |
| 116 | enum ib_qp_state old_qps); | ||
| 117 | int ocrdma_mbx_query_qp(struct ocrdma_dev *, struct ocrdma_qp *, | 116 | int ocrdma_mbx_query_qp(struct ocrdma_dev *, struct ocrdma_qp *, |
| 118 | struct ocrdma_qp_params *param); | 117 | struct ocrdma_qp_params *param); |
| 119 | int ocrdma_mbx_destroy_qp(struct ocrdma_dev *, struct ocrdma_qp *); | 118 | int ocrdma_mbx_destroy_qp(struct ocrdma_dev *, struct ocrdma_qp *); |
| @@ -132,5 +131,8 @@ int ocrdma_qp_state_change(struct ocrdma_qp *, enum ib_qp_state new_state, | |||
| 132 | bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *); | 131 | bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *); |
| 133 | bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *); | 132 | bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *); |
| 134 | void ocrdma_flush_qp(struct ocrdma_qp *); | 133 | void ocrdma_flush_qp(struct ocrdma_qp *); |
| 134 | int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq); | ||
| 135 | 135 | ||
| 136 | int ocrdma_mbx_rdma_stats(struct ocrdma_dev *, bool reset); | ||
| 137 | char *port_speed_string(struct ocrdma_dev *dev); | ||
| 136 | #endif /* __OCRDMA_HW_H__ */ | 138 | #endif /* __OCRDMA_HW_H__ */ |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index 1a8a945efa60..7c504e079744 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c | |||
| @@ -39,10 +39,11 @@ | |||
| 39 | #include "ocrdma_ah.h" | 39 | #include "ocrdma_ah.h" |
| 40 | #include "be_roce.h" | 40 | #include "be_roce.h" |
| 41 | #include "ocrdma_hw.h" | 41 | #include "ocrdma_hw.h" |
| 42 | #include "ocrdma_stats.h" | ||
| 42 | #include "ocrdma_abi.h" | 43 | #include "ocrdma_abi.h" |
| 43 | 44 | ||
| 44 | MODULE_VERSION(OCRDMA_ROCE_DEV_VERSION); | 45 | MODULE_VERSION(OCRDMA_ROCE_DRV_VERSION); |
| 45 | MODULE_DESCRIPTION("Emulex RoCE HCA Driver"); | 46 | MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION); |
| 46 | MODULE_AUTHOR("Emulex Corporation"); | 47 | MODULE_AUTHOR("Emulex Corporation"); |
| 47 | MODULE_LICENSE("GPL"); | 48 | MODULE_LICENSE("GPL"); |
| 48 | 49 | ||
| @@ -286,7 +287,7 @@ static int ocrdma_register_device(struct ocrdma_dev *dev) | |||
| 286 | 287 | ||
| 287 | dev->ibdev.process_mad = ocrdma_process_mad; | 288 | dev->ibdev.process_mad = ocrdma_process_mad; |
| 288 | 289 | ||
| 289 | if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { | 290 | if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) { |
| 290 | dev->ibdev.uverbs_cmd_mask |= | 291 | dev->ibdev.uverbs_cmd_mask |= |
| 291 | OCRDMA_UVERBS(CREATE_SRQ) | | 292 | OCRDMA_UVERBS(CREATE_SRQ) | |
| 292 | OCRDMA_UVERBS(MODIFY_SRQ) | | 293 | OCRDMA_UVERBS(MODIFY_SRQ) | |
| @@ -338,9 +339,42 @@ static void ocrdma_free_resources(struct ocrdma_dev *dev) | |||
| 338 | kfree(dev->sgid_tbl); | 339 | kfree(dev->sgid_tbl); |
| 339 | } | 340 | } |
| 340 | 341 | ||
| 342 | /* OCRDMA sysfs interface */ | ||
| 343 | static ssize_t show_rev(struct device *device, struct device_attribute *attr, | ||
| 344 | char *buf) | ||
| 345 | { | ||
| 346 | struct ocrdma_dev *dev = dev_get_drvdata(device); | ||
| 347 | |||
| 348 | return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->nic_info.pdev->vendor); | ||
| 349 | } | ||
| 350 | |||
| 351 | static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr, | ||
| 352 | char *buf) | ||
| 353 | { | ||
| 354 | struct ocrdma_dev *dev = dev_get_drvdata(device); | ||
| 355 | |||
| 356 | return scnprintf(buf, PAGE_SIZE, "%s", &dev->attr.fw_ver[0]); | ||
| 357 | } | ||
| 358 | |||
| 359 | static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); | ||
| 360 | static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL); | ||
| 361 | |||
| 362 | static struct device_attribute *ocrdma_attributes[] = { | ||
| 363 | &dev_attr_hw_rev, | ||
| 364 | &dev_attr_fw_ver | ||
| 365 | }; | ||
| 366 | |||
| 367 | static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev) | ||
| 368 | { | ||
| 369 | int i; | ||
| 370 | |||
| 371 | for (i = 0; i < ARRAY_SIZE(ocrdma_attributes); i++) | ||
| 372 | device_remove_file(&dev->ibdev.dev, ocrdma_attributes[i]); | ||
| 373 | } | ||
| 374 | |||
| 341 | static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info) | 375 | static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info) |
| 342 | { | 376 | { |
| 343 | int status = 0; | 377 | int status = 0, i; |
| 344 | struct ocrdma_dev *dev; | 378 | struct ocrdma_dev *dev; |
| 345 | 379 | ||
| 346 | dev = (struct ocrdma_dev *)ib_alloc_device(sizeof(struct ocrdma_dev)); | 380 | dev = (struct ocrdma_dev *)ib_alloc_device(sizeof(struct ocrdma_dev)); |
| @@ -369,11 +403,25 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info) | |||
| 369 | if (status) | 403 | if (status) |
| 370 | goto alloc_err; | 404 | goto alloc_err; |
| 371 | 405 | ||
| 406 | for (i = 0; i < ARRAY_SIZE(ocrdma_attributes); i++) | ||
| 407 | if (device_create_file(&dev->ibdev.dev, ocrdma_attributes[i])) | ||
| 408 | goto sysfs_err; | ||
| 372 | spin_lock(&ocrdma_devlist_lock); | 409 | spin_lock(&ocrdma_devlist_lock); |
| 373 | list_add_tail_rcu(&dev->entry, &ocrdma_dev_list); | 410 | list_add_tail_rcu(&dev->entry, &ocrdma_dev_list); |
| 374 | spin_unlock(&ocrdma_devlist_lock); | 411 | spin_unlock(&ocrdma_devlist_lock); |
| 412 | /* Init stats */ | ||
| 413 | ocrdma_add_port_stats(dev); | ||
| 414 | |||
| 415 | pr_info("%s %s: %s \"%s\" port %d\n", | ||
| 416 | dev_name(&dev->nic_info.pdev->dev), hca_name(dev), | ||
| 417 | port_speed_string(dev), dev->model_number, | ||
| 418 | dev->hba_port_num); | ||
| 419 | pr_info("%s ocrdma%d driver loaded successfully\n", | ||
| 420 | dev_name(&dev->nic_info.pdev->dev), dev->id); | ||
| 375 | return dev; | 421 | return dev; |
| 376 | 422 | ||
| 423 | sysfs_err: | ||
| 424 | ocrdma_remove_sysfiles(dev); | ||
| 377 | alloc_err: | 425 | alloc_err: |
| 378 | ocrdma_free_resources(dev); | 426 | ocrdma_free_resources(dev); |
| 379 | ocrdma_cleanup_hw(dev); | 427 | ocrdma_cleanup_hw(dev); |
| @@ -400,6 +448,9 @@ static void ocrdma_remove(struct ocrdma_dev *dev) | |||
| 400 | /* first unregister with stack to stop all the active traffic | 448 | /* first unregister with stack to stop all the active traffic |
| 401 | * of the registered clients. | 449 | * of the registered clients. |
| 402 | */ | 450 | */ |
| 451 | ocrdma_rem_port_stats(dev); | ||
| 452 | ocrdma_remove_sysfiles(dev); | ||
| 453 | |||
| 403 | ib_unregister_device(&dev->ibdev); | 454 | ib_unregister_device(&dev->ibdev); |
| 404 | 455 | ||
| 405 | spin_lock(&ocrdma_devlist_lock); | 456 | spin_lock(&ocrdma_devlist_lock); |
| @@ -437,7 +488,7 @@ static int ocrdma_close(struct ocrdma_dev *dev) | |||
| 437 | cur_qp = dev->qp_tbl; | 488 | cur_qp = dev->qp_tbl; |
| 438 | for (i = 0; i < OCRDMA_MAX_QP; i++) { | 489 | for (i = 0; i < OCRDMA_MAX_QP; i++) { |
| 439 | qp = cur_qp[i]; | 490 | qp = cur_qp[i]; |
| 440 | if (qp) { | 491 | if (qp && qp->ibqp.qp_type != IB_QPT_GSI) { |
| 441 | /* change the QP state to ERROR */ | 492 | /* change the QP state to ERROR */ |
| 442 | _ocrdma_modify_qp(&qp->ibqp, &attrs, attr_mask); | 493 | _ocrdma_modify_qp(&qp->ibqp, &attrs, attr_mask); |
| 443 | 494 | ||
| @@ -478,6 +529,7 @@ static struct ocrdma_driver ocrdma_drv = { | |||
| 478 | .add = ocrdma_add, | 529 | .add = ocrdma_add, |
| 479 | .remove = ocrdma_remove, | 530 | .remove = ocrdma_remove, |
| 480 | .state_change_handler = ocrdma_event_handler, | 531 | .state_change_handler = ocrdma_event_handler, |
| 532 | .be_abi_version = OCRDMA_BE_ROCE_ABI_VERSION, | ||
| 481 | }; | 533 | }; |
| 482 | 534 | ||
| 483 | static void ocrdma_unregister_inet6addr_notifier(void) | 535 | static void ocrdma_unregister_inet6addr_notifier(void) |
| @@ -487,10 +539,17 @@ static void ocrdma_unregister_inet6addr_notifier(void) | |||
| 487 | #endif | 539 | #endif |
| 488 | } | 540 | } |
| 489 | 541 | ||
| 542 | static void ocrdma_unregister_inetaddr_notifier(void) | ||
| 543 | { | ||
| 544 | unregister_inetaddr_notifier(&ocrdma_inetaddr_notifier); | ||
| 545 | } | ||
| 546 | |||
| 490 | static int __init ocrdma_init_module(void) | 547 | static int __init ocrdma_init_module(void) |
| 491 | { | 548 | { |
| 492 | int status; | 549 | int status; |
| 493 | 550 | ||
| 551 | ocrdma_init_debugfs(); | ||
| 552 | |||
| 494 | status = register_inetaddr_notifier(&ocrdma_inetaddr_notifier); | 553 | status = register_inetaddr_notifier(&ocrdma_inetaddr_notifier); |
| 495 | if (status) | 554 | if (status) |
| 496 | return status; | 555 | return status; |
| @@ -498,13 +557,19 @@ static int __init ocrdma_init_module(void) | |||
| 498 | #if IS_ENABLED(CONFIG_IPV6) | 557 | #if IS_ENABLED(CONFIG_IPV6) |
| 499 | status = register_inet6addr_notifier(&ocrdma_inet6addr_notifier); | 558 | status = register_inet6addr_notifier(&ocrdma_inet6addr_notifier); |
| 500 | if (status) | 559 | if (status) |
| 501 | return status; | 560 | goto err_notifier6; |
| 502 | #endif | 561 | #endif |
| 503 | 562 | ||
| 504 | status = be_roce_register_driver(&ocrdma_drv); | 563 | status = be_roce_register_driver(&ocrdma_drv); |
| 505 | if (status) | 564 | if (status) |
| 506 | ocrdma_unregister_inet6addr_notifier(); | 565 | goto err_be_reg; |
| 507 | 566 | ||
| 567 | return 0; | ||
| 568 | |||
| 569 | err_be_reg: | ||
| 570 | ocrdma_unregister_inet6addr_notifier(); | ||
| 571 | err_notifier6: | ||
| 572 | ocrdma_unregister_inetaddr_notifier(); | ||
| 508 | return status; | 573 | return status; |
| 509 | } | 574 | } |
| 510 | 575 | ||
| @@ -512,6 +577,8 @@ static void __exit ocrdma_exit_module(void) | |||
| 512 | { | 577 | { |
| 513 | be_roce_unregister_driver(&ocrdma_drv); | 578 | be_roce_unregister_driver(&ocrdma_drv); |
| 514 | ocrdma_unregister_inet6addr_notifier(); | 579 | ocrdma_unregister_inet6addr_notifier(); |
| 580 | ocrdma_unregister_inetaddr_notifier(); | ||
| 581 | ocrdma_rem_debugfs(); | ||
| 515 | } | 582 | } |
| 516 | 583 | ||
| 517 | module_init(ocrdma_init_module); | 584 | module_init(ocrdma_init_module); |
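Pieced together from the two hunks above (the unchanged context between them is assumed), the error unwinding in ocrdma_init_module() now registers in order and releases in reverse through the two new goto labels:

	static int __init ocrdma_init_module(void)
	{
		int status;

		ocrdma_init_debugfs();

		status = register_inetaddr_notifier(&ocrdma_inetaddr_notifier);
		if (status)
			return status;

	#if IS_ENABLED(CONFIG_IPV6)
		status = register_inet6addr_notifier(&ocrdma_inet6addr_notifier);
		if (status)
			goto err_notifier6;
	#endif

		status = be_roce_register_driver(&ocrdma_drv);
		if (status)
			goto err_be_reg;

		return 0;

	err_be_reg:
		/* unwind in reverse registration order */
		ocrdma_unregister_inet6addr_notifier();
	err_notifier6:
		ocrdma_unregister_inetaddr_notifier();
		return status;
	}

When CONFIG_IPV6 is disabled, ocrdma_unregister_inet6addr_notifier() reduces to an empty stub (its body is guarded by the same #if, per the #endif visible in the earlier hunk), so falling through from err_be_reg remains safe.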
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h index 60d5ac23ea80..96c9ee602ba4 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h | |||
| @@ -30,8 +30,16 @@ | |||
| 30 | 30 | ||
| 31 | #define Bit(_b) (1 << (_b)) | 31 | #define Bit(_b) (1 << (_b)) |
| 32 | 32 | ||
| 33 | #define OCRDMA_GEN1_FAMILY 0xB | 33 | enum { |
| 34 | #define OCRDMA_GEN2_FAMILY 0x0F | 34 | OCRDMA_ASIC_GEN_SKH_R = 0x04, |
| 35 | OCRDMA_ASIC_GEN_LANCER = 0x0B | ||
| 36 | }; | ||
| 37 | |||
| 38 | enum { | ||
| 39 | OCRDMA_ASIC_REV_A0 = 0x00, | ||
| 40 | OCRDMA_ASIC_REV_B0 = 0x10, | ||
| 41 | OCRDMA_ASIC_REV_C0 = 0x20 | ||
| 42 | }; | ||
| 35 | 43 | ||
| 36 | #define OCRDMA_SUBSYS_ROCE 10 | 44 | #define OCRDMA_SUBSYS_ROCE 10 |
| 37 | enum { | 45 | enum { |
| @@ -64,6 +72,7 @@ enum { | |||
| 64 | 72 | ||
| 65 | OCRDMA_CMD_ATTACH_MCAST, | 73 | OCRDMA_CMD_ATTACH_MCAST, |
| 66 | OCRDMA_CMD_DETACH_MCAST, | 74 | OCRDMA_CMD_DETACH_MCAST, |
| 75 | OCRDMA_CMD_GET_RDMA_STATS, | ||
| 67 | 76 | ||
| 68 | OCRDMA_CMD_MAX | 77 | OCRDMA_CMD_MAX |
| 69 | }; | 78 | }; |
| @@ -74,12 +83,14 @@ enum { | |||
| 74 | OCRDMA_CMD_CREATE_CQ = 12, | 83 | OCRDMA_CMD_CREATE_CQ = 12, |
| 75 | OCRDMA_CMD_CREATE_EQ = 13, | 84 | OCRDMA_CMD_CREATE_EQ = 13, |
| 76 | OCRDMA_CMD_CREATE_MQ = 21, | 85 | OCRDMA_CMD_CREATE_MQ = 21, |
| 86 | OCRDMA_CMD_GET_CTRL_ATTRIBUTES = 32, | ||
| 77 | OCRDMA_CMD_GET_FW_VER = 35, | 87 | OCRDMA_CMD_GET_FW_VER = 35, |
| 78 | OCRDMA_CMD_DELETE_MQ = 53, | 88 | OCRDMA_CMD_DELETE_MQ = 53, |
| 79 | OCRDMA_CMD_DELETE_CQ = 54, | 89 | OCRDMA_CMD_DELETE_CQ = 54, |
| 80 | OCRDMA_CMD_DELETE_EQ = 55, | 90 | OCRDMA_CMD_DELETE_EQ = 55, |
| 81 | OCRDMA_CMD_GET_FW_CONFIG = 58, | 91 | OCRDMA_CMD_GET_FW_CONFIG = 58, |
| 82 | OCRDMA_CMD_CREATE_MQ_EXT = 90 | 92 | OCRDMA_CMD_CREATE_MQ_EXT = 90, |
| 93 | OCRDMA_CMD_PHY_DETAILS = 102 | ||
| 83 | }; | 94 | }; |
| 84 | 95 | ||
| 85 | enum { | 96 | enum { |
| @@ -103,7 +114,10 @@ enum { | |||
| 103 | OCRDMA_DB_GEN2_SRQ_OFFSET = OCRDMA_DB_GEN2_RQ_OFFSET, | 114 | OCRDMA_DB_GEN2_SRQ_OFFSET = OCRDMA_DB_GEN2_RQ_OFFSET, |
| 104 | OCRDMA_DB_CQ_OFFSET = 0x120, | 115 | OCRDMA_DB_CQ_OFFSET = 0x120, |
| 105 | OCRDMA_DB_EQ_OFFSET = OCRDMA_DB_CQ_OFFSET, | 116 | OCRDMA_DB_EQ_OFFSET = OCRDMA_DB_CQ_OFFSET, |
| 106 | OCRDMA_DB_MQ_OFFSET = 0x140 | 117 | OCRDMA_DB_MQ_OFFSET = 0x140, |
| 118 | |||
| 119 | OCRDMA_DB_SQ_SHIFT = 16, | ||
| 120 | OCRDMA_DB_RQ_SHIFT = 24 | ||
| 107 | }; | 121 | }; |
| 108 | 122 | ||
| 109 | #define OCRDMA_DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */ | 123 | #define OCRDMA_DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */ |
| @@ -138,6 +152,10 @@ enum { | |||
| 138 | #define OCRDMA_MIN_Q_PAGE_SIZE (4096) | 152 | #define OCRDMA_MIN_Q_PAGE_SIZE (4096) |
| 139 | #define OCRDMA_MAX_Q_PAGES (8) | 153 | #define OCRDMA_MAX_Q_PAGES (8) |
| 140 | 154 | ||
| 155 | #define OCRDMA_SLI_ASIC_ID_OFFSET 0x9C | ||
| 156 | #define OCRDMA_SLI_ASIC_REV_MASK 0x000000FF | ||
| 157 | #define OCRDMA_SLI_ASIC_GEN_NUM_MASK 0x0000FF00 | ||
| 158 | #define OCRDMA_SLI_ASIC_GEN_NUM_SHIFT 0x08 | ||
| 141 | /* | 159 | /* |
| 142 | # 0: 4K Bytes | 160 | # 0: 4K Bytes |
| 143 | # 1: 8K Bytes | 161 | # 1: 8K Bytes |
| @@ -562,6 +580,30 @@ enum { | |||
| 562 | OCRDMA_FN_MODE_RDMA = 0x4 | 580 | OCRDMA_FN_MODE_RDMA = 0x4 |
| 563 | }; | 581 | }; |
| 564 | 582 | ||
| 583 | struct ocrdma_get_phy_info_rsp { | ||
| 584 | struct ocrdma_mqe_hdr hdr; | ||
| 585 | struct ocrdma_mbx_rsp rsp; | ||
| 586 | |||
| 587 | u16 phy_type; | ||
| 588 | u16 interface_type; | ||
| 589 | u32 misc_params; | ||
| 590 | u16 ext_phy_details; | ||
| 591 | u16 rsvd; | ||
| 592 | u16 auto_speeds_supported; | ||
| 593 | u16 fixed_speeds_supported; | ||
| 594 | u32 future_use[2]; | ||
| 595 | }; | ||
| 596 | |||
| 597 | enum { | ||
| 598 | OCRDMA_PHY_SPEED_ZERO = 0x0, | ||
| 599 | OCRDMA_PHY_SPEED_10MBPS = 0x1, | ||
| 600 | OCRDMA_PHY_SPEED_100MBPS = 0x2, | ||
| 601 | OCRDMA_PHY_SPEED_1GBPS = 0x4, | ||
| 602 | OCRDMA_PHY_SPEED_10GBPS = 0x8, | ||
| 603 | OCRDMA_PHY_SPEED_40GBPS = 0x20 | ||
| 604 | }; | ||
| 605 | |||
| 606 | |||
| 565 | struct ocrdma_get_link_speed_rsp { | 607 | struct ocrdma_get_link_speed_rsp { |
| 566 | struct ocrdma_mqe_hdr hdr; | 608 | struct ocrdma_mqe_hdr hdr; |
| 567 | struct ocrdma_mbx_rsp rsp; | 609 | struct ocrdma_mbx_rsp rsp; |
| @@ -590,7 +632,7 @@ enum { | |||
| 590 | 632 | ||
| 591 | enum { | 633 | enum { |
| 592 | OCRDMA_CREATE_CQ_VER2 = 2, | 634 | OCRDMA_CREATE_CQ_VER2 = 2, |
| 593 | OCRDMA_CREATE_CQ_VER3 = 3, | 635 | OCRDMA_CREATE_CQ_VER3 = 3, |
| 594 | 636 | ||
| 595 | OCRDMA_CREATE_CQ_PAGE_CNT_MASK = 0xFFFF, | 637 | OCRDMA_CREATE_CQ_PAGE_CNT_MASK = 0xFFFF, |
| 596 | OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT = 16, | 638 | OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT = 16, |
| @@ -1050,6 +1092,7 @@ enum { | |||
| 1050 | OCRDMA_MODIFY_QP_RSP_MAX_ORD_MASK = 0xFFFF << | 1092 | OCRDMA_MODIFY_QP_RSP_MAX_ORD_MASK = 0xFFFF << |
| 1051 | OCRDMA_MODIFY_QP_RSP_MAX_ORD_SHIFT | 1093 | OCRDMA_MODIFY_QP_RSP_MAX_ORD_SHIFT |
| 1052 | }; | 1094 | }; |
| 1095 | |||
| 1053 | struct ocrdma_modify_qp_rsp { | 1096 | struct ocrdma_modify_qp_rsp { |
| 1054 | struct ocrdma_mqe_hdr hdr; | 1097 | struct ocrdma_mqe_hdr hdr; |
| 1055 | struct ocrdma_mbx_rsp rsp; | 1098 | struct ocrdma_mbx_rsp rsp; |
| @@ -1062,8 +1105,8 @@ struct ocrdma_query_qp { | |||
| 1062 | struct ocrdma_mqe_hdr hdr; | 1105 | struct ocrdma_mqe_hdr hdr; |
| 1063 | struct ocrdma_mbx_hdr req; | 1106 | struct ocrdma_mbx_hdr req; |
| 1064 | 1107 | ||
| 1065 | #define OCRDMA_QUERY_UP_QP_ID_SHIFT 0 | 1108 | #define OCRDMA_QUERY_UP_QP_ID_SHIFT 0 |
| 1066 | #define OCRDMA_QUERY_UP_QP_ID_MASK 0xFFFFFF | 1109 | #define OCRDMA_QUERY_UP_QP_ID_MASK 0xFFFFFF |
| 1067 | u32 qp_id; | 1110 | u32 qp_id; |
| 1068 | }; | 1111 | }; |
| 1069 | 1112 | ||
| @@ -1703,4 +1746,208 @@ struct ocrdma_av { | |||
| 1703 | u32 valid; | 1746 | u32 valid; |
| 1704 | } __packed; | 1747 | } __packed; |
| 1705 | 1748 | ||
| 1749 | struct ocrdma_rsrc_stats { | ||
| 1750 | u32 dpp_pds; | ||
| 1751 | u32 non_dpp_pds; | ||
| 1752 | u32 rc_dpp_qps; | ||
| 1753 | u32 uc_dpp_qps; | ||
| 1754 | u32 ud_dpp_qps; | ||
| 1755 | u32 rc_non_dpp_qps; | ||
| 1756 | u32 rsvd; | ||
| 1757 | u32 uc_non_dpp_qps; | ||
| 1758 | u32 ud_non_dpp_qps; | ||
| 1759 | u32 rsvd1; | ||
| 1760 | u32 srqs; | ||
| 1761 | u32 rbqs; | ||
| 1762 | u32 r64K_nsmr; | ||
| 1763 | u32 r64K_to_2M_nsmr; | ||
| 1764 | u32 r2M_to_44M_nsmr; | ||
| 1765 | u32 r44M_to_1G_nsmr; | ||
| 1766 | u32 r1G_to_4G_nsmr; | ||
| 1767 | u32 nsmr_count_4G_to_32G; | ||
| 1768 | u32 r32G_to_64G_nsmr; | ||
| 1769 | u32 r64G_to_128G_nsmr; | ||
| 1770 | u32 r128G_to_higher_nsmr; | ||
| 1771 | u32 embedded_nsmr; | ||
| 1772 | u32 frmr; | ||
| 1773 | u32 prefetch_qps; | ||
| 1774 | u32 ondemand_qps; | ||
| 1775 | u32 phy_mr; | ||
| 1776 | u32 mw; | ||
| 1777 | u32 rsvd2[7]; | ||
| 1778 | }; | ||
| 1779 | |||
| 1780 | struct ocrdma_db_err_stats { | ||
| 1781 | u32 sq_doorbell_errors; | ||
| 1782 | u32 cq_doorbell_errors; | ||
| 1783 | u32 rq_srq_doorbell_errors; | ||
| 1784 | u32 cq_overflow_errors; | ||
| 1785 | u32 rsvd[4]; | ||
| 1786 | }; | ||
| 1787 | |||
| 1788 | struct ocrdma_wqe_stats { | ||
| 1789 | u32 large_send_rc_wqes_lo; | ||
| 1790 | u32 large_send_rc_wqes_hi; | ||
| 1791 | u32 large_write_rc_wqes_lo; | ||
| 1792 | u32 large_write_rc_wqes_hi; | ||
| 1793 | u32 rsvd[4]; | ||
| 1794 | u32 read_wqes_lo; | ||
| 1795 | u32 read_wqes_hi; | ||
| 1796 | u32 frmr_wqes_lo; | ||
| 1797 | u32 frmr_wqes_hi; | ||
| 1798 | u32 mw_bind_wqes_lo; | ||
| 1799 | u32 mw_bind_wqes_hi; | ||
| 1800 | u32 invalidate_wqes_lo; | ||
| 1801 | u32 invalidate_wqes_hi; | ||
| 1802 | u32 rsvd1[2]; | ||
| 1803 | u32 dpp_wqe_drops; | ||
| 1804 | u32 rsvd2[5]; | ||
| 1805 | }; | ||
| 1806 | |||
| 1807 | struct ocrdma_tx_stats { | ||
| 1808 | u32 send_pkts_lo; | ||
| 1809 | u32 send_pkts_hi; | ||
| 1810 | u32 write_pkts_lo; | ||
| 1811 | u32 write_pkts_hi; | ||
| 1812 | u32 read_pkts_lo; | ||
| 1813 | u32 read_pkts_hi; | ||
| 1814 | u32 read_rsp_pkts_lo; | ||
| 1815 | u32 read_rsp_pkts_hi; | ||
| 1816 | u32 ack_pkts_lo; | ||
| 1817 | u32 ack_pkts_hi; | ||
| 1818 | u32 send_bytes_lo; | ||
| 1819 | u32 send_bytes_hi; | ||
| 1820 | u32 write_bytes_lo; | ||
| 1821 | u32 write_bytes_hi; | ||
| 1822 | u32 read_req_bytes_lo; | ||
| 1823 | u32 read_req_bytes_hi; | ||
| 1824 | u32 read_rsp_bytes_lo; | ||
| 1825 | u32 read_rsp_bytes_hi; | ||
| 1826 | u32 ack_timeouts; | ||
| 1827 | u32 rsvd[5]; | ||
| 1828 | }; | ||
| 1829 | |||
| 1830 | |||
| 1831 | struct ocrdma_tx_qp_err_stats { | ||
| 1832 | u32 local_length_errors; | ||
| 1833 | u32 local_protection_errors; | ||
| 1834 | u32 local_qp_operation_errors; | ||
| 1835 | u32 retry_count_exceeded_errors; | ||
| 1836 | u32 rnr_retry_count_exceeded_errors; | ||
| 1837 | u32 rsvd[3]; | ||
| 1838 | }; | ||
| 1839 | |||
| 1840 | struct ocrdma_rx_stats { | ||
| 1841 | u32 roce_frame_bytes_lo; | ||
| 1842 | u32 roce_frame_bytes_hi; | ||
| 1843 | u32 roce_frame_icrc_drops; | ||
| 1844 | u32 roce_frame_payload_len_drops; | ||
| 1845 | u32 ud_drops; | ||
| 1846 | u32 qp1_drops; | ||
| 1847 | u32 psn_error_request_packets; | ||
| 1848 | u32 psn_error_resp_packets; | ||
| 1849 | u32 rnr_nak_timeouts; | ||
| 1850 | u32 rnr_nak_receives; | ||
| 1851 | u32 roce_frame_rxmt_drops; | ||
| 1852 | u32 nak_count_psn_sequence_errors; | ||
| 1853 | u32 rc_drop_count_lookup_errors; | ||
| 1854 | u32 rq_rnr_naks; | ||
| 1855 | u32 srq_rnr_naks; | ||
| 1856 | u32 roce_frames_lo; | ||
| 1857 | u32 roce_frames_hi; | ||
| 1858 | u32 rsvd; | ||
| 1859 | }; | ||
| 1860 | |||
| 1861 | struct ocrdma_rx_qp_err_stats { | ||
| 1862 | u32 nak_invalid_requst_errors; | ||
| 1863 | u32 nak_remote_operation_errors; | ||
| 1864 | u32 nak_count_remote_access_errors; | ||
| 1865 | u32 local_length_errors; | ||
| 1866 | u32 local_protection_errors; | ||
| 1867 | u32 local_qp_operation_errors; | ||
| 1868 | u32 rsvd[2]; | ||
| 1869 | }; | ||
| 1870 | |||
| 1871 | struct ocrdma_tx_dbg_stats { | ||
| 1872 | u32 data[100]; | ||
| 1873 | }; | ||
| 1874 | |||
| 1875 | struct ocrdma_rx_dbg_stats { | ||
| 1876 | u32 data[200]; | ||
| 1877 | }; | ||
| 1878 | |||
| 1879 | struct ocrdma_rdma_stats_req { | ||
| 1880 | struct ocrdma_mbx_hdr hdr; | ||
| 1881 | u8 reset_stats; | ||
| 1882 | u8 rsvd[3]; | ||
| 1883 | } __packed; | ||
| 1884 | |||
| 1885 | struct ocrdma_rdma_stats_resp { | ||
| 1886 | struct ocrdma_mbx_hdr hdr; | ||
| 1887 | struct ocrdma_rsrc_stats act_rsrc_stats; | ||
| 1888 | struct ocrdma_rsrc_stats th_rsrc_stats; | ||
| 1889 | struct ocrdma_db_err_stats db_err_stats; | ||
| 1890 | struct ocrdma_wqe_stats wqe_stats; | ||
| 1891 | struct ocrdma_tx_stats tx_stats; | ||
| 1892 | struct ocrdma_tx_qp_err_stats tx_qp_err_stats; | ||
| 1893 | struct ocrdma_rx_stats rx_stats; | ||
| 1894 | struct ocrdma_rx_qp_err_stats rx_qp_err_stats; | ||
| 1895 | struct ocrdma_tx_dbg_stats tx_dbg_stats; | ||
| 1896 | struct ocrdma_rx_dbg_stats rx_dbg_stats; | ||
| 1897 | } __packed; | ||
| 1898 | |||
| 1899 | |||
| 1900 | struct mgmt_hba_attribs { | ||
| 1901 | u8 flashrom_version_string[32]; | ||
| 1902 | u8 manufacturer_name[32]; | ||
| 1903 | u32 supported_modes; | ||
| 1904 | u32 rsvd0[3]; | ||
| 1905 | u8 ncsi_ver_string[12]; | ||
| 1906 | u32 default_extended_timeout; | ||
| 1907 | u8 controller_model_number[32]; | ||
| 1908 | u8 controller_description[64]; | ||
| 1909 | u8 controller_serial_number[32]; | ||
| 1910 | u8 ip_version_string[32]; | ||
| 1911 | u8 firmware_version_string[32]; | ||
| 1912 | u8 bios_version_string[32]; | ||
| 1913 | u8 redboot_version_string[32]; | ||
| 1914 | u8 driver_version_string[32]; | ||
| 1915 | u8 fw_on_flash_version_string[32]; | ||
| 1916 | u32 functionalities_supported; | ||
| 1917 | u16 max_cdblength; | ||
| 1918 | u8 asic_revision; | ||
| 1919 | u8 generational_guid[16]; | ||
| 1920 | u8 hba_port_count; | ||
| 1921 | u16 default_link_down_timeout; | ||
| 1922 | u8 iscsi_ver_min_max; | ||
| 1923 | u8 multifunction_device; | ||
| 1924 | u8 cache_valid; | ||
| 1925 | u8 hba_status; | ||
| 1926 | u8 max_domains_supported; | ||
| 1927 | u8 phy_port; | ||
| 1928 | u32 firmware_post_status; | ||
| 1929 | u32 hba_mtu[8]; | ||
| 1930 | u32 rsvd1[4]; | ||
| 1931 | }; | ||
| 1932 | |||
| 1933 | struct mgmt_controller_attrib { | ||
| 1934 | struct mgmt_hba_attribs hba_attribs; | ||
| 1935 | u16 pci_vendor_id; | ||
| 1936 | u16 pci_device_id; | ||
| 1937 | u16 pci_sub_vendor_id; | ||
| 1938 | u16 pci_sub_system_id; | ||
| 1939 | u8 pci_bus_number; | ||
| 1940 | u8 pci_device_number; | ||
| 1941 | u8 pci_function_number; | ||
| 1942 | u8 interface_type; | ||
| 1943 | u64 unique_identifier; | ||
| 1944 | u32 rsvd0[5]; | ||
| 1945 | }; | ||
| 1946 | |||
| 1947 | struct ocrdma_get_ctrl_attribs_rsp { | ||
| 1948 | struct ocrdma_mbx_hdr hdr; | ||
| 1949 | struct mgmt_controller_attrib ctrl_attribs; | ||
| 1950 | }; | ||
| 1951 | |||
| 1952 | |||
| 1706 | #endif /* __OCRDMA_SLI_H__ */ | 1953 | #endif /* __OCRDMA_SLI_H__ */ |
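The new OCRDMA_SLI_ASIC_* constants describe one packed identification word: the stepping sits in bits 7:0 and the generation number in bits 15:8 of the word at offset 0x9C. A minimal decode sketch; the helper names, and where and how the driver reads and caches the 32-bit word, are assumptions here (only the masks, shift and enum values come from the header above):

	static inline u32 example_asic_gen(u32 asic_id)
	{
		/* generation number, e.g. OCRDMA_ASIC_GEN_SKH_R or _LANCER */
		return (asic_id & OCRDMA_SLI_ASIC_GEN_NUM_MASK) >>
				OCRDMA_SLI_ASIC_GEN_NUM_SHIFT;
	}

	static inline u32 example_asic_rev(u32 asic_id)
	{
		/* stepping: OCRDMA_ASIC_REV_A0 / _B0 / _C0 */
		return asic_id & OCRDMA_SLI_ASIC_REV_MASK;
	}

A generation value of OCRDMA_ASIC_GEN_SKH_R (presumably Skyhawk-R) is what ocrdma_get_asic_type() is compared against in the ocrdma_verbs.c hunks below, replacing the old OCRDMA_GEN2_FAMILY check on nic_info.dev_family.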
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c new file mode 100644 index 000000000000..6c54106f5e64 --- /dev/null +++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c | |||
| @@ -0,0 +1,623 @@ | |||
| 1 | /******************************************************************* | ||
| 2 | * This file is part of the Emulex RoCE Device Driver for * | ||
| 3 | * RoCE (RDMA over Converged Ethernet) adapters. * | ||
| 4 | * Copyright (C) 2008-2014 Emulex. All rights reserved. * | ||
| 5 | * EMULEX and SLI are trademarks of Emulex. * | ||
| 6 | * www.emulex.com * | ||
| 7 | * * | ||
| 8 | * This program is free software; you can redistribute it and/or * | ||
| 9 | * modify it under the terms of version 2 of the GNU General * | ||
| 10 | * Public License as published by the Free Software Foundation. * | ||
| 11 | * This program is distributed in the hope that it will be useful. * | ||
| 12 | * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * | ||
| 13 | * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * | ||
| 15 | * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * | ||
| 16 | * TO BE LEGALLY INVALID. See the GNU General Public License for * | ||
| 17 | * more details, a copy of which can be found in the file COPYING * | ||
| 18 | * included with this package. * | ||
| 19 | * | ||
| 20 | * Contact Information: | ||
| 21 | * linux-drivers@emulex.com | ||
| 22 | * | ||
| 23 | * Emulex | ||
| 24 | * 3333 Susan Street | ||
| 25 | * Costa Mesa, CA 92626 | ||
| 26 | *******************************************************************/ | ||
| 27 | |||
| 28 | #include <rdma/ib_addr.h> | ||
| 29 | #include "ocrdma_stats.h" | ||
| 30 | |||
| 31 | static struct dentry *ocrdma_dbgfs_dir; | ||
| 32 | |||
| 33 | static int ocrdma_add_stat(char *start, char *pcur, | ||
| 34 | char *name, u64 count) | ||
| 35 | { | ||
| 36 | char buff[128] = {0}; | ||
| 37 | int cpy_len = 0; | ||
| 38 | |||
| 39 | snprintf(buff, 128, "%s: %llu\n", name, count); | ||
| 40 | cpy_len = strlen(buff); | ||
| 41 | |||
| 42 | if (pcur + cpy_len > start + OCRDMA_MAX_DBGFS_MEM) { | ||
| 43 | pr_err("%s: No space in stats buff\n", __func__); | ||
| 44 | return 0; | ||
| 45 | } | ||
| 46 | |||
| 47 | memcpy(pcur, buff, cpy_len); | ||
| 48 | return cpy_len; | ||
| 49 | } | ||
| 50 | |||
| 51 | static bool ocrdma_alloc_stats_mem(struct ocrdma_dev *dev) | ||
| 52 | { | ||
| 53 | struct stats_mem *mem = &dev->stats_mem; | ||
| 54 | |||
| 55 | /* Alloc mbox command mem*/ | ||
| 56 | mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req), | ||
| 57 | sizeof(struct ocrdma_rdma_stats_resp)); | ||
| 58 | |||
| 59 | mem->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, mem->size, | ||
| 60 | &mem->pa, GFP_KERNEL); | ||
| 61 | if (!mem->va) { | ||
| 62 | pr_err("%s: stats mbox allocation failed\n", __func__); | ||
| 63 | return false; | ||
| 64 | } | ||
| 65 | |||
| 66 | memset(mem->va, 0, mem->size); | ||
| 67 | |||
| 68 | /* Alloc debugfs mem */ | ||
| 69 | mem->debugfs_mem = kzalloc(OCRDMA_MAX_DBGFS_MEM, GFP_KERNEL); | ||
| 70 | if (!mem->debugfs_mem) { | ||
| 71 | pr_err("%s: stats debugfs mem allocation failed\n", __func__); | ||
| 72 | return false; | ||
| 73 | } | ||
| 74 | |||
| 75 | return true; | ||
| 76 | } | ||
| 77 | |||
| 78 | static void ocrdma_release_stats_mem(struct ocrdma_dev *dev) | ||
| 79 | { | ||
| 80 | struct stats_mem *mem = &dev->stats_mem; | ||
| 81 | |||
| 82 | if (mem->va) | ||
| 83 | dma_free_coherent(&dev->nic_info.pdev->dev, mem->size, | ||
| 84 | mem->va, mem->pa); | ||
| 85 | kfree(mem->debugfs_mem); | ||
| 86 | } | ||
| 87 | |||
| 88 | static char *ocrdma_resource_stats(struct ocrdma_dev *dev) | ||
| 89 | { | ||
| 90 | char *stats = dev->stats_mem.debugfs_mem, *pcur; | ||
| 91 | struct ocrdma_rdma_stats_resp *rdma_stats = | ||
| 92 | (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; | ||
| 93 | struct ocrdma_rsrc_stats *rsrc_stats = &rdma_stats->act_rsrc_stats; | ||
| 94 | |||
| 95 | memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM)); | ||
| 96 | |||
| 97 | pcur = stats; | ||
| 98 | pcur += ocrdma_add_stat(stats, pcur, "active_dpp_pds", | ||
| 99 | (u64)rsrc_stats->dpp_pds); | ||
| 100 | pcur += ocrdma_add_stat(stats, pcur, "active_non_dpp_pds", | ||
| 101 | (u64)rsrc_stats->non_dpp_pds); | ||
| 102 | pcur += ocrdma_add_stat(stats, pcur, "active_rc_dpp_qps", | ||
| 103 | (u64)rsrc_stats->rc_dpp_qps); | ||
| 104 | pcur += ocrdma_add_stat(stats, pcur, "active_uc_dpp_qps", | ||
| 105 | (u64)rsrc_stats->uc_dpp_qps); | ||
| 106 | pcur += ocrdma_add_stat(stats, pcur, "active_ud_dpp_qps", | ||
| 107 | (u64)rsrc_stats->ud_dpp_qps); | ||
| 108 | pcur += ocrdma_add_stat(stats, pcur, "active_rc_non_dpp_qps", | ||
| 109 | (u64)rsrc_stats->rc_non_dpp_qps); | ||
| 110 | pcur += ocrdma_add_stat(stats, pcur, "active_uc_non_dpp_qps", | ||
| 111 | (u64)rsrc_stats->uc_non_dpp_qps); | ||
| 112 | pcur += ocrdma_add_stat(stats, pcur, "active_ud_non_dpp_qps", | ||
| 113 | (u64)rsrc_stats->ud_non_dpp_qps); | ||
| 114 | pcur += ocrdma_add_stat(stats, pcur, "active_srqs", | ||
| 115 | (u64)rsrc_stats->srqs); | ||
| 116 | pcur += ocrdma_add_stat(stats, pcur, "active_rbqs", | ||
| 117 | (u64)rsrc_stats->rbqs); | ||
| 118 | pcur += ocrdma_add_stat(stats, pcur, "active_64K_nsmr", | ||
| 119 | (u64)rsrc_stats->r64K_nsmr); | ||
| 120 | pcur += ocrdma_add_stat(stats, pcur, "active_64K_to_2M_nsmr", | ||
| 121 | (u64)rsrc_stats->r64K_to_2M_nsmr); | ||
| 122 | pcur += ocrdma_add_stat(stats, pcur, "active_2M_to_44M_nsmr", | ||
| 123 | (u64)rsrc_stats->r2M_to_44M_nsmr); | ||
| 124 | pcur += ocrdma_add_stat(stats, pcur, "active_44M_to_1G_nsmr", | ||
| 125 | (u64)rsrc_stats->r44M_to_1G_nsmr); | ||
| 126 | pcur += ocrdma_add_stat(stats, pcur, "active_1G_to_4G_nsmr", | ||
| 127 | (u64)rsrc_stats->r1G_to_4G_nsmr); | ||
| 128 | pcur += ocrdma_add_stat(stats, pcur, "active_nsmr_count_4G_to_32G", | ||
| 129 | (u64)rsrc_stats->nsmr_count_4G_to_32G); | ||
| 130 | pcur += ocrdma_add_stat(stats, pcur, "active_32G_to_64G_nsmr", | ||
| 131 | (u64)rsrc_stats->r32G_to_64G_nsmr); | ||
| 132 | pcur += ocrdma_add_stat(stats, pcur, "active_64G_to_128G_nsmr", | ||
| 133 | (u64)rsrc_stats->r64G_to_128G_nsmr); | ||
| 134 | pcur += ocrdma_add_stat(stats, pcur, "active_128G_to_higher_nsmr", | ||
| 135 | (u64)rsrc_stats->r128G_to_higher_nsmr); | ||
| 136 | pcur += ocrdma_add_stat(stats, pcur, "active_embedded_nsmr", | ||
| 137 | (u64)rsrc_stats->embedded_nsmr); | ||
| 138 | pcur += ocrdma_add_stat(stats, pcur, "active_frmr", | ||
| 139 | (u64)rsrc_stats->frmr); | ||
| 140 | pcur += ocrdma_add_stat(stats, pcur, "active_prefetch_qps", | ||
| 141 | (u64)rsrc_stats->prefetch_qps); | ||
| 142 | pcur += ocrdma_add_stat(stats, pcur, "active_ondemand_qps", | ||
| 143 | (u64)rsrc_stats->ondemand_qps); | ||
| 144 | pcur += ocrdma_add_stat(stats, pcur, "active_phy_mr", | ||
| 145 | (u64)rsrc_stats->phy_mr); | ||
| 146 | pcur += ocrdma_add_stat(stats, pcur, "active_mw", | ||
| 147 | (u64)rsrc_stats->mw); | ||
| 148 | |||
| 149 | /* Print the threshold stats */ | ||
| 150 | rsrc_stats = &rdma_stats->th_rsrc_stats; | ||
| 151 | |||
| 152 | pcur += ocrdma_add_stat(stats, pcur, "threshold_dpp_pds", | ||
| 153 | (u64)rsrc_stats->dpp_pds); | ||
| 154 | pcur += ocrdma_add_stat(stats, pcur, "threshold_non_dpp_pds", | ||
| 155 | (u64)rsrc_stats->non_dpp_pds); | ||
| 156 | pcur += ocrdma_add_stat(stats, pcur, "threshold_rc_dpp_qps", | ||
| 157 | (u64)rsrc_stats->rc_dpp_qps); | ||
| 158 | pcur += ocrdma_add_stat(stats, pcur, "threshold_uc_dpp_qps", | ||
| 159 | (u64)rsrc_stats->uc_dpp_qps); | ||
| 160 | pcur += ocrdma_add_stat(stats, pcur, "threshold_ud_dpp_qps", | ||
| 161 | (u64)rsrc_stats->ud_dpp_qps); | ||
| 162 | pcur += ocrdma_add_stat(stats, pcur, "threshold_rc_non_dpp_qps", | ||
| 163 | (u64)rsrc_stats->rc_non_dpp_qps); | ||
| 164 | pcur += ocrdma_add_stat(stats, pcur, "threshold_uc_non_dpp_qps", | ||
| 165 | (u64)rsrc_stats->uc_non_dpp_qps); | ||
| 166 | pcur += ocrdma_add_stat(stats, pcur, "threshold_ud_non_dpp_qps", | ||
| 167 | (u64)rsrc_stats->ud_non_dpp_qps); | ||
| 168 | pcur += ocrdma_add_stat(stats, pcur, "threshold_srqs", | ||
| 169 | (u64)rsrc_stats->srqs); | ||
| 170 | pcur += ocrdma_add_stat(stats, pcur, "threshold_rbqs", | ||
| 171 | (u64)rsrc_stats->rbqs); | ||
| 172 | pcur += ocrdma_add_stat(stats, pcur, "threshold_64K_nsmr", | ||
| 173 | (u64)rsrc_stats->r64K_nsmr); | ||
| 174 | pcur += ocrdma_add_stat(stats, pcur, "threshold_64K_to_2M_nsmr", | ||
| 175 | (u64)rsrc_stats->r64K_to_2M_nsmr); | ||
| 176 | pcur += ocrdma_add_stat(stats, pcur, "threshold_2M_to_44M_nsmr", | ||
| 177 | (u64)rsrc_stats->r2M_to_44M_nsmr); | ||
| 178 | pcur += ocrdma_add_stat(stats, pcur, "threshold_44M_to_1G_nsmr", | ||
| 179 | (u64)rsrc_stats->r44M_to_1G_nsmr); | ||
| 180 | pcur += ocrdma_add_stat(stats, pcur, "threshold_1G_to_4G_nsmr", | ||
| 181 | (u64)rsrc_stats->r1G_to_4G_nsmr); | ||
| 182 | pcur += ocrdma_add_stat(stats, pcur, "threshold_nsmr_count_4G_to_32G", | ||
| 183 | (u64)rsrc_stats->nsmr_count_4G_to_32G); | ||
| 184 | pcur += ocrdma_add_stat(stats, pcur, "threshold_32G_to_64G_nsmr", | ||
| 185 | (u64)rsrc_stats->r32G_to_64G_nsmr); | ||
| 186 | pcur += ocrdma_add_stat(stats, pcur, "threshold_64G_to_128G_nsmr", | ||
| 187 | (u64)rsrc_stats->r64G_to_128G_nsmr); | ||
| 188 | pcur += ocrdma_add_stat(stats, pcur, "threshold_128G_to_higher_nsmr", | ||
| 189 | (u64)rsrc_stats->r128G_to_higher_nsmr); | ||
| 190 | pcur += ocrdma_add_stat(stats, pcur, "threshold_embedded_nsmr", | ||
| 191 | (u64)rsrc_stats->embedded_nsmr); | ||
| 192 | pcur += ocrdma_add_stat(stats, pcur, "threshold_frmr", | ||
| 193 | (u64)rsrc_stats->frmr); | ||
| 194 | pcur += ocrdma_add_stat(stats, pcur, "threshold_prefetch_qps", | ||
| 195 | (u64)rsrc_stats->prefetch_qps); | ||
| 196 | pcur += ocrdma_add_stat(stats, pcur, "threshold_ondemand_qps", | ||
| 197 | (u64)rsrc_stats->ondemand_qps); | ||
| 198 | pcur += ocrdma_add_stat(stats, pcur, "threshold_phy_mr", | ||
| 199 | (u64)rsrc_stats->phy_mr); | ||
| 200 | pcur += ocrdma_add_stat(stats, pcur, "threshold_mw", | ||
| 201 | (u64)rsrc_stats->mw); | ||
| 202 | return stats; | ||
| 203 | } | ||
| 204 | |||
| 205 | static char *ocrdma_rx_stats(struct ocrdma_dev *dev) | ||
| 206 | { | ||
| 207 | char *stats = dev->stats_mem.debugfs_mem, *pcur; | ||
| 208 | struct ocrdma_rdma_stats_resp *rdma_stats = | ||
| 209 | (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; | ||
| 210 | struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats; | ||
| 211 | |||
| 212 | memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM)); | ||
| 213 | |||
| 214 | pcur = stats; | ||
| 215 | pcur += ocrdma_add_stat | ||
| 216 | (stats, pcur, "roce_frame_bytes", | ||
| 217 | convert_to_64bit(rx_stats->roce_frame_bytes_lo, | ||
| 218 | rx_stats->roce_frame_bytes_hi)); | ||
| 219 | pcur += ocrdma_add_stat(stats, pcur, "roce_frame_icrc_drops", | ||
| 220 | (u64)rx_stats->roce_frame_icrc_drops); | ||
| 221 | pcur += ocrdma_add_stat(stats, pcur, "roce_frame_payload_len_drops", | ||
| 222 | (u64)rx_stats->roce_frame_payload_len_drops); | ||
| 223 | pcur += ocrdma_add_stat(stats, pcur, "ud_drops", | ||
| 224 | (u64)rx_stats->ud_drops); | ||
| 225 | pcur += ocrdma_add_stat(stats, pcur, "qp1_drops", | ||
| 226 | (u64)rx_stats->qp1_drops); | ||
| 227 | pcur += ocrdma_add_stat(stats, pcur, "psn_error_request_packets", | ||
| 228 | (u64)rx_stats->psn_error_request_packets); | ||
| 229 | pcur += ocrdma_add_stat(stats, pcur, "psn_error_resp_packets", | ||
| 230 | (u64)rx_stats->psn_error_resp_packets); | ||
| 231 | pcur += ocrdma_add_stat(stats, pcur, "rnr_nak_timeouts", | ||
| 232 | (u64)rx_stats->rnr_nak_timeouts); | ||
| 233 | pcur += ocrdma_add_stat(stats, pcur, "rnr_nak_receives", | ||
| 234 | (u64)rx_stats->rnr_nak_receives); | ||
| 235 | pcur += ocrdma_add_stat(stats, pcur, "roce_frame_rxmt_drops", | ||
| 236 | (u64)rx_stats->roce_frame_rxmt_drops); | ||
| 237 | pcur += ocrdma_add_stat(stats, pcur, "nak_count_psn_sequence_errors", | ||
| 238 | (u64)rx_stats->nak_count_psn_sequence_errors); | ||
| 239 | pcur += ocrdma_add_stat(stats, pcur, "rc_drop_count_lookup_errors", | ||
| 240 | (u64)rx_stats->rc_drop_count_lookup_errors); | ||
| 241 | pcur += ocrdma_add_stat(stats, pcur, "rq_rnr_naks", | ||
| 242 | (u64)rx_stats->rq_rnr_naks); | ||
| 243 | pcur += ocrdma_add_stat(stats, pcur, "srq_rnr_naks", | ||
| 244 | (u64)rx_stats->srq_rnr_naks); | ||
| 245 | pcur += ocrdma_add_stat(stats, pcur, "roce_frames", | ||
| 246 | convert_to_64bit(rx_stats->roce_frames_lo, | ||
| 247 | rx_stats->roce_frames_hi)); | ||
| 248 | |||
| 249 | return stats; | ||
| 250 | } | ||
| 251 | |||
| 252 | static char *ocrdma_tx_stats(struct ocrdma_dev *dev) | ||
| 253 | { | ||
| 254 | char *stats = dev->stats_mem.debugfs_mem, *pcur; | ||
| 255 | struct ocrdma_rdma_stats_resp *rdma_stats = | ||
| 256 | (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; | ||
| 257 | struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats; | ||
| 258 | |||
| 259 | memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM)); | ||
| 260 | |||
| 261 | pcur = stats; | ||
| 262 | pcur += ocrdma_add_stat(stats, pcur, "send_pkts", | ||
| 263 | convert_to_64bit(tx_stats->send_pkts_lo, | ||
| 264 | tx_stats->send_pkts_hi)); | ||
| 265 | pcur += ocrdma_add_stat(stats, pcur, "write_pkts", | ||
| 266 | convert_to_64bit(tx_stats->write_pkts_lo, | ||
| 267 | tx_stats->write_pkts_hi)); | ||
| 268 | pcur += ocrdma_add_stat(stats, pcur, "read_pkts", | ||
| 269 | convert_to_64bit(tx_stats->read_pkts_lo, | ||
| 270 | tx_stats->read_pkts_hi)); | ||
| 271 | pcur += ocrdma_add_stat(stats, pcur, "read_rsp_pkts", | ||
| 272 | convert_to_64bit(tx_stats->read_rsp_pkts_lo, | ||
| 273 | tx_stats->read_rsp_pkts_hi)); | ||
| 274 | pcur += ocrdma_add_stat(stats, pcur, "ack_pkts", | ||
| 275 | convert_to_64bit(tx_stats->ack_pkts_lo, | ||
| 276 | tx_stats->ack_pkts_hi)); | ||
| 277 | pcur += ocrdma_add_stat(stats, pcur, "send_bytes", | ||
| 278 | convert_to_64bit(tx_stats->send_bytes_lo, | ||
| 279 | tx_stats->send_bytes_hi)); | ||
| 280 | pcur += ocrdma_add_stat(stats, pcur, "write_bytes", | ||
| 281 | convert_to_64bit(tx_stats->write_bytes_lo, | ||
| 282 | tx_stats->write_bytes_hi)); | ||
| 283 | pcur += ocrdma_add_stat(stats, pcur, "read_req_bytes", | ||
| 284 | convert_to_64bit(tx_stats->read_req_bytes_lo, | ||
| 285 | tx_stats->read_req_bytes_hi)); | ||
| 286 | pcur += ocrdma_add_stat(stats, pcur, "read_rsp_bytes", | ||
| 287 | convert_to_64bit(tx_stats->read_rsp_bytes_lo, | ||
| 288 | tx_stats->read_rsp_bytes_hi)); | ||
| 289 | pcur += ocrdma_add_stat(stats, pcur, "ack_timeouts", | ||
| 290 | (u64)tx_stats->ack_timeouts); | ||
| 291 | |||
| 292 | return stats; | ||
| 293 | } | ||
| 294 | |||
| 295 | static char *ocrdma_wqe_stats(struct ocrdma_dev *dev) | ||
| 296 | { | ||
| 297 | char *stats = dev->stats_mem.debugfs_mem, *pcur; | ||
| 298 | struct ocrdma_rdma_stats_resp *rdma_stats = | ||
| 299 | (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; | ||
| 300 | struct ocrdma_wqe_stats *wqe_stats = &rdma_stats->wqe_stats; | ||
| 301 | |||
| 302 | memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM)); | ||
| 303 | |||
| 304 | pcur = stats; | ||
| 305 | pcur += ocrdma_add_stat(stats, pcur, "large_send_rc_wqes", | ||
| 306 | convert_to_64bit(wqe_stats->large_send_rc_wqes_lo, | ||
| 307 | wqe_stats->large_send_rc_wqes_hi)); | ||
| 308 | pcur += ocrdma_add_stat(stats, pcur, "large_write_rc_wqes", | ||
| 309 | convert_to_64bit(wqe_stats->large_write_rc_wqes_lo, | ||
| 310 | wqe_stats->large_write_rc_wqes_hi)); | ||
| 311 | pcur += ocrdma_add_stat(stats, pcur, "read_wqes", | ||
| 312 | convert_to_64bit(wqe_stats->read_wqes_lo, | ||
| 313 | wqe_stats->read_wqes_hi)); | ||
| 314 | pcur += ocrdma_add_stat(stats, pcur, "frmr_wqes", | ||
| 315 | convert_to_64bit(wqe_stats->frmr_wqes_lo, | ||
| 316 | wqe_stats->frmr_wqes_hi)); | ||
| 317 | pcur += ocrdma_add_stat(stats, pcur, "mw_bind_wqes", | ||
| 318 | convert_to_64bit(wqe_stats->mw_bind_wqes_lo, | ||
| 319 | wqe_stats->mw_bind_wqes_hi)); | ||
| 320 | pcur += ocrdma_add_stat(stats, pcur, "invalidate_wqes", | ||
| 321 | convert_to_64bit(wqe_stats->invalidate_wqes_lo, | ||
| 322 | wqe_stats->invalidate_wqes_hi)); | ||
| 323 | pcur += ocrdma_add_stat(stats, pcur, "dpp_wqe_drops", | ||
| 324 | (u64)wqe_stats->dpp_wqe_drops); | ||
| 325 | return stats; | ||
| 326 | } | ||
| 327 | |||
| 328 | static char *ocrdma_db_errstats(struct ocrdma_dev *dev) | ||
| 329 | { | ||
| 330 | char *stats = dev->stats_mem.debugfs_mem, *pcur; | ||
| 331 | struct ocrdma_rdma_stats_resp *rdma_stats = | ||
| 332 | (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; | ||
| 333 | struct ocrdma_db_err_stats *db_err_stats = &rdma_stats->db_err_stats; | ||
| 334 | |||
| 335 | memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM)); | ||
| 336 | |||
| 337 | pcur = stats; | ||
| 338 | pcur += ocrdma_add_stat(stats, pcur, "sq_doorbell_errors", | ||
| 339 | (u64)db_err_stats->sq_doorbell_errors); | ||
| 340 | pcur += ocrdma_add_stat(stats, pcur, "cq_doorbell_errors", | ||
| 341 | (u64)db_err_stats->cq_doorbell_errors); | ||
| 342 | pcur += ocrdma_add_stat(stats, pcur, "rq_srq_doorbell_errors", | ||
| 343 | (u64)db_err_stats->rq_srq_doorbell_errors); | ||
| 344 | pcur += ocrdma_add_stat(stats, pcur, "cq_overflow_errors", | ||
| 345 | (u64)db_err_stats->cq_overflow_errors); | ||
| 346 | return stats; | ||
| 347 | } | ||
| 348 | |||
| 349 | static char *ocrdma_rxqp_errstats(struct ocrdma_dev *dev) | ||
| 350 | { | ||
| 351 | char *stats = dev->stats_mem.debugfs_mem, *pcur; | ||
| 352 | struct ocrdma_rdma_stats_resp *rdma_stats = | ||
| 353 | (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; | ||
| 354 | struct ocrdma_rx_qp_err_stats *rx_qp_err_stats = | ||
| 355 | &rdma_stats->rx_qp_err_stats; | ||
| 356 | |||
| 357 | memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM)); | ||
| 358 | |||
| 359 | pcur = stats; | ||
| 360 | pcur += ocrdma_add_stat(stats, pcur, "nak_invalid_requst_errors", | ||
| 361 | (u64)rx_qp_err_stats->nak_invalid_requst_errors); | ||
| 362 | pcur += ocrdma_add_stat(stats, pcur, "nak_remote_operation_errors", | ||
| 363 | (u64)rx_qp_err_stats->nak_remote_operation_errors); | ||
| 364 | pcur += ocrdma_add_stat(stats, pcur, "nak_count_remote_access_errors", | ||
| 365 | (u64)rx_qp_err_stats->nak_count_remote_access_errors); | ||
| 366 | pcur += ocrdma_add_stat(stats, pcur, "local_length_errors", | ||
| 367 | (u64)rx_qp_err_stats->local_length_errors); | ||
| 368 | pcur += ocrdma_add_stat(stats, pcur, "local_protection_errors", | ||
| 369 | (u64)rx_qp_err_stats->local_protection_errors); | ||
| 370 | pcur += ocrdma_add_stat(stats, pcur, "local_qp_operation_errors", | ||
| 371 | (u64)rx_qp_err_stats->local_qp_operation_errors); | ||
| 372 | return stats; | ||
| 373 | } | ||
| 374 | |||
| 375 | static char *ocrdma_txqp_errstats(struct ocrdma_dev *dev) | ||
| 376 | { | ||
| 377 | char *stats = dev->stats_mem.debugfs_mem, *pcur; | ||
| 378 | struct ocrdma_rdma_stats_resp *rdma_stats = | ||
| 379 | (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; | ||
| 380 | struct ocrdma_tx_qp_err_stats *tx_qp_err_stats = | ||
| 381 | &rdma_stats->tx_qp_err_stats; | ||
| 382 | |||
| 383 | memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM)); | ||
| 384 | |||
| 385 | pcur = stats; | ||
| 386 | pcur += ocrdma_add_stat(stats, pcur, "local_length_errors", | ||
| 387 | (u64)tx_qp_err_stats->local_length_errors); | ||
| 388 | pcur += ocrdma_add_stat(stats, pcur, "local_protection_errors", | ||
| 389 | (u64)tx_qp_err_stats->local_protection_errors); | ||
| 390 | pcur += ocrdma_add_stat(stats, pcur, "local_qp_operation_errors", | ||
| 391 | (u64)tx_qp_err_stats->local_qp_operation_errors); | ||
| 392 | pcur += ocrdma_add_stat(stats, pcur, "retry_count_exceeded_errors", | ||
| 393 | (u64)tx_qp_err_stats->retry_count_exceeded_errors); | ||
| 394 | pcur += ocrdma_add_stat(stats, pcur, "rnr_retry_count_exceeded_errors", | ||
| 395 | (u64)tx_qp_err_stats->rnr_retry_count_exceeded_errors); | ||
| 396 | return stats; | ||
| 397 | } | ||
| 398 | |||
| 399 | static char *ocrdma_tx_dbg_stats(struct ocrdma_dev *dev) | ||
| 400 | { | ||
| 401 | int i; | ||
| 402 | char *pstats = dev->stats_mem.debugfs_mem; | ||
| 403 | struct ocrdma_rdma_stats_resp *rdma_stats = | ||
| 404 | (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; | ||
| 405 | struct ocrdma_tx_dbg_stats *tx_dbg_stats = | ||
| 406 | &rdma_stats->tx_dbg_stats; | ||
| 407 | |||
| 408 | memset(pstats, 0, (OCRDMA_MAX_DBGFS_MEM)); | ||
| 409 | |||
| 410 | for (i = 0; i < 100; i++) | ||
| 411 | pstats += snprintf(pstats, 80, "DW[%d] = 0x%x\n", i, | ||
| 412 | tx_dbg_stats->data[i]); | ||
| 413 | |||
| 414 | return dev->stats_mem.debugfs_mem; | ||
| 415 | } | ||
| 416 | |||
| 417 | static char *ocrdma_rx_dbg_stats(struct ocrdma_dev *dev) | ||
| 418 | { | ||
| 419 | int i; | ||
| 420 | char *pstats = dev->stats_mem.debugfs_mem; | ||
| 421 | struct ocrdma_rdma_stats_resp *rdma_stats = | ||
| 422 | (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; | ||
| 423 | struct ocrdma_rx_dbg_stats *rx_dbg_stats = | ||
| 424 | &rdma_stats->rx_dbg_stats; | ||
| 425 | |||
| 426 | memset(pstats, 0, (OCRDMA_MAX_DBGFS_MEM)); | ||
| 427 | |||
| 428 | for (i = 0; i < 200; i++) | ||
| 429 | pstats += snprintf(pstats, 80, "DW[%d] = 0x%x\n", i, | ||
| 430 | rx_dbg_stats->data[i]); | ||
| 431 | |||
| 432 | return dev->stats_mem.debugfs_mem; | ||
| 433 | } | ||
| 434 | |||
| 435 | static void ocrdma_update_stats(struct ocrdma_dev *dev) | ||
| 436 | { | ||
| 437 | ulong now = jiffies, secs; | ||
| 438 | int status = 0; | ||
| 439 | |||
| 440 | secs = jiffies_to_msecs(now - dev->last_stats_time) / 1000U; | ||
| 441 | if (secs) { | ||
| 442 | /* update */ | ||
| 443 | status = ocrdma_mbx_rdma_stats(dev, false); | ||
| 444 | if (status) | ||
| 445 | pr_err("%s: stats mbox failed with status = %d\n", | ||
| 446 | __func__, status); | ||
| 447 | dev->last_stats_time = jiffies; | ||
| 448 | } | ||
| 449 | } | ||
| 450 | |||
| 451 | static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer, | ||
| 452 | size_t usr_buf_len, loff_t *ppos) | ||
| 453 | { | ||
| 454 | struct ocrdma_stats *pstats = filp->private_data; | ||
| 455 | struct ocrdma_dev *dev = pstats->dev; | ||
| 456 | ssize_t status = 0; | ||
| 457 | char *data = NULL; | ||
| 458 | |||
| 459 | /* No partial reads */ | ||
| 460 | if (*ppos != 0) | ||
| 461 | return 0; | ||
| 462 | |||
| 463 | mutex_lock(&dev->stats_lock); | ||
| 464 | |||
| 465 | ocrdma_update_stats(dev); | ||
| 466 | |||
| 467 | switch (pstats->type) { | ||
| 468 | case OCRDMA_RSRC_STATS: | ||
| 469 | data = ocrdma_resource_stats(dev); | ||
| 470 | break; | ||
| 471 | case OCRDMA_RXSTATS: | ||
| 472 | data = ocrdma_rx_stats(dev); | ||
| 473 | break; | ||
| 474 | case OCRDMA_WQESTATS: | ||
| 475 | data = ocrdma_wqe_stats(dev); | ||
| 476 | break; | ||
| 477 | case OCRDMA_TXSTATS: | ||
| 478 | data = ocrdma_tx_stats(dev); | ||
| 479 | break; | ||
| 480 | case OCRDMA_DB_ERRSTATS: | ||
| 481 | data = ocrdma_db_errstats(dev); | ||
| 482 | break; | ||
| 483 | case OCRDMA_RXQP_ERRSTATS: | ||
| 484 | data = ocrdma_rxqp_errstats(dev); | ||
| 485 | break; | ||
| 486 | case OCRDMA_TXQP_ERRSTATS: | ||
| 487 | data = ocrdma_txqp_errstats(dev); | ||
| 488 | break; | ||
| 489 | case OCRDMA_TX_DBG_STATS: | ||
| 490 | data = ocrdma_tx_dbg_stats(dev); | ||
| 491 | break; | ||
| 492 | case OCRDMA_RX_DBG_STATS: | ||
| 493 | data = ocrdma_rx_dbg_stats(dev); | ||
| 494 | break; | ||
| 495 | |||
| 496 | default: | ||
| 497 | status = -EFAULT; | ||
| 498 | goto exit; | ||
| 499 | } | ||
| 500 | |||
| 501 | if (usr_buf_len < strlen(data)) { | ||
| 502 | status = -ENOSPC; | ||
| 503 | goto exit; | ||
| 504 | } | ||
| 505 | |||
| 506 | status = simple_read_from_buffer(buffer, usr_buf_len, ppos, data, | ||
| 507 | strlen(data)); | ||
| 508 | exit: | ||
| 509 | mutex_unlock(&dev->stats_lock); | ||
| 510 | return status; | ||
| 511 | } | ||
| 512 | |||
| 513 | static int ocrdma_debugfs_open(struct inode *inode, struct file *file) | ||
| 514 | { | ||
| 515 | if (inode->i_private) | ||
| 516 | file->private_data = inode->i_private; | ||
| 517 | return 0; | ||
| 518 | } | ||
| 519 | |||
| 520 | static const struct file_operations ocrdma_dbg_ops = { | ||
| 521 | .owner = THIS_MODULE, | ||
| 522 | .open = ocrdma_debugfs_open, | ||
| 523 | .read = ocrdma_dbgfs_ops_read, | ||
| 524 | }; | ||
| 525 | |||
| 526 | void ocrdma_add_port_stats(struct ocrdma_dev *dev) | ||
| 527 | { | ||
| 528 | if (!ocrdma_dbgfs_dir) | ||
| 529 | return; | ||
| 530 | |||
| 531 | /* Create post stats base dir */ | ||
| 532 | dev->dir = debugfs_create_dir(dev->ibdev.name, ocrdma_dbgfs_dir); | ||
| 533 | if (!dev->dir) | ||
| 534 | goto err; | ||
| 535 | |||
| 536 | dev->rsrc_stats.type = OCRDMA_RSRC_STATS; | ||
| 537 | dev->rsrc_stats.dev = dev; | ||
| 538 | if (!debugfs_create_file("resource_stats", S_IRUSR, dev->dir, | ||
| 539 | &dev->rsrc_stats, &ocrdma_dbg_ops)) | ||
| 540 | goto err; | ||
| 541 | |||
| 542 | dev->rx_stats.type = OCRDMA_RXSTATS; | ||
| 543 | dev->rx_stats.dev = dev; | ||
| 544 | if (!debugfs_create_file("rx_stats", S_IRUSR, dev->dir, | ||
| 545 | &dev->rx_stats, &ocrdma_dbg_ops)) | ||
| 546 | goto err; | ||
| 547 | |||
| 548 | dev->wqe_stats.type = OCRDMA_WQESTATS; | ||
| 549 | dev->wqe_stats.dev = dev; | ||
| 550 | if (!debugfs_create_file("wqe_stats", S_IRUSR, dev->dir, | ||
| 551 | &dev->wqe_stats, &ocrdma_dbg_ops)) | ||
| 552 | goto err; | ||
| 553 | |||
| 554 | dev->tx_stats.type = OCRDMA_TXSTATS; | ||
| 555 | dev->tx_stats.dev = dev; | ||
| 556 | if (!debugfs_create_file("tx_stats", S_IRUSR, dev->dir, | ||
| 557 | &dev->tx_stats, &ocrdma_dbg_ops)) | ||
| 558 | goto err; | ||
| 559 | |||
| 560 | dev->db_err_stats.type = OCRDMA_DB_ERRSTATS; | ||
| 561 | dev->db_err_stats.dev = dev; | ||
| 562 | if (!debugfs_create_file("db_err_stats", S_IRUSR, dev->dir, | ||
| 563 | &dev->db_err_stats, &ocrdma_dbg_ops)) | ||
| 564 | goto err; | ||
| 565 | |||
| 566 | |||
| 567 | dev->tx_qp_err_stats.type = OCRDMA_TXQP_ERRSTATS; | ||
| 568 | dev->tx_qp_err_stats.dev = dev; | ||
| 569 | if (!debugfs_create_file("tx_qp_err_stats", S_IRUSR, dev->dir, | ||
| 570 | &dev->tx_qp_err_stats, &ocrdma_dbg_ops)) | ||
| 571 | goto err; | ||
| 572 | |||
| 573 | dev->rx_qp_err_stats.type = OCRDMA_RXQP_ERRSTATS; | ||
| 574 | dev->rx_qp_err_stats.dev = dev; | ||
| 575 | if (!debugfs_create_file("rx_qp_err_stats", S_IRUSR, dev->dir, | ||
| 576 | &dev->rx_qp_err_stats, &ocrdma_dbg_ops)) | ||
| 577 | goto err; | ||
| 578 | |||
| 579 | |||
| 580 | dev->tx_dbg_stats.type = OCRDMA_TX_DBG_STATS; | ||
| 581 | dev->tx_dbg_stats.dev = dev; | ||
| 582 | if (!debugfs_create_file("tx_dbg_stats", S_IRUSR, dev->dir, | ||
| 583 | &dev->tx_dbg_stats, &ocrdma_dbg_ops)) | ||
| 584 | goto err; | ||
| 585 | |||
| 586 | dev->rx_dbg_stats.type = OCRDMA_RX_DBG_STATS; | ||
| 587 | dev->rx_dbg_stats.dev = dev; | ||
| 588 | if (!debugfs_create_file("rx_dbg_stats", S_IRUSR, dev->dir, | ||
| 589 | &dev->rx_dbg_stats, &ocrdma_dbg_ops)) | ||
| 590 | goto err; | ||
| 591 | |||
| 592 | /* Now create dma_mem for stats mbx command */ | ||
| 593 | if (!ocrdma_alloc_stats_mem(dev)) | ||
| 594 | goto err; | ||
| 595 | |||
| 596 | mutex_init(&dev->stats_lock); | ||
| 597 | |||
| 598 | return; | ||
| 599 | err: | ||
| 600 | ocrdma_release_stats_mem(dev); | ||
| 601 | debugfs_remove_recursive(dev->dir); | ||
| 602 | dev->dir = NULL; | ||
| 603 | } | ||
| 604 | |||
| 605 | void ocrdma_rem_port_stats(struct ocrdma_dev *dev) | ||
| 606 | { | ||
| 607 | if (!dev->dir) | ||
| 608 | return; | ||
| 609 | mutex_destroy(&dev->stats_lock); | ||
| 610 | ocrdma_release_stats_mem(dev); | ||
| 611 | debugfs_remove(dev->dir); | ||
| 612 | } | ||
| 613 | |||
| 614 | void ocrdma_init_debugfs(void) | ||
| 615 | { | ||
| 616 | /* Create base dir in debugfs root dir */ | ||
| 617 | ocrdma_dbgfs_dir = debugfs_create_dir("ocrdma", NULL); | ||
| 618 | } | ||
| 619 | |||
| 620 | void ocrdma_rem_debugfs(void) | ||
| 621 | { | ||
| 622 | debugfs_remove_recursive(ocrdma_dbgfs_dir); | ||
| 623 | } | ||
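Most of the firmware counters printed above arrive as _lo/_hi 32-bit halves and are joined with convert_to_64bit(), which is not part of this diff; a sketch of the helper it presumably relies on:

	/* assumed definition: low doubleword first, high doubleword in the
	 * upper 32 bits (matches the _lo/_hi field ordering in ocrdma_sli.h)
	 */
	static inline u64 convert_to_64bit(u32 lo, u32 hi)
	{
		return ((u64)hi << 32) | (u64)lo;
	}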
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h new file mode 100644 index 000000000000..5f5e20c46d7c --- /dev/null +++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h | |||
| @@ -0,0 +1,54 @@ | |||
| 1 | /******************************************************************* | ||
| 2 | * This file is part of the Emulex RoCE Device Driver for * | ||
| 3 | * RoCE (RDMA over Converged Ethernet) adapters. * | ||
| 4 | * Copyright (C) 2008-2014 Emulex. All rights reserved. * | ||
| 5 | * EMULEX and SLI are trademarks of Emulex. * | ||
| 6 | * www.emulex.com * | ||
| 7 | * * | ||
| 8 | * This program is free software; you can redistribute it and/or * | ||
| 9 | * modify it under the terms of version 2 of the GNU General * | ||
| 10 | * Public License as published by the Free Software Foundation. * | ||
| 11 | * This program is distributed in the hope that it will be useful. * | ||
| 12 | * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * | ||
| 13 | * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * | ||
| 15 | * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * | ||
| 16 | * TO BE LEGALLY INVALID. See the GNU General Public License for * | ||
| 17 | * more details, a copy of which can be found in the file COPYING * | ||
| 18 | * included with this package. * | ||
| 19 | * | ||
| 20 | * Contact Information: | ||
| 21 | * linux-drivers@emulex.com | ||
| 22 | * | ||
| 23 | * Emulex | ||
| 24 | * 3333 Susan Street | ||
| 25 | * Costa Mesa, CA 92626 | ||
| 26 | *******************************************************************/ | ||
| 27 | |||
| 28 | #ifndef __OCRDMA_STATS_H__ | ||
| 29 | #define __OCRDMA_STATS_H__ | ||
| 30 | |||
| 31 | #include <linux/debugfs.h> | ||
| 32 | #include "ocrdma.h" | ||
| 33 | #include "ocrdma_hw.h" | ||
| 34 | |||
| 35 | #define OCRDMA_MAX_DBGFS_MEM 4096 | ||
| 36 | |||
| 37 | enum OCRDMA_STATS_TYPE { | ||
| 38 | OCRDMA_RSRC_STATS, | ||
| 39 | OCRDMA_RXSTATS, | ||
| 40 | OCRDMA_WQESTATS, | ||
| 41 | OCRDMA_TXSTATS, | ||
| 42 | OCRDMA_DB_ERRSTATS, | ||
| 43 | OCRDMA_RXQP_ERRSTATS, | ||
| 44 | OCRDMA_TXQP_ERRSTATS, | ||
| 45 | OCRDMA_TX_DBG_STATS, | ||
| 46 | OCRDMA_RX_DBG_STATS | ||
| 47 | }; | ||
| 48 | |||
| 49 | void ocrdma_rem_debugfs(void); | ||
| 50 | void ocrdma_init_debugfs(void); | ||
| 51 | void ocrdma_rem_port_stats(struct ocrdma_dev *dev); | ||
| 52 | void ocrdma_add_port_stats(struct ocrdma_dev *dev); | ||
| 53 | |||
| 54 | #endif /* __OCRDMA_STATS_H__ */ | ||
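Each debugfs file registered by ocrdma_add_port_stats() returns the whole formatted buffer in one read (the *ppos check in ocrdma_dbgfs_ops_read() rejects partial reads). A minimal userspace consumer; the mount point /sys/kernel/debug and the device name ocrdma0 are assumptions that depend on the system:

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		/* <debugfs mount>/ocrdma/<ibdev name>/<stats file> */
		const char *path =
			"/sys/kernel/debug/ocrdma/ocrdma0/resource_stats";
		char buf[4097];		/* OCRDMA_MAX_DBGFS_MEM + NUL */
		FILE *f = fopen(path, "r");
		size_t n;

		if (!f) {
			perror(path);
			return EXIT_FAILURE;
		}
		n = fread(buf, 1, sizeof(buf) - 1, f);
		buf[n] = '\0';
		fputs(buf, stdout);	/* "name: value" lines from ocrdma_add_stat() */
		fclose(f);
		return 0;
	}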
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index e0cc201be41a..edf6211d84b8 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | |||
| @@ -53,7 +53,7 @@ int ocrdma_query_gid(struct ib_device *ibdev, u8 port, | |||
| 53 | 53 | ||
| 54 | dev = get_ocrdma_dev(ibdev); | 54 | dev = get_ocrdma_dev(ibdev); |
| 55 | memset(sgid, 0, sizeof(*sgid)); | 55 | memset(sgid, 0, sizeof(*sgid)); |
| 56 | if (index >= OCRDMA_MAX_SGID) | 56 | if (index > OCRDMA_MAX_SGID) |
| 57 | return -EINVAL; | 57 | return -EINVAL; |
| 58 | 58 | ||
| 59 | memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid)); | 59 | memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid)); |
| @@ -89,7 +89,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr) | |||
| 89 | attr->max_cq = dev->attr.max_cq; | 89 | attr->max_cq = dev->attr.max_cq; |
| 90 | attr->max_cqe = dev->attr.max_cqe; | 90 | attr->max_cqe = dev->attr.max_cqe; |
| 91 | attr->max_mr = dev->attr.max_mr; | 91 | attr->max_mr = dev->attr.max_mr; |
| 92 | attr->max_mw = 0; | 92 | attr->max_mw = dev->attr.max_mw; |
| 93 | attr->max_pd = dev->attr.max_pd; | 93 | attr->max_pd = dev->attr.max_pd; |
| 94 | attr->atomic_cap = 0; | 94 | attr->atomic_cap = 0; |
| 95 | attr->max_fmr = 0; | 95 | attr->max_fmr = 0; |
| @@ -144,7 +144,6 @@ static inline void get_link_speed_and_width(struct ocrdma_dev *dev, | |||
| 144 | } | 144 | } |
| 145 | } | 145 | } |
| 146 | 146 | ||
| 147 | |||
| 148 | int ocrdma_query_port(struct ib_device *ibdev, | 147 | int ocrdma_query_port(struct ib_device *ibdev, |
| 149 | u8 port, struct ib_port_attr *props) | 148 | u8 port, struct ib_port_attr *props) |
| 150 | { | 149 | { |
| @@ -267,7 +266,7 @@ static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev, | |||
| 267 | 266 | ||
| 268 | if (udata && uctx) { | 267 | if (udata && uctx) { |
| 269 | pd->dpp_enabled = | 268 | pd->dpp_enabled = |
| 270 | dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY; | 269 | ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R; |
| 271 | pd->num_dpp_qp = | 270 | pd->num_dpp_qp = |
| 272 | pd->dpp_enabled ? OCRDMA_PD_MAX_DPP_ENABLED_QP : 0; | 271 | pd->dpp_enabled ? OCRDMA_PD_MAX_DPP_ENABLED_QP : 0; |
| 273 | } | 272 | } |
| @@ -726,10 +725,10 @@ static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr, | |||
| 726 | u32 num_pbes) | 725 | u32 num_pbes) |
| 727 | { | 726 | { |
| 728 | struct ocrdma_pbe *pbe; | 727 | struct ocrdma_pbe *pbe; |
| 729 | struct ib_umem_chunk *chunk; | 728 | struct scatterlist *sg; |
| 730 | struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table; | 729 | struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table; |
| 731 | struct ib_umem *umem = mr->umem; | 730 | struct ib_umem *umem = mr->umem; |
| 732 | int i, shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0; | 731 | int shift, pg_cnt, pages, pbe_cnt, entry, total_num_pbes = 0; |
| 733 | 732 | ||
| 734 | if (!mr->hwmr.num_pbes) | 733 | if (!mr->hwmr.num_pbes) |
| 735 | return; | 734 | return; |
| @@ -739,39 +738,37 @@ static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr, | |||
| 739 | 738 | ||
| 740 | shift = ilog2(umem->page_size); | 739 | shift = ilog2(umem->page_size); |
| 741 | 740 | ||
| 742 | list_for_each_entry(chunk, &umem->chunk_list, list) { | 741 | for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { |
| 743 | /* get all the dma regions from the chunk. */ | 742 | pages = sg_dma_len(sg) >> shift; |
| 744 | for (i = 0; i < chunk->nmap; i++) { | 743 | for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) { |
| 745 | pages = sg_dma_len(&chunk->page_list[i]) >> shift; | 744 | /* store the page address in pbe */ |
| 746 | for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) { | 745 | pbe->pa_lo = |
| 747 | /* store the page address in pbe */ | 746 | cpu_to_le32(sg_dma_address |
| 748 | pbe->pa_lo = | 747 | (sg) + |
| 749 | cpu_to_le32(sg_dma_address | 748 | (umem->page_size * pg_cnt)); |
| 750 | (&chunk->page_list[i]) + | 749 | pbe->pa_hi = |
| 751 | (umem->page_size * pg_cnt)); | 750 | cpu_to_le32(upper_32_bits |
| 752 | pbe->pa_hi = | 751 | ((sg_dma_address |
| 753 | cpu_to_le32(upper_32_bits | 752 | (sg) + |
| 754 | ((sg_dma_address | 753 | umem->page_size * pg_cnt))); |
| 755 | (&chunk->page_list[i]) + | 754 | pbe_cnt += 1; |
| 756 | umem->page_size * pg_cnt))); | 755 | total_num_pbes += 1; |
| 757 | pbe_cnt += 1; | 756 | pbe++; |
| 758 | total_num_pbes += 1; | 757 | |
| 759 | pbe++; | 758 | /* if done building pbes, issue the mbx cmd. */ |
| 760 | 759 | if (total_num_pbes == num_pbes) | |
| 761 | /* if done building pbes, issue the mbx cmd. */ | 760 | return; |
| 762 | if (total_num_pbes == num_pbes) | 761 | |
| 763 | return; | 762 | /* if the given pbl is full storing the pbes, |
| 764 | 763 | * move to next pbl. | |
| 765 | /* if the given pbl is full storing the pbes, | 764 | */ |
| 766 | * move to next pbl. | 765 | if (pbe_cnt == |
| 767 | */ | 766 | (mr->hwmr.pbl_size / sizeof(u64))) { |
| 768 | if (pbe_cnt == | 767 | pbl_tbl++; |
| 769 | (mr->hwmr.pbl_size / sizeof(u64))) { | 768 | pbe = (struct ocrdma_pbe *)pbl_tbl->va; |
| 770 | pbl_tbl++; | 769 | pbe_cnt = 0; |
| 771 | pbe = (struct ocrdma_pbe *)pbl_tbl->va; | ||
| 772 | pbe_cnt = 0; | ||
| 773 | } | ||
| 774 | } | 770 | } |
| 771 | |||
| 775 | } | 772 | } |
| 776 | } | 773 | } |
| 777 | } | 774 | } |
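The interleaved old/new columns above make the rewritten loop hard to follow. Reconstructed from the new side of the hunk only (no behaviour added), build_user_pbes() now walks the umem scatterlist directly instead of the removed chunk list:

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		pages = sg_dma_len(sg) >> shift;
		for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
			/* store the page address in pbe */
			pbe->pa_lo = cpu_to_le32(sg_dma_address(sg) +
						 (umem->page_size * pg_cnt));
			pbe->pa_hi = cpu_to_le32(upper_32_bits(
					(sg_dma_address(sg) +
					 umem->page_size * pg_cnt)));
			pbe_cnt += 1;
			total_num_pbes += 1;
			pbe++;

			/* if done building pbes, issue the mbx cmd. */
			if (total_num_pbes == num_pbes)
				return;

			/* if the given pbl is full storing the pbes,
			 * move to next pbl.
			 */
			if (pbe_cnt == (mr->hwmr.pbl_size / sizeof(u64))) {
				pbl_tbl++;
				pbe = (struct ocrdma_pbe *)pbl_tbl->va;
				pbe_cnt = 0;
			}
		}
	}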
| @@ -840,8 +837,7 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr) | |||
| 840 | 837 | ||
| 841 | status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey); | 838 | status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey); |
| 842 | 839 | ||
| 843 | if (mr->hwmr.fr_mr == 0) | 840 | ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr); |
| 844 | ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr); | ||
| 845 | 841 | ||
| 846 | /* it could be user registered memory. */ | 842 | /* it could be user registered memory. */ |
| 847 | if (mr->umem) | 843 | if (mr->umem) |
| @@ -910,6 +906,7 @@ struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector, | |||
| 910 | spin_lock_init(&cq->comp_handler_lock); | 906 | spin_lock_init(&cq->comp_handler_lock); |
| 911 | INIT_LIST_HEAD(&cq->sq_head); | 907 | INIT_LIST_HEAD(&cq->sq_head); |
| 912 | INIT_LIST_HEAD(&cq->rq_head); | 908 | INIT_LIST_HEAD(&cq->rq_head); |
| 909 | cq->first_arm = true; | ||
| 913 | 910 | ||
| 914 | if (ib_ctx) { | 911 | if (ib_ctx) { |
| 915 | uctx = get_ocrdma_ucontext(ib_ctx); | 912 | uctx = get_ocrdma_ucontext(ib_ctx); |
| @@ -927,9 +924,7 @@ struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector, | |||
| 927 | goto ctx_err; | 924 | goto ctx_err; |
| 928 | } | 925 | } |
| 929 | cq->phase = OCRDMA_CQE_VALID; | 926 | cq->phase = OCRDMA_CQE_VALID; |
| 930 | cq->arm_needed = true; | ||
| 931 | dev->cq_tbl[cq->id] = cq; | 927 | dev->cq_tbl[cq->id] = cq; |
| 932 | |||
| 933 | return &cq->ibcq; | 928 | return &cq->ibcq; |
| 934 | 929 | ||
| 935 | ctx_err: | 930 | ctx_err: |
| @@ -952,15 +947,52 @@ int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt, | |||
| 952 | return status; | 947 | return status; |
| 953 | } | 948 | } |
| 954 | 949 | ||
| 950 | static void ocrdma_flush_cq(struct ocrdma_cq *cq) | ||
| 951 | { | ||
| 952 | int cqe_cnt; | ||
| 953 | int valid_count = 0; | ||
| 954 | unsigned long flags; | ||
| 955 | |||
| 956 | struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device); | ||
| 957 | struct ocrdma_cqe *cqe = NULL; | ||
| 958 | |||
| 959 | cqe = cq->va; | ||
| 960 | cqe_cnt = cq->cqe_cnt; | ||
| 961 | |||
| 962 | /* Last irq might have scheduled a polling thread | ||
| 963 | * sync-up with it before hard flushing. | ||
| 964 | */ | ||
| 965 | spin_lock_irqsave(&cq->cq_lock, flags); | ||
| 966 | while (cqe_cnt) { | ||
| 967 | if (is_cqe_valid(cq, cqe)) | ||
| 968 | valid_count++; | ||
| 969 | cqe++; | ||
| 970 | cqe_cnt--; | ||
| 971 | } | ||
| 972 | ocrdma_ring_cq_db(dev, cq->id, false, false, valid_count); | ||
| 973 | spin_unlock_irqrestore(&cq->cq_lock, flags); | ||
| 974 | } | ||
| 975 | |||
| 955 | int ocrdma_destroy_cq(struct ib_cq *ibcq) | 976 | int ocrdma_destroy_cq(struct ib_cq *ibcq) |
| 956 | { | 977 | { |
| 957 | int status; | 978 | int status; |
| 958 | struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); | 979 | struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); |
| 980 | struct ocrdma_eq *eq = NULL; | ||
| 959 | struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device); | 981 | struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device); |
| 960 | int pdid = 0; | 982 | int pdid = 0; |
| 983 | u32 irq, indx; | ||
| 961 | 984 | ||
| 962 | status = ocrdma_mbx_destroy_cq(dev, cq); | 985 | dev->cq_tbl[cq->id] = NULL; |
| 986 | indx = ocrdma_get_eq_table_index(dev, cq->eqn); | ||
| 987 | if (indx == -EINVAL) | ||
| 988 | BUG(); | ||
| 963 | 989 | ||
| 990 | eq = &dev->eq_tbl[indx]; | ||
| 991 | irq = ocrdma_get_irq(dev, eq); | ||
| 992 | synchronize_irq(irq); | ||
| 993 | ocrdma_flush_cq(cq); | ||
| 994 | |||
| 995 | status = ocrdma_mbx_destroy_cq(dev, cq); | ||
| 964 | if (cq->ucontext) { | 996 | if (cq->ucontext) { |
| 965 | pdid = cq->ucontext->cntxt_pd->id; | 997 | pdid = cq->ucontext->cntxt_pd->id; |
| 966 | ocrdma_del_mmap(cq->ucontext, (u64) cq->pa, | 998 | ocrdma_del_mmap(cq->ucontext, (u64) cq->pa, |
| @@ -969,7 +1001,6 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq) | |||
| 969 | ocrdma_get_db_addr(dev, pdid), | 1001 | ocrdma_get_db_addr(dev, pdid), |
| 970 | dev->nic_info.db_page_size); | 1002 | dev->nic_info.db_page_size); |
| 971 | } | 1003 | } |
| 972 | dev->cq_tbl[cq->id] = NULL; | ||
| 973 | 1004 | ||
| 974 | kfree(cq); | 1005 | kfree(cq); |
| 975 | return status; | 1006 | return status; |
| @@ -1092,15 +1123,9 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp, | |||
| 1092 | } | 1123 | } |
| 1093 | uresp.db_page_addr = usr_db; | 1124 | uresp.db_page_addr = usr_db; |
| 1094 | uresp.db_page_size = dev->nic_info.db_page_size; | 1125 | uresp.db_page_size = dev->nic_info.db_page_size; |
| 1095 | if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { | 1126 | uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET; |
| 1096 | uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET; | 1127 | uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET; |
| 1097 | uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET; | 1128 | uresp.db_shift = OCRDMA_DB_RQ_SHIFT; |
| 1098 | uresp.db_shift = 24; | ||
| 1099 | } else { | ||
| 1100 | uresp.db_sq_offset = OCRDMA_DB_SQ_OFFSET; | ||
| 1101 | uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET; | ||
| 1102 | uresp.db_shift = 16; | ||
| 1103 | } | ||
| 1104 | 1129 | ||
| 1105 | if (qp->dpp_enabled) { | 1130 | if (qp->dpp_enabled) { |
| 1106 | uresp.dpp_credit = dpp_credit_lmt; | 1131 | uresp.dpp_credit = dpp_credit_lmt; |
| @@ -1132,7 +1157,7 @@ err: | |||
| 1132 | static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp, | 1157 | static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp, |
| 1133 | struct ocrdma_pd *pd) | 1158 | struct ocrdma_pd *pd) |
| 1134 | { | 1159 | { |
| 1135 | if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { | 1160 | if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) { |
| 1136 | qp->sq_db = dev->nic_info.db + | 1161 | qp->sq_db = dev->nic_info.db + |
| 1137 | (pd->id * dev->nic_info.db_page_size) + | 1162 | (pd->id * dev->nic_info.db_page_size) + |
| 1138 | OCRDMA_DB_GEN2_SQ_OFFSET; | 1163 | OCRDMA_DB_GEN2_SQ_OFFSET; |
| @@ -1182,7 +1207,6 @@ static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp, | |||
| 1182 | qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false; | 1207 | qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false; |
| 1183 | } | 1208 | } |
| 1184 | 1209 | ||
| 1185 | |||
| 1186 | static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev, | 1210 | static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev, |
| 1187 | struct ib_qp_init_attr *attrs) | 1211 | struct ib_qp_init_attr *attrs) |
| 1188 | { | 1212 | { |
| @@ -1268,17 +1292,6 @@ gen_err: | |||
| 1268 | return ERR_PTR(status); | 1292 | return ERR_PTR(status); |
| 1269 | } | 1293 | } |
| 1270 | 1294 | ||
| 1271 | |||
| 1272 | static void ocrdma_flush_rq_db(struct ocrdma_qp *qp) | ||
| 1273 | { | ||
| 1274 | if (qp->db_cache) { | ||
| 1275 | u32 val = qp->rq.dbid | (qp->db_cache << | ||
| 1276 | ocrdma_get_num_posted_shift(qp)); | ||
| 1277 | iowrite32(val, qp->rq_db); | ||
| 1278 | qp->db_cache = 0; | ||
| 1279 | } | ||
| 1280 | } | ||
| 1281 | |||
| 1282 | int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | 1295 | int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, |
| 1283 | int attr_mask) | 1296 | int attr_mask) |
| 1284 | { | 1297 | { |
| @@ -1296,9 +1309,7 @@ int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
| 1296 | */ | 1309 | */ |
| 1297 | if (status < 0) | 1310 | if (status < 0) |
| 1298 | return status; | 1311 | return status; |
| 1299 | status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask, old_qps); | 1312 | status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask); |
| 1300 | if (!status && attr_mask & IB_QP_STATE && attr->qp_state == IB_QPS_RTR) | ||
| 1301 | ocrdma_flush_rq_db(qp); | ||
| 1302 | 1313 | ||
| 1303 | return status; | 1314 | return status; |
| 1304 | } | 1315 | } |
| @@ -1510,7 +1521,7 @@ static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq) | |||
| 1510 | int discard_cnt = 0; | 1521 | int discard_cnt = 0; |
| 1511 | u32 cur_getp, stop_getp; | 1522 | u32 cur_getp, stop_getp; |
| 1512 | struct ocrdma_cqe *cqe; | 1523 | struct ocrdma_cqe *cqe; |
| 1513 | u32 qpn = 0; | 1524 | u32 qpn = 0, wqe_idx = 0; |
| 1514 | 1525 | ||
| 1515 | spin_lock_irqsave(&cq->cq_lock, cq_flags); | 1526 | spin_lock_irqsave(&cq->cq_lock, cq_flags); |
| 1516 | 1527 | ||
| @@ -1539,24 +1550,29 @@ static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq) | |||
| 1539 | if (qpn == 0 || qpn != qp->id) | 1550 | if (qpn == 0 || qpn != qp->id) |
| 1540 | goto skip_cqe; | 1551 | goto skip_cqe; |
| 1541 | 1552 | ||
| 1542 | /* mark cqe discarded so that it is not picked up later | ||
| 1543 | * in the poll_cq(). | ||
| 1544 | */ | ||
| 1545 | discard_cnt += 1; | ||
| 1546 | cqe->cmn.qpn = 0; | ||
| 1547 | if (is_cqe_for_sq(cqe)) { | 1553 | if (is_cqe_for_sq(cqe)) { |
| 1548 | ocrdma_hwq_inc_tail(&qp->sq); | 1554 | ocrdma_hwq_inc_tail(&qp->sq); |
| 1549 | } else { | 1555 | } else { |
| 1550 | if (qp->srq) { | 1556 | if (qp->srq) { |
| 1557 | wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >> | ||
| 1558 | OCRDMA_CQE_BUFTAG_SHIFT) & | ||
| 1559 | qp->srq->rq.max_wqe_idx; | ||
| 1560 | if (wqe_idx < 1) | ||
| 1561 | BUG(); | ||
| 1551 | spin_lock_irqsave(&qp->srq->q_lock, flags); | 1562 | spin_lock_irqsave(&qp->srq->q_lock, flags); |
| 1552 | ocrdma_hwq_inc_tail(&qp->srq->rq); | 1563 | ocrdma_hwq_inc_tail(&qp->srq->rq); |
| 1553 | ocrdma_srq_toggle_bit(qp->srq, cur_getp); | 1564 | ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1); |
| 1554 | spin_unlock_irqrestore(&qp->srq->q_lock, flags); | 1565 | spin_unlock_irqrestore(&qp->srq->q_lock, flags); |
| 1555 | 1566 | ||
| 1556 | } else { | 1567 | } else { |
| 1557 | ocrdma_hwq_inc_tail(&qp->rq); | 1568 | ocrdma_hwq_inc_tail(&qp->rq); |
| 1558 | } | 1569 | } |
| 1559 | } | 1570 | } |
| 1571 | /* mark cqe discarded so that it is not picked up later | ||
| 1572 | * in the poll_cq(). | ||
| 1573 | */ | ||
| 1574 | discard_cnt += 1; | ||
| 1575 | cqe->cmn.qpn = 0; | ||
| 1560 | skip_cqe: | 1576 | skip_cqe: |
| 1561 | cur_getp = (cur_getp + 1) % cq->max_hw_cqe; | 1577 | cur_getp = (cur_getp + 1) % cq->max_hw_cqe; |
| 1562 | } while (cur_getp != stop_getp); | 1578 | } while (cur_getp != stop_getp); |
| @@ -1659,7 +1675,7 @@ static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq, | |||
| 1659 | (srq->pd->id * dev->nic_info.db_page_size); | 1675 | (srq->pd->id * dev->nic_info.db_page_size); |
| 1660 | uresp.db_page_size = dev->nic_info.db_page_size; | 1676 | uresp.db_page_size = dev->nic_info.db_page_size; |
| 1661 | uresp.num_rqe_allocated = srq->rq.max_cnt; | 1677 | uresp.num_rqe_allocated = srq->rq.max_cnt; |
| 1662 | if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { | 1678 | if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) { |
| 1663 | uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET; | 1679 | uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET; |
| 1664 | uresp.db_shift = 24; | 1680 | uresp.db_shift = 24; |
| 1665 | } else { | 1681 | } else { |
| @@ -2009,15 +2025,15 @@ static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, | |||
| 2009 | fast_reg->num_sges = wr->wr.fast_reg.page_list_len; | 2025 | fast_reg->num_sges = wr->wr.fast_reg.page_list_len; |
| 2010 | fast_reg->size_sge = | 2026 | fast_reg->size_sge = |
| 2011 | get_encoded_page_size(1 << wr->wr.fast_reg.page_shift); | 2027 | get_encoded_page_size(1 << wr->wr.fast_reg.page_shift); |
| 2012 | mr = (struct ocrdma_mr *) (unsigned long) qp->dev->stag_arr[(hdr->lkey >> 8) & | 2028 | mr = (struct ocrdma_mr *) (unsigned long) |
| 2013 | (OCRDMA_MAX_STAG - 1)]; | 2029 | qp->dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)]; |
| 2014 | build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr); | 2030 | build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr); |
| 2015 | return 0; | 2031 | return 0; |
| 2016 | } | 2032 | } |
| 2017 | 2033 | ||
| 2018 | static void ocrdma_ring_sq_db(struct ocrdma_qp *qp) | 2034 | static void ocrdma_ring_sq_db(struct ocrdma_qp *qp) |
| 2019 | { | 2035 | { |
| 2020 | u32 val = qp->sq.dbid | (1 << 16); | 2036 | u32 val = qp->sq.dbid | (1 << OCRDMA_DB_SQ_SHIFT); |
| 2021 | 2037 | ||
| 2022 | iowrite32(val, qp->sq_db); | 2038 | iowrite32(val, qp->sq_db); |
| 2023 | } | 2039 | } |
| @@ -2122,12 +2138,9 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
| 2122 | 2138 | ||
| 2123 | static void ocrdma_ring_rq_db(struct ocrdma_qp *qp) | 2139 | static void ocrdma_ring_rq_db(struct ocrdma_qp *qp) |
| 2124 | { | 2140 | { |
| 2125 | u32 val = qp->rq.dbid | (1 << ocrdma_get_num_posted_shift(qp)); | 2141 | u32 val = qp->rq.dbid | (1 << OCRDMA_DB_RQ_SHIFT); |
| 2126 | 2142 | ||
| 2127 | if (qp->state != OCRDMA_QPS_INIT) | 2143 | iowrite32(val, qp->rq_db); |
| 2128 | iowrite32(val, qp->rq_db); | ||
| 2129 | else | ||
| 2130 | qp->db_cache++; | ||
| 2131 | } | 2144 | } |
| 2132 | 2145 | ||
| 2133 | static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr, | 2146 | static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr, |
| @@ -2213,7 +2226,7 @@ static int ocrdma_srq_get_idx(struct ocrdma_srq *srq) | |||
| 2213 | 2226 | ||
| 2214 | if (row == srq->bit_fields_len) | 2227 | if (row == srq->bit_fields_len) |
| 2215 | BUG(); | 2228 | BUG(); |
| 2216 | return indx; | 2229 | return indx + 1; /* Use from index 1 */ |
| 2217 | } | 2230 | } |
| 2218 | 2231 | ||
| 2219 | static void ocrdma_ring_srq_db(struct ocrdma_srq *srq) | 2232 | static void ocrdma_ring_srq_db(struct ocrdma_srq *srq) |
| @@ -2550,10 +2563,13 @@ static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc, | |||
| 2550 | 2563 | ||
| 2551 | srq = get_ocrdma_srq(qp->ibqp.srq); | 2564 | srq = get_ocrdma_srq(qp->ibqp.srq); |
| 2552 | wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >> | 2565 | wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >> |
| 2553 | OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx; | 2566 | OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx; |
| 2567 | if (wqe_idx < 1) | ||
| 2568 | BUG(); | ||
| 2569 | |||
| 2554 | ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx]; | 2570 | ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx]; |
| 2555 | spin_lock_irqsave(&srq->q_lock, flags); | 2571 | spin_lock_irqsave(&srq->q_lock, flags); |
| 2556 | ocrdma_srq_toggle_bit(srq, wqe_idx); | 2572 | ocrdma_srq_toggle_bit(srq, wqe_idx - 1); |
| 2557 | spin_unlock_irqrestore(&srq->q_lock, flags); | 2573 | spin_unlock_irqrestore(&srq->q_lock, flags); |
| 2558 | ocrdma_hwq_inc_tail(&srq->rq); | 2574 | ocrdma_hwq_inc_tail(&srq->rq); |
| 2559 | } | 2575 | } |
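The two hunks above change the SRQ buffer-tag convention: ocrdma_srq_get_idx() now hands out indices starting at 1 so a tag of 0 can be treated as "never posted", and consumers check for 0 and subtract 1 before toggling the allocation bit. A minimal standalone sketch of that encode/decode round trip (function names and the example values are illustrative, not driver code):

    #include <assert.h>
    #include <stdio.h>

    /* producer side: bitmap slot -> buffer tag, reserving 0 as "invalid" */
    static unsigned encode_tag(unsigned slot)
    {
        return slot + 1;                 /* "Use from index 1" */
    }

    /* consumer side: buffer tag read back from the CQE -> bitmap slot */
    static unsigned decode_tag(unsigned wqe_idx)
    {
        assert(wqe_idx >= 1);            /* 0 here would indicate a bug */
        return wqe_idx - 1;
    }

    int main(void)
    {
        unsigned slot = 0;               /* first free receive-queue entry */
        unsigned tag = encode_tag(slot);
        printf("slot %u -> tag %u -> slot %u\n", slot, tag, decode_tag(tag));
        return 0;
    }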
| @@ -2705,10 +2721,18 @@ expand_cqe: | |||
| 2705 | } | 2721 | } |
| 2706 | stop_cqe: | 2722 | stop_cqe: |
| 2707 | cq->getp = cur_getp; | 2723 | cq->getp = cur_getp; |
| 2708 | if (polled_hw_cqes || expand || stop) { | 2724 | if (cq->deferred_arm) { |
| 2709 | ocrdma_ring_cq_db(dev, cq->id, cq->armed, cq->solicited, | 2725 | ocrdma_ring_cq_db(dev, cq->id, true, cq->deferred_sol, |
| 2726 | polled_hw_cqes); | ||
| 2727 | cq->deferred_arm = false; | ||
| 2728 | cq->deferred_sol = false; | ||
| 2729 | } else { | ||
| 2730 | /* We need to pop the CQE. No need to arm */ | ||
| 2731 | ocrdma_ring_cq_db(dev, cq->id, false, cq->deferred_sol, | ||
| 2710 | polled_hw_cqes); | 2732 | polled_hw_cqes); |
| 2733 | cq->deferred_sol = false; | ||
| 2711 | } | 2734 | } |
| 2735 | |||
| 2712 | return i; | 2736 | return i; |
| 2713 | } | 2737 | } |
| 2714 | 2738 | ||
| @@ -2780,30 +2804,28 @@ int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags) | |||
| 2780 | struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); | 2804 | struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); |
| 2781 | struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device); | 2805 | struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device); |
| 2782 | u16 cq_id; | 2806 | u16 cq_id; |
| 2783 | u16 cur_getp; | ||
| 2784 | struct ocrdma_cqe *cqe; | ||
| 2785 | unsigned long flags; | 2807 | unsigned long flags; |
| 2808 | bool arm_needed = false, sol_needed = false; | ||
| 2786 | 2809 | ||
| 2787 | cq_id = cq->id; | 2810 | cq_id = cq->id; |
| 2788 | 2811 | ||
| 2789 | spin_lock_irqsave(&cq->cq_lock, flags); | 2812 | spin_lock_irqsave(&cq->cq_lock, flags); |
| 2790 | if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED) | 2813 | if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED) |
| 2791 | cq->armed = true; | 2814 | arm_needed = true; |
| 2792 | if (cq_flags & IB_CQ_SOLICITED) | 2815 | if (cq_flags & IB_CQ_SOLICITED) |
| 2793 | cq->solicited = true; | 2816 | sol_needed = true; |
| 2794 | |||
| 2795 | cur_getp = cq->getp; | ||
| 2796 | cqe = cq->va + cur_getp; | ||
| 2797 | 2817 | ||
| 2798 | /* check whether any valid cqe exist or not, if not then safe to | 2818 | if (cq->first_arm) { |
| 2799 | * arm. If cqe is not yet consumed, then let it get consumed and then | 2819 | ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0); |
| 2800 | * we arm it to avoid false interrupts. | 2820 | cq->first_arm = false; |
| 2801 | */ | 2821 | goto skip_defer; |
| 2802 | if (!is_cqe_valid(cq, cqe) || cq->arm_needed) { | ||
| 2803 | cq->arm_needed = false; | ||
| 2804 | ocrdma_ring_cq_db(dev, cq_id, cq->armed, cq->solicited, 0); | ||
| 2805 | } | 2822 | } |
| 2823 | cq->deferred_arm = true; | ||
| 2824 | |||
| 2825 | skip_defer: | ||
| 2826 | cq->deferred_sol = sol_needed; | ||
| 2806 | spin_unlock_irqrestore(&cq->cq_lock, flags); | 2827 | spin_unlock_irqrestore(&cq->cq_lock, flags); |
| 2828 | |||
| 2807 | return 0; | 2829 | return 0; |
| 2808 | } | 2830 | } |
| 2809 | 2831 | ||
| @@ -2838,7 +2860,8 @@ struct ib_mr *ocrdma_alloc_frmr(struct ib_pd *ibpd, int max_page_list_len) | |||
| 2838 | goto mbx_err; | 2860 | goto mbx_err; |
| 2839 | mr->ibmr.rkey = mr->hwmr.lkey; | 2861 | mr->ibmr.rkey = mr->hwmr.lkey; |
| 2840 | mr->ibmr.lkey = mr->hwmr.lkey; | 2862 | mr->ibmr.lkey = mr->hwmr.lkey; |
| 2841 | dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] = mr; | 2863 | dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] = |
| 2864 | (unsigned long) mr; | ||
| 2842 | return &mr->ibmr; | 2865 | return &mr->ibmr; |
| 2843 | mbx_err: | 2866 | mbx_err: |
| 2844 | ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr); | 2867 | ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr); |
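The poll/arm hunks above replace the old "only arm if no valid CQE is pending" heuristic with a deferred scheme: the first arm request rings the doorbell immediately, later requests only set deferred_arm, and the arm is folded into the doorbell write that the poll path issues after consuming entries. A simplified sketch of that state machine (struct cq_state and ring_db() are stand-ins, not the driver's API):

    #include <stdbool.h>
    #include <stdio.h>

    struct cq_state {
        bool first_arm;     /* no arm request handled yet                */
        bool deferred_arm;  /* arm requested, doorbell write postponed   */
        bool deferred_sol;  /* solicited-only notification requested     */
    };

    /* stand-in for the hardware doorbell write */
    static void ring_db(bool arm, bool sol, int popped)
    {
        printf("db: arm=%d sol=%d popped=%d\n", arm, sol, popped);
    }

    /* req_notify path: only the very first request rings the doorbell here */
    static void arm_cq(struct cq_state *cq, bool solicited)
    {
        if (cq->first_arm) {
            ring_db(true, solicited, 0);
            cq->first_arm = false;
        } else {
            cq->deferred_arm = true;
        }
        cq->deferred_sol = solicited;
    }

    /* poll path: credit the consumed CQEs and honour any deferred arm */
    static void finish_poll(struct cq_state *cq, int polled)
    {
        ring_db(cq->deferred_arm, cq->deferred_sol, polled);
        cq->deferred_arm = false;
        cq->deferred_sol = false;
    }

    int main(void)
    {
        struct cq_state cq = { .first_arm = true };
        arm_cq(&cq, false);     /* rings immediately            */
        finish_poll(&cq, 3);    /* pops 3 CQEs, nothing pending */
        arm_cq(&cq, true);      /* deferred until the next poll */
        finish_poll(&cq, 1);    /* pops 1 CQE and re-arms       */
        return 0;
    }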
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h index 1946101419a3..c00ae093b6f8 100644 --- a/drivers/infiniband/hw/qib/qib.h +++ b/drivers/infiniband/hw/qib/qib.h | |||
| @@ -868,8 +868,10 @@ struct qib_devdata { | |||
| 868 | /* last buffer for user use */ | 868 | /* last buffer for user use */ |
| 869 | u32 lastctxt_piobuf; | 869 | u32 lastctxt_piobuf; |
| 870 | 870 | ||
| 871 | /* saturating counter of (non-port-specific) device interrupts */ | 871 | /* reset value */ |
| 872 | u32 int_counter; | 872 | u64 z_int_counter; |
| 873 | /* percpu intcounter */ | ||
| 874 | u64 __percpu *int_counter; | ||
| 873 | 875 | ||
| 874 | /* pio bufs allocated per ctxt */ | 876 | /* pio bufs allocated per ctxt */ |
| 875 | u32 pbufsctxt; | 877 | u32 pbufsctxt; |
| @@ -1184,7 +1186,7 @@ int qib_setup_eagerbufs(struct qib_ctxtdata *); | |||
| 1184 | void qib_set_ctxtcnt(struct qib_devdata *); | 1186 | void qib_set_ctxtcnt(struct qib_devdata *); |
| 1185 | int qib_create_ctxts(struct qib_devdata *dd); | 1187 | int qib_create_ctxts(struct qib_devdata *dd); |
| 1186 | struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *, u32, int); | 1188 | struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *, u32, int); |
| 1187 | void qib_init_pportdata(struct qib_pportdata *, struct qib_devdata *, u8, u8); | 1189 | int qib_init_pportdata(struct qib_pportdata *, struct qib_devdata *, u8, u8); |
| 1188 | void qib_free_ctxtdata(struct qib_devdata *, struct qib_ctxtdata *); | 1190 | void qib_free_ctxtdata(struct qib_devdata *, struct qib_ctxtdata *); |
| 1189 | 1191 | ||
| 1190 | u32 qib_kreceive(struct qib_ctxtdata *, u32 *, u32 *); | 1192 | u32 qib_kreceive(struct qib_ctxtdata *, u32 *, u32 *); |
| @@ -1449,6 +1451,10 @@ void qib_nomsi(struct qib_devdata *); | |||
| 1449 | void qib_nomsix(struct qib_devdata *); | 1451 | void qib_nomsix(struct qib_devdata *); |
| 1450 | void qib_pcie_getcmd(struct qib_devdata *, u16 *, u8 *, u8 *); | 1452 | void qib_pcie_getcmd(struct qib_devdata *, u16 *, u8 *, u8 *); |
| 1451 | void qib_pcie_reenable(struct qib_devdata *, u16, u8, u8); | 1453 | void qib_pcie_reenable(struct qib_devdata *, u16, u8, u8); |
| 1454 | /* interrupts for device */ | ||
| 1455 | u64 qib_int_counter(struct qib_devdata *); | ||
| 1456 | /* interrupt for all devices */ | ||
| 1457 | u64 qib_sps_ints(void); | ||
| 1452 | 1458 | ||
| 1453 | /* | 1459 | /* |
| 1454 | * dma_addr wrappers - all 0's invalid for hw | 1460 | * dma_addr wrappers - all 0's invalid for hw |
diff --git a/drivers/infiniband/hw/qib/qib_diag.c b/drivers/infiniband/hw/qib/qib_diag.c index 1686fd4bda87..5dfda4c5cc9c 100644 --- a/drivers/infiniband/hw/qib/qib_diag.c +++ b/drivers/infiniband/hw/qib/qib_diag.c | |||
| @@ -546,7 +546,7 @@ static ssize_t qib_diagpkt_write(struct file *fp, | |||
| 546 | size_t count, loff_t *off) | 546 | size_t count, loff_t *off) |
| 547 | { | 547 | { |
| 548 | u32 __iomem *piobuf; | 548 | u32 __iomem *piobuf; |
| 549 | u32 plen, clen, pbufn; | 549 | u32 plen, pbufn, maxlen_reserve; |
| 550 | struct qib_diag_xpkt dp; | 550 | struct qib_diag_xpkt dp; |
| 551 | u32 *tmpbuf = NULL; | 551 | u32 *tmpbuf = NULL; |
| 552 | struct qib_devdata *dd; | 552 | struct qib_devdata *dd; |
| @@ -590,15 +590,20 @@ static ssize_t qib_diagpkt_write(struct file *fp, | |||
| 590 | } | 590 | } |
| 591 | ppd = &dd->pport[dp.port - 1]; | 591 | ppd = &dd->pport[dp.port - 1]; |
| 592 | 592 | ||
| 593 | /* need total length before first word written */ | 593 | /* |
| 594 | /* +1 word is for the qword padding */ | 594 | * need total length before first word written, plus 2 Dwords. One Dword |
| 595 | plen = sizeof(u32) + dp.len; | 595 | * is for padding so we get the full user data when not aligned on |
| 596 | clen = dp.len >> 2; | 596 | * a word boundary. The other Dword is to make sure we have room for the |
| 597 | 597 | * ICRC which gets tacked on later. | |
| 598 | if ((plen + 4) > ppd->ibmaxlen) { | 598 | */ |
| 599 | maxlen_reserve = 2 * sizeof(u32); | ||
| 600 | if (dp.len > ppd->ibmaxlen - maxlen_reserve) { | ||
| 599 | ret = -EINVAL; | 601 | ret = -EINVAL; |
| 600 | goto bail; /* before writing pbc */ | 602 | goto bail; |
| 601 | } | 603 | } |
| 604 | |||
| 605 | plen = sizeof(u32) + dp.len; | ||
| 606 | |||
| 602 | tmpbuf = vmalloc(plen); | 607 | tmpbuf = vmalloc(plen); |
| 603 | if (!tmpbuf) { | 608 | if (!tmpbuf) { |
| 604 | qib_devinfo(dd->pcidev, | 609 | qib_devinfo(dd->pcidev, |
| @@ -638,11 +643,11 @@ static ssize_t qib_diagpkt_write(struct file *fp, | |||
| 638 | */ | 643 | */ |
| 639 | if (dd->flags & QIB_PIO_FLUSH_WC) { | 644 | if (dd->flags & QIB_PIO_FLUSH_WC) { |
| 640 | qib_flush_wc(); | 645 | qib_flush_wc(); |
| 641 | qib_pio_copy(piobuf + 2, tmpbuf, clen - 1); | 646 | qib_pio_copy(piobuf + 2, tmpbuf, plen - 1); |
| 642 | qib_flush_wc(); | 647 | qib_flush_wc(); |
| 643 | __raw_writel(tmpbuf[clen - 1], piobuf + clen + 1); | 648 | __raw_writel(tmpbuf[plen - 1], piobuf + plen + 1); |
| 644 | } else | 649 | } else |
| 645 | qib_pio_copy(piobuf + 2, tmpbuf, clen); | 650 | qib_pio_copy(piobuf + 2, tmpbuf, plen); |
| 646 | 651 | ||
| 647 | if (dd->flags & QIB_USE_SPCL_TRIG) { | 652 | if (dd->flags & QIB_USE_SPCL_TRIG) { |
| 648 | u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023; | 653 | u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023; |
| @@ -689,28 +694,23 @@ int qib_register_observer(struct qib_devdata *dd, | |||
| 689 | const struct diag_observer *op) | 694 | const struct diag_observer *op) |
| 690 | { | 695 | { |
| 691 | struct diag_observer_list_elt *olp; | 696 | struct diag_observer_list_elt *olp; |
| 692 | int ret = -EINVAL; | 697 | unsigned long flags; |
| 693 | 698 | ||
| 694 | if (!dd || !op) | 699 | if (!dd || !op) |
| 695 | goto bail; | 700 | return -EINVAL; |
| 696 | ret = -ENOMEM; | ||
| 697 | olp = vmalloc(sizeof *olp); | 701 | olp = vmalloc(sizeof *olp); |
| 698 | if (!olp) { | 702 | if (!olp) { |
| 699 | pr_err("vmalloc for observer failed\n"); | 703 | pr_err("vmalloc for observer failed\n"); |
| 700 | goto bail; | 704 | return -ENOMEM; |
| 701 | } | 705 | } |
| 702 | if (olp) { | ||
| 703 | unsigned long flags; | ||
| 704 | 706 | ||
| 705 | spin_lock_irqsave(&dd->qib_diag_trans_lock, flags); | 707 | spin_lock_irqsave(&dd->qib_diag_trans_lock, flags); |
| 706 | olp->op = op; | 708 | olp->op = op; |
| 707 | olp->next = dd->diag_observer_list; | 709 | olp->next = dd->diag_observer_list; |
| 708 | dd->diag_observer_list = olp; | 710 | dd->diag_observer_list = olp; |
| 709 | spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags); | 711 | spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags); |
| 710 | ret = 0; | 712 | |
| 711 | } | 713 | return 0; |
| 712 | bail: | ||
| 713 | return ret; | ||
| 714 | } | 714 | } |
| 715 | 715 | ||
| 716 | /* Remove all registered observers when device is closed */ | 716 | /* Remove all registered observers when device is closed */ |
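The reworked diagpkt length check above reserves two dwords of the per-port limit: one for the qword-alignment padding and one for the ICRC the hardware appends. A standalone illustration of the bound (the numbers are invented; ibmaxlen comes from the port in the real driver):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t ibmaxlen = 4096;                       /* per-port limit, bytes  */
        uint32_t maxlen_reserve = 2 * sizeof(uint32_t); /* pad dword + ICRC dword */
        uint32_t len[] = { 4080, 4088, 4092 };          /* candidate payloads     */

        for (int i = 0; i < 3; i++) {
            if (len[i] > ibmaxlen - maxlen_reserve)
                printf("len %u rejected (> %u)\n",
                       len[i], ibmaxlen - maxlen_reserve);
            else
                printf("len %u accepted, plen = %zu bytes\n",
                       len[i], sizeof(uint32_t) + (size_t)len[i]);
        }
        return 0;
    }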
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index 275f247f9fca..b15e34eeef68 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c | |||
| @@ -1459,7 +1459,7 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo, | |||
| 1459 | cused++; | 1459 | cused++; |
| 1460 | else | 1460 | else |
| 1461 | cfree++; | 1461 | cfree++; |
| 1462 | if (pusable && cfree && cused < inuse) { | 1462 | if (cfree && cused < inuse) { |
| 1463 | udd = dd; | 1463 | udd = dd; |
| 1464 | inuse = cused; | 1464 | inuse = cused; |
| 1465 | } | 1465 | } |
| @@ -1578,7 +1578,7 @@ static int do_qib_user_sdma_queue_create(struct file *fp) | |||
| 1578 | struct qib_ctxtdata *rcd = fd->rcd; | 1578 | struct qib_ctxtdata *rcd = fd->rcd; |
| 1579 | struct qib_devdata *dd = rcd->dd; | 1579 | struct qib_devdata *dd = rcd->dd; |
| 1580 | 1580 | ||
| 1581 | if (dd->flags & QIB_HAS_SEND_DMA) | 1581 | if (dd->flags & QIB_HAS_SEND_DMA) { |
| 1582 | 1582 | ||
| 1583 | fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev, | 1583 | fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev, |
| 1584 | dd->unit, | 1584 | dd->unit, |
| @@ -1586,6 +1586,7 @@ static int do_qib_user_sdma_queue_create(struct file *fp) | |||
| 1586 | fd->subctxt); | 1586 | fd->subctxt); |
| 1587 | if (!fd->pq) | 1587 | if (!fd->pq) |
| 1588 | return -ENOMEM; | 1588 | return -ENOMEM; |
| 1589 | } | ||
| 1589 | 1590 | ||
| 1590 | return 0; | 1591 | return 0; |
| 1591 | } | 1592 | } |
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c index c61e2a92b3c1..cab610ccd50e 100644 --- a/drivers/infiniband/hw/qib/qib_fs.c +++ b/drivers/infiniband/hw/qib/qib_fs.c | |||
| @@ -105,6 +105,7 @@ static int create_file(const char *name, umode_t mode, | |||
| 105 | static ssize_t driver_stats_read(struct file *file, char __user *buf, | 105 | static ssize_t driver_stats_read(struct file *file, char __user *buf, |
| 106 | size_t count, loff_t *ppos) | 106 | size_t count, loff_t *ppos) |
| 107 | { | 107 | { |
| 108 | qib_stats.sps_ints = qib_sps_ints(); | ||
| 108 | return simple_read_from_buffer(buf, count, ppos, &qib_stats, | 109 | return simple_read_from_buffer(buf, count, ppos, &qib_stats, |
| 109 | sizeof qib_stats); | 110 | sizeof qib_stats); |
| 110 | } | 111 | } |
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c index 84e593d6007b..d68266ac7619 100644 --- a/drivers/infiniband/hw/qib/qib_iba6120.c +++ b/drivers/infiniband/hw/qib/qib_iba6120.c | |||
| @@ -1634,9 +1634,7 @@ static irqreturn_t qib_6120intr(int irq, void *data) | |||
| 1634 | goto bail; | 1634 | goto bail; |
| 1635 | } | 1635 | } |
| 1636 | 1636 | ||
| 1637 | qib_stats.sps_ints++; | 1637 | this_cpu_inc(*dd->int_counter); |
| 1638 | if (dd->int_counter != (u32) -1) | ||
| 1639 | dd->int_counter++; | ||
| 1640 | 1638 | ||
| 1641 | if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT | | 1639 | if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT | |
| 1642 | QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR))) | 1640 | QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR))) |
| @@ -1808,7 +1806,8 @@ static int qib_6120_setup_reset(struct qib_devdata *dd) | |||
| 1808 | * isn't set. | 1806 | * isn't set. |
| 1809 | */ | 1807 | */ |
| 1810 | dd->flags &= ~(QIB_INITTED | QIB_PRESENT); | 1808 | dd->flags &= ~(QIB_INITTED | QIB_PRESENT); |
| 1811 | dd->int_counter = 0; /* so we check interrupts work again */ | 1809 | /* so we check interrupts work again */ |
| 1810 | dd->z_int_counter = qib_int_counter(dd); | ||
| 1812 | val = dd->control | QLOGIC_IB_C_RESET; | 1811 | val = dd->control | QLOGIC_IB_C_RESET; |
| 1813 | writeq(val, &dd->kregbase[kr_control]); | 1812 | writeq(val, &dd->kregbase[kr_control]); |
| 1814 | mb(); /* prevent compiler re-ordering around actual reset */ | 1813 | mb(); /* prevent compiler re-ordering around actual reset */ |
| @@ -3266,7 +3265,9 @@ static int init_6120_variables(struct qib_devdata *dd) | |||
| 3266 | 3265 | ||
| 3267 | dd->eep_st_masks[2].errs_to_log = ERR_MASK(ResetNegated); | 3266 | dd->eep_st_masks[2].errs_to_log = ERR_MASK(ResetNegated); |
| 3268 | 3267 | ||
| 3269 | qib_init_pportdata(ppd, dd, 0, 1); | 3268 | ret = qib_init_pportdata(ppd, dd, 0, 1); |
| 3269 | if (ret) | ||
| 3270 | goto bail; | ||
| 3270 | ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X; | 3271 | ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X; |
| 3271 | ppd->link_speed_supported = QIB_IB_SDR; | 3272 | ppd->link_speed_supported = QIB_IB_SDR; |
| 3272 | ppd->link_width_enabled = IB_WIDTH_4X; | 3273 | ppd->link_width_enabled = IB_WIDTH_4X; |
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c index 454c2e7668fe..7dec89fdc124 100644 --- a/drivers/infiniband/hw/qib/qib_iba7220.c +++ b/drivers/infiniband/hw/qib/qib_iba7220.c | |||
| @@ -1962,10 +1962,7 @@ static irqreturn_t qib_7220intr(int irq, void *data) | |||
| 1962 | goto bail; | 1962 | goto bail; |
| 1963 | } | 1963 | } |
| 1964 | 1964 | ||
| 1965 | qib_stats.sps_ints++; | 1965 | this_cpu_inc(*dd->int_counter); |
| 1966 | if (dd->int_counter != (u32) -1) | ||
| 1967 | dd->int_counter++; | ||
| 1968 | |||
| 1969 | if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT | | 1966 | if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT | |
| 1970 | QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR))) | 1967 | QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR))) |
| 1971 | unlikely_7220_intr(dd, istat); | 1968 | unlikely_7220_intr(dd, istat); |
| @@ -2120,7 +2117,8 @@ static int qib_setup_7220_reset(struct qib_devdata *dd) | |||
| 2120 | * isn't set. | 2117 | * isn't set. |
| 2121 | */ | 2118 | */ |
| 2122 | dd->flags &= ~(QIB_INITTED | QIB_PRESENT); | 2119 | dd->flags &= ~(QIB_INITTED | QIB_PRESENT); |
| 2123 | dd->int_counter = 0; /* so we check interrupts work again */ | 2120 | /* so we check interrupts work again */ |
| 2121 | dd->z_int_counter = qib_int_counter(dd); | ||
| 2124 | val = dd->control | QLOGIC_IB_C_RESET; | 2122 | val = dd->control | QLOGIC_IB_C_RESET; |
| 2125 | writeq(val, &dd->kregbase[kr_control]); | 2123 | writeq(val, &dd->kregbase[kr_control]); |
| 2126 | mb(); /* prevent compiler reordering around actual reset */ | 2124 | mb(); /* prevent compiler reordering around actual reset */ |
| @@ -4061,7 +4059,9 @@ static int qib_init_7220_variables(struct qib_devdata *dd) | |||
| 4061 | init_waitqueue_head(&cpspec->autoneg_wait); | 4059 | init_waitqueue_head(&cpspec->autoneg_wait); |
| 4062 | INIT_DELAYED_WORK(&cpspec->autoneg_work, autoneg_7220_work); | 4060 | INIT_DELAYED_WORK(&cpspec->autoneg_work, autoneg_7220_work); |
| 4063 | 4061 | ||
| 4064 | qib_init_pportdata(ppd, dd, 0, 1); | 4062 | ret = qib_init_pportdata(ppd, dd, 0, 1); |
| 4063 | if (ret) | ||
| 4064 | goto bail; | ||
| 4065 | ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X; | 4065 | ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X; |
| 4066 | ppd->link_speed_supported = QIB_IB_SDR | QIB_IB_DDR; | 4066 | ppd->link_speed_supported = QIB_IB_SDR | QIB_IB_DDR; |
| 4067 | 4067 | ||
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index d1bd21319d7d..a7eb32517a04 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c | |||
| @@ -3115,9 +3115,7 @@ static irqreturn_t qib_7322intr(int irq, void *data) | |||
| 3115 | goto bail; | 3115 | goto bail; |
| 3116 | } | 3116 | } |
| 3117 | 3117 | ||
| 3118 | qib_stats.sps_ints++; | 3118 | this_cpu_inc(*dd->int_counter); |
| 3119 | if (dd->int_counter != (u32) -1) | ||
| 3120 | dd->int_counter++; | ||
| 3121 | 3119 | ||
| 3122 | /* handle "errors" of various kinds first, device ahead of port */ | 3120 | /* handle "errors" of various kinds first, device ahead of port */ |
| 3123 | if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO | | 3121 | if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO | |
| @@ -3186,9 +3184,7 @@ static irqreturn_t qib_7322pintr(int irq, void *data) | |||
| 3186 | */ | 3184 | */ |
| 3187 | return IRQ_HANDLED; | 3185 | return IRQ_HANDLED; |
| 3188 | 3186 | ||
| 3189 | qib_stats.sps_ints++; | 3187 | this_cpu_inc(*dd->int_counter); |
| 3190 | if (dd->int_counter != (u32) -1) | ||
| 3191 | dd->int_counter++; | ||
| 3192 | 3188 | ||
| 3193 | /* Clear the interrupt bit we expect to be set. */ | 3189 | /* Clear the interrupt bit we expect to be set. */ |
| 3194 | qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) | | 3190 | qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) | |
| @@ -3215,9 +3211,7 @@ static irqreturn_t qib_7322bufavail(int irq, void *data) | |||
| 3215 | */ | 3211 | */ |
| 3216 | return IRQ_HANDLED; | 3212 | return IRQ_HANDLED; |
| 3217 | 3213 | ||
| 3218 | qib_stats.sps_ints++; | 3214 | this_cpu_inc(*dd->int_counter); |
| 3219 | if (dd->int_counter != (u32) -1) | ||
| 3220 | dd->int_counter++; | ||
| 3221 | 3215 | ||
| 3222 | /* Clear the interrupt bit we expect to be set. */ | 3216 | /* Clear the interrupt bit we expect to be set. */ |
| 3223 | qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL); | 3217 | qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL); |
| @@ -3248,9 +3242,7 @@ static irqreturn_t sdma_intr(int irq, void *data) | |||
| 3248 | */ | 3242 | */ |
| 3249 | return IRQ_HANDLED; | 3243 | return IRQ_HANDLED; |
| 3250 | 3244 | ||
| 3251 | qib_stats.sps_ints++; | 3245 | this_cpu_inc(*dd->int_counter); |
| 3252 | if (dd->int_counter != (u32) -1) | ||
| 3253 | dd->int_counter++; | ||
| 3254 | 3246 | ||
| 3255 | /* Clear the interrupt bit we expect to be set. */ | 3247 | /* Clear the interrupt bit we expect to be set. */ |
| 3256 | qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? | 3248 | qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? |
| @@ -3277,9 +3269,7 @@ static irqreturn_t sdma_idle_intr(int irq, void *data) | |||
| 3277 | */ | 3269 | */ |
| 3278 | return IRQ_HANDLED; | 3270 | return IRQ_HANDLED; |
| 3279 | 3271 | ||
| 3280 | qib_stats.sps_ints++; | 3272 | this_cpu_inc(*dd->int_counter); |
| 3281 | if (dd->int_counter != (u32) -1) | ||
| 3282 | dd->int_counter++; | ||
| 3283 | 3273 | ||
| 3284 | /* Clear the interrupt bit we expect to be set. */ | 3274 | /* Clear the interrupt bit we expect to be set. */ |
| 3285 | qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? | 3275 | qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? |
| @@ -3306,9 +3296,7 @@ static irqreturn_t sdma_progress_intr(int irq, void *data) | |||
| 3306 | */ | 3296 | */ |
| 3307 | return IRQ_HANDLED; | 3297 | return IRQ_HANDLED; |
| 3308 | 3298 | ||
| 3309 | qib_stats.sps_ints++; | 3299 | this_cpu_inc(*dd->int_counter); |
| 3310 | if (dd->int_counter != (u32) -1) | ||
| 3311 | dd->int_counter++; | ||
| 3312 | 3300 | ||
| 3313 | /* Clear the interrupt bit we expect to be set. */ | 3301 | /* Clear the interrupt bit we expect to be set. */ |
| 3314 | qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? | 3302 | qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? |
| @@ -3336,9 +3324,7 @@ static irqreturn_t sdma_cleanup_intr(int irq, void *data) | |||
| 3336 | */ | 3324 | */ |
| 3337 | return IRQ_HANDLED; | 3325 | return IRQ_HANDLED; |
| 3338 | 3326 | ||
| 3339 | qib_stats.sps_ints++; | 3327 | this_cpu_inc(*dd->int_counter); |
| 3340 | if (dd->int_counter != (u32) -1) | ||
| 3341 | dd->int_counter++; | ||
| 3342 | 3328 | ||
| 3343 | /* Clear the interrupt bit we expect to be set. */ | 3329 | /* Clear the interrupt bit we expect to be set. */ |
| 3344 | qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? | 3330 | qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? |
| @@ -3723,7 +3709,8 @@ static int qib_do_7322_reset(struct qib_devdata *dd) | |||
| 3723 | dd->pport->cpspec->ibsymdelta = 0; | 3709 | dd->pport->cpspec->ibsymdelta = 0; |
| 3724 | dd->pport->cpspec->iblnkerrdelta = 0; | 3710 | dd->pport->cpspec->iblnkerrdelta = 0; |
| 3725 | dd->pport->cpspec->ibmalfdelta = 0; | 3711 | dd->pport->cpspec->ibmalfdelta = 0; |
| 3726 | dd->int_counter = 0; /* so we check interrupts work again */ | 3712 | /* so we check interrupts work again */ |
| 3713 | dd->z_int_counter = qib_int_counter(dd); | ||
| 3727 | 3714 | ||
| 3728 | /* | 3715 | /* |
| 3729 | * Keep chip from being accessed until we are ready. Use | 3716 | * Keep chip from being accessed until we are ready. Use |
| @@ -6557,7 +6544,11 @@ static int qib_init_7322_variables(struct qib_devdata *dd) | |||
| 6557 | } | 6544 | } |
| 6558 | 6545 | ||
| 6559 | dd->num_pports++; | 6546 | dd->num_pports++; |
| 6560 | qib_init_pportdata(ppd, dd, pidx, dd->num_pports); | 6547 | ret = qib_init_pportdata(ppd, dd, pidx, dd->num_pports); |
| 6548 | if (ret) { | ||
| 6549 | dd->num_pports--; | ||
| 6550 | goto bail; | ||
| 6551 | } | ||
| 6561 | 6552 | ||
| 6562 | ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X; | 6553 | ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X; |
| 6563 | ppd->link_width_enabled = IB_WIDTH_4X; | 6554 | ppd->link_width_enabled = IB_WIDTH_4X; |
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c index 24e802f4ea2f..5b7aeb224a30 100644 --- a/drivers/infiniband/hw/qib/qib_init.c +++ b/drivers/infiniband/hw/qib/qib_init.c | |||
| @@ -130,7 +130,6 @@ void qib_set_ctxtcnt(struct qib_devdata *dd) | |||
| 130 | int qib_create_ctxts(struct qib_devdata *dd) | 130 | int qib_create_ctxts(struct qib_devdata *dd) |
| 131 | { | 131 | { |
| 132 | unsigned i; | 132 | unsigned i; |
| 133 | int ret; | ||
| 134 | int local_node_id = pcibus_to_node(dd->pcidev->bus); | 133 | int local_node_id = pcibus_to_node(dd->pcidev->bus); |
| 135 | 134 | ||
| 136 | if (local_node_id < 0) | 135 | if (local_node_id < 0) |
| @@ -145,8 +144,7 @@ int qib_create_ctxts(struct qib_devdata *dd) | |||
| 145 | if (!dd->rcd) { | 144 | if (!dd->rcd) { |
| 146 | qib_dev_err(dd, | 145 | qib_dev_err(dd, |
| 147 | "Unable to allocate ctxtdata array, failing\n"); | 146 | "Unable to allocate ctxtdata array, failing\n"); |
| 148 | ret = -ENOMEM; | 147 | return -ENOMEM; |
| 149 | goto done; | ||
| 150 | } | 148 | } |
| 151 | 149 | ||
| 152 | /* create (one or more) kctxt */ | 150 | /* create (one or more) kctxt */ |
| @@ -163,15 +161,14 @@ int qib_create_ctxts(struct qib_devdata *dd) | |||
| 163 | if (!rcd) { | 161 | if (!rcd) { |
| 164 | qib_dev_err(dd, | 162 | qib_dev_err(dd, |
| 165 | "Unable to allocate ctxtdata for Kernel ctxt, failing\n"); | 163 | "Unable to allocate ctxtdata for Kernel ctxt, failing\n"); |
| 166 | ret = -ENOMEM; | 164 | kfree(dd->rcd); |
| 167 | goto done; | 165 | dd->rcd = NULL; |
| 166 | return -ENOMEM; | ||
| 168 | } | 167 | } |
| 169 | rcd->pkeys[0] = QIB_DEFAULT_P_KEY; | 168 | rcd->pkeys[0] = QIB_DEFAULT_P_KEY; |
| 170 | rcd->seq_cnt = 1; | 169 | rcd->seq_cnt = 1; |
| 171 | } | 170 | } |
| 172 | ret = 0; | 171 | return 0; |
| 173 | done: | ||
| 174 | return ret; | ||
| 175 | } | 172 | } |
| 176 | 173 | ||
| 177 | /* | 174 | /* |
| @@ -233,7 +230,7 @@ struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt, | |||
| 233 | /* | 230 | /* |
| 234 | * Common code for initializing the physical port structure. | 231 | * Common code for initializing the physical port structure. |
| 235 | */ | 232 | */ |
| 236 | void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd, | 233 | int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd, |
| 237 | u8 hw_pidx, u8 port) | 234 | u8 hw_pidx, u8 port) |
| 238 | { | 235 | { |
| 239 | int size; | 236 | int size; |
| @@ -243,6 +240,7 @@ void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd, | |||
| 243 | 240 | ||
| 244 | spin_lock_init(&ppd->sdma_lock); | 241 | spin_lock_init(&ppd->sdma_lock); |
| 245 | spin_lock_init(&ppd->lflags_lock); | 242 | spin_lock_init(&ppd->lflags_lock); |
| 243 | spin_lock_init(&ppd->cc_shadow_lock); | ||
| 246 | init_waitqueue_head(&ppd->state_wait); | 244 | init_waitqueue_head(&ppd->state_wait); |
| 247 | 245 | ||
| 248 | init_timer(&ppd->symerr_clear_timer); | 246 | init_timer(&ppd->symerr_clear_timer); |
| @@ -250,8 +248,10 @@ void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd, | |||
| 250 | ppd->symerr_clear_timer.data = (unsigned long)ppd; | 248 | ppd->symerr_clear_timer.data = (unsigned long)ppd; |
| 251 | 249 | ||
| 252 | ppd->qib_wq = NULL; | 250 | ppd->qib_wq = NULL; |
| 253 | 251 | ppd->ibport_data.pmastats = | |
| 254 | spin_lock_init(&ppd->cc_shadow_lock); | 252 | alloc_percpu(struct qib_pma_counters); |
| 253 | if (!ppd->ibport_data.pmastats) | ||
| 254 | return -ENOMEM; | ||
| 255 | 255 | ||
| 256 | if (qib_cc_table_size < IB_CCT_MIN_ENTRIES) | 256 | if (qib_cc_table_size < IB_CCT_MIN_ENTRIES) |
| 257 | goto bail; | 257 | goto bail; |
| @@ -299,7 +299,7 @@ void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd, | |||
| 299 | goto bail_3; | 299 | goto bail_3; |
| 300 | } | 300 | } |
| 301 | 301 | ||
| 302 | return; | 302 | return 0; |
| 303 | 303 | ||
| 304 | bail_3: | 304 | bail_3: |
| 305 | kfree(ppd->ccti_entries_shadow); | 305 | kfree(ppd->ccti_entries_shadow); |
| @@ -313,7 +313,7 @@ bail_1: | |||
| 313 | bail: | 313 | bail: |
| 314 | /* User is intentionally disabling the congestion control agent */ | 314 | /* User is intentionally disabling the congestion control agent */ |
| 315 | if (!qib_cc_table_size) | 315 | if (!qib_cc_table_size) |
| 316 | return; | 316 | return 0; |
| 317 | 317 | ||
| 318 | if (qib_cc_table_size < IB_CCT_MIN_ENTRIES) { | 318 | if (qib_cc_table_size < IB_CCT_MIN_ENTRIES) { |
| 319 | qib_cc_table_size = 0; | 319 | qib_cc_table_size = 0; |
| @@ -324,7 +324,7 @@ bail: | |||
| 324 | 324 | ||
| 325 | qib_dev_err(dd, "Congestion Control Agent disabled for port %d\n", | 325 | qib_dev_err(dd, "Congestion Control Agent disabled for port %d\n", |
| 326 | port); | 326 | port); |
| 327 | return; | 327 | return 0; |
| 328 | } | 328 | } |
| 329 | 329 | ||
| 330 | static int init_pioavailregs(struct qib_devdata *dd) | 330 | static int init_pioavailregs(struct qib_devdata *dd) |
| @@ -525,6 +525,7 @@ static void enable_chip(struct qib_devdata *dd) | |||
| 525 | static void verify_interrupt(unsigned long opaque) | 525 | static void verify_interrupt(unsigned long opaque) |
| 526 | { | 526 | { |
| 527 | struct qib_devdata *dd = (struct qib_devdata *) opaque; | 527 | struct qib_devdata *dd = (struct qib_devdata *) opaque; |
| 528 | u64 int_counter; | ||
| 528 | 529 | ||
| 529 | if (!dd) | 530 | if (!dd) |
| 530 | return; /* being torn down */ | 531 | return; /* being torn down */ |
| @@ -533,7 +534,8 @@ static void verify_interrupt(unsigned long opaque) | |||
| 533 | * If we don't have a lid or any interrupts, let the user know and | 534 | * If we don't have a lid or any interrupts, let the user know and |
| 534 | * don't bother checking again. | 535 | * don't bother checking again. |
| 535 | */ | 536 | */ |
| 536 | if (dd->int_counter == 0) { | 537 | int_counter = qib_int_counter(dd) - dd->z_int_counter; |
| 538 | if (int_counter == 0) { | ||
| 537 | if (!dd->f_intr_fallback(dd)) | 539 | if (!dd->f_intr_fallback(dd)) |
| 538 | dev_err(&dd->pcidev->dev, | 540 | dev_err(&dd->pcidev->dev, |
| 539 | "No interrupts detected, not usable.\n"); | 541 | "No interrupts detected, not usable.\n"); |
| @@ -633,6 +635,12 @@ wq_error: | |||
| 633 | return -ENOMEM; | 635 | return -ENOMEM; |
| 634 | } | 636 | } |
| 635 | 637 | ||
| 638 | static void qib_free_pportdata(struct qib_pportdata *ppd) | ||
| 639 | { | ||
| 640 | free_percpu(ppd->ibport_data.pmastats); | ||
| 641 | ppd->ibport_data.pmastats = NULL; | ||
| 642 | } | ||
| 643 | |||
| 636 | /** | 644 | /** |
| 637 | * qib_init - do the actual initialization sequence on the chip | 645 | * qib_init - do the actual initialization sequence on the chip |
| 638 | * @dd: the qlogic_ib device | 646 | * @dd: the qlogic_ib device |
| @@ -920,6 +928,7 @@ static void qib_shutdown_device(struct qib_devdata *dd) | |||
| 920 | destroy_workqueue(ppd->qib_wq); | 928 | destroy_workqueue(ppd->qib_wq); |
| 921 | ppd->qib_wq = NULL; | 929 | ppd->qib_wq = NULL; |
| 922 | } | 930 | } |
| 931 | qib_free_pportdata(ppd); | ||
| 923 | } | 932 | } |
| 924 | 933 | ||
| 925 | qib_update_eeprom_log(dd); | 934 | qib_update_eeprom_log(dd); |
| @@ -1079,9 +1088,34 @@ void qib_free_devdata(struct qib_devdata *dd) | |||
| 1079 | #ifdef CONFIG_DEBUG_FS | 1088 | #ifdef CONFIG_DEBUG_FS |
| 1080 | qib_dbg_ibdev_exit(&dd->verbs_dev); | 1089 | qib_dbg_ibdev_exit(&dd->verbs_dev); |
| 1081 | #endif | 1090 | #endif |
| 1091 | free_percpu(dd->int_counter); | ||
| 1082 | ib_dealloc_device(&dd->verbs_dev.ibdev); | 1092 | ib_dealloc_device(&dd->verbs_dev.ibdev); |
| 1083 | } | 1093 | } |
| 1084 | 1094 | ||
| 1095 | u64 qib_int_counter(struct qib_devdata *dd) | ||
| 1096 | { | ||
| 1097 | int cpu; | ||
| 1098 | u64 int_counter = 0; | ||
| 1099 | |||
| 1100 | for_each_possible_cpu(cpu) | ||
| 1101 | int_counter += *per_cpu_ptr(dd->int_counter, cpu); | ||
| 1102 | return int_counter; | ||
| 1103 | } | ||
| 1104 | |||
| 1105 | u64 qib_sps_ints(void) | ||
| 1106 | { | ||
| 1107 | unsigned long flags; | ||
| 1108 | struct qib_devdata *dd; | ||
| 1109 | u64 sps_ints = 0; | ||
| 1110 | |||
| 1111 | spin_lock_irqsave(&qib_devs_lock, flags); | ||
| 1112 | list_for_each_entry(dd, &qib_dev_list, list) { | ||
| 1113 | sps_ints += qib_int_counter(dd); | ||
| 1114 | } | ||
| 1115 | spin_unlock_irqrestore(&qib_devs_lock, flags); | ||
| 1116 | return sps_ints; | ||
| 1117 | } | ||
| 1118 | |||
| 1085 | /* | 1119 | /* |
| 1086 | * Allocate our primary per-unit data structure. Must be done via verbs | 1120 | * Allocate our primary per-unit data structure. Must be done via verbs |
| 1087 | * allocator, because the verbs cleanup process both does cleanup and | 1121 | * allocator, because the verbs cleanup process both does cleanup and |
| @@ -1097,14 +1131,10 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra) | |||
| 1097 | int ret; | 1131 | int ret; |
| 1098 | 1132 | ||
| 1099 | dd = (struct qib_devdata *) ib_alloc_device(sizeof(*dd) + extra); | 1133 | dd = (struct qib_devdata *) ib_alloc_device(sizeof(*dd) + extra); |
| 1100 | if (!dd) { | 1134 | if (!dd) |
| 1101 | dd = ERR_PTR(-ENOMEM); | 1135 | return ERR_PTR(-ENOMEM); |
| 1102 | goto bail; | ||
| 1103 | } | ||
| 1104 | 1136 | ||
| 1105 | #ifdef CONFIG_DEBUG_FS | 1137 | INIT_LIST_HEAD(&dd->list); |
| 1106 | qib_dbg_ibdev_init(&dd->verbs_dev); | ||
| 1107 | #endif | ||
| 1108 | 1138 | ||
| 1109 | idr_preload(GFP_KERNEL); | 1139 | idr_preload(GFP_KERNEL); |
| 1110 | spin_lock_irqsave(&qib_devs_lock, flags); | 1140 | spin_lock_irqsave(&qib_devs_lock, flags); |
| @@ -1121,11 +1151,13 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra) | |||
| 1121 | if (ret < 0) { | 1151 | if (ret < 0) { |
| 1122 | qib_early_err(&pdev->dev, | 1152 | qib_early_err(&pdev->dev, |
| 1123 | "Could not allocate unit ID: error %d\n", -ret); | 1153 | "Could not allocate unit ID: error %d\n", -ret); |
| 1124 | #ifdef CONFIG_DEBUG_FS | 1154 | goto bail; |
| 1125 | qib_dbg_ibdev_exit(&dd->verbs_dev); | 1155 | } |
| 1126 | #endif | 1156 | dd->int_counter = alloc_percpu(u64); |
| 1127 | ib_dealloc_device(&dd->verbs_dev.ibdev); | 1157 | if (!dd->int_counter) { |
| 1128 | dd = ERR_PTR(ret); | 1158 | ret = -ENOMEM; |
| 1159 | qib_early_err(&pdev->dev, | ||
| 1160 | "Could not allocate per-cpu int_counter\n"); | ||
| 1129 | goto bail; | 1161 | goto bail; |
| 1130 | } | 1162 | } |
| 1131 | 1163 | ||
| @@ -1139,9 +1171,15 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra) | |||
| 1139 | qib_early_err(&pdev->dev, | 1171 | qib_early_err(&pdev->dev, |
| 1140 | "Could not alloc cpulist info, cpu affinity might be wrong\n"); | 1172 | "Could not alloc cpulist info, cpu affinity might be wrong\n"); |
| 1141 | } | 1173 | } |
| 1142 | 1174 | #ifdef CONFIG_DEBUG_FS | |
| 1143 | bail: | 1175 | qib_dbg_ibdev_init(&dd->verbs_dev); |
| 1176 | #endif | ||
| 1144 | return dd; | 1177 | return dd; |
| 1178 | bail: | ||
| 1179 | if (!list_empty(&dd->list)) | ||
| 1180 | list_del_init(&dd->list); | ||
| 1181 | ib_dealloc_device(&dd->verbs_dev.ibdev); | ||
| 1182 | return ERR_PTR(ret); | ||
| 1145 | } | 1183 | } |
| 1146 | 1184 | ||
| 1147 | /* | 1185 | /* |
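The qib changes above replace the single saturating int_counter with a per-cpu counter: interrupt handlers do a lock-free this_cpu_inc(), readers sum across CPUs, and a "reset" records a baseline (z_int_counter) instead of writing zero from another CPU. A kernel-style sketch of that pattern (the dev_counters container is hypothetical; the per-cpu helpers are the standard kernel ones):

    #include <linux/percpu.h>
    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    struct dev_counters {
        u64 __percpu *int_counter;  /* bumped from interrupt context        */
        u64 z_int_counter;          /* baseline recorded instead of zeroing */
    };

    static int counters_init(struct dev_counters *c)
    {
        c->int_counter = alloc_percpu(u64);
        return c->int_counter ? 0 : -ENOMEM;
    }

    /* hot path: no locks, no shared cache line bouncing between CPUs */
    static void counters_hit(struct dev_counters *c)
    {
        this_cpu_inc(*c->int_counter);
    }

    /* slow path: fold every CPU's contribution into one total */
    static u64 counters_total(struct dev_counters *c)
    {
        u64 sum = 0;
        int cpu;

        for_each_possible_cpu(cpu)
            sum += *per_cpu_ptr(c->int_counter, cpu);
        return sum;
    }

    /* "reset" without touching other CPUs' data: remember the current total */
    static void counters_reset(struct dev_counters *c)
    {
        c->z_int_counter = counters_total(c);
    }

    static u64 counters_since_reset(struct dev_counters *c)
    {
        return counters_total(c) - c->z_int_counter;
    }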
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c index ccb119143d20..edad991d60ed 100644 --- a/drivers/infiniband/hw/qib/qib_mad.c +++ b/drivers/infiniband/hw/qib/qib_mad.c | |||
| @@ -1634,6 +1634,23 @@ static int pma_get_portcounters_cong(struct ib_pma_mad *pmp, | |||
| 1634 | return reply((struct ib_smp *)pmp); | 1634 | return reply((struct ib_smp *)pmp); |
| 1635 | } | 1635 | } |
| 1636 | 1636 | ||
| 1637 | static void qib_snapshot_pmacounters( | ||
| 1638 | struct qib_ibport *ibp, | ||
| 1639 | struct qib_pma_counters *pmacounters) | ||
| 1640 | { | ||
| 1641 | struct qib_pma_counters *p; | ||
| 1642 | int cpu; | ||
| 1643 | |||
| 1644 | memset(pmacounters, 0, sizeof(*pmacounters)); | ||
| 1645 | for_each_possible_cpu(cpu) { | ||
| 1646 | p = per_cpu_ptr(ibp->pmastats, cpu); | ||
| 1647 | pmacounters->n_unicast_xmit += p->n_unicast_xmit; | ||
| 1648 | pmacounters->n_unicast_rcv += p->n_unicast_rcv; | ||
| 1649 | pmacounters->n_multicast_xmit += p->n_multicast_xmit; | ||
| 1650 | pmacounters->n_multicast_rcv += p->n_multicast_rcv; | ||
| 1651 | } | ||
| 1652 | } | ||
| 1653 | |||
| 1637 | static int pma_get_portcounters_ext(struct ib_pma_mad *pmp, | 1654 | static int pma_get_portcounters_ext(struct ib_pma_mad *pmp, |
| 1638 | struct ib_device *ibdev, u8 port) | 1655 | struct ib_device *ibdev, u8 port) |
| 1639 | { | 1656 | { |
| @@ -1642,6 +1659,7 @@ static int pma_get_portcounters_ext(struct ib_pma_mad *pmp, | |||
| 1642 | struct qib_ibport *ibp = to_iport(ibdev, port); | 1659 | struct qib_ibport *ibp = to_iport(ibdev, port); |
| 1643 | struct qib_pportdata *ppd = ppd_from_ibp(ibp); | 1660 | struct qib_pportdata *ppd = ppd_from_ibp(ibp); |
| 1644 | u64 swords, rwords, spkts, rpkts, xwait; | 1661 | u64 swords, rwords, spkts, rpkts, xwait; |
| 1662 | struct qib_pma_counters pma; | ||
| 1645 | u8 port_select = p->port_select; | 1663 | u8 port_select = p->port_select; |
| 1646 | 1664 | ||
| 1647 | memset(pmp->data, 0, sizeof(pmp->data)); | 1665 | memset(pmp->data, 0, sizeof(pmp->data)); |
| @@ -1664,10 +1682,17 @@ static int pma_get_portcounters_ext(struct ib_pma_mad *pmp, | |||
| 1664 | p->port_rcv_data = cpu_to_be64(rwords); | 1682 | p->port_rcv_data = cpu_to_be64(rwords); |
| 1665 | p->port_xmit_packets = cpu_to_be64(spkts); | 1683 | p->port_xmit_packets = cpu_to_be64(spkts); |
| 1666 | p->port_rcv_packets = cpu_to_be64(rpkts); | 1684 | p->port_rcv_packets = cpu_to_be64(rpkts); |
| 1667 | p->port_unicast_xmit_packets = cpu_to_be64(ibp->n_unicast_xmit); | 1685 | |
| 1668 | p->port_unicast_rcv_packets = cpu_to_be64(ibp->n_unicast_rcv); | 1686 | qib_snapshot_pmacounters(ibp, &pma); |
| 1669 | p->port_multicast_xmit_packets = cpu_to_be64(ibp->n_multicast_xmit); | 1687 | |
| 1670 | p->port_multicast_rcv_packets = cpu_to_be64(ibp->n_multicast_rcv); | 1688 | p->port_unicast_xmit_packets = cpu_to_be64(pma.n_unicast_xmit |
| 1689 | - ibp->z_unicast_xmit); | ||
| 1690 | p->port_unicast_rcv_packets = cpu_to_be64(pma.n_unicast_rcv | ||
| 1691 | - ibp->z_unicast_rcv); | ||
| 1692 | p->port_multicast_xmit_packets = cpu_to_be64(pma.n_multicast_xmit | ||
| 1693 | - ibp->z_multicast_xmit); | ||
| 1694 | p->port_multicast_rcv_packets = cpu_to_be64(pma.n_multicast_rcv | ||
| 1695 | - ibp->z_multicast_rcv); | ||
| 1671 | 1696 | ||
| 1672 | bail: | 1697 | bail: |
| 1673 | return reply((struct ib_smp *) pmp); | 1698 | return reply((struct ib_smp *) pmp); |
| @@ -1795,6 +1820,7 @@ static int pma_set_portcounters_ext(struct ib_pma_mad *pmp, | |||
| 1795 | struct qib_ibport *ibp = to_iport(ibdev, port); | 1820 | struct qib_ibport *ibp = to_iport(ibdev, port); |
| 1796 | struct qib_pportdata *ppd = ppd_from_ibp(ibp); | 1821 | struct qib_pportdata *ppd = ppd_from_ibp(ibp); |
| 1797 | u64 swords, rwords, spkts, rpkts, xwait; | 1822 | u64 swords, rwords, spkts, rpkts, xwait; |
| 1823 | struct qib_pma_counters pma; | ||
| 1798 | 1824 | ||
| 1799 | qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait); | 1825 | qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait); |
| 1800 | 1826 | ||
| @@ -1810,17 +1836,19 @@ static int pma_set_portcounters_ext(struct ib_pma_mad *pmp, | |||
| 1810 | if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS) | 1836 | if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS) |
| 1811 | ibp->z_port_rcv_packets = rpkts; | 1837 | ibp->z_port_rcv_packets = rpkts; |
| 1812 | 1838 | ||
| 1839 | qib_snapshot_pmacounters(ibp, &pma); | ||
| 1840 | |||
| 1813 | if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS) | 1841 | if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS) |
| 1814 | ibp->n_unicast_xmit = 0; | 1842 | ibp->z_unicast_xmit = pma.n_unicast_xmit; |
| 1815 | 1843 | ||
| 1816 | if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS) | 1844 | if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS) |
| 1817 | ibp->n_unicast_rcv = 0; | 1845 | ibp->z_unicast_rcv = pma.n_unicast_rcv; |
| 1818 | 1846 | ||
| 1819 | if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS) | 1847 | if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS) |
| 1820 | ibp->n_multicast_xmit = 0; | 1848 | ibp->z_multicast_xmit = pma.n_multicast_xmit; |
| 1821 | 1849 | ||
| 1822 | if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS) | 1850 | if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS) |
| 1823 | ibp->n_multicast_rcv = 0; | 1851 | ibp->z_multicast_rcv = pma.n_multicast_rcv; |
| 1824 | 1852 | ||
| 1825 | return pma_get_portcounters_ext(pmp, ibdev, port); | 1853 | return pma_get_portcounters_ext(pmp, ibdev, port); |
| 1826 | } | 1854 | } |
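The PortCountersExtended changes apply the same idea to the per-cpu unicast/multicast packet counters: the aggregates only ever grow, a PMA Set captures the current aggregate in a z_ baseline, and a PMA Get reports aggregate minus baseline. A small worked example of that arithmetic (plain C, values invented):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t n_unicast_xmit = 1500;  /* monotonically increasing aggregate */
        uint64_t z_unicast_xmit = 0;     /* baseline captured at the last Set  */

        /* PMA Set with the unicast-xmit select bit: capture, don't zero */
        z_unicast_xmit = n_unicast_xmit;

        /* more traffic is counted after the Set */
        n_unicast_xmit += 250;

        /* PMA Get reports the delta since the Set */
        printf("port_unicast_xmit_packets = %llu\n",
               (unsigned long long)(n_unicast_xmit - z_unicast_xmit));
        return 0;
    }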
diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c index e6687ded8210..9bbb55347cc1 100644 --- a/drivers/infiniband/hw/qib/qib_mr.c +++ b/drivers/infiniband/hw/qib/qib_mr.c | |||
| @@ -232,8 +232,8 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
| 232 | { | 232 | { |
| 233 | struct qib_mr *mr; | 233 | struct qib_mr *mr; |
| 234 | struct ib_umem *umem; | 234 | struct ib_umem *umem; |
| 235 | struct ib_umem_chunk *chunk; | 235 | struct scatterlist *sg; |
| 236 | int n, m, i; | 236 | int n, m, entry; |
| 237 | struct ib_mr *ret; | 237 | struct ib_mr *ret; |
| 238 | 238 | ||
| 239 | if (length == 0) { | 239 | if (length == 0) { |
| @@ -246,9 +246,7 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
| 246 | if (IS_ERR(umem)) | 246 | if (IS_ERR(umem)) |
| 247 | return (void *) umem; | 247 | return (void *) umem; |
| 248 | 248 | ||
| 249 | n = 0; | 249 | n = umem->nmap; |
| 250 | list_for_each_entry(chunk, &umem->chunk_list, list) | ||
| 251 | n += chunk->nents; | ||
| 252 | 250 | ||
| 253 | mr = alloc_mr(n, pd); | 251 | mr = alloc_mr(n, pd); |
| 254 | if (IS_ERR(mr)) { | 252 | if (IS_ERR(mr)) { |
| @@ -268,11 +266,10 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
| 268 | mr->mr.page_shift = ilog2(umem->page_size); | 266 | mr->mr.page_shift = ilog2(umem->page_size); |
| 269 | m = 0; | 267 | m = 0; |
| 270 | n = 0; | 268 | n = 0; |
| 271 | list_for_each_entry(chunk, &umem->chunk_list, list) { | 269 | for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { |
| 272 | for (i = 0; i < chunk->nents; i++) { | ||
| 273 | void *vaddr; | 270 | void *vaddr; |
| 274 | 271 | ||
| 275 | vaddr = page_address(sg_page(&chunk->page_list[i])); | 272 | vaddr = page_address(sg_page(sg)); |
| 276 | if (!vaddr) { | 273 | if (!vaddr) { |
| 277 | ret = ERR_PTR(-EINVAL); | 274 | ret = ERR_PTR(-EINVAL); |
| 278 | goto bail; | 275 | goto bail; |
| @@ -284,7 +281,6 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
| 284 | m++; | 281 | m++; |
| 285 | n = 0; | 282 | n = 0; |
| 286 | } | 283 | } |
| 287 | } | ||
| 288 | } | 284 | } |
| 289 | ret = &mr->ibmr; | 285 | ret = &mr->ibmr; |
| 290 | 286 | ||
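The qib_reg_user_mr() conversion above drops the old per-chunk page lists in favour of the umem's single scatterlist: umem->nmap gives the number of mapped entries and for_each_sg() walks them. A kernel-style sketch of that walk (the helper name is made up; the ib_umem fields are as used by this series):

    #include <linux/errno.h>
    #include <linux/mm.h>
    #include <linux/scatterlist.h>
    #include <rdma/ib_umem.h>

    /* count the pages of a registered user MR, failing on unmapped highmem */
    static int count_umem_pages(struct ib_umem *umem)
    {
        struct scatterlist *sg;
        int entry, n = 0;

        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
            if (!page_address(sg_page(sg)))   /* no lowmem mapping available */
                return -EINVAL;
            n++;
        }
        return n;
    }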
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index 3ab341320ead..2f2501890c4e 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c | |||
| @@ -752,7 +752,7 @@ void qib_send_rc_ack(struct qib_qp *qp) | |||
| 752 | qib_flush_wc(); | 752 | qib_flush_wc(); |
| 753 | qib_sendbuf_done(dd, pbufn); | 753 | qib_sendbuf_done(dd, pbufn); |
| 754 | 754 | ||
| 755 | ibp->n_unicast_xmit++; | 755 | this_cpu_inc(ibp->pmastats->n_unicast_xmit); |
| 756 | goto done; | 756 | goto done; |
| 757 | 757 | ||
| 758 | queue_ack: | 758 | queue_ack: |
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c index 357b6cfcd46c..4c07a8b34ffe 100644 --- a/drivers/infiniband/hw/qib/qib_ruc.c +++ b/drivers/infiniband/hw/qib/qib_ruc.c | |||
| @@ -703,6 +703,7 @@ void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr, | |||
| 703 | ohdr->bth[0] = cpu_to_be32(bth0); | 703 | ohdr->bth[0] = cpu_to_be32(bth0); |
| 704 | ohdr->bth[1] = cpu_to_be32(qp->remote_qpn); | 704 | ohdr->bth[1] = cpu_to_be32(qp->remote_qpn); |
| 705 | ohdr->bth[2] = cpu_to_be32(bth2); | 705 | ohdr->bth[2] = cpu_to_be32(bth2); |
| 706 | this_cpu_inc(ibp->pmastats->n_unicast_xmit); | ||
| 706 | } | 707 | } |
| 707 | 708 | ||
| 708 | /** | 709 | /** |
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c index 3ad651c3356c..aaf7039f8ed2 100644 --- a/drivers/infiniband/hw/qib/qib_ud.c +++ b/drivers/infiniband/hw/qib/qib_ud.c | |||
| @@ -280,11 +280,11 @@ int qib_make_ud_req(struct qib_qp *qp) | |||
| 280 | ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr; | 280 | ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr; |
| 281 | if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE) { | 281 | if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE) { |
| 282 | if (ah_attr->dlid != QIB_PERMISSIVE_LID) | 282 | if (ah_attr->dlid != QIB_PERMISSIVE_LID) |
| 283 | ibp->n_multicast_xmit++; | 283 | this_cpu_inc(ibp->pmastats->n_multicast_xmit); |
| 284 | else | 284 | else |
| 285 | ibp->n_unicast_xmit++; | 285 | this_cpu_inc(ibp->pmastats->n_unicast_xmit); |
| 286 | } else { | 286 | } else { |
| 287 | ibp->n_unicast_xmit++; | 287 | this_cpu_inc(ibp->pmastats->n_unicast_xmit); |
| 288 | lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1); | 288 | lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1); |
| 289 | if (unlikely(lid == ppd->lid)) { | 289 | if (unlikely(lid == ppd->lid)) { |
| 290 | /* | 290 | /* |
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c index 165aee2ca8a0..d2806cae234c 100644 --- a/drivers/infiniband/hw/qib/qib_user_sdma.c +++ b/drivers/infiniband/hw/qib/qib_user_sdma.c | |||
| @@ -52,6 +52,17 @@ | |||
| 52 | /* attempt to drain the queue for 5secs */ | 52 | /* attempt to drain the queue for 5secs */ |
| 53 | #define QIB_USER_SDMA_DRAIN_TIMEOUT 500 | 53 | #define QIB_USER_SDMA_DRAIN_TIMEOUT 500 |
| 54 | 54 | ||
| 55 | /* | ||
| 56 | * track how many times a process opens this driver. | ||
| 57 | */ | ||
| 58 | static struct rb_root qib_user_sdma_rb_root = RB_ROOT; | ||
| 59 | |||
| 60 | struct qib_user_sdma_rb_node { | ||
| 61 | struct rb_node node; | ||
| 62 | int refcount; | ||
| 63 | pid_t pid; | ||
| 64 | }; | ||
| 65 | |||
| 55 | struct qib_user_sdma_pkt { | 66 | struct qib_user_sdma_pkt { |
| 56 | struct list_head list; /* list element */ | 67 | struct list_head list; /* list element */ |
| 57 | 68 | ||
| @@ -120,15 +131,60 @@ struct qib_user_sdma_queue { | |||
| 120 | /* dma page table */ | 131 | /* dma page table */ |
| 121 | struct rb_root dma_pages_root; | 132 | struct rb_root dma_pages_root; |
| 122 | 133 | ||
| 134 | struct qib_user_sdma_rb_node *sdma_rb_node; | ||
| 135 | |||
| 123 | /* protect everything above... */ | 136 | /* protect everything above... */ |
| 124 | struct mutex lock; | 137 | struct mutex lock; |
| 125 | }; | 138 | }; |
| 126 | 139 | ||
| 140 | static struct qib_user_sdma_rb_node * | ||
| 141 | qib_user_sdma_rb_search(struct rb_root *root, pid_t pid) | ||
| 142 | { | ||
| 143 | struct qib_user_sdma_rb_node *sdma_rb_node; | ||
| 144 | struct rb_node *node = root->rb_node; | ||
| 145 | |||
| 146 | while (node) { | ||
| 147 | sdma_rb_node = container_of(node, | ||
| 148 | struct qib_user_sdma_rb_node, node); | ||
| 149 | if (pid < sdma_rb_node->pid) | ||
| 150 | node = node->rb_left; | ||
| 151 | else if (pid > sdma_rb_node->pid) | ||
| 152 | node = node->rb_right; | ||
| 153 | else | ||
| 154 | return sdma_rb_node; | ||
| 155 | } | ||
| 156 | return NULL; | ||
| 157 | } | ||
| 158 | |||
| 159 | static int | ||
| 160 | qib_user_sdma_rb_insert(struct rb_root *root, struct qib_user_sdma_rb_node *new) | ||
| 161 | { | ||
| 162 | struct rb_node **node = &(root->rb_node); | ||
| 163 | struct rb_node *parent = NULL; | ||
| 164 | struct qib_user_sdma_rb_node *got; | ||
| 165 | |||
| 166 | while (*node) { | ||
| 167 | got = container_of(*node, struct qib_user_sdma_rb_node, node); | ||
| 168 | parent = *node; | ||
| 169 | if (new->pid < got->pid) | ||
| 170 | node = &((*node)->rb_left); | ||
| 171 | else if (new->pid > got->pid) | ||
| 172 | node = &((*node)->rb_right); | ||
| 173 | else | ||
| 174 | return 0; | ||
| 175 | } | ||
| 176 | |||
| 177 | rb_link_node(&new->node, parent, node); | ||
| 178 | rb_insert_color(&new->node, root); | ||
| 179 | return 1; | ||
| 180 | } | ||
| 181 | |||
| 127 | struct qib_user_sdma_queue * | 182 | struct qib_user_sdma_queue * |
| 128 | qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt) | 183 | qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt) |
| 129 | { | 184 | { |
| 130 | struct qib_user_sdma_queue *pq = | 185 | struct qib_user_sdma_queue *pq = |
| 131 | kmalloc(sizeof(struct qib_user_sdma_queue), GFP_KERNEL); | 186 | kmalloc(sizeof(struct qib_user_sdma_queue), GFP_KERNEL); |
| 187 | struct qib_user_sdma_rb_node *sdma_rb_node; | ||
| 132 | 188 | ||
| 133 | if (!pq) | 189 | if (!pq) |
| 134 | goto done; | 190 | goto done; |
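qib_user_sdma_rb_insert() follows the usual kernel rbtree idiom (walk the rb_node pointers, then rb_link_node() plus rb_insert_color()) and returns 0 only when a node with the same pid already exists, 1 on success. The queue-create hunk below searches first and only inserts on a miss, which is why it can treat a zero return as a bug. A condensed sketch of that lookup-or-create idiom, assuming the caller serializes access to the shared rb root (the locking is not shown in this excerpt):

        node = qib_user_sdma_rb_search(&qib_user_sdma_rb_root, current->pid);
        if (!node) {
                node = kmalloc(sizeof(*node), GFP_KERNEL);
                if (!node)
                        return -ENOMEM;
                node->pid = current->pid;
                node->refcount = 0;
                /* cannot collide: the search above ruled out this pid */
                WARN_ON(!qib_user_sdma_rb_insert(&qib_user_sdma_rb_root, node));
        }
        node->refcount++;       /* one reference per open SDMA queue */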
| @@ -138,6 +194,7 @@ qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt) | |||
| 138 | pq->num_pending = 0; | 194 | pq->num_pending = 0; |
| 139 | pq->num_sending = 0; | 195 | pq->num_sending = 0; |
| 140 | pq->added = 0; | 196 | pq->added = 0; |
| 197 | pq->sdma_rb_node = NULL; | ||
| 141 | 198 | ||
| 142 | INIT_LIST_HEAD(&pq->sent); | 199 | INIT_LIST_HEAD(&pq->sent); |
| 143 | spin_lock_init(&pq->sent_lock); | 200 | spin_lock_init(&pq->sent_lock); |
| @@ -163,8 +220,30 @@ qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt) | |||
| 163 | 220 | ||
| 164 | pq->dma_pages_root = RB_ROOT; | 221 | pq->dma_pages_root = RB_ROOT; |
| 165 | 222 | ||
| 223 | sdma_rb_node = qib_user_sdma_rb_search(&qib_user_sdma_rb_root, | ||
| 224 | current->pid); | ||
| 225 | if (sdma_rb_node) { | ||
| 226 | sdma_rb_node->refcount++; | ||
| 227 | } else { | ||
| 228 | int ret; | ||
| 229 | sdma_rb_node = kmalloc(sizeof( | ||
| 230 | struct qib_user_sdma_rb_node), GFP_KERNEL); | ||
| 231 | if (!sdma_rb_node) | ||
| 232 | goto err_rb; | ||
| 233 | |||
| 234 | sdma_rb_node->refcount = 1; | ||
| 235 | sdma_rb_node->pid = current->pid; | ||
| 236 | |||
| 237 | ret = qib_user_sdma_rb_insert(&qib_user_sdma_rb_root, | ||
| 238 | sdma_rb_node); | ||
| 239 | BUG_ON(ret == 0); | ||
| 240 | } | ||
| 241 | pq->sdma_rb_node = sdma_rb_node; | ||
| 242 | |||
| 166 | goto done; | 243 | goto done; |
| 167 | 244 | ||
| 245 | err_rb: | ||
| 246 | dma_pool_destroy(pq->header_cache); | ||
| 168 | err_slab: | 247 | err_slab: |
| 169 | kmem_cache_destroy(pq->pkt_slab); | 248 | kmem_cache_destroy(pq->pkt_slab); |
| 170 | err_kfree: | 249 | err_kfree: |
| @@ -1020,8 +1099,13 @@ void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq) | |||
| 1020 | if (!pq) | 1099 | if (!pq) |
| 1021 | return; | 1100 | return; |
| 1022 | 1101 | ||
| 1023 | kmem_cache_destroy(pq->pkt_slab); | 1102 | pq->sdma_rb_node->refcount--; |
| 1103 | if (pq->sdma_rb_node->refcount == 0) { | ||
| 1104 | rb_erase(&pq->sdma_rb_node->node, &qib_user_sdma_rb_root); | ||
| 1105 | kfree(pq->sdma_rb_node); | ||
| 1106 | } | ||
| 1024 | dma_pool_destroy(pq->header_cache); | 1107 | dma_pool_destroy(pq->header_cache); |
| 1108 | kmem_cache_destroy(pq->pkt_slab); | ||
| 1025 | kfree(pq); | 1109 | kfree(pq); |
| 1026 | } | 1110 | } |
| 1027 | 1111 | ||
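Taken together, the create and destroy hunks keep exactly one rb node per pid, with refcount equal to the number of SDMA queues that process currently has open; the node is erased and freed only when the last queue goes away. The point of the refcount shows up in qib_user_sdma_push_pkts() below: a process with more than one queue open (a multi-rail job) takes a non-blocking path, while a single-queue process blocks and drives the ring itself. An illustrative lifecycle using the functions from this file (the two-context scenario is an example, not taken from a trace):

        /* same process opens two qib contexts, e.g. a multi-rail MPI rank */
        pq0 = qib_user_sdma_queue_create(dev, 0, ctxt0, 0); /* node allocated, refcount = 1 */
        pq1 = qib_user_sdma_queue_create(dev, 1, ctxt1, 0); /* same pid found,  refcount = 2 */

        /* while refcount > 1, writev on either queue uses the non-blocking path */

        qib_user_sdma_queue_destroy(pq1);   /* refcount back to 1: blocking path again */
        qib_user_sdma_queue_destroy(pq0);   /* refcount 0: rb node erased and freed */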
| @@ -1241,26 +1325,52 @@ static int qib_user_sdma_push_pkts(struct qib_pportdata *ppd, | |||
| 1241 | struct qib_user_sdma_queue *pq, | 1325 | struct qib_user_sdma_queue *pq, |
| 1242 | struct list_head *pktlist, int count) | 1326 | struct list_head *pktlist, int count) |
| 1243 | { | 1327 | { |
| 1244 | int ret = 0; | ||
| 1245 | unsigned long flags; | 1328 | unsigned long flags; |
| 1246 | 1329 | ||
| 1247 | if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE))) | 1330 | if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE))) |
| 1248 | return -ECOMM; | 1331 | return -ECOMM; |
| 1249 | 1332 | ||
| 1250 | spin_lock_irqsave(&ppd->sdma_lock, flags); | 1333 | /* non-blocking mode */ |
| 1251 | 1334 | if (pq->sdma_rb_node->refcount > 1) { | |
| 1252 | if (unlikely(!__qib_sdma_running(ppd))) { | 1335 | spin_lock_irqsave(&ppd->sdma_lock, flags); |
| 1253 | ret = -ECOMM; | 1336 | if (unlikely(!__qib_sdma_running(ppd))) { |
| 1254 | goto unlock; | 1337 | spin_unlock_irqrestore(&ppd->sdma_lock, flags); |
| 1338 | return -ECOMM; | ||
| 1339 | } | ||
| 1340 | pq->num_pending += count; | ||
| 1341 | list_splice_tail_init(pktlist, &ppd->sdma_userpending); | ||
| 1342 | qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending); | ||
| 1343 | spin_unlock_irqrestore(&ppd->sdma_lock, flags); | ||
| 1344 | return 0; | ||
| 1255 | } | 1345 | } |
| 1256 | 1346 | ||
| 1347 | /* In this case, descriptors from this process are not | ||
| 1348 | * linked to the ppd pending queue and the interrupt | ||
| 1349 | * handler will not touch this process, so it is OK to | ||
| 1350 | * modify the counts directly without the sdma lock. | ||
| 1351 | */ | ||
| 1352 | |||
| 1353 | |||
| 1257 | pq->num_pending += count; | 1354 | pq->num_pending += count; |
| 1258 | list_splice_tail_init(pktlist, &ppd->sdma_userpending); | 1355 | /* |
| 1259 | qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending); | 1356 | * Blocking mode for single rail process, we must |
| 1357 | * release/regain sdma_lock to give other process | ||
| 1358 | * chance to make progress. This is important for | ||
| 1359 | * performance. | ||
| 1360 | */ | ||
| 1361 | do { | ||
| 1362 | spin_lock_irqsave(&ppd->sdma_lock, flags); | ||
| 1363 | if (unlikely(!__qib_sdma_running(ppd))) { | ||
| 1364 | spin_unlock_irqrestore(&ppd->sdma_lock, flags); | ||
| 1365 | return -ECOMM; | ||
| 1366 | } | ||
| 1367 | qib_user_sdma_send_desc(ppd, pktlist); | ||
| 1368 | if (!list_empty(pktlist)) | ||
| 1369 | qib_sdma_make_progress(ppd); | ||
| 1370 | spin_unlock_irqrestore(&ppd->sdma_lock, flags); | ||
| 1371 | } while (!list_empty(pktlist)); | ||
| 1260 | 1372 | ||
| 1261 | unlock: | 1373 | return 0; |
| 1262 | spin_unlock_irqrestore(&ppd->sdma_lock, flags); | ||
| 1263 | return ret; | ||
| 1264 | } | 1374 | } |
| 1265 | 1375 | ||
| 1266 | int qib_user_sdma_writev(struct qib_ctxtdata *rcd, | 1376 | int qib_user_sdma_writev(struct qib_ctxtdata *rcd, |
| @@ -1290,7 +1400,7 @@ int qib_user_sdma_writev(struct qib_ctxtdata *rcd, | |||
| 1290 | qib_user_sdma_queue_clean(ppd, pq); | 1400 | qib_user_sdma_queue_clean(ppd, pq); |
| 1291 | 1401 | ||
| 1292 | while (dim) { | 1402 | while (dim) { |
| 1293 | int mxp = 8; | 1403 | int mxp = 1; |
| 1294 | int ndesc = 0; | 1404 | int ndesc = 0; |
| 1295 | 1405 | ||
| 1296 | ret = qib_user_sdma_queue_pkts(dd, ppd, pq, | 1406 | ret = qib_user_sdma_queue_pkts(dd, ppd, pq, |
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index 092b0bb1bb78..9bcfbd842980 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c | |||
| @@ -662,7 +662,7 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen) | |||
| 662 | mcast = qib_mcast_find(ibp, &hdr->u.l.grh.dgid); | 662 | mcast = qib_mcast_find(ibp, &hdr->u.l.grh.dgid); |
| 663 | if (mcast == NULL) | 663 | if (mcast == NULL) |
| 664 | goto drop; | 664 | goto drop; |
| 665 | ibp->n_multicast_rcv++; | 665 | this_cpu_inc(ibp->pmastats->n_multicast_rcv); |
| 666 | list_for_each_entry_rcu(p, &mcast->qp_list, list) | 666 | list_for_each_entry_rcu(p, &mcast->qp_list, list) |
| 667 | qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp); | 667 | qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp); |
| 668 | /* | 668 | /* |
| @@ -678,8 +678,8 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen) | |||
| 678 | &rcd->lookaside_qp->refcount)) | 678 | &rcd->lookaside_qp->refcount)) |
| 679 | wake_up( | 679 | wake_up( |
| 680 | &rcd->lookaside_qp->wait); | 680 | &rcd->lookaside_qp->wait); |
| 681 | rcd->lookaside_qp = NULL; | 681 | rcd->lookaside_qp = NULL; |
| 682 | } | 682 | } |
| 683 | } | 683 | } |
| 684 | if (!rcd->lookaside_qp) { | 684 | if (!rcd->lookaside_qp) { |
| 685 | qp = qib_lookup_qpn(ibp, qp_num); | 685 | qp = qib_lookup_qpn(ibp, qp_num); |
| @@ -689,7 +689,7 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen) | |||
| 689 | rcd->lookaside_qpn = qp_num; | 689 | rcd->lookaside_qpn = qp_num; |
| 690 | } else | 690 | } else |
| 691 | qp = rcd->lookaside_qp; | 691 | qp = rcd->lookaside_qp; |
| 692 | ibp->n_unicast_rcv++; | 692 | this_cpu_inc(ibp->pmastats->n_unicast_rcv); |
| 693 | qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp); | 693 | qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp); |
| 694 | } | 694 | } |
| 695 | return; | 695 | return; |
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h index a01c7d2cf541..bfc8948fdd35 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.h +++ b/drivers/infiniband/hw/qib/qib_verbs.h | |||
| @@ -664,6 +664,13 @@ struct qib_opcode_stats_perctx { | |||
| 664 | struct qib_opcode_stats stats[128]; | 664 | struct qib_opcode_stats stats[128]; |
| 665 | }; | 665 | }; |
| 666 | 666 | ||
| 667 | struct qib_pma_counters { | ||
| 668 | u64 n_unicast_xmit; /* total unicast packets sent */ | ||
| 669 | u64 n_unicast_rcv; /* total unicast packets received */ | ||
| 670 | u64 n_multicast_xmit; /* total multicast packets sent */ | ||
| 671 | u64 n_multicast_rcv; /* total multicast packets received */ | ||
| 672 | }; | ||
| 673 | |||
| 667 | struct qib_ibport { | 674 | struct qib_ibport { |
| 668 | struct qib_qp __rcu *qp0; | 675 | struct qib_qp __rcu *qp0; |
| 669 | struct qib_qp __rcu *qp1; | 676 | struct qib_qp __rcu *qp1; |
| @@ -680,10 +687,11 @@ struct qib_ibport { | |||
| 680 | __be64 mkey; | 687 | __be64 mkey; |
| 681 | __be64 guids[QIB_GUIDS_PER_PORT - 1]; /* writable GUIDs */ | 688 | __be64 guids[QIB_GUIDS_PER_PORT - 1]; /* writable GUIDs */ |
| 682 | u64 tid; /* TID for traps */ | 689 | u64 tid; /* TID for traps */ |
| 683 | u64 n_unicast_xmit; /* total unicast packets sent */ | 690 | struct qib_pma_counters __percpu *pmastats; |
| 684 | u64 n_unicast_rcv; /* total unicast packets received */ | 691 | u64 z_unicast_xmit; /* starting count for PMA */ |
| 685 | u64 n_multicast_xmit; /* total multicast packets sent */ | 692 | u64 z_unicast_rcv; /* starting count for PMA */ |
| 686 | u64 n_multicast_rcv; /* total multicast packets received */ | 693 | u64 z_multicast_xmit; /* starting count for PMA */ |
| 694 | u64 z_multicast_rcv; /* starting count for PMA */ | ||
| 687 | u64 z_symbol_error_counter; /* starting count for PMA */ | 695 | u64 z_symbol_error_counter; /* starting count for PMA */ |
| 688 | u64 z_link_error_recovery_counter; /* starting count for PMA */ | 696 | u64 z_link_error_recovery_counter; /* starting count for PMA */ |
| 689 | u64 z_link_downed_counter; /* starting count for PMA */ | 697 | u64 z_link_downed_counter; /* starting count for PMA */ |
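The per-port totals in qib_ibport are replaced here by the percpu pmastats block plus z_* snapshot fields. Reading a PMA counter then means summing the per-CPU slots and subtracting the snapshot taken at the last reset; a reset just moves the baseline instead of zeroing the percpu data. A hedged sketch of that read side (the helper name is illustrative; the real accessors live in the PMA/MAD code, which is not part of this excerpt):

        /* illustrative helper, not taken from this patch */
        static u64 qib_sum_unicast_xmit(struct qib_ibport *ibp)
        {
                u64 sum = 0;
                int cpu;

                for_each_possible_cpu(cpu)
                        sum += per_cpu_ptr(ibp->pmastats, cpu)->n_unicast_xmit;
                return sum;
        }

        /* a PMA query reports the delta since the last counter reset */
        counter = qib_sum_unicast_xmit(ibp) - ibp->z_unicast_xmit;

        /* a counter "reset" only moves the baseline forward */
        ibp->z_unicast_xmit = qib_sum_unicast_xmit(ibp);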
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c index 16755cdab2c0..801a1d6937e4 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom.c +++ b/drivers/infiniband/hw/usnic/usnic_uiom.c | |||
| @@ -286,7 +286,7 @@ iter_chunk: | |||
| 286 | err = iommu_map(pd->domain, va_start, pa_start, | 286 | err = iommu_map(pd->domain, va_start, pa_start, |
| 287 | size, flags); | 287 | size, flags); |
| 288 | if (err) { | 288 | if (err) { |
| 289 | usnic_err("Failed to map va 0x%lx pa 0x%pa size 0x%zx with err %d\n", | 289 | usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n", |
| 290 | va_start, &pa_start, size, err); | 290 | va_start, &pa_start, size, err); |
| 291 | goto err_out; | 291 | goto err_out; |
| 292 | } | 292 | } |
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index dd03cfe596d6..25f195ef44b0 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c | |||
| @@ -5,7 +5,7 @@ | |||
| 5 | * Copyright (C) 2004 Alex Aizman | 5 | * Copyright (C) 2004 Alex Aizman |
| 6 | * Copyright (C) 2005 Mike Christie | 6 | * Copyright (C) 2005 Mike Christie |
| 7 | * Copyright (c) 2005, 2006 Voltaire, Inc. All rights reserved. | 7 | * Copyright (c) 2005, 2006 Voltaire, Inc. All rights reserved. |
| 8 | * Copyright (c) 2013 Mellanox Technologies. All rights reserved. | 8 | * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved. |
| 9 | * maintained by openib-general@openib.org | 9 | * maintained by openib-general@openib.org |
| 10 | * | 10 | * |
| 11 | * This software is available to you under a choice of one of two | 11 | * This software is available to you under a choice of one of two |
| @@ -82,6 +82,8 @@ static unsigned int iscsi_max_lun = 512; | |||
| 82 | module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO); | 82 | module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO); |
| 83 | 83 | ||
| 84 | int iser_debug_level = 0; | 84 | int iser_debug_level = 0; |
| 85 | bool iser_pi_enable = false; | ||
| 86 | int iser_pi_guard = 0; | ||
| 85 | 87 | ||
| 86 | MODULE_DESCRIPTION("iSER (iSCSI Extensions for RDMA) Datamover"); | 88 | MODULE_DESCRIPTION("iSER (iSCSI Extensions for RDMA) Datamover"); |
| 87 | MODULE_LICENSE("Dual BSD/GPL"); | 89 | MODULE_LICENSE("Dual BSD/GPL"); |
| @@ -91,6 +93,12 @@ MODULE_VERSION(DRV_VER); | |||
| 91 | module_param_named(debug_level, iser_debug_level, int, 0644); | 93 | module_param_named(debug_level, iser_debug_level, int, 0644); |
| 92 | MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:disabled)"); | 94 | MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:disabled)"); |
| 93 | 95 | ||
| 96 | module_param_named(pi_enable, iser_pi_enable, bool, 0644); | ||
| 97 | MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)"); | ||
| 98 | |||
| 99 | module_param_named(pi_guard, iser_pi_guard, int, 0644); | ||
| 100 | MODULE_PARM_DESC(pi_guard, "T10-PI guard_type, 0:CRC|1:IP_CSUM (default:CRC)"); | ||
| 101 | |||
| 94 | struct iser_global ig; | 102 | struct iser_global ig; |
| 95 | 103 | ||
| 96 | void | 104 | void |
| @@ -138,8 +146,8 @@ static int iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode) | |||
| 138 | int iser_initialize_task_headers(struct iscsi_task *task, | 146 | int iser_initialize_task_headers(struct iscsi_task *task, |
| 139 | struct iser_tx_desc *tx_desc) | 147 | struct iser_tx_desc *tx_desc) |
| 140 | { | 148 | { |
| 141 | struct iscsi_iser_conn *iser_conn = task->conn->dd_data; | 149 | struct iser_conn *ib_conn = task->conn->dd_data; |
| 142 | struct iser_device *device = iser_conn->ib_conn->device; | 150 | struct iser_device *device = ib_conn->device; |
| 143 | struct iscsi_iser_task *iser_task = task->dd_data; | 151 | struct iscsi_iser_task *iser_task = task->dd_data; |
| 144 | u64 dma_addr; | 152 | u64 dma_addr; |
| 145 | 153 | ||
| @@ -153,7 +161,7 @@ int iser_initialize_task_headers(struct iscsi_task *task, | |||
| 153 | tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; | 161 | tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; |
| 154 | tx_desc->tx_sg[0].lkey = device->mr->lkey; | 162 | tx_desc->tx_sg[0].lkey = device->mr->lkey; |
| 155 | 163 | ||
| 156 | iser_task->iser_conn = iser_conn; | 164 | iser_task->ib_conn = ib_conn; |
| 157 | return 0; | 165 | return 0; |
| 158 | } | 166 | } |
| 159 | /** | 167 | /** |
| @@ -176,6 +184,8 @@ iscsi_iser_task_init(struct iscsi_task *task) | |||
| 176 | 184 | ||
| 177 | iser_task->command_sent = 0; | 185 | iser_task->command_sent = 0; |
| 178 | iser_task_rdma_init(iser_task); | 186 | iser_task_rdma_init(iser_task); |
| 187 | iser_task->sc = task->sc; | ||
| 188 | |||
| 179 | return 0; | 189 | return 0; |
| 180 | } | 190 | } |
| 181 | 191 | ||
| @@ -278,10 +288,9 @@ iscsi_iser_task_xmit(struct iscsi_task *task) | |||
| 278 | static void iscsi_iser_cleanup_task(struct iscsi_task *task) | 288 | static void iscsi_iser_cleanup_task(struct iscsi_task *task) |
| 279 | { | 289 | { |
| 280 | struct iscsi_iser_task *iser_task = task->dd_data; | 290 | struct iscsi_iser_task *iser_task = task->dd_data; |
| 281 | struct iser_tx_desc *tx_desc = &iser_task->desc; | 291 | struct iser_tx_desc *tx_desc = &iser_task->desc; |
| 282 | 292 | struct iser_conn *ib_conn = task->conn->dd_data; | |
| 283 | struct iscsi_iser_conn *iser_conn = task->conn->dd_data; | 293 | struct iser_device *device = ib_conn->device; |
| 284 | struct iser_device *device = iser_conn->ib_conn->device; | ||
| 285 | 294 | ||
| 286 | ib_dma_unmap_single(device->ib_device, | 295 | ib_dma_unmap_single(device->ib_device, |
| 287 | tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE); | 296 | tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE); |
| @@ -296,14 +305,25 @@ static void iscsi_iser_cleanup_task(struct iscsi_task *task) | |||
| 296 | } | 305 | } |
| 297 | } | 306 | } |
| 298 | 307 | ||
| 308 | static u8 iscsi_iser_check_protection(struct iscsi_task *task, sector_t *sector) | ||
| 309 | { | ||
| 310 | struct iscsi_iser_task *iser_task = task->dd_data; | ||
| 311 | |||
| 312 | if (iser_task->dir[ISER_DIR_IN]) | ||
| 313 | return iser_check_task_pi_status(iser_task, ISER_DIR_IN, | ||
| 314 | sector); | ||
| 315 | else | ||
| 316 | return iser_check_task_pi_status(iser_task, ISER_DIR_OUT, | ||
| 317 | sector); | ||
| 318 | } | ||
| 319 | |||
| 299 | static struct iscsi_cls_conn * | 320 | static struct iscsi_cls_conn * |
| 300 | iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx) | 321 | iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx) |
| 301 | { | 322 | { |
| 302 | struct iscsi_conn *conn; | 323 | struct iscsi_conn *conn; |
| 303 | struct iscsi_cls_conn *cls_conn; | 324 | struct iscsi_cls_conn *cls_conn; |
| 304 | struct iscsi_iser_conn *iser_conn; | ||
| 305 | 325 | ||
| 306 | cls_conn = iscsi_conn_setup(cls_session, sizeof(*iser_conn), conn_idx); | 326 | cls_conn = iscsi_conn_setup(cls_session, 0, conn_idx); |
| 307 | if (!cls_conn) | 327 | if (!cls_conn) |
| 308 | return NULL; | 328 | return NULL; |
| 309 | conn = cls_conn->dd_data; | 329 | conn = cls_conn->dd_data; |
| @@ -314,10 +334,6 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx) | |||
| 314 | */ | 334 | */ |
| 315 | conn->max_recv_dlength = ISER_RECV_DATA_SEG_LEN; | 335 | conn->max_recv_dlength = ISER_RECV_DATA_SEG_LEN; |
| 316 | 336 | ||
| 317 | iser_conn = conn->dd_data; | ||
| 318 | conn->dd_data = iser_conn; | ||
| 319 | iser_conn->iscsi_conn = conn; | ||
| 320 | |||
| 321 | return cls_conn; | 337 | return cls_conn; |
| 322 | } | 338 | } |
| 323 | 339 | ||
| @@ -325,8 +341,7 @@ static void | |||
| 325 | iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn) | 341 | iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn) |
| 326 | { | 342 | { |
| 327 | struct iscsi_conn *conn = cls_conn->dd_data; | 343 | struct iscsi_conn *conn = cls_conn->dd_data; |
| 328 | struct iscsi_iser_conn *iser_conn = conn->dd_data; | 344 | struct iser_conn *ib_conn = conn->dd_data; |
| 329 | struct iser_conn *ib_conn = iser_conn->ib_conn; | ||
| 330 | 345 | ||
| 331 | iscsi_conn_teardown(cls_conn); | 346 | iscsi_conn_teardown(cls_conn); |
| 332 | /* | 347 | /* |
| @@ -335,7 +350,7 @@ iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn) | |||
| 335 | * we free it here. | 350 | * we free it here. |
| 336 | */ | 351 | */ |
| 337 | if (ib_conn) { | 352 | if (ib_conn) { |
| 338 | ib_conn->iser_conn = NULL; | 353 | ib_conn->iscsi_conn = NULL; |
| 339 | iser_conn_put(ib_conn, 1); /* deref iscsi/ib conn unbinding */ | 354 | iser_conn_put(ib_conn, 1); /* deref iscsi/ib conn unbinding */ |
| 340 | } | 355 | } |
| 341 | } | 356 | } |
| @@ -346,7 +361,6 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, | |||
| 346 | int is_leading) | 361 | int is_leading) |
| 347 | { | 362 | { |
| 348 | struct iscsi_conn *conn = cls_conn->dd_data; | 363 | struct iscsi_conn *conn = cls_conn->dd_data; |
| 349 | struct iscsi_iser_conn *iser_conn; | ||
| 350 | struct iscsi_session *session; | 364 | struct iscsi_session *session; |
| 351 | struct iser_conn *ib_conn; | 365 | struct iser_conn *ib_conn; |
| 352 | struct iscsi_endpoint *ep; | 366 | struct iscsi_endpoint *ep; |
| @@ -373,11 +387,11 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, | |||
| 373 | /* binds the iSER connection retrieved from the previously | 387 | /* binds the iSER connection retrieved from the previously |
| 374 | * connected ep_handle to the iSCSI layer connection. exchanges | 388 | * connected ep_handle to the iSCSI layer connection. exchanges |
| 375 | * connection pointers */ | 389 | * connection pointers */ |
| 376 | iser_info("binding iscsi/iser conn %p %p to ib_conn %p\n", | 390 | iser_info("binding iscsi conn %p to ib_conn %p\n", conn, ib_conn); |
| 377 | conn, conn->dd_data, ib_conn); | 391 | |
| 378 | iser_conn = conn->dd_data; | 392 | conn->dd_data = ib_conn; |
| 379 | ib_conn->iser_conn = iser_conn; | 393 | ib_conn->iscsi_conn = conn; |
| 380 | iser_conn->ib_conn = ib_conn; | 394 | |
| 381 | iser_conn_get(ib_conn); /* ref iscsi/ib conn binding */ | 395 | iser_conn_get(ib_conn); /* ref iscsi/ib conn binding */ |
| 382 | return 0; | 396 | return 0; |
| 383 | } | 397 | } |
| @@ -386,8 +400,7 @@ static void | |||
| 386 | iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) | 400 | iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) |
| 387 | { | 401 | { |
| 388 | struct iscsi_conn *conn = cls_conn->dd_data; | 402 | struct iscsi_conn *conn = cls_conn->dd_data; |
| 389 | struct iscsi_iser_conn *iser_conn = conn->dd_data; | 403 | struct iser_conn *ib_conn = conn->dd_data; |
| 390 | struct iser_conn *ib_conn = iser_conn->ib_conn; | ||
| 391 | 404 | ||
| 392 | /* | 405 | /* |
| 393 | * Userspace may have goofed up and not bound the connection or | 406 | * Userspace may have goofed up and not bound the connection or |
| @@ -401,7 +414,7 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) | |||
| 401 | */ | 414 | */ |
| 402 | iser_conn_put(ib_conn, 1); /* deref iscsi/ib conn unbinding */ | 415 | iser_conn_put(ib_conn, 1); /* deref iscsi/ib conn unbinding */ |
| 403 | } | 416 | } |
| 404 | iser_conn->ib_conn = NULL; | 417 | conn->dd_data = NULL; |
| 405 | } | 418 | } |
| 406 | 419 | ||
| 407 | static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session) | 420 | static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session) |
| @@ -413,6 +426,17 @@ static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session) | |||
| 413 | iscsi_host_free(shost); | 426 | iscsi_host_free(shost); |
| 414 | } | 427 | } |
| 415 | 428 | ||
| 429 | static inline unsigned int | ||
| 430 | iser_dif_prot_caps(int prot_caps) | ||
| 431 | { | ||
| 432 | return ((prot_caps & IB_PROT_T10DIF_TYPE_1) ? SHOST_DIF_TYPE1_PROTECTION | | ||
| 433 | SHOST_DIX_TYPE1_PROTECTION : 0) | | ||
| 434 | ((prot_caps & IB_PROT_T10DIF_TYPE_2) ? SHOST_DIF_TYPE2_PROTECTION | | ||
| 435 | SHOST_DIX_TYPE2_PROTECTION : 0) | | ||
| 436 | ((prot_caps & IB_PROT_T10DIF_TYPE_3) ? SHOST_DIF_TYPE3_PROTECTION | | ||
| 437 | SHOST_DIX_TYPE3_PROTECTION : 0); | ||
| 438 | } | ||
| 439 | |||
| 416 | static struct iscsi_cls_session * | 440 | static struct iscsi_cls_session * |
| 417 | iscsi_iser_session_create(struct iscsi_endpoint *ep, | 441 | iscsi_iser_session_create(struct iscsi_endpoint *ep, |
| 418 | uint16_t cmds_max, uint16_t qdepth, | 442 | uint16_t cmds_max, uint16_t qdepth, |
| @@ -437,8 +461,18 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep, | |||
| 437 | * older userspace tools (before 2.0-870) did not pass us | 461 | * older userspace tools (before 2.0-870) did not pass us |
| 438 | * the leading conn's ep so this will be NULL; | 462 | * the leading conn's ep so this will be NULL; |
| 439 | */ | 463 | */ |
| 440 | if (ep) | 464 | if (ep) { |
| 441 | ib_conn = ep->dd_data; | 465 | ib_conn = ep->dd_data; |
| 466 | if (ib_conn->pi_support) { | ||
| 467 | u32 sig_caps = ib_conn->device->dev_attr.sig_prot_cap; | ||
| 468 | |||
| 469 | scsi_host_set_prot(shost, iser_dif_prot_caps(sig_caps)); | ||
| 470 | if (iser_pi_guard) | ||
| 471 | scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP); | ||
| 472 | else | ||
| 473 | scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); | ||
| 474 | } | ||
| 475 | } | ||
| 442 | 476 | ||
| 443 | if (iscsi_host_add(shost, | 477 | if (iscsi_host_add(shost, |
| 444 | ep ? ib_conn->device->ib_device->dma_device : NULL)) | 478 | ep ? ib_conn->device->ib_device->dma_device : NULL)) |
| @@ -618,7 +652,7 @@ iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep) | |||
| 618 | struct iser_conn *ib_conn; | 652 | struct iser_conn *ib_conn; |
| 619 | 653 | ||
| 620 | ib_conn = ep->dd_data; | 654 | ib_conn = ep->dd_data; |
| 621 | if (ib_conn->iser_conn) | 655 | if (ib_conn->iscsi_conn) |
| 622 | /* | 656 | /* |
| 623 | * Must suspend xmit path if the ep is bound to the | 657 | * Must suspend xmit path if the ep is bound to the |
| 624 | * iscsi_conn, so we know we are not accessing the ib_conn | 658 | * iscsi_conn, so we know we are not accessing the ib_conn |
| @@ -626,7 +660,7 @@ iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep) | |||
| 626 | * | 660 | * |
| 627 | * This may not be bound if the ep poll failed. | 661 | * This may not be bound if the ep poll failed. |
| 628 | */ | 662 | */ |
| 629 | iscsi_suspend_tx(ib_conn->iser_conn->iscsi_conn); | 663 | iscsi_suspend_tx(ib_conn->iscsi_conn); |
| 630 | 664 | ||
| 631 | 665 | ||
| 632 | iser_info("ib conn %p state %d\n", ib_conn, ib_conn->state); | 666 | iser_info("ib conn %p state %d\n", ib_conn, ib_conn->state); |
| @@ -732,6 +766,7 @@ static struct iscsi_transport iscsi_iser_transport = { | |||
| 732 | .xmit_task = iscsi_iser_task_xmit, | 766 | .xmit_task = iscsi_iser_task_xmit, |
| 733 | .cleanup_task = iscsi_iser_cleanup_task, | 767 | .cleanup_task = iscsi_iser_cleanup_task, |
| 734 | .alloc_pdu = iscsi_iser_pdu_alloc, | 768 | .alloc_pdu = iscsi_iser_pdu_alloc, |
| 769 | .check_protection = iscsi_iser_check_protection, | ||
| 735 | /* recovery */ | 770 | /* recovery */ |
| 736 | .session_recovery_timedout = iscsi_session_recovery_timedout, | 771 | .session_recovery_timedout = iscsi_session_recovery_timedout, |
| 737 | 772 | ||
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 67914027c614..324129f80d40 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h | |||
| @@ -8,7 +8,7 @@ | |||
| 8 | * | 8 | * |
| 9 | * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. | 9 | * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. |
| 10 | * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. | 10 | * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. |
| 11 | * Copyright (c) 2013 Mellanox Technologies. All rights reserved. | 11 | * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved. |
| 12 | * | 12 | * |
| 13 | * This software is available to you under a choice of one of two | 13 | * This software is available to you under a choice of one of two |
| 14 | * licenses. You may choose to be licensed under the terms of the GNU | 14 | * licenses. You may choose to be licensed under the terms of the GNU |
| @@ -46,6 +46,8 @@ | |||
| 46 | #include <linux/printk.h> | 46 | #include <linux/printk.h> |
| 47 | #include <scsi/libiscsi.h> | 47 | #include <scsi/libiscsi.h> |
| 48 | #include <scsi/scsi_transport_iscsi.h> | 48 | #include <scsi/scsi_transport_iscsi.h> |
| 49 | #include <scsi/scsi_cmnd.h> | ||
| 50 | #include <scsi/scsi_device.h> | ||
| 49 | 51 | ||
| 50 | #include <linux/interrupt.h> | 52 | #include <linux/interrupt.h> |
| 51 | #include <linux/wait.h> | 53 | #include <linux/wait.h> |
| @@ -67,7 +69,7 @@ | |||
| 67 | 69 | ||
| 68 | #define DRV_NAME "iser" | 70 | #define DRV_NAME "iser" |
| 69 | #define PFX DRV_NAME ": " | 71 | #define PFX DRV_NAME ": " |
| 70 | #define DRV_VER "1.1" | 72 | #define DRV_VER "1.3" |
| 71 | 73 | ||
| 72 | #define iser_dbg(fmt, arg...) \ | 74 | #define iser_dbg(fmt, arg...) \ |
| 73 | do { \ | 75 | do { \ |
| @@ -134,10 +136,21 @@ | |||
| 134 | ISER_MAX_TX_MISC_PDUS + \ | 136 | ISER_MAX_TX_MISC_PDUS + \ |
| 135 | ISER_MAX_RX_MISC_PDUS) | 137 | ISER_MAX_RX_MISC_PDUS) |
| 136 | 138 | ||
| 139 | /* Max registration work requests per command */ | ||
| 140 | #define ISER_MAX_REG_WR_PER_CMD 5 | ||
| 141 | |||
| 142 | /* For signature offload we don't support DATAOUTs, so no need to make room for them */ | ||
| 143 | #define ISER_QP_SIG_MAX_REQ_DTOS (ISER_DEF_XMIT_CMDS_MAX * \ | ||
| 144 | (1 + ISER_MAX_REG_WR_PER_CMD) + \ | ||
| 145 | ISER_MAX_TX_MISC_PDUS + \ | ||
| 146 | ISER_MAX_RX_MISC_PDUS) | ||
| 147 | |||
| 137 | #define ISER_VER 0x10 | 148 | #define ISER_VER 0x10 |
| 138 | #define ISER_WSV 0x08 | 149 | #define ISER_WSV 0x08 |
| 139 | #define ISER_RSV 0x04 | 150 | #define ISER_RSV 0x04 |
| 140 | 151 | ||
| 152 | #define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL | ||
| 153 | |||
| 141 | struct iser_hdr { | 154 | struct iser_hdr { |
| 142 | u8 flags; | 155 | u8 flags; |
| 143 | u8 rsvd[3]; | 156 | u8 rsvd[3]; |
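ISER_QP_SIG_MAX_REQ_DTOS sizes the send queue for the signature-enabled case: per outstanding command it reserves one send for the PDU itself plus up to ISER_MAX_REG_WR_PER_CMD registration work requests, and it drops the DATAOUT allowance because, as the comment notes, DATAOUTs are not supported together with signature offload. A worked example with a hypothetical ISER_DEF_XMIT_CMDS_MAX of 256 (the real value is defined elsewhere in this header and is not shown in this excerpt):

        ISER_QP_SIG_MAX_REQ_DTOS = 256 * (1 + 5)
                                   + ISER_MAX_TX_MISC_PDUS
                                   + ISER_MAX_RX_MISC_PDUS
                                 = 1536 plus the miscellaneous TX/RX PDU slots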
| @@ -201,7 +214,6 @@ struct iser_data_buf { | |||
| 201 | /* fwd declarations */ | 214 | /* fwd declarations */ |
| 202 | struct iser_device; | 215 | struct iser_device; |
| 203 | struct iser_cq_desc; | 216 | struct iser_cq_desc; |
| 204 | struct iscsi_iser_conn; | ||
| 205 | struct iscsi_iser_task; | 217 | struct iscsi_iser_task; |
| 206 | struct iscsi_endpoint; | 218 | struct iscsi_endpoint; |
| 207 | 219 | ||
| @@ -258,6 +270,7 @@ struct iscsi_iser_task; | |||
| 258 | struct iser_device { | 270 | struct iser_device { |
| 259 | struct ib_device *ib_device; | 271 | struct ib_device *ib_device; |
| 260 | struct ib_pd *pd; | 272 | struct ib_pd *pd; |
| 273 | struct ib_device_attr dev_attr; | ||
| 261 | struct ib_cq *rx_cq[ISER_MAX_CQ]; | 274 | struct ib_cq *rx_cq[ISER_MAX_CQ]; |
| 262 | struct ib_cq *tx_cq[ISER_MAX_CQ]; | 275 | struct ib_cq *tx_cq[ISER_MAX_CQ]; |
| 263 | struct ib_mr *mr; | 276 | struct ib_mr *mr; |
| @@ -277,17 +290,35 @@ struct iser_device { | |||
| 277 | enum iser_data_dir cmd_dir); | 290 | enum iser_data_dir cmd_dir); |
| 278 | }; | 291 | }; |
| 279 | 292 | ||
| 293 | #define ISER_CHECK_GUARD 0xc0 | ||
| 294 | #define ISER_CHECK_REFTAG 0x0f | ||
| 295 | #define ISER_CHECK_APPTAG 0x30 | ||
| 296 | |||
| 297 | enum iser_reg_indicator { | ||
| 298 | ISER_DATA_KEY_VALID = 1 << 0, | ||
| 299 | ISER_PROT_KEY_VALID = 1 << 1, | ||
| 300 | ISER_SIG_KEY_VALID = 1 << 2, | ||
| 301 | ISER_FASTREG_PROTECTED = 1 << 3, | ||
| 302 | }; | ||
| 303 | |||
| 304 | struct iser_pi_context { | ||
| 305 | struct ib_mr *prot_mr; | ||
| 306 | struct ib_fast_reg_page_list *prot_frpl; | ||
| 307 | struct ib_mr *sig_mr; | ||
| 308 | }; | ||
| 309 | |||
| 280 | struct fast_reg_descriptor { | 310 | struct fast_reg_descriptor { |
| 281 | struct list_head list; | 311 | struct list_head list; |
| 282 | /* For fast registration - FRWR */ | 312 | /* For fast registration - FRWR */ |
| 283 | struct ib_mr *data_mr; | 313 | struct ib_mr *data_mr; |
| 284 | struct ib_fast_reg_page_list *data_frpl; | 314 | struct ib_fast_reg_page_list *data_frpl; |
| 285 | /* Valid for fast registration flag */ | 315 | struct iser_pi_context *pi_ctx; |
| 286 | bool valid; | 316 | /* registration indicators container */ |
| 317 | u8 reg_indicators; | ||
| 287 | }; | 318 | }; |
| 288 | 319 | ||
| 289 | struct iser_conn { | 320 | struct iser_conn { |
| 290 | struct iscsi_iser_conn *iser_conn; /* iser conn for upcalls */ | 321 | struct iscsi_conn *iscsi_conn; |
| 291 | struct iscsi_endpoint *ep; | 322 | struct iscsi_endpoint *ep; |
| 292 | enum iser_ib_conn_state state; /* rdma connection state */ | 323 | enum iser_ib_conn_state state; /* rdma connection state */ |
| 293 | atomic_t refcount; | 324 | atomic_t refcount; |
| @@ -310,6 +341,9 @@ struct iser_conn { | |||
| 310 | unsigned int rx_desc_head; | 341 | unsigned int rx_desc_head; |
| 311 | struct iser_rx_desc *rx_descs; | 342 | struct iser_rx_desc *rx_descs; |
| 312 | struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX]; | 343 | struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX]; |
| 344 | bool pi_support; | ||
| 345 | |||
| 346 | /* Connection memory registration pool */ | ||
| 313 | union { | 347 | union { |
| 314 | struct { | 348 | struct { |
| 315 | struct ib_fmr_pool *pool; /* pool of IB FMRs */ | 349 | struct ib_fmr_pool *pool; /* pool of IB FMRs */ |
| @@ -319,24 +353,22 @@ struct iser_conn { | |||
| 319 | struct { | 353 | struct { |
| 320 | struct list_head pool; | 354 | struct list_head pool; |
| 321 | int pool_size; | 355 | int pool_size; |
| 322 | } frwr; | 356 | } fastreg; |
| 323 | } fastreg; | 357 | }; |
| 324 | }; | ||
| 325 | |||
| 326 | struct iscsi_iser_conn { | ||
| 327 | struct iscsi_conn *iscsi_conn;/* ptr to iscsi conn */ | ||
| 328 | struct iser_conn *ib_conn; /* iSER IB conn */ | ||
| 329 | }; | 358 | }; |
| 330 | 359 | ||
| 331 | struct iscsi_iser_task { | 360 | struct iscsi_iser_task { |
| 332 | struct iser_tx_desc desc; | 361 | struct iser_tx_desc desc; |
| 333 | struct iscsi_iser_conn *iser_conn; | 362 | struct iser_conn *ib_conn; |
| 334 | enum iser_task_status status; | 363 | enum iser_task_status status; |
| 364 | struct scsi_cmnd *sc; | ||
| 335 | int command_sent; /* set if command sent */ | 365 | int command_sent; /* set if command sent */ |
| 336 | int dir[ISER_DIRS_NUM]; /* set if dir use*/ | 366 | int dir[ISER_DIRS_NUM]; /* set if dir use*/ |
| 337 | struct iser_regd_buf rdma_regd[ISER_DIRS_NUM];/* regd rdma buf */ | 367 | struct iser_regd_buf rdma_regd[ISER_DIRS_NUM];/* regd rdma buf */ |
| 338 | struct iser_data_buf data[ISER_DIRS_NUM]; /* orig. data des*/ | 368 | struct iser_data_buf data[ISER_DIRS_NUM]; /* orig. data des*/ |
| 339 | struct iser_data_buf data_copy[ISER_DIRS_NUM];/* contig. copy */ | 369 | struct iser_data_buf data_copy[ISER_DIRS_NUM];/* contig. copy */ |
| 370 | struct iser_data_buf prot[ISER_DIRS_NUM]; /* prot desc */ | ||
| 371 | struct iser_data_buf prot_copy[ISER_DIRS_NUM];/* prot copy */ | ||
| 340 | }; | 372 | }; |
| 341 | 373 | ||
| 342 | struct iser_page_vec { | 374 | struct iser_page_vec { |
| @@ -362,6 +394,8 @@ struct iser_global { | |||
| 362 | 394 | ||
| 363 | extern struct iser_global ig; | 395 | extern struct iser_global ig; |
| 364 | extern int iser_debug_level; | 396 | extern int iser_debug_level; |
| 397 | extern bool iser_pi_enable; | ||
| 398 | extern int iser_pi_guard; | ||
| 365 | 399 | ||
| 366 | /* allocate connection resources needed for rdma functionality */ | 400 | /* allocate connection resources needed for rdma functionality */ |
| 367 | int iser_conn_set_full_featured_mode(struct iscsi_conn *conn); | 401 | int iser_conn_set_full_featured_mode(struct iscsi_conn *conn); |
| @@ -401,13 +435,15 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *task); | |||
| 401 | 435 | ||
| 402 | void iser_free_rx_descriptors(struct iser_conn *ib_conn); | 436 | void iser_free_rx_descriptors(struct iser_conn *ib_conn); |
| 403 | 437 | ||
| 404 | void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task, | 438 | void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, |
| 405 | enum iser_data_dir cmd_dir); | 439 | struct iser_data_buf *mem, |
| 440 | struct iser_data_buf *mem_copy, | ||
| 441 | enum iser_data_dir cmd_dir); | ||
| 406 | 442 | ||
| 407 | int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *task, | 443 | int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *task, |
| 408 | enum iser_data_dir cmd_dir); | 444 | enum iser_data_dir cmd_dir); |
| 409 | int iser_reg_rdma_mem_frwr(struct iscsi_iser_task *task, | 445 | int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *task, |
| 410 | enum iser_data_dir cmd_dir); | 446 | enum iser_data_dir cmd_dir); |
| 411 | 447 | ||
| 412 | int iser_connect(struct iser_conn *ib_conn, | 448 | int iser_connect(struct iser_conn *ib_conn, |
| 413 | struct sockaddr_in *src_addr, | 449 | struct sockaddr_in *src_addr, |
| @@ -420,8 +456,8 @@ int iser_reg_page_vec(struct iser_conn *ib_conn, | |||
| 420 | 456 | ||
| 421 | void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task, | 457 | void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task, |
| 422 | enum iser_data_dir cmd_dir); | 458 | enum iser_data_dir cmd_dir); |
| 423 | void iser_unreg_mem_frwr(struct iscsi_iser_task *iser_task, | 459 | void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task, |
| 424 | enum iser_data_dir cmd_dir); | 460 | enum iser_data_dir cmd_dir); |
| 425 | 461 | ||
| 426 | int iser_post_recvl(struct iser_conn *ib_conn); | 462 | int iser_post_recvl(struct iser_conn *ib_conn); |
| 427 | int iser_post_recvm(struct iser_conn *ib_conn, int count); | 463 | int iser_post_recvm(struct iser_conn *ib_conn, int count); |
| @@ -432,12 +468,15 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task, | |||
| 432 | enum iser_data_dir iser_dir, | 468 | enum iser_data_dir iser_dir, |
| 433 | enum dma_data_direction dma_dir); | 469 | enum dma_data_direction dma_dir); |
| 434 | 470 | ||
| 435 | void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task); | 471 | void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task, |
| 472 | struct iser_data_buf *data); | ||
| 436 | int iser_initialize_task_headers(struct iscsi_task *task, | 473 | int iser_initialize_task_headers(struct iscsi_task *task, |
| 437 | struct iser_tx_desc *tx_desc); | 474 | struct iser_tx_desc *tx_desc); |
| 438 | int iser_alloc_rx_descriptors(struct iser_conn *ib_conn, struct iscsi_session *session); | 475 | int iser_alloc_rx_descriptors(struct iser_conn *ib_conn, struct iscsi_session *session); |
| 439 | int iser_create_fmr_pool(struct iser_conn *ib_conn, unsigned cmds_max); | 476 | int iser_create_fmr_pool(struct iser_conn *ib_conn, unsigned cmds_max); |
| 440 | void iser_free_fmr_pool(struct iser_conn *ib_conn); | 477 | void iser_free_fmr_pool(struct iser_conn *ib_conn); |
| 441 | int iser_create_frwr_pool(struct iser_conn *ib_conn, unsigned cmds_max); | 478 | int iser_create_fastreg_pool(struct iser_conn *ib_conn, unsigned cmds_max); |
| 442 | void iser_free_frwr_pool(struct iser_conn *ib_conn); | 479 | void iser_free_fastreg_pool(struct iser_conn *ib_conn); |
| 480 | u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task, | ||
| 481 | enum iser_data_dir cmd_dir, sector_t *sector); | ||
| 443 | #endif | 482 | #endif |
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index 334f34b1cd46..2e2d903db838 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. | 2 | * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. |
| 3 | * Copyright (c) 2013 Mellanox Technologies. All rights reserved. | 3 | * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved. |
| 4 | * | 4 | * |
| 5 | * This software is available to you under a choice of one of two | 5 | * This software is available to you under a choice of one of two |
| 6 | * licenses. You may choose to be licensed under the terms of the GNU | 6 | * licenses. You may choose to be licensed under the terms of the GNU |
| @@ -49,7 +49,7 @@ static int iser_prepare_read_cmd(struct iscsi_task *task, | |||
| 49 | 49 | ||
| 50 | { | 50 | { |
| 51 | struct iscsi_iser_task *iser_task = task->dd_data; | 51 | struct iscsi_iser_task *iser_task = task->dd_data; |
| 52 | struct iser_device *device = iser_task->iser_conn->ib_conn->device; | 52 | struct iser_device *device = iser_task->ib_conn->device; |
| 53 | struct iser_regd_buf *regd_buf; | 53 | struct iser_regd_buf *regd_buf; |
| 54 | int err; | 54 | int err; |
| 55 | struct iser_hdr *hdr = &iser_task->desc.iser_header; | 55 | struct iser_hdr *hdr = &iser_task->desc.iser_header; |
| @@ -62,11 +62,22 @@ static int iser_prepare_read_cmd(struct iscsi_task *task, | |||
| 62 | if (err) | 62 | if (err) |
| 63 | return err; | 63 | return err; |
| 64 | 64 | ||
| 65 | if (scsi_prot_sg_count(iser_task->sc)) { | ||
| 66 | struct iser_data_buf *pbuf_in = &iser_task->prot[ISER_DIR_IN]; | ||
| 67 | |||
| 68 | err = iser_dma_map_task_data(iser_task, | ||
| 69 | pbuf_in, | ||
| 70 | ISER_DIR_IN, | ||
| 71 | DMA_FROM_DEVICE); | ||
| 72 | if (err) | ||
| 73 | return err; | ||
| 74 | } | ||
| 75 | |||
| 65 | if (edtl > iser_task->data[ISER_DIR_IN].data_len) { | 76 | if (edtl > iser_task->data[ISER_DIR_IN].data_len) { |
| 66 | iser_err("Total data length: %ld, less than EDTL: " | 77 | iser_err("Total data length: %ld, less than EDTL: " |
| 67 | "%d, in READ cmd BHS itt: %d, conn: 0x%p\n", | 78 | "%d, in READ cmd BHS itt: %d, conn: 0x%p\n", |
| 68 | iser_task->data[ISER_DIR_IN].data_len, edtl, | 79 | iser_task->data[ISER_DIR_IN].data_len, edtl, |
| 69 | task->itt, iser_task->iser_conn); | 80 | task->itt, iser_task->ib_conn); |
| 70 | return -EINVAL; | 81 | return -EINVAL; |
| 71 | } | 82 | } |
| 72 | 83 | ||
| @@ -99,7 +110,7 @@ iser_prepare_write_cmd(struct iscsi_task *task, | |||
| 99 | unsigned int edtl) | 110 | unsigned int edtl) |
| 100 | { | 111 | { |
| 101 | struct iscsi_iser_task *iser_task = task->dd_data; | 112 | struct iscsi_iser_task *iser_task = task->dd_data; |
| 102 | struct iser_device *device = iser_task->iser_conn->ib_conn->device; | 113 | struct iser_device *device = iser_task->ib_conn->device; |
| 103 | struct iser_regd_buf *regd_buf; | 114 | struct iser_regd_buf *regd_buf; |
| 104 | int err; | 115 | int err; |
| 105 | struct iser_hdr *hdr = &iser_task->desc.iser_header; | 116 | struct iser_hdr *hdr = &iser_task->desc.iser_header; |
| @@ -113,6 +124,17 @@ iser_prepare_write_cmd(struct iscsi_task *task, | |||
| 113 | if (err) | 124 | if (err) |
| 114 | return err; | 125 | return err; |
| 115 | 126 | ||
| 127 | if (scsi_prot_sg_count(iser_task->sc)) { | ||
| 128 | struct iser_data_buf *pbuf_out = &iser_task->prot[ISER_DIR_OUT]; | ||
| 129 | |||
| 130 | err = iser_dma_map_task_data(iser_task, | ||
| 131 | pbuf_out, | ||
| 132 | ISER_DIR_OUT, | ||
| 133 | DMA_TO_DEVICE); | ||
| 134 | if (err) | ||
| 135 | return err; | ||
| 136 | } | ||
| 137 | |||
| 116 | if (edtl > iser_task->data[ISER_DIR_OUT].data_len) { | 138 | if (edtl > iser_task->data[ISER_DIR_OUT].data_len) { |
| 117 | iser_err("Total data length: %ld, less than EDTL: %d, " | 139 | iser_err("Total data length: %ld, less than EDTL: %d, " |
| 118 | "in WRITE cmd BHS itt: %d, conn: 0x%p\n", | 140 | "in WRITE cmd BHS itt: %d, conn: 0x%p\n", |
| @@ -327,7 +349,7 @@ free_login_buf: | |||
| 327 | 349 | ||
| 328 | static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req) | 350 | static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req) |
| 329 | { | 351 | { |
| 330 | struct iscsi_iser_conn *iser_conn = conn->dd_data; | 352 | struct iser_conn *ib_conn = conn->dd_data; |
| 331 | struct iscsi_session *session = conn->session; | 353 | struct iscsi_session *session = conn->session; |
| 332 | 354 | ||
| 333 | iser_dbg("req op %x flags %x\n", req->opcode, req->flags); | 355 | iser_dbg("req op %x flags %x\n", req->opcode, req->flags); |
| @@ -340,19 +362,18 @@ static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req) | |||
| 340 | * response) and no posted send buffers left - they must have been | 362 | * response) and no posted send buffers left - they must have been |
| 341 | * consumed during previous login phases. | 363 | * consumed during previous login phases. |
| 342 | */ | 364 | */ |
| 343 | WARN_ON(iser_conn->ib_conn->post_recv_buf_count != 1); | 365 | WARN_ON(ib_conn->post_recv_buf_count != 1); |
| 344 | WARN_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0); | 366 | WARN_ON(atomic_read(&ib_conn->post_send_buf_count) != 0); |
| 345 | 367 | ||
| 346 | if (session->discovery_sess) { | 368 | if (session->discovery_sess) { |
| 347 | iser_info("Discovery session, re-using login RX buffer\n"); | 369 | iser_info("Discovery session, re-using login RX buffer\n"); |
| 348 | return 0; | 370 | return 0; |
| 349 | } else | 371 | } else |
| 350 | iser_info("Normal session, posting batch of RX %d buffers\n", | 372 | iser_info("Normal session, posting batch of RX %d buffers\n", |
| 351 | iser_conn->ib_conn->min_posted_rx); | 373 | ib_conn->min_posted_rx); |
| 352 | 374 | ||
| 353 | /* Initial post receive buffers */ | 375 | /* Initial post receive buffers */ |
| 354 | if (iser_post_recvm(iser_conn->ib_conn, | 376 | if (iser_post_recvm(ib_conn, ib_conn->min_posted_rx)) |
| 355 | iser_conn->ib_conn->min_posted_rx)) | ||
| 356 | return -ENOMEM; | 377 | return -ENOMEM; |
| 357 | 378 | ||
| 358 | return 0; | 379 | return 0; |
| @@ -364,11 +385,11 @@ static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req) | |||
| 364 | int iser_send_command(struct iscsi_conn *conn, | 385 | int iser_send_command(struct iscsi_conn *conn, |
| 365 | struct iscsi_task *task) | 386 | struct iscsi_task *task) |
| 366 | { | 387 | { |
| 367 | struct iscsi_iser_conn *iser_conn = conn->dd_data; | 388 | struct iser_conn *ib_conn = conn->dd_data; |
| 368 | struct iscsi_iser_task *iser_task = task->dd_data; | 389 | struct iscsi_iser_task *iser_task = task->dd_data; |
| 369 | unsigned long edtl; | 390 | unsigned long edtl; |
| 370 | int err; | 391 | int err; |
| 371 | struct iser_data_buf *data_buf; | 392 | struct iser_data_buf *data_buf, *prot_buf; |
| 372 | struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr; | 393 | struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr; |
| 373 | struct scsi_cmnd *sc = task->sc; | 394 | struct scsi_cmnd *sc = task->sc; |
| 374 | struct iser_tx_desc *tx_desc = &iser_task->desc; | 395 | struct iser_tx_desc *tx_desc = &iser_task->desc; |
| @@ -377,20 +398,28 @@ int iser_send_command(struct iscsi_conn *conn, | |||
| 377 | 398 | ||
| 378 | /* build the tx desc regd header and add it to the tx desc dto */ | 399 | /* build the tx desc regd header and add it to the tx desc dto */ |
| 379 | tx_desc->type = ISCSI_TX_SCSI_COMMAND; | 400 | tx_desc->type = ISCSI_TX_SCSI_COMMAND; |
| 380 | iser_create_send_desc(iser_conn->ib_conn, tx_desc); | 401 | iser_create_send_desc(ib_conn, tx_desc); |
| 381 | 402 | ||
| 382 | if (hdr->flags & ISCSI_FLAG_CMD_READ) | 403 | if (hdr->flags & ISCSI_FLAG_CMD_READ) { |
| 383 | data_buf = &iser_task->data[ISER_DIR_IN]; | 404 | data_buf = &iser_task->data[ISER_DIR_IN]; |
| 384 | else | 405 | prot_buf = &iser_task->prot[ISER_DIR_IN]; |
| 406 | } else { | ||
| 385 | data_buf = &iser_task->data[ISER_DIR_OUT]; | 407 | data_buf = &iser_task->data[ISER_DIR_OUT]; |
| 408 | prot_buf = &iser_task->prot[ISER_DIR_OUT]; | ||
| 409 | } | ||
| 386 | 410 | ||
| 387 | if (scsi_sg_count(sc)) { /* using a scatter list */ | 411 | if (scsi_sg_count(sc)) { /* using a scatter list */ |
| 388 | data_buf->buf = scsi_sglist(sc); | 412 | data_buf->buf = scsi_sglist(sc); |
| 389 | data_buf->size = scsi_sg_count(sc); | 413 | data_buf->size = scsi_sg_count(sc); |
| 390 | } | 414 | } |
| 391 | |||
| 392 | data_buf->data_len = scsi_bufflen(sc); | 415 | data_buf->data_len = scsi_bufflen(sc); |
| 393 | 416 | ||
| 417 | if (scsi_prot_sg_count(sc)) { | ||
| 418 | prot_buf->buf = scsi_prot_sglist(sc); | ||
| 419 | prot_buf->size = scsi_prot_sg_count(sc); | ||
| 420 | prot_buf->data_len = sc->prot_sdb->length; | ||
| 421 | } | ||
| 422 | |||
| 394 | if (hdr->flags & ISCSI_FLAG_CMD_READ) { | 423 | if (hdr->flags & ISCSI_FLAG_CMD_READ) { |
| 395 | err = iser_prepare_read_cmd(task, edtl); | 424 | err = iser_prepare_read_cmd(task, edtl); |
| 396 | if (err) | 425 | if (err) |
| @@ -408,7 +437,7 @@ int iser_send_command(struct iscsi_conn *conn, | |||
| 408 | 437 | ||
| 409 | iser_task->status = ISER_TASK_STATUS_STARTED; | 438 | iser_task->status = ISER_TASK_STATUS_STARTED; |
| 410 | 439 | ||
| 411 | err = iser_post_send(iser_conn->ib_conn, tx_desc); | 440 | err = iser_post_send(ib_conn, tx_desc); |
| 412 | if (!err) | 441 | if (!err) |
| 413 | return 0; | 442 | return 0; |
| 414 | 443 | ||
| @@ -424,7 +453,7 @@ int iser_send_data_out(struct iscsi_conn *conn, | |||
| 424 | struct iscsi_task *task, | 453 | struct iscsi_task *task, |
| 425 | struct iscsi_data *hdr) | 454 | struct iscsi_data *hdr) |
| 426 | { | 455 | { |
| 427 | struct iscsi_iser_conn *iser_conn = conn->dd_data; | 456 | struct iser_conn *ib_conn = conn->dd_data; |
| 428 | struct iscsi_iser_task *iser_task = task->dd_data; | 457 | struct iscsi_iser_task *iser_task = task->dd_data; |
| 429 | struct iser_tx_desc *tx_desc = NULL; | 458 | struct iser_tx_desc *tx_desc = NULL; |
| 430 | struct iser_regd_buf *regd_buf; | 459 | struct iser_regd_buf *regd_buf; |
| @@ -473,7 +502,7 @@ int iser_send_data_out(struct iscsi_conn *conn, | |||
| 473 | itt, buf_offset, data_seg_len); | 502 | itt, buf_offset, data_seg_len); |
| 474 | 503 | ||
| 475 | 504 | ||
| 476 | err = iser_post_send(iser_conn->ib_conn, tx_desc); | 505 | err = iser_post_send(ib_conn, tx_desc); |
| 477 | if (!err) | 506 | if (!err) |
| 478 | return 0; | 507 | return 0; |
| 479 | 508 | ||
| @@ -486,19 +515,18 @@ send_data_out_error: | |||
| 486 | int iser_send_control(struct iscsi_conn *conn, | 515 | int iser_send_control(struct iscsi_conn *conn, |
| 487 | struct iscsi_task *task) | 516 | struct iscsi_task *task) |
| 488 | { | 517 | { |
| 489 | struct iscsi_iser_conn *iser_conn = conn->dd_data; | 518 | struct iser_conn *ib_conn = conn->dd_data; |
| 490 | struct iscsi_iser_task *iser_task = task->dd_data; | 519 | struct iscsi_iser_task *iser_task = task->dd_data; |
| 491 | struct iser_tx_desc *mdesc = &iser_task->desc; | 520 | struct iser_tx_desc *mdesc = &iser_task->desc; |
| 492 | unsigned long data_seg_len; | 521 | unsigned long data_seg_len; |
| 493 | int err = 0; | 522 | int err = 0; |
| 494 | struct iser_device *device; | 523 | struct iser_device *device; |
| 495 | struct iser_conn *ib_conn = iser_conn->ib_conn; | ||
| 496 | 524 | ||
| 497 | /* build the tx desc regd header and add it to the tx desc dto */ | 525 | /* build the tx desc regd header and add it to the tx desc dto */ |
| 498 | mdesc->type = ISCSI_TX_CONTROL; | 526 | mdesc->type = ISCSI_TX_CONTROL; |
| 499 | iser_create_send_desc(iser_conn->ib_conn, mdesc); | 527 | iser_create_send_desc(ib_conn, mdesc); |
| 500 | 528 | ||
| 501 | device = iser_conn->ib_conn->device; | 529 | device = ib_conn->device; |
| 502 | 530 | ||
| 503 | data_seg_len = ntoh24(task->hdr->dlength); | 531 | data_seg_len = ntoh24(task->hdr->dlength); |
| 504 | 532 | ||
| @@ -513,14 +541,13 @@ int iser_send_control(struct iscsi_conn *conn, | |||
| 513 | ib_conn->login_req_dma, task->data_count, | 541 | ib_conn->login_req_dma, task->data_count, |
| 514 | DMA_TO_DEVICE); | 542 | DMA_TO_DEVICE); |
| 515 | 543 | ||
| 516 | memcpy(iser_conn->ib_conn->login_req_buf, task->data, | 544 | memcpy(ib_conn->login_req_buf, task->data, task->data_count); |
| 517 | task->data_count); | ||
| 518 | 545 | ||
| 519 | ib_dma_sync_single_for_device(device->ib_device, | 546 | ib_dma_sync_single_for_device(device->ib_device, |
| 520 | ib_conn->login_req_dma, task->data_count, | 547 | ib_conn->login_req_dma, task->data_count, |
| 521 | DMA_TO_DEVICE); | 548 | DMA_TO_DEVICE); |
| 522 | 549 | ||
| 523 | tx_dsg->addr = iser_conn->ib_conn->login_req_dma; | 550 | tx_dsg->addr = ib_conn->login_req_dma; |
| 524 | tx_dsg->length = task->data_count; | 551 | tx_dsg->length = task->data_count; |
| 525 | tx_dsg->lkey = device->mr->lkey; | 552 | tx_dsg->lkey = device->mr->lkey; |
| 526 | mdesc->num_sge = 2; | 553 | mdesc->num_sge = 2; |
| @@ -529,7 +556,7 @@ int iser_send_control(struct iscsi_conn *conn, | |||
| 529 | if (task == conn->login_task) { | 556 | if (task == conn->login_task) { |
| 530 | iser_dbg("op %x dsl %lx, posting login rx buffer\n", | 557 | iser_dbg("op %x dsl %lx, posting login rx buffer\n", |
| 531 | task->hdr->opcode, data_seg_len); | 558 | task->hdr->opcode, data_seg_len); |
| 532 | err = iser_post_recvl(iser_conn->ib_conn); | 559 | err = iser_post_recvl(ib_conn); |
| 533 | if (err) | 560 | if (err) |
| 534 | goto send_control_error; | 561 | goto send_control_error; |
| 535 | err = iser_post_rx_bufs(conn, task->hdr); | 562 | err = iser_post_rx_bufs(conn, task->hdr); |
| @@ -537,7 +564,7 @@ int iser_send_control(struct iscsi_conn *conn, | |||
| 537 | goto send_control_error; | 564 | goto send_control_error; |
| 538 | } | 565 | } |
| 539 | 566 | ||
| 540 | err = iser_post_send(iser_conn->ib_conn, mdesc); | 567 | err = iser_post_send(ib_conn, mdesc); |
| 541 | if (!err) | 568 | if (!err) |
| 542 | return 0; | 569 | return 0; |
| 543 | 570 | ||
| @@ -553,7 +580,6 @@ void iser_rcv_completion(struct iser_rx_desc *rx_desc, | |||
| 553 | unsigned long rx_xfer_len, | 580 | unsigned long rx_xfer_len, |
| 554 | struct iser_conn *ib_conn) | 581 | struct iser_conn *ib_conn) |
| 555 | { | 582 | { |
| 556 | struct iscsi_iser_conn *conn = ib_conn->iser_conn; | ||
| 557 | struct iscsi_hdr *hdr; | 583 | struct iscsi_hdr *hdr; |
| 558 | u64 rx_dma; | 584 | u64 rx_dma; |
| 559 | int rx_buflen, outstanding, count, err; | 585 | int rx_buflen, outstanding, count, err; |
| @@ -575,17 +601,17 @@ void iser_rcv_completion(struct iser_rx_desc *rx_desc, | |||
| 575 | iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode, | 601 | iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode, |
| 576 | hdr->itt, (int)(rx_xfer_len - ISER_HEADERS_LEN)); | 602 | hdr->itt, (int)(rx_xfer_len - ISER_HEADERS_LEN)); |
| 577 | 603 | ||
| 578 | iscsi_iser_recv(conn->iscsi_conn, hdr, | 604 | iscsi_iser_recv(ib_conn->iscsi_conn, hdr, rx_desc->data, |
| 579 | rx_desc->data, rx_xfer_len - ISER_HEADERS_LEN); | 605 | rx_xfer_len - ISER_HEADERS_LEN); |
| 580 | 606 | ||
| 581 | ib_dma_sync_single_for_device(ib_conn->device->ib_device, rx_dma, | 607 | ib_dma_sync_single_for_device(ib_conn->device->ib_device, rx_dma, |
| 582 | rx_buflen, DMA_FROM_DEVICE); | 608 | rx_buflen, DMA_FROM_DEVICE); |
| 583 | 609 | ||
| 584 | /* decrementing conn->post_recv_buf_count only --after-- freeing the * | 610 | /* decrementing conn->post_recv_buf_count only --after-- freeing the * |
| 585 | * task eliminates the need to worry on tasks which are completed in * | 611 | * task eliminates the need to worry on tasks which are completed in * |
| 586 | * parallel to the execution of iser_conn_term. So the code that waits * | 612 | * parallel to the execution of iser_conn_term. So the code that waits * |
| 587 | * for the posted rx bufs refcount to become zero handles everything */ | 613 | * for the posted rx bufs refcount to become zero handles everything */ |
| 588 | conn->ib_conn->post_recv_buf_count--; | 614 | ib_conn->post_recv_buf_count--; |
| 589 | 615 | ||
| 590 | if (rx_dma == ib_conn->login_resp_dma) | 616 | if (rx_dma == ib_conn->login_resp_dma) |
| 591 | return; | 617 | return; |
| @@ -635,6 +661,9 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task) | |||
| 635 | iser_task->data[ISER_DIR_IN].data_len = 0; | 661 | iser_task->data[ISER_DIR_IN].data_len = 0; |
| 636 | iser_task->data[ISER_DIR_OUT].data_len = 0; | 662 | iser_task->data[ISER_DIR_OUT].data_len = 0; |
| 637 | 663 | ||
| 664 | iser_task->prot[ISER_DIR_IN].data_len = 0; | ||
| 665 | iser_task->prot[ISER_DIR_OUT].data_len = 0; | ||
| 666 | |||
| 638 | memset(&iser_task->rdma_regd[ISER_DIR_IN], 0, | 667 | memset(&iser_task->rdma_regd[ISER_DIR_IN], 0, |
| 639 | sizeof(struct iser_regd_buf)); | 668 | sizeof(struct iser_regd_buf)); |
| 640 | memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0, | 669 | memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0, |
| @@ -643,28 +672,63 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task) | |||
| 643 | 672 | ||
| 644 | void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task) | 673 | void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task) |
| 645 | { | 674 | { |
| 646 | struct iser_device *device = iser_task->iser_conn->ib_conn->device; | 675 | struct iser_device *device = iser_task->ib_conn->device; |
| 647 | int is_rdma_aligned = 1; | 676 | int is_rdma_data_aligned = 1; |
| 677 | int is_rdma_prot_aligned = 1; | ||
| 678 | int prot_count = scsi_prot_sg_count(iser_task->sc); | ||
| 648 | 679 | ||
| 649 | /* if we were reading, copy back to unaligned sglist, | 680 | /* if we were reading, copy back to unaligned sglist, |
| 650 | * anyway dma_unmap and free the copy | 681 | * anyway dma_unmap and free the copy |
| 651 | */ | 682 | */ |
| 652 | if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) { | 683 | if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) { |
| 653 | is_rdma_aligned = 0; | 684 | is_rdma_data_aligned = 0; |
| 654 | iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_IN); | 685 | iser_finalize_rdma_unaligned_sg(iser_task, |
| 686 | &iser_task->data[ISER_DIR_IN], | ||
| 687 | &iser_task->data_copy[ISER_DIR_IN], | ||
| 688 | ISER_DIR_IN); | ||
| 655 | } | 689 | } |
| 690 | |||
| 656 | if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) { | 691 | if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) { |
| 657 | is_rdma_aligned = 0; | 692 | is_rdma_data_aligned = 0; |
| 658 | iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_OUT); | 693 | iser_finalize_rdma_unaligned_sg(iser_task, |
| 694 | &iser_task->data[ISER_DIR_OUT], | ||
| 695 | &iser_task->data_copy[ISER_DIR_OUT], | ||
| 696 | ISER_DIR_OUT); | ||
| 697 | } | ||
| 698 | |||
| 699 | if (iser_task->prot_copy[ISER_DIR_IN].copy_buf != NULL) { | ||
| 700 | is_rdma_prot_aligned = 0; | ||
| 701 | iser_finalize_rdma_unaligned_sg(iser_task, | ||
| 702 | &iser_task->prot[ISER_DIR_IN], | ||
| 703 | &iser_task->prot_copy[ISER_DIR_IN], | ||
| 704 | ISER_DIR_IN); | ||
| 705 | } | ||
| 706 | |||
| 707 | if (iser_task->prot_copy[ISER_DIR_OUT].copy_buf != NULL) { | ||
| 708 | is_rdma_prot_aligned = 0; | ||
| 709 | iser_finalize_rdma_unaligned_sg(iser_task, | ||
| 710 | &iser_task->prot[ISER_DIR_OUT], | ||
| 711 | &iser_task->prot_copy[ISER_DIR_OUT], | ||
| 712 | ISER_DIR_OUT); | ||
| 659 | } | 713 | } |
| 660 | 714 | ||
| 661 | if (iser_task->dir[ISER_DIR_IN]) | 715 | if (iser_task->dir[ISER_DIR_IN]) { |
| 662 | device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN); | 716 | device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN); |
| 717 | if (is_rdma_data_aligned) | ||
| 718 | iser_dma_unmap_task_data(iser_task, | ||
| 719 | &iser_task->data[ISER_DIR_IN]); | ||
| 720 | if (prot_count && is_rdma_prot_aligned) | ||
| 721 | iser_dma_unmap_task_data(iser_task, | ||
| 722 | &iser_task->prot[ISER_DIR_IN]); | ||
| 723 | } | ||
| 663 | 724 | ||
| 664 | if (iser_task->dir[ISER_DIR_OUT]) | 725 | if (iser_task->dir[ISER_DIR_OUT]) { |
| 665 | device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT); | 726 | device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT); |
| 666 | 727 | if (is_rdma_data_aligned) | |
| 667 | /* if the data was unaligned, it was already unmapped and then copied */ | 728 | iser_dma_unmap_task_data(iser_task, |
| 668 | if (is_rdma_aligned) | 729 | &iser_task->data[ISER_DIR_OUT]); |
| 669 | iser_dma_unmap_task_data(iser_task); | 730 | if (prot_count && is_rdma_prot_aligned) |
| 731 | iser_dma_unmap_task_data(iser_task, | ||
| 732 | &iser_task->prot[ISER_DIR_OUT]); | ||
| 733 | } | ||
| 670 | } | 734 | } |
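Note on the iser_initiator.c changes above (annotation, not patch text): the task now reaches its connection directly through iser_task->ib_conn and the iSCSI connection through ib_conn->iscsi_conn, and every task carries a protection buffer pair (prot / prot_copy) next to its data pair. iser_finalize_rdma_unaligned_sg() takes the buffer and its bounce copy explicitly, so one helper serves data and protection scatterlists in both directions; the call shape is the same for each class, roughly:

    /* illustration only, using names from the hunk above */
    if (iser_task->prot_copy[ISER_DIR_IN].copy_buf != NULL)
            iser_finalize_rdma_unaligned_sg(iser_task,
                                            &iser_task->prot[ISER_DIR_IN],
                                            &iser_task->prot_copy[ISER_DIR_IN],
                                            ISER_DIR_IN);

A buffer that went through a bounce copy is not DMA-unmapped again in iser_task_rdma_finalize(); fall_to_bounce_buf() already unmapped the original scatterlist before copying, so only the aligned cases are unmapped here.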
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 1ce0c97d2ccb..47acd3ad3a17 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. | 2 | * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. |
| 3 | * Copyright (c) 2013 Mellanox Technologies. All rights reserved. | 3 | * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved. |
| 4 | * | 4 | * |
| 5 | * This software is available to you under a choice of one of two | 5 | * This software is available to you under a choice of one of two |
| 6 | * licenses. You may choose to be licensed under the terms of the GNU | 6 | * licenses. You may choose to be licensed under the terms of the GNU |
| @@ -45,13 +45,19 @@ | |||
| 45 | * iser_start_rdma_unaligned_sg | 45 | * iser_start_rdma_unaligned_sg |
| 46 | */ | 46 | */ |
| 47 | static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, | 47 | static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, |
| 48 | struct iser_data_buf *data, | ||
| 49 | struct iser_data_buf *data_copy, | ||
| 48 | enum iser_data_dir cmd_dir) | 50 | enum iser_data_dir cmd_dir) |
| 49 | { | 51 | { |
| 50 | int dma_nents; | 52 | struct ib_device *dev = iser_task->ib_conn->device->ib_device; |
| 51 | struct ib_device *dev; | 53 | struct scatterlist *sgl = (struct scatterlist *)data->buf; |
| 54 | struct scatterlist *sg; | ||
| 52 | char *mem = NULL; | 55 | char *mem = NULL; |
| 53 | struct iser_data_buf *data = &iser_task->data[cmd_dir]; | 56 | unsigned long cmd_data_len = 0; |
| 54 | unsigned long cmd_data_len = data->data_len; | 57 | int dma_nents, i; |
| 58 | |||
| 59 | for_each_sg(sgl, sg, data->size, i) | ||
| 60 | cmd_data_len += ib_sg_dma_len(dev, sg); | ||
| 55 | 61 | ||
| 56 | if (cmd_data_len > ISER_KMALLOC_THRESHOLD) | 62 | if (cmd_data_len > ISER_KMALLOC_THRESHOLD) |
| 57 | mem = (void *)__get_free_pages(GFP_ATOMIC, | 63 | mem = (void *)__get_free_pages(GFP_ATOMIC, |
| @@ -61,17 +67,16 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, | |||
| 61 | 67 | ||
| 62 | if (mem == NULL) { | 68 | if (mem == NULL) { |
| 63 | iser_err("Failed to allocate mem size %d %d for copying sglist\n", | 69 | iser_err("Failed to allocate mem size %d %d for copying sglist\n", |
| 64 | data->size,(int)cmd_data_len); | 70 | data->size, (int)cmd_data_len); |
| 65 | return -ENOMEM; | 71 | return -ENOMEM; |
| 66 | } | 72 | } |
| 67 | 73 | ||
| 68 | if (cmd_dir == ISER_DIR_OUT) { | 74 | if (cmd_dir == ISER_DIR_OUT) { |
| 69 | /* copy the unaligned sg the buffer which is used for RDMA */ | 75 | /* copy the unaligned sg the buffer which is used for RDMA */ |
| 70 | struct scatterlist *sgl = (struct scatterlist *)data->buf; | ||
| 71 | struct scatterlist *sg; | ||
| 72 | int i; | 76 | int i; |
| 73 | char *p, *from; | 77 | char *p, *from; |
| 74 | 78 | ||
| 79 | sgl = (struct scatterlist *)data->buf; | ||
| 75 | p = mem; | 80 | p = mem; |
| 76 | for_each_sg(sgl, sg, data->size, i) { | 81 | for_each_sg(sgl, sg, data->size, i) { |
| 77 | from = kmap_atomic(sg_page(sg)); | 82 | from = kmap_atomic(sg_page(sg)); |
| @@ -83,39 +88,37 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, | |||
| 83 | } | 88 | } |
| 84 | } | 89 | } |
| 85 | 90 | ||
| 86 | sg_init_one(&iser_task->data_copy[cmd_dir].sg_single, mem, cmd_data_len); | 91 | sg_init_one(&data_copy->sg_single, mem, cmd_data_len); |
| 87 | iser_task->data_copy[cmd_dir].buf = | 92 | data_copy->buf = &data_copy->sg_single; |
| 88 | &iser_task->data_copy[cmd_dir].sg_single; | 93 | data_copy->size = 1; |
| 89 | iser_task->data_copy[cmd_dir].size = 1; | 94 | data_copy->copy_buf = mem; |
| 90 | 95 | ||
| 91 | iser_task->data_copy[cmd_dir].copy_buf = mem; | 96 | dma_nents = ib_dma_map_sg(dev, &data_copy->sg_single, 1, |
| 92 | |||
| 93 | dev = iser_task->iser_conn->ib_conn->device->ib_device; | ||
| 94 | dma_nents = ib_dma_map_sg(dev, | ||
| 95 | &iser_task->data_copy[cmd_dir].sg_single, | ||
| 96 | 1, | ||
| 97 | (cmd_dir == ISER_DIR_OUT) ? | 97 | (cmd_dir == ISER_DIR_OUT) ? |
| 98 | DMA_TO_DEVICE : DMA_FROM_DEVICE); | 98 | DMA_TO_DEVICE : DMA_FROM_DEVICE); |
| 99 | BUG_ON(dma_nents == 0); | 99 | BUG_ON(dma_nents == 0); |
| 100 | 100 | ||
| 101 | iser_task->data_copy[cmd_dir].dma_nents = dma_nents; | 101 | data_copy->dma_nents = dma_nents; |
| 102 | data_copy->data_len = cmd_data_len; | ||
| 103 | |||
| 102 | return 0; | 104 | return 0; |
| 103 | } | 105 | } |
| 104 | 106 | ||
| 105 | /** | 107 | /** |
| 106 | * iser_finalize_rdma_unaligned_sg | 108 | * iser_finalize_rdma_unaligned_sg |
| 107 | */ | 109 | */ |
| 110 | |||
| 108 | void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, | 111 | void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, |
| 109 | enum iser_data_dir cmd_dir) | 112 | struct iser_data_buf *data, |
| 113 | struct iser_data_buf *data_copy, | ||
| 114 | enum iser_data_dir cmd_dir) | ||
| 110 | { | 115 | { |
| 111 | struct ib_device *dev; | 116 | struct ib_device *dev; |
| 112 | struct iser_data_buf *mem_copy; | ||
| 113 | unsigned long cmd_data_len; | 117 | unsigned long cmd_data_len; |
| 114 | 118 | ||
| 115 | dev = iser_task->iser_conn->ib_conn->device->ib_device; | 119 | dev = iser_task->ib_conn->device->ib_device; |
| 116 | mem_copy = &iser_task->data_copy[cmd_dir]; | ||
| 117 | 120 | ||
| 118 | ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1, | 121 | ib_dma_unmap_sg(dev, &data_copy->sg_single, 1, |
| 119 | (cmd_dir == ISER_DIR_OUT) ? | 122 | (cmd_dir == ISER_DIR_OUT) ? |
| 120 | DMA_TO_DEVICE : DMA_FROM_DEVICE); | 123 | DMA_TO_DEVICE : DMA_FROM_DEVICE); |
| 121 | 124 | ||
| @@ -127,10 +130,10 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, | |||
| 127 | int i; | 130 | int i; |
| 128 | 131 | ||
| 129 | /* copy back read RDMA to unaligned sg */ | 132 | /* copy back read RDMA to unaligned sg */ |
| 130 | mem = mem_copy->copy_buf; | 133 | mem = data_copy->copy_buf; |
| 131 | 134 | ||
| 132 | sgl = (struct scatterlist *)iser_task->data[ISER_DIR_IN].buf; | 135 | sgl = (struct scatterlist *)data->buf; |
| 133 | sg_size = iser_task->data[ISER_DIR_IN].size; | 136 | sg_size = data->size; |
| 134 | 137 | ||
| 135 | p = mem; | 138 | p = mem; |
| 136 | for_each_sg(sgl, sg, sg_size, i) { | 139 | for_each_sg(sgl, sg, sg_size, i) { |
| @@ -143,15 +146,15 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, | |||
| 143 | } | 146 | } |
| 144 | } | 147 | } |
| 145 | 148 | ||
| 146 | cmd_data_len = iser_task->data[cmd_dir].data_len; | 149 | cmd_data_len = data->data_len; |
| 147 | 150 | ||
| 148 | if (cmd_data_len > ISER_KMALLOC_THRESHOLD) | 151 | if (cmd_data_len > ISER_KMALLOC_THRESHOLD) |
| 149 | free_pages((unsigned long)mem_copy->copy_buf, | 152 | free_pages((unsigned long)data_copy->copy_buf, |
| 150 | ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT); | 153 | ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT); |
| 151 | else | 154 | else |
| 152 | kfree(mem_copy->copy_buf); | 155 | kfree(data_copy->copy_buf); |
| 153 | 156 | ||
| 154 | mem_copy->copy_buf = NULL; | 157 | data_copy->copy_buf = NULL; |
| 155 | } | 158 | } |
| 156 | 159 | ||
| 157 | #define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & ~MASK_4K) == 0) | 160 | #define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & ~MASK_4K) == 0) |
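Note on the bounce-buffer free above (annotation, not patch text): the free_pages() order mirrors the allocation, ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT. Worked example with 4 KB pages (PAGE_SHIFT = 12): a 192 KB copy that exceeds ISER_KMALLOC_THRESHOLD rounds up to 256 KB = 2^18 bytes, giving order 18 - 12 = 6, i.e. 64 contiguous pages; copies at or below the threshold go through kmalloc()/kfree() instead. Because the size is rounded up to the next power of two, the allocation can carry close to 50% slack for unlucky transfer lengths.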
| @@ -319,7 +322,7 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task, | |||
| 319 | struct ib_device *dev; | 322 | struct ib_device *dev; |
| 320 | 323 | ||
| 321 | iser_task->dir[iser_dir] = 1; | 324 | iser_task->dir[iser_dir] = 1; |
| 322 | dev = iser_task->iser_conn->ib_conn->device->ib_device; | 325 | dev = iser_task->ib_conn->device->ib_device; |
| 323 | 326 | ||
| 324 | data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir); | 327 | data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir); |
| 325 | if (data->dma_nents == 0) { | 328 | if (data->dma_nents == 0) { |
| @@ -329,31 +332,23 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task, | |||
| 329 | return 0; | 332 | return 0; |
| 330 | } | 333 | } |
| 331 | 334 | ||
| 332 | void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task) | 335 | void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task, |
| 336 | struct iser_data_buf *data) | ||
| 333 | { | 337 | { |
| 334 | struct ib_device *dev; | 338 | struct ib_device *dev; |
| 335 | struct iser_data_buf *data; | ||
| 336 | 339 | ||
| 337 | dev = iser_task->iser_conn->ib_conn->device->ib_device; | 340 | dev = iser_task->ib_conn->device->ib_device; |
| 338 | 341 | ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE); | |
| 339 | if (iser_task->dir[ISER_DIR_IN]) { | ||
| 340 | data = &iser_task->data[ISER_DIR_IN]; | ||
| 341 | ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE); | ||
| 342 | } | ||
| 343 | |||
| 344 | if (iser_task->dir[ISER_DIR_OUT]) { | ||
| 345 | data = &iser_task->data[ISER_DIR_OUT]; | ||
| 346 | ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE); | ||
| 347 | } | ||
| 348 | } | 342 | } |
| 349 | 343 | ||
| 350 | static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task, | 344 | static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task, |
| 351 | struct ib_device *ibdev, | 345 | struct ib_device *ibdev, |
| 346 | struct iser_data_buf *mem, | ||
| 347 | struct iser_data_buf *mem_copy, | ||
| 352 | enum iser_data_dir cmd_dir, | 348 | enum iser_data_dir cmd_dir, |
| 353 | int aligned_len) | 349 | int aligned_len) |
| 354 | { | 350 | { |
| 355 | struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn; | 351 | struct iscsi_conn *iscsi_conn = iser_task->ib_conn->iscsi_conn; |
| 356 | struct iser_data_buf *mem = &iser_task->data[cmd_dir]; | ||
| 357 | 352 | ||
| 358 | iscsi_conn->fmr_unalign_cnt++; | 353 | iscsi_conn->fmr_unalign_cnt++; |
| 359 | iser_warn("rdma alignment violation (%d/%d aligned) or FMR not supported\n", | 354 | iser_warn("rdma alignment violation (%d/%d aligned) or FMR not supported\n", |
| @@ -363,12 +358,12 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task, | |||
| 363 | iser_data_buf_dump(mem, ibdev); | 358 | iser_data_buf_dump(mem, ibdev); |
| 364 | 359 | ||
| 365 | /* unmap the command data before accessing it */ | 360 | /* unmap the command data before accessing it */ |
| 366 | iser_dma_unmap_task_data(iser_task); | 361 | iser_dma_unmap_task_data(iser_task, mem); |
| 367 | 362 | ||
| 368 | /* allocate copy buf, if we are writing, copy the */ | 363 | /* allocate copy buf, if we are writing, copy the */ |
| 369 | /* unaligned scatterlist, dma map the copy */ | 364 | /* unaligned scatterlist, dma map the copy */ |
| 370 | if (iser_start_rdma_unaligned_sg(iser_task, cmd_dir) != 0) | 365 | if (iser_start_rdma_unaligned_sg(iser_task, mem, mem_copy, cmd_dir) != 0) |
| 371 | return -ENOMEM; | 366 | return -ENOMEM; |
| 372 | 367 | ||
| 373 | return 0; | 368 | return 0; |
| 374 | } | 369 | } |
| @@ -382,7 +377,7 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task, | |||
| 382 | int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task, | 377 | int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task, |
| 383 | enum iser_data_dir cmd_dir) | 378 | enum iser_data_dir cmd_dir) |
| 384 | { | 379 | { |
| 385 | struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn; | 380 | struct iser_conn *ib_conn = iser_task->ib_conn; |
| 386 | struct iser_device *device = ib_conn->device; | 381 | struct iser_device *device = ib_conn->device; |
| 387 | struct ib_device *ibdev = device->ib_device; | 382 | struct ib_device *ibdev = device->ib_device; |
| 388 | struct iser_data_buf *mem = &iser_task->data[cmd_dir]; | 383 | struct iser_data_buf *mem = &iser_task->data[cmd_dir]; |
| @@ -396,7 +391,8 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task, | |||
| 396 | 391 | ||
| 397 | aligned_len = iser_data_buf_aligned_len(mem, ibdev); | 392 | aligned_len = iser_data_buf_aligned_len(mem, ibdev); |
| 398 | if (aligned_len != mem->dma_nents) { | 393 | if (aligned_len != mem->dma_nents) { |
| 399 | err = fall_to_bounce_buf(iser_task, ibdev, | 394 | err = fall_to_bounce_buf(iser_task, ibdev, mem, |
| 395 | &iser_task->data_copy[cmd_dir], | ||
| 400 | cmd_dir, aligned_len); | 396 | cmd_dir, aligned_len); |
| 401 | if (err) { | 397 | if (err) { |
| 402 | iser_err("failed to allocate bounce buffer\n"); | 398 | iser_err("failed to allocate bounce buffer\n"); |
| @@ -422,8 +418,8 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task, | |||
| 422 | (unsigned long)regd_buf->reg.va, | 418 | (unsigned long)regd_buf->reg.va, |
| 423 | (unsigned long)regd_buf->reg.len); | 419 | (unsigned long)regd_buf->reg.len); |
| 424 | } else { /* use FMR for multiple dma entries */ | 420 | } else { /* use FMR for multiple dma entries */ |
| 425 | iser_page_vec_build(mem, ib_conn->fastreg.fmr.page_vec, ibdev); | 421 | iser_page_vec_build(mem, ib_conn->fmr.page_vec, ibdev); |
| 426 | err = iser_reg_page_vec(ib_conn, ib_conn->fastreg.fmr.page_vec, | 422 | err = iser_reg_page_vec(ib_conn, ib_conn->fmr.page_vec, |
| 427 | &regd_buf->reg); | 423 | &regd_buf->reg); |
| 428 | if (err && err != -EAGAIN) { | 424 | if (err && err != -EAGAIN) { |
| 429 | iser_data_buf_dump(mem, ibdev); | 425 | iser_data_buf_dump(mem, ibdev); |
| @@ -431,12 +427,12 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task, | |||
| 431 | mem->dma_nents, | 427 | mem->dma_nents, |
| 432 | ntoh24(iser_task->desc.iscsi_header.dlength)); | 428 | ntoh24(iser_task->desc.iscsi_header.dlength)); |
| 433 | iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n", | 429 | iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n", |
| 434 | ib_conn->fastreg.fmr.page_vec->data_size, | 430 | ib_conn->fmr.page_vec->data_size, |
| 435 | ib_conn->fastreg.fmr.page_vec->length, | 431 | ib_conn->fmr.page_vec->length, |
| 436 | ib_conn->fastreg.fmr.page_vec->offset); | 432 | ib_conn->fmr.page_vec->offset); |
| 437 | for (i = 0; i < ib_conn->fastreg.fmr.page_vec->length; i++) | 433 | for (i = 0; i < ib_conn->fmr.page_vec->length; i++) |
| 438 | iser_err("page_vec[%d] = 0x%llx\n", i, | 434 | iser_err("page_vec[%d] = 0x%llx\n", i, |
| 439 | (unsigned long long) ib_conn->fastreg.fmr.page_vec->pages[i]); | 435 | (unsigned long long) ib_conn->fmr.page_vec->pages[i]); |
| 440 | } | 436 | } |
| 441 | if (err) | 437 | if (err) |
| 442 | return err; | 438 | return err; |
| @@ -444,94 +440,280 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task, | |||
| 444 | return 0; | 440 | return 0; |
| 445 | } | 441 | } |
| 446 | 442 | ||
| 447 | static int iser_fast_reg_mr(struct fast_reg_descriptor *desc, | 443 | static inline enum ib_t10_dif_type |
| 448 | struct iser_conn *ib_conn, | 444 | scsi2ib_prot_type(unsigned char prot_type) |
| 445 | { | ||
| 446 | switch (prot_type) { | ||
| 447 | case SCSI_PROT_DIF_TYPE0: | ||
| 448 | return IB_T10DIF_NONE; | ||
| 449 | case SCSI_PROT_DIF_TYPE1: | ||
| 450 | return IB_T10DIF_TYPE1; | ||
| 451 | case SCSI_PROT_DIF_TYPE2: | ||
| 452 | return IB_T10DIF_TYPE2; | ||
| 453 | case SCSI_PROT_DIF_TYPE3: | ||
| 454 | return IB_T10DIF_TYPE3; | ||
| 455 | default: | ||
| 456 | return IB_T10DIF_NONE; | ||
| 457 | } | ||
| 458 | } | ||
| 459 | |||
| 460 | |||
| 461 | static int | ||
| 462 | iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs) | ||
| 463 | { | ||
| 464 | unsigned char scsi_ptype = scsi_get_prot_type(sc); | ||
| 465 | |||
| 466 | sig_attrs->mem.sig_type = IB_SIG_TYPE_T10_DIF; | ||
| 467 | sig_attrs->wire.sig_type = IB_SIG_TYPE_T10_DIF; | ||
| 468 | sig_attrs->mem.sig.dif.pi_interval = sc->device->sector_size; | ||
| 469 | sig_attrs->wire.sig.dif.pi_interval = sc->device->sector_size; | ||
| 470 | |||
| 471 | switch (scsi_get_prot_op(sc)) { | ||
| 472 | case SCSI_PROT_WRITE_INSERT: | ||
| 473 | case SCSI_PROT_READ_STRIP: | ||
| 474 | sig_attrs->mem.sig.dif.type = IB_T10DIF_NONE; | ||
| 475 | sig_attrs->wire.sig.dif.type = scsi2ib_prot_type(scsi_ptype); | ||
| 476 | sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC; | ||
| 477 | sig_attrs->wire.sig.dif.ref_tag = scsi_get_lba(sc) & | ||
| 478 | 0xffffffff; | ||
| 479 | break; | ||
| 480 | case SCSI_PROT_READ_INSERT: | ||
| 481 | case SCSI_PROT_WRITE_STRIP: | ||
| 482 | sig_attrs->mem.sig.dif.type = scsi2ib_prot_type(scsi_ptype); | ||
| 483 | sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC; | ||
| 484 | sig_attrs->mem.sig.dif.ref_tag = scsi_get_lba(sc) & | ||
| 485 | 0xffffffff; | ||
| 486 | sig_attrs->wire.sig.dif.type = IB_T10DIF_NONE; | ||
| 487 | break; | ||
| 488 | case SCSI_PROT_READ_PASS: | ||
| 489 | case SCSI_PROT_WRITE_PASS: | ||
| 490 | sig_attrs->mem.sig.dif.type = scsi2ib_prot_type(scsi_ptype); | ||
| 491 | sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC; | ||
| 492 | sig_attrs->mem.sig.dif.ref_tag = scsi_get_lba(sc) & | ||
| 493 | 0xffffffff; | ||
| 494 | sig_attrs->wire.sig.dif.type = scsi2ib_prot_type(scsi_ptype); | ||
| 495 | sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC; | ||
| 496 | sig_attrs->wire.sig.dif.ref_tag = scsi_get_lba(sc) & | ||
| 497 | 0xffffffff; | ||
| 498 | break; | ||
| 499 | default: | ||
| 500 | iser_err("Unsupported PI operation %d\n", | ||
| 501 | scsi_get_prot_op(sc)); | ||
| 502 | return -EINVAL; | ||
| 503 | } | ||
| 504 | return 0; | ||
| 505 | } | ||
| 506 | |||
| 507 | |||
| 508 | static int | ||
| 509 | iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask) | ||
| 510 | { | ||
| 511 | switch (scsi_get_prot_type(sc)) { | ||
| 512 | case SCSI_PROT_DIF_TYPE0: | ||
| 513 | *mask = 0x0; | ||
| 514 | break; | ||
| 515 | case SCSI_PROT_DIF_TYPE1: | ||
| 516 | case SCSI_PROT_DIF_TYPE2: | ||
| 517 | *mask = ISER_CHECK_GUARD | ISER_CHECK_REFTAG; | ||
| 518 | break; | ||
| 519 | case SCSI_PROT_DIF_TYPE3: | ||
| 520 | *mask = ISER_CHECK_GUARD; | ||
| 521 | break; | ||
| 522 | default: | ||
| 523 | iser_err("Unsupported protection type %d\n", | ||
| 524 | scsi_get_prot_type(sc)); | ||
| 525 | return -EINVAL; | ||
| 526 | } | ||
| 527 | |||
| 528 | return 0; | ||
| 529 | } | ||
| 530 | |||
| 531 | static int | ||
| 532 | iser_reg_sig_mr(struct iscsi_iser_task *iser_task, | ||
| 533 | struct fast_reg_descriptor *desc, struct ib_sge *data_sge, | ||
| 534 | struct ib_sge *prot_sge, struct ib_sge *sig_sge) | ||
| 535 | { | ||
| 536 | struct iser_conn *ib_conn = iser_task->ib_conn; | ||
| 537 | struct iser_pi_context *pi_ctx = desc->pi_ctx; | ||
| 538 | struct ib_send_wr sig_wr, inv_wr; | ||
| 539 | struct ib_send_wr *bad_wr, *wr = NULL; | ||
| 540 | struct ib_sig_attrs sig_attrs; | ||
| 541 | int ret; | ||
| 542 | u32 key; | ||
| 543 | |||
| 544 | memset(&sig_attrs, 0, sizeof(sig_attrs)); | ||
| 545 | ret = iser_set_sig_attrs(iser_task->sc, &sig_attrs); | ||
| 546 | if (ret) | ||
| 547 | goto err; | ||
| 548 | |||
| 549 | ret = iser_set_prot_checks(iser_task->sc, &sig_attrs.check_mask); | ||
| 550 | if (ret) | ||
| 551 | goto err; | ||
| 552 | |||
| 553 | if (!(desc->reg_indicators & ISER_SIG_KEY_VALID)) { | ||
| 554 | memset(&inv_wr, 0, sizeof(inv_wr)); | ||
| 555 | inv_wr.opcode = IB_WR_LOCAL_INV; | ||
| 556 | inv_wr.wr_id = ISER_FASTREG_LI_WRID; | ||
| 557 | inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey; | ||
| 558 | wr = &inv_wr; | ||
| 559 | /* Bump the key */ | ||
| 560 | key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF); | ||
| 561 | ib_update_fast_reg_key(pi_ctx->sig_mr, ++key); | ||
| 562 | } | ||
| 563 | |||
| 564 | memset(&sig_wr, 0, sizeof(sig_wr)); | ||
| 565 | sig_wr.opcode = IB_WR_REG_SIG_MR; | ||
| 566 | sig_wr.wr_id = ISER_FASTREG_LI_WRID; | ||
| 567 | sig_wr.sg_list = data_sge; | ||
| 568 | sig_wr.num_sge = 1; | ||
| 569 | sig_wr.wr.sig_handover.sig_attrs = &sig_attrs; | ||
| 570 | sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr; | ||
| 571 | if (scsi_prot_sg_count(iser_task->sc)) | ||
| 572 | sig_wr.wr.sig_handover.prot = prot_sge; | ||
| 573 | sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE | | ||
| 574 | IB_ACCESS_REMOTE_READ | | ||
| 575 | IB_ACCESS_REMOTE_WRITE; | ||
| 576 | |||
| 577 | if (!wr) | ||
| 578 | wr = &sig_wr; | ||
| 579 | else | ||
| 580 | wr->next = &sig_wr; | ||
| 581 | |||
| 582 | ret = ib_post_send(ib_conn->qp, wr, &bad_wr); | ||
| 583 | if (ret) { | ||
| 584 | iser_err("reg_sig_mr failed, ret:%d\n", ret); | ||
| 585 | goto err; | ||
| 586 | } | ||
| 587 | desc->reg_indicators &= ~ISER_SIG_KEY_VALID; | ||
| 588 | |||
| 589 | sig_sge->lkey = pi_ctx->sig_mr->lkey; | ||
| 590 | sig_sge->addr = 0; | ||
| 591 | sig_sge->length = data_sge->length + prot_sge->length; | ||
| 592 | if (scsi_get_prot_op(iser_task->sc) == SCSI_PROT_WRITE_INSERT || | ||
| 593 | scsi_get_prot_op(iser_task->sc) == SCSI_PROT_READ_STRIP) { | ||
| 594 | sig_sge->length += (data_sge->length / | ||
| 595 | iser_task->sc->device->sector_size) * 8; | ||
| 596 | } | ||
| 597 | |||
| 598 | iser_dbg("sig_sge: addr: 0x%llx length: %u lkey: 0x%x\n", | ||
| 599 | sig_sge->addr, sig_sge->length, | ||
| 600 | sig_sge->lkey); | ||
| 601 | err: | ||
| 602 | return ret; | ||
| 603 | } | ||
| 604 | |||
| 605 | static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task, | ||
| 449 | struct iser_regd_buf *regd_buf, | 606 | struct iser_regd_buf *regd_buf, |
| 450 | u32 offset, unsigned int data_size, | 607 | struct iser_data_buf *mem, |
| 451 | unsigned int page_list_len) | 608 | enum iser_reg_indicator ind, |
| 609 | struct ib_sge *sge) | ||
| 452 | { | 610 | { |
| 611 | struct fast_reg_descriptor *desc = regd_buf->reg.mem_h; | ||
| 612 | struct iser_conn *ib_conn = iser_task->ib_conn; | ||
| 613 | struct iser_device *device = ib_conn->device; | ||
| 614 | struct ib_device *ibdev = device->ib_device; | ||
| 615 | struct ib_mr *mr; | ||
| 616 | struct ib_fast_reg_page_list *frpl; | ||
| 453 | struct ib_send_wr fastreg_wr, inv_wr; | 617 | struct ib_send_wr fastreg_wr, inv_wr; |
| 454 | struct ib_send_wr *bad_wr, *wr = NULL; | 618 | struct ib_send_wr *bad_wr, *wr = NULL; |
| 455 | u8 key; | 619 | u8 key; |
| 456 | int ret; | 620 | int ret, offset, size, plen; |
| 621 | |||
| 622 | /* if there a single dma entry, dma mr suffices */ | ||
| 623 | if (mem->dma_nents == 1) { | ||
| 624 | struct scatterlist *sg = (struct scatterlist *)mem->buf; | ||
| 457 | 625 | ||
| 458 | if (!desc->valid) { | 626 | sge->lkey = device->mr->lkey; |
| 627 | sge->addr = ib_sg_dma_address(ibdev, &sg[0]); | ||
| 628 | sge->length = ib_sg_dma_len(ibdev, &sg[0]); | ||
| 629 | |||
| 630 | iser_dbg("Single DMA entry: lkey=0x%x, addr=0x%llx, length=0x%x\n", | ||
| 631 | sge->lkey, sge->addr, sge->length); | ||
| 632 | return 0; | ||
| 633 | } | ||
| 634 | |||
| 635 | if (ind == ISER_DATA_KEY_VALID) { | ||
| 636 | mr = desc->data_mr; | ||
| 637 | frpl = desc->data_frpl; | ||
| 638 | } else { | ||
| 639 | mr = desc->pi_ctx->prot_mr; | ||
| 640 | frpl = desc->pi_ctx->prot_frpl; | ||
| 641 | } | ||
| 642 | |||
| 643 | plen = iser_sg_to_page_vec(mem, device->ib_device, frpl->page_list, | ||
| 644 | &offset, &size); | ||
| 645 | if (plen * SIZE_4K < size) { | ||
| 646 | iser_err("fast reg page_list too short to hold this SG\n"); | ||
| 647 | return -EINVAL; | ||
| 648 | } | ||
| 649 | |||
| 650 | if (!(desc->reg_indicators & ind)) { | ||
| 459 | memset(&inv_wr, 0, sizeof(inv_wr)); | 651 | memset(&inv_wr, 0, sizeof(inv_wr)); |
| 652 | inv_wr.wr_id = ISER_FASTREG_LI_WRID; | ||
| 460 | inv_wr.opcode = IB_WR_LOCAL_INV; | 653 | inv_wr.opcode = IB_WR_LOCAL_INV; |
| 461 | inv_wr.send_flags = IB_SEND_SIGNALED; | 654 | inv_wr.ex.invalidate_rkey = mr->rkey; |
| 462 | inv_wr.ex.invalidate_rkey = desc->data_mr->rkey; | ||
| 463 | wr = &inv_wr; | 655 | wr = &inv_wr; |
| 464 | /* Bump the key */ | 656 | /* Bump the key */ |
| 465 | key = (u8)(desc->data_mr->rkey & 0x000000FF); | 657 | key = (u8)(mr->rkey & 0x000000FF); |
| 466 | ib_update_fast_reg_key(desc->data_mr, ++key); | 658 | ib_update_fast_reg_key(mr, ++key); |
| 467 | } | 659 | } |
| 468 | 660 | ||
| 469 | /* Prepare FASTREG WR */ | 661 | /* Prepare FASTREG WR */ |
| 470 | memset(&fastreg_wr, 0, sizeof(fastreg_wr)); | 662 | memset(&fastreg_wr, 0, sizeof(fastreg_wr)); |
| 663 | fastreg_wr.wr_id = ISER_FASTREG_LI_WRID; | ||
| 471 | fastreg_wr.opcode = IB_WR_FAST_REG_MR; | 664 | fastreg_wr.opcode = IB_WR_FAST_REG_MR; |
| 472 | fastreg_wr.send_flags = IB_SEND_SIGNALED; | 665 | fastreg_wr.wr.fast_reg.iova_start = frpl->page_list[0] + offset; |
| 473 | fastreg_wr.wr.fast_reg.iova_start = desc->data_frpl->page_list[0] + offset; | 666 | fastreg_wr.wr.fast_reg.page_list = frpl; |
| 474 | fastreg_wr.wr.fast_reg.page_list = desc->data_frpl; | 667 | fastreg_wr.wr.fast_reg.page_list_len = plen; |
| 475 | fastreg_wr.wr.fast_reg.page_list_len = page_list_len; | ||
| 476 | fastreg_wr.wr.fast_reg.page_shift = SHIFT_4K; | 668 | fastreg_wr.wr.fast_reg.page_shift = SHIFT_4K; |
| 477 | fastreg_wr.wr.fast_reg.length = data_size; | 669 | fastreg_wr.wr.fast_reg.length = size; |
| 478 | fastreg_wr.wr.fast_reg.rkey = desc->data_mr->rkey; | 670 | fastreg_wr.wr.fast_reg.rkey = mr->rkey; |
| 479 | fastreg_wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE | | 671 | fastreg_wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE | |
| 480 | IB_ACCESS_REMOTE_WRITE | | 672 | IB_ACCESS_REMOTE_WRITE | |
| 481 | IB_ACCESS_REMOTE_READ); | 673 | IB_ACCESS_REMOTE_READ); |
| 482 | 674 | ||
| 483 | if (!wr) { | 675 | if (!wr) |
| 484 | wr = &fastreg_wr; | 676 | wr = &fastreg_wr; |
| 485 | atomic_inc(&ib_conn->post_send_buf_count); | 677 | else |
| 486 | } else { | ||
| 487 | wr->next = &fastreg_wr; | 678 | wr->next = &fastreg_wr; |
| 488 | atomic_add(2, &ib_conn->post_send_buf_count); | ||
| 489 | } | ||
| 490 | 679 | ||
| 491 | ret = ib_post_send(ib_conn->qp, wr, &bad_wr); | 680 | ret = ib_post_send(ib_conn->qp, wr, &bad_wr); |
| 492 | if (ret) { | 681 | if (ret) { |
| 493 | if (bad_wr->next) | ||
| 494 | atomic_sub(2, &ib_conn->post_send_buf_count); | ||
| 495 | else | ||
| 496 | atomic_dec(&ib_conn->post_send_buf_count); | ||
| 497 | iser_err("fast registration failed, ret:%d\n", ret); | 682 | iser_err("fast registration failed, ret:%d\n", ret); |
| 498 | return ret; | 683 | return ret; |
| 499 | } | 684 | } |
| 500 | desc->valid = false; | 685 | desc->reg_indicators &= ~ind; |
| 501 | 686 | ||
| 502 | regd_buf->reg.mem_h = desc; | 687 | sge->lkey = mr->lkey; |
| 503 | regd_buf->reg.lkey = desc->data_mr->lkey; | 688 | sge->addr = frpl->page_list[0] + offset; |
| 504 | regd_buf->reg.rkey = desc->data_mr->rkey; | 689 | sge->length = size; |
| 505 | regd_buf->reg.va = desc->data_frpl->page_list[0] + offset; | ||
| 506 | regd_buf->reg.len = data_size; | ||
| 507 | regd_buf->reg.is_mr = 1; | ||
| 508 | 690 | ||
| 509 | return ret; | 691 | return ret; |
| 510 | } | 692 | } |
| 511 | 693 | ||
| 512 | /** | 694 | /** |
| 513 | * iser_reg_rdma_mem_frwr - Registers memory intended for RDMA, | 695 | * iser_reg_rdma_mem_fastreg - Registers memory intended for RDMA, |
| 514 | * using Fast Registration WR (if possible) obtaining rkey and va | 696 | * using Fast Registration WR (if possible) obtaining rkey and va |
| 515 | * | 697 | * |
| 516 | * returns 0 on success, errno code on failure | 698 | * returns 0 on success, errno code on failure |
| 517 | */ | 699 | */ |
| 518 | int iser_reg_rdma_mem_frwr(struct iscsi_iser_task *iser_task, | 700 | int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task, |
| 519 | enum iser_data_dir cmd_dir) | 701 | enum iser_data_dir cmd_dir) |
| 520 | { | 702 | { |
| 521 | struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn; | 703 | struct iser_conn *ib_conn = iser_task->ib_conn; |
| 522 | struct iser_device *device = ib_conn->device; | 704 | struct iser_device *device = ib_conn->device; |
| 523 | struct ib_device *ibdev = device->ib_device; | 705 | struct ib_device *ibdev = device->ib_device; |
| 524 | struct iser_data_buf *mem = &iser_task->data[cmd_dir]; | 706 | struct iser_data_buf *mem = &iser_task->data[cmd_dir]; |
| 525 | struct iser_regd_buf *regd_buf = &iser_task->rdma_regd[cmd_dir]; | 707 | struct iser_regd_buf *regd_buf = &iser_task->rdma_regd[cmd_dir]; |
| 526 | struct fast_reg_descriptor *desc; | 708 | struct fast_reg_descriptor *desc = NULL; |
| 527 | unsigned int data_size, page_list_len; | 709 | struct ib_sge data_sge; |
| 528 | int err, aligned_len; | 710 | int err, aligned_len; |
| 529 | unsigned long flags; | 711 | unsigned long flags; |
| 530 | u32 offset; | ||
| 531 | 712 | ||
| 532 | aligned_len = iser_data_buf_aligned_len(mem, ibdev); | 713 | aligned_len = iser_data_buf_aligned_len(mem, ibdev); |
| 533 | if (aligned_len != mem->dma_nents) { | 714 | if (aligned_len != mem->dma_nents) { |
| 534 | err = fall_to_bounce_buf(iser_task, ibdev, | 715 | err = fall_to_bounce_buf(iser_task, ibdev, mem, |
| 716 | &iser_task->data_copy[cmd_dir], | ||
| 535 | cmd_dir, aligned_len); | 717 | cmd_dir, aligned_len); |
| 536 | if (err) { | 718 | if (err) { |
| 537 | iser_err("failed to allocate bounce buffer\n"); | 719 | iser_err("failed to allocate bounce buffer\n"); |
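Note on iser_reg_sig_mr() above (annotation, not patch text): the signature MR exposes one virtually contiguous region covering data plus protection, so sig_sge->length starts as data_sge->length + prot_sge->length. For WRITE_INSERT and READ_STRIP the SCSI midlayer supplies no protection scatterlist and the HCA adds or removes the 8-byte DIF tuple per block on the wire, which is what the extra (data length / sector size) * 8 accounts for. Worked example with 512-byte sectors: an 8-sector, 4096-byte WRITE_INSERT registers a wire-side length of 4096 + 0 + (4096 / 512) * 8 = 4160 bytes. For the PASS operations protection travels on both sides, nothing extra is added, and the reference tag is seeded from the low 32 bits of the LBA in both the memory and wire domains.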
| @@ -540,41 +722,79 @@ int iser_reg_rdma_mem_frwr(struct iscsi_iser_task *iser_task, | |||
| 540 | mem = &iser_task->data_copy[cmd_dir]; | 722 | mem = &iser_task->data_copy[cmd_dir]; |
| 541 | } | 723 | } |
| 542 | 724 | ||
| 543 | /* if there a single dma entry, dma mr suffices */ | 725 | if (mem->dma_nents != 1 || |
| 544 | if (mem->dma_nents == 1) { | 726 | scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) { |
| 545 | struct scatterlist *sg = (struct scatterlist *)mem->buf; | ||
| 546 | |||
| 547 | regd_buf->reg.lkey = device->mr->lkey; | ||
| 548 | regd_buf->reg.rkey = device->mr->rkey; | ||
| 549 | regd_buf->reg.len = ib_sg_dma_len(ibdev, &sg[0]); | ||
| 550 | regd_buf->reg.va = ib_sg_dma_address(ibdev, &sg[0]); | ||
| 551 | regd_buf->reg.is_mr = 0; | ||
| 552 | } else { | ||
| 553 | spin_lock_irqsave(&ib_conn->lock, flags); | 727 | spin_lock_irqsave(&ib_conn->lock, flags); |
| 554 | desc = list_first_entry(&ib_conn->fastreg.frwr.pool, | 728 | desc = list_first_entry(&ib_conn->fastreg.pool, |
| 555 | struct fast_reg_descriptor, list); | 729 | struct fast_reg_descriptor, list); |
| 556 | list_del(&desc->list); | 730 | list_del(&desc->list); |
| 557 | spin_unlock_irqrestore(&ib_conn->lock, flags); | 731 | spin_unlock_irqrestore(&ib_conn->lock, flags); |
| 558 | page_list_len = iser_sg_to_page_vec(mem, device->ib_device, | 732 | regd_buf->reg.mem_h = desc; |
| 559 | desc->data_frpl->page_list, | 733 | } |
| 560 | &offset, &data_size); | 734 | |
| 561 | 735 | err = iser_fast_reg_mr(iser_task, regd_buf, mem, | |
| 562 | if (page_list_len * SIZE_4K < data_size) { | 736 | ISER_DATA_KEY_VALID, &data_sge); |
| 563 | iser_err("fast reg page_list too short to hold this SG\n"); | 737 | if (err) |
| 564 | err = -EINVAL; | 738 | goto err_reg; |
| 565 | goto err_reg; | 739 | |
| 740 | if (scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) { | ||
| 741 | struct ib_sge prot_sge, sig_sge; | ||
| 742 | |||
| 743 | memset(&prot_sge, 0, sizeof(prot_sge)); | ||
| 744 | if (scsi_prot_sg_count(iser_task->sc)) { | ||
| 745 | mem = &iser_task->prot[cmd_dir]; | ||
| 746 | aligned_len = iser_data_buf_aligned_len(mem, ibdev); | ||
| 747 | if (aligned_len != mem->dma_nents) { | ||
| 748 | err = fall_to_bounce_buf(iser_task, ibdev, mem, | ||
| 749 | &iser_task->prot_copy[cmd_dir], | ||
| 750 | cmd_dir, aligned_len); | ||
| 751 | if (err) { | ||
| 752 | iser_err("failed to allocate bounce buffer\n"); | ||
| 753 | return err; | ||
| 754 | } | ||
| 755 | mem = &iser_task->prot_copy[cmd_dir]; | ||
| 756 | } | ||
| 757 | |||
| 758 | err = iser_fast_reg_mr(iser_task, regd_buf, mem, | ||
| 759 | ISER_PROT_KEY_VALID, &prot_sge); | ||
| 760 | if (err) | ||
| 761 | goto err_reg; | ||
| 566 | } | 762 | } |
| 567 | 763 | ||
| 568 | err = iser_fast_reg_mr(desc, ib_conn, regd_buf, | 764 | err = iser_reg_sig_mr(iser_task, desc, &data_sge, |
| 569 | offset, data_size, page_list_len); | 765 | &prot_sge, &sig_sge); |
| 570 | if (err) | 766 | if (err) { |
| 571 | goto err_reg; | 767 | iser_err("Failed to register signature mr\n"); |
| 768 | return err; | ||
| 769 | } | ||
| 770 | desc->reg_indicators |= ISER_FASTREG_PROTECTED; | ||
| 771 | |||
| 772 | regd_buf->reg.lkey = sig_sge.lkey; | ||
| 773 | regd_buf->reg.rkey = desc->pi_ctx->sig_mr->rkey; | ||
| 774 | regd_buf->reg.va = sig_sge.addr; | ||
| 775 | regd_buf->reg.len = sig_sge.length; | ||
| 776 | regd_buf->reg.is_mr = 1; | ||
| 777 | } else { | ||
| 778 | if (desc) { | ||
| 779 | regd_buf->reg.rkey = desc->data_mr->rkey; | ||
| 780 | regd_buf->reg.is_mr = 1; | ||
| 781 | } else { | ||
| 782 | regd_buf->reg.rkey = device->mr->rkey; | ||
| 783 | regd_buf->reg.is_mr = 0; | ||
| 784 | } | ||
| 785 | |||
| 786 | regd_buf->reg.lkey = data_sge.lkey; | ||
| 787 | regd_buf->reg.va = data_sge.addr; | ||
| 788 | regd_buf->reg.len = data_sge.length; | ||
| 572 | } | 789 | } |
| 573 | 790 | ||
| 574 | return 0; | 791 | return 0; |
| 575 | err_reg: | 792 | err_reg: |
| 576 | spin_lock_irqsave(&ib_conn->lock, flags); | 793 | if (desc) { |
| 577 | list_add_tail(&desc->list, &ib_conn->fastreg.frwr.pool); | 794 | spin_lock_irqsave(&ib_conn->lock, flags); |
| 578 | spin_unlock_irqrestore(&ib_conn->lock, flags); | 795 | list_add_tail(&desc->list, &ib_conn->fastreg.pool); |
| 796 | spin_unlock_irqrestore(&ib_conn->lock, flags); | ||
| 797 | } | ||
| 798 | |||
| 579 | return err; | 799 | return err; |
| 580 | } | 800 | } |
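Note on the reworked fast-registration path (annotation, not patch text): iser_fast_reg_mr() no longer fills the regd_buf itself; iser_reg_rdma_mem_fastreg() composes up to three registrations per command. A condensed sketch of the order, using only functions introduced above (prot_mem stands for the possibly bounced protection buffer, error handling omitted):

    /* rough flow for a command with protection enabled, illustration only */
    err = iser_fast_reg_mr(iser_task, regd_buf, mem,
                           ISER_DATA_KEY_VALID, &data_sge);
    if (scsi_prot_sg_count(iser_task->sc))
            err = iser_fast_reg_mr(iser_task, regd_buf, prot_mem,
                                   ISER_PROT_KEY_VALID, &prot_sge);
    err = iser_reg_sig_mr(iser_task, desc, &data_sge, &prot_sge, &sig_sge);
    /* the target then sees the signature MR: rkey from sig_mr,
     * va and len taken from sig_sge */

For a single-SG command without protection the plain DMA MR shortcut still applies and no descriptor is taken from the pool.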
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index ca37edef2791..32849f2becde 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. | 2 | * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. |
| 3 | * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. | 3 | * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. |
| 4 | * Copyright (c) 2013 Mellanox Technologies. All rights reserved. | 4 | * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved. |
| 5 | * | 5 | * |
| 6 | * This software is available to you under a choice of one of two | 6 | * This software is available to you under a choice of one of two |
| 7 | * licenses. You may choose to be licensed under the terms of the GNU | 7 | * licenses. You may choose to be licensed under the terms of the GNU |
| @@ -71,17 +71,14 @@ static void iser_event_handler(struct ib_event_handler *handler, | |||
| 71 | */ | 71 | */ |
| 72 | static int iser_create_device_ib_res(struct iser_device *device) | 72 | static int iser_create_device_ib_res(struct iser_device *device) |
| 73 | { | 73 | { |
| 74 | int i, j; | ||
| 75 | struct iser_cq_desc *cq_desc; | 74 | struct iser_cq_desc *cq_desc; |
| 76 | struct ib_device_attr *dev_attr; | 75 | struct ib_device_attr *dev_attr = &device->dev_attr; |
| 76 | int ret, i, j; | ||
| 77 | 77 | ||
| 78 | dev_attr = kmalloc(sizeof(*dev_attr), GFP_KERNEL); | 78 | ret = ib_query_device(device->ib_device, dev_attr); |
| 79 | if (!dev_attr) | 79 | if (ret) { |
| 80 | return -ENOMEM; | ||
| 81 | |||
| 82 | if (ib_query_device(device->ib_device, dev_attr)) { | ||
| 83 | pr_warn("Query device failed for %s\n", device->ib_device->name); | 80 | pr_warn("Query device failed for %s\n", device->ib_device->name); |
| 84 | goto dev_attr_err; | 81 | return ret; |
| 85 | } | 82 | } |
| 86 | 83 | ||
| 87 | /* Assign function handles - based on FMR support */ | 84 | /* Assign function handles - based on FMR support */ |
| @@ -94,14 +91,14 @@ static int iser_create_device_ib_res(struct iser_device *device) | |||
| 94 | device->iser_unreg_rdma_mem = iser_unreg_mem_fmr; | 91 | device->iser_unreg_rdma_mem = iser_unreg_mem_fmr; |
| 95 | } else | 92 | } else |
| 96 | if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) { | 93 | if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) { |
| 97 | iser_info("FRWR supported, using FRWR for registration\n"); | 94 | iser_info("FastReg supported, using FastReg for registration\n"); |
| 98 | device->iser_alloc_rdma_reg_res = iser_create_frwr_pool; | 95 | device->iser_alloc_rdma_reg_res = iser_create_fastreg_pool; |
| 99 | device->iser_free_rdma_reg_res = iser_free_frwr_pool; | 96 | device->iser_free_rdma_reg_res = iser_free_fastreg_pool; |
| 100 | device->iser_reg_rdma_mem = iser_reg_rdma_mem_frwr; | 97 | device->iser_reg_rdma_mem = iser_reg_rdma_mem_fastreg; |
| 101 | device->iser_unreg_rdma_mem = iser_unreg_mem_frwr; | 98 | device->iser_unreg_rdma_mem = iser_unreg_mem_fastreg; |
| 102 | } else { | 99 | } else { |
| 103 | iser_err("IB device does not support FMRs nor FRWRs, can't register memory\n"); | 100 | iser_err("IB device does not support FMRs nor FastRegs, can't register memory\n"); |
| 104 | goto dev_attr_err; | 101 | return -1; |
| 105 | } | 102 | } |
| 106 | 103 | ||
| 107 | device->cqs_used = min(ISER_MAX_CQ, device->ib_device->num_comp_vectors); | 104 | device->cqs_used = min(ISER_MAX_CQ, device->ib_device->num_comp_vectors); |
| @@ -158,7 +155,6 @@ static int iser_create_device_ib_res(struct iser_device *device) | |||
| 158 | if (ib_register_event_handler(&device->event_handler)) | 155 | if (ib_register_event_handler(&device->event_handler)) |
| 159 | goto handler_err; | 156 | goto handler_err; |
| 160 | 157 | ||
| 161 | kfree(dev_attr); | ||
| 162 | return 0; | 158 | return 0; |
| 163 | 159 | ||
| 164 | handler_err: | 160 | handler_err: |
| @@ -178,8 +174,6 @@ pd_err: | |||
| 178 | kfree(device->cq_desc); | 174 | kfree(device->cq_desc); |
| 179 | cq_desc_err: | 175 | cq_desc_err: |
| 180 | iser_err("failed to allocate an IB resource\n"); | 176 | iser_err("failed to allocate an IB resource\n"); |
| 181 | dev_attr_err: | ||
| 182 | kfree(dev_attr); | ||
| 183 | return -1; | 177 | return -1; |
| 184 | } | 178 | } |
| 185 | 179 | ||
| @@ -221,13 +215,13 @@ int iser_create_fmr_pool(struct iser_conn *ib_conn, unsigned cmds_max) | |||
| 221 | struct ib_fmr_pool_param params; | 215 | struct ib_fmr_pool_param params; |
| 222 | int ret = -ENOMEM; | 216 | int ret = -ENOMEM; |
| 223 | 217 | ||
| 224 | ib_conn->fastreg.fmr.page_vec = kmalloc(sizeof(struct iser_page_vec) + | 218 | ib_conn->fmr.page_vec = kmalloc(sizeof(*ib_conn->fmr.page_vec) + |
| 225 | (sizeof(u64)*(ISCSI_ISER_SG_TABLESIZE + 1)), | 219 | (sizeof(u64)*(ISCSI_ISER_SG_TABLESIZE + 1)), |
| 226 | GFP_KERNEL); | 220 | GFP_KERNEL); |
| 227 | if (!ib_conn->fastreg.fmr.page_vec) | 221 | if (!ib_conn->fmr.page_vec) |
| 228 | return ret; | 222 | return ret; |
| 229 | 223 | ||
| 230 | ib_conn->fastreg.fmr.page_vec->pages = (u64 *)(ib_conn->fastreg.fmr.page_vec + 1); | 224 | ib_conn->fmr.page_vec->pages = (u64 *)(ib_conn->fmr.page_vec + 1); |
| 231 | 225 | ||
| 232 | params.page_shift = SHIFT_4K; | 226 | params.page_shift = SHIFT_4K; |
| 233 | /* when the first/last SG element are not start/end * | 227 | /* when the first/last SG element are not start/end * |
| @@ -243,16 +237,16 @@ int iser_create_fmr_pool(struct iser_conn *ib_conn, unsigned cmds_max) | |||
| 243 | IB_ACCESS_REMOTE_WRITE | | 237 | IB_ACCESS_REMOTE_WRITE | |
| 244 | IB_ACCESS_REMOTE_READ); | 238 | IB_ACCESS_REMOTE_READ); |
| 245 | 239 | ||
| 246 | ib_conn->fastreg.fmr.pool = ib_create_fmr_pool(device->pd, &params); | 240 | ib_conn->fmr.pool = ib_create_fmr_pool(device->pd, &params); |
| 247 | if (!IS_ERR(ib_conn->fastreg.fmr.pool)) | 241 | if (!IS_ERR(ib_conn->fmr.pool)) |
| 248 | return 0; | 242 | return 0; |
| 249 | 243 | ||
| 250 | /* no FMR => no need for page_vec */ | 244 | /* no FMR => no need for page_vec */ |
| 251 | kfree(ib_conn->fastreg.fmr.page_vec); | 245 | kfree(ib_conn->fmr.page_vec); |
| 252 | ib_conn->fastreg.fmr.page_vec = NULL; | 246 | ib_conn->fmr.page_vec = NULL; |
| 253 | 247 | ||
| 254 | ret = PTR_ERR(ib_conn->fastreg.fmr.pool); | 248 | ret = PTR_ERR(ib_conn->fmr.pool); |
| 255 | ib_conn->fastreg.fmr.pool = NULL; | 249 | ib_conn->fmr.pool = NULL; |
| 256 | if (ret != -ENOSYS) { | 250 | if (ret != -ENOSYS) { |
| 257 | iser_err("FMR allocation failed, err %d\n", ret); | 251 | iser_err("FMR allocation failed, err %d\n", ret); |
| 258 | return ret; | 252 | return ret; |
| @@ -268,93 +262,173 @@ int iser_create_fmr_pool(struct iser_conn *ib_conn, unsigned cmds_max) | |||
| 268 | void iser_free_fmr_pool(struct iser_conn *ib_conn) | 262 | void iser_free_fmr_pool(struct iser_conn *ib_conn) |
| 269 | { | 263 | { |
| 270 | iser_info("freeing conn %p fmr pool %p\n", | 264 | iser_info("freeing conn %p fmr pool %p\n", |
| 271 | ib_conn, ib_conn->fastreg.fmr.pool); | 265 | ib_conn, ib_conn->fmr.pool); |
| 266 | |||
| 267 | if (ib_conn->fmr.pool != NULL) | ||
| 268 | ib_destroy_fmr_pool(ib_conn->fmr.pool); | ||
| 269 | |||
| 270 | ib_conn->fmr.pool = NULL; | ||
| 271 | |||
| 272 | kfree(ib_conn->fmr.page_vec); | ||
| 273 | ib_conn->fmr.page_vec = NULL; | ||
| 274 | } | ||
| 275 | |||
| 276 | static int | ||
| 277 | iser_create_fastreg_desc(struct ib_device *ib_device, struct ib_pd *pd, | ||
| 278 | bool pi_enable, struct fast_reg_descriptor *desc) | ||
| 279 | { | ||
| 280 | int ret; | ||
| 281 | |||
| 282 | desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device, | ||
| 283 | ISCSI_ISER_SG_TABLESIZE + 1); | ||
| 284 | if (IS_ERR(desc->data_frpl)) { | ||
| 285 | ret = PTR_ERR(desc->data_frpl); | ||
| 286 | iser_err("Failed to allocate ib_fast_reg_page_list err=%d\n", | ||
| 287 | ret); | ||
| 288 | return PTR_ERR(desc->data_frpl); | ||
| 289 | } | ||
| 290 | |||
| 291 | desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE + 1); | ||
| 292 | if (IS_ERR(desc->data_mr)) { | ||
| 293 | ret = PTR_ERR(desc->data_mr); | ||
| 294 | iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret); | ||
| 295 | goto fast_reg_mr_failure; | ||
| 296 | } | ||
| 297 | desc->reg_indicators |= ISER_DATA_KEY_VALID; | ||
| 298 | |||
| 299 | if (pi_enable) { | ||
| 300 | struct ib_mr_init_attr mr_init_attr = {0}; | ||
| 301 | struct iser_pi_context *pi_ctx = NULL; | ||
| 272 | 302 | ||
| 273 | if (ib_conn->fastreg.fmr.pool != NULL) | 303 | desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL); |
| 274 | ib_destroy_fmr_pool(ib_conn->fastreg.fmr.pool); | 304 | if (!desc->pi_ctx) { |
| 305 | iser_err("Failed to allocate pi context\n"); | ||
| 306 | ret = -ENOMEM; | ||
| 307 | goto pi_ctx_alloc_failure; | ||
| 308 | } | ||
| 309 | pi_ctx = desc->pi_ctx; | ||
| 310 | |||
| 311 | pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device, | ||
| 312 | ISCSI_ISER_SG_TABLESIZE); | ||
| 313 | if (IS_ERR(pi_ctx->prot_frpl)) { | ||
| 314 | ret = PTR_ERR(pi_ctx->prot_frpl); | ||
| 315 | iser_err("Failed to allocate prot frpl ret=%d\n", | ||
| 316 | ret); | ||
| 317 | goto prot_frpl_failure; | ||
| 318 | } | ||
| 319 | |||
| 320 | pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, | ||
| 321 | ISCSI_ISER_SG_TABLESIZE + 1); | ||
| 322 | if (IS_ERR(pi_ctx->prot_mr)) { | ||
| 323 | ret = PTR_ERR(pi_ctx->prot_mr); | ||
| 324 | iser_err("Failed to allocate prot frmr ret=%d\n", | ||
| 325 | ret); | ||
| 326 | goto prot_mr_failure; | ||
| 327 | } | ||
| 328 | desc->reg_indicators |= ISER_PROT_KEY_VALID; | ||
| 329 | |||
| 330 | mr_init_attr.max_reg_descriptors = 2; | ||
| 331 | mr_init_attr.flags |= IB_MR_SIGNATURE_EN; | ||
| 332 | pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr); | ||
| 333 | if (IS_ERR(pi_ctx->sig_mr)) { | ||
| 334 | ret = PTR_ERR(pi_ctx->sig_mr); | ||
| 335 | iser_err("Failed to allocate signature enabled mr err=%d\n", | ||
| 336 | ret); | ||
| 337 | goto sig_mr_failure; | ||
| 338 | } | ||
| 339 | desc->reg_indicators |= ISER_SIG_KEY_VALID; | ||
| 340 | } | ||
| 341 | desc->reg_indicators &= ~ISER_FASTREG_PROTECTED; | ||
| 275 | 342 | ||
| 276 | ib_conn->fastreg.fmr.pool = NULL; | 343 | iser_dbg("Create fr_desc %p page_list %p\n", |
| 344 | desc, desc->data_frpl->page_list); | ||
| 277 | 345 | ||
| 278 | kfree(ib_conn->fastreg.fmr.page_vec); | 346 | return 0; |
| 279 | ib_conn->fastreg.fmr.page_vec = NULL; | 347 | sig_mr_failure: |
| 348 | ib_dereg_mr(desc->pi_ctx->prot_mr); | ||
| 349 | prot_mr_failure: | ||
| 350 | ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl); | ||
| 351 | prot_frpl_failure: | ||
| 352 | kfree(desc->pi_ctx); | ||
| 353 | pi_ctx_alloc_failure: | ||
| 354 | ib_dereg_mr(desc->data_mr); | ||
| 355 | fast_reg_mr_failure: | ||
| 356 | ib_free_fast_reg_page_list(desc->data_frpl); | ||
| 357 | |||
| 358 | return ret; | ||
| 280 | } | 359 | } |
| 281 | 360 | ||
| 282 | /** | 361 | /** |
| 283 | * iser_create_frwr_pool - Creates pool of fast_reg descriptors | 362 | * iser_create_fastreg_pool - Creates pool of fast_reg descriptors |
| 284 | * for fast registration work requests. | 363 | * for fast registration work requests. |
| 285 | * returns 0 on success, or errno code on failure | 364 | * returns 0 on success, or errno code on failure |
| 286 | */ | 365 | */ |
| 287 | int iser_create_frwr_pool(struct iser_conn *ib_conn, unsigned cmds_max) | 366 | int iser_create_fastreg_pool(struct iser_conn *ib_conn, unsigned cmds_max) |
| 288 | { | 367 | { |
| 289 | struct iser_device *device = ib_conn->device; | 368 | struct iser_device *device = ib_conn->device; |
| 290 | struct fast_reg_descriptor *desc; | 369 | struct fast_reg_descriptor *desc; |
| 291 | int i, ret; | 370 | int i, ret; |
| 292 | 371 | ||
| 293 | INIT_LIST_HEAD(&ib_conn->fastreg.frwr.pool); | 372 | INIT_LIST_HEAD(&ib_conn->fastreg.pool); |
| 294 | ib_conn->fastreg.frwr.pool_size = 0; | 373 | ib_conn->fastreg.pool_size = 0; |
| 295 | for (i = 0; i < cmds_max; i++) { | 374 | for (i = 0; i < cmds_max; i++) { |
| 296 | desc = kmalloc(sizeof(*desc), GFP_KERNEL); | 375 | desc = kzalloc(sizeof(*desc), GFP_KERNEL); |
| 297 | if (!desc) { | 376 | if (!desc) { |
| 298 | iser_err("Failed to allocate a new fast_reg descriptor\n"); | 377 | iser_err("Failed to allocate a new fast_reg descriptor\n"); |
| 299 | ret = -ENOMEM; | 378 | ret = -ENOMEM; |
| 300 | goto err; | 379 | goto err; |
| 301 | } | 380 | } |
| 302 | 381 | ||
| 303 | desc->data_frpl = ib_alloc_fast_reg_page_list(device->ib_device, | 382 | ret = iser_create_fastreg_desc(device->ib_device, device->pd, |
| 304 | ISCSI_ISER_SG_TABLESIZE + 1); | 383 | ib_conn->pi_support, desc); |
| 305 | if (IS_ERR(desc->data_frpl)) { | 384 | if (ret) { |
| 306 | ret = PTR_ERR(desc->data_frpl); | 385 | iser_err("Failed to create fastreg descriptor err=%d\n", |
| 307 | iser_err("Failed to allocate ib_fast_reg_page_list err=%d\n", ret); | 386 | ret); |
| 308 | goto fast_reg_page_failure; | 387 | kfree(desc); |
| 388 | goto err; | ||
| 309 | } | 389 | } |
| 310 | 390 | ||
| 311 | desc->data_mr = ib_alloc_fast_reg_mr(device->pd, | 391 | list_add_tail(&desc->list, &ib_conn->fastreg.pool); |
| 312 | ISCSI_ISER_SG_TABLESIZE + 1); | 392 | ib_conn->fastreg.pool_size++; |
| 313 | if (IS_ERR(desc->data_mr)) { | ||
| 314 | ret = PTR_ERR(desc->data_mr); | ||
| 315 | iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret); | ||
| 316 | goto fast_reg_mr_failure; | ||
| 317 | } | ||
| 318 | desc->valid = true; | ||
| 319 | list_add_tail(&desc->list, &ib_conn->fastreg.frwr.pool); | ||
| 320 | ib_conn->fastreg.frwr.pool_size++; | ||
| 321 | } | 393 | } |
| 322 | 394 | ||
| 323 | return 0; | 395 | return 0; |
| 324 | 396 | ||
| 325 | fast_reg_mr_failure: | ||
| 326 | ib_free_fast_reg_page_list(desc->data_frpl); | ||
| 327 | fast_reg_page_failure: | ||
| 328 | kfree(desc); | ||
| 329 | err: | 397 | err: |
| 330 | iser_free_frwr_pool(ib_conn); | 398 | iser_free_fastreg_pool(ib_conn); |
| 331 | return ret; | 399 | return ret; |
| 332 | } | 400 | } |
| 333 | 401 | ||
| 334 | /** | 402 | /** |
| 335 | * iser_free_frwr_pool - releases the pool of fast_reg descriptors | 403 | * iser_free_fastreg_pool - releases the pool of fast_reg descriptors |
| 336 | */ | 404 | */ |
| 337 | void iser_free_frwr_pool(struct iser_conn *ib_conn) | 405 | void iser_free_fastreg_pool(struct iser_conn *ib_conn) |
| 338 | { | 406 | { |
| 339 | struct fast_reg_descriptor *desc, *tmp; | 407 | struct fast_reg_descriptor *desc, *tmp; |
| 340 | int i = 0; | 408 | int i = 0; |
| 341 | 409 | ||
| 342 | if (list_empty(&ib_conn->fastreg.frwr.pool)) | 410 | if (list_empty(&ib_conn->fastreg.pool)) |
| 343 | return; | 411 | return; |
| 344 | 412 | ||
| 345 | iser_info("freeing conn %p frwr pool\n", ib_conn); | 413 | iser_info("freeing conn %p fr pool\n", ib_conn); |
| 346 | 414 | ||
| 347 | list_for_each_entry_safe(desc, tmp, &ib_conn->fastreg.frwr.pool, list) { | 415 | list_for_each_entry_safe(desc, tmp, &ib_conn->fastreg.pool, list) { |
| 348 | list_del(&desc->list); | 416 | list_del(&desc->list); |
| 349 | ib_free_fast_reg_page_list(desc->data_frpl); | 417 | ib_free_fast_reg_page_list(desc->data_frpl); |
| 350 | ib_dereg_mr(desc->data_mr); | 418 | ib_dereg_mr(desc->data_mr); |
| 419 | if (desc->pi_ctx) { | ||
| 420 | ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl); | ||
| 421 | ib_dereg_mr(desc->pi_ctx->prot_mr); | ||
| 422 | ib_destroy_mr(desc->pi_ctx->sig_mr); | ||
| 423 | kfree(desc->pi_ctx); | ||
| 424 | } | ||
| 351 | kfree(desc); | 425 | kfree(desc); |
| 352 | ++i; | 426 | ++i; |
| 353 | } | 427 | } |
| 354 | 428 | ||
| 355 | if (i < ib_conn->fastreg.frwr.pool_size) | 429 | if (i < ib_conn->fastreg.pool_size) |
| 356 | iser_warn("pool still has %d regions registered\n", | 430 | iser_warn("pool still has %d regions registered\n", |
| 357 | ib_conn->fastreg.frwr.pool_size - i); | 431 | ib_conn->fastreg.pool_size - i); |
| 358 | } | 432 | } |
| 359 | 433 | ||
| 360 | /** | 434 | /** |
| @@ -389,12 +463,17 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn) | |||
| 389 | init_attr.qp_context = (void *)ib_conn; | 463 | init_attr.qp_context = (void *)ib_conn; |
| 390 | init_attr.send_cq = device->tx_cq[min_index]; | 464 | init_attr.send_cq = device->tx_cq[min_index]; |
| 391 | init_attr.recv_cq = device->rx_cq[min_index]; | 465 | init_attr.recv_cq = device->rx_cq[min_index]; |
| 392 | init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS; | ||
| 393 | init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS; | 466 | init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS; |
| 394 | init_attr.cap.max_send_sge = 2; | 467 | init_attr.cap.max_send_sge = 2; |
| 395 | init_attr.cap.max_recv_sge = 1; | 468 | init_attr.cap.max_recv_sge = 1; |
| 396 | init_attr.sq_sig_type = IB_SIGNAL_REQ_WR; | 469 | init_attr.sq_sig_type = IB_SIGNAL_REQ_WR; |
| 397 | init_attr.qp_type = IB_QPT_RC; | 470 | init_attr.qp_type = IB_QPT_RC; |
| 471 | if (ib_conn->pi_support) { | ||
| 472 | init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS; | ||
| 473 | init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN; | ||
| 474 | } else { | ||
| 475 | init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS; | ||
| 476 | } | ||
| 398 | 477 | ||
| 399 | ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr); | 478 | ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr); |
| 400 | if (ret) | 479 | if (ret) |
| @@ -591,6 +670,19 @@ static int iser_addr_handler(struct rdma_cm_id *cma_id) | |||
| 591 | ib_conn = (struct iser_conn *)cma_id->context; | 670 | ib_conn = (struct iser_conn *)cma_id->context; |
| 592 | ib_conn->device = device; | 671 | ib_conn->device = device; |
| 593 | 672 | ||
| 673 | /* connection T10-PI support */ | ||
| 674 | if (iser_pi_enable) { | ||
| 675 | if (!(device->dev_attr.device_cap_flags & | ||
| 676 | IB_DEVICE_SIGNATURE_HANDOVER)) { | ||
| 677 | iser_warn("T10-PI requested but not supported on %s, " | ||
| 678 | "continue without T10-PI\n", | ||
| 679 | ib_conn->device->ib_device->name); | ||
| 680 | ib_conn->pi_support = false; | ||
| 681 | } else { | ||
| 682 | ib_conn->pi_support = true; | ||
| 683 | } | ||
| 684 | } | ||
| 685 | |||
| 594 | ret = rdma_resolve_route(cma_id, 1000); | 686 | ret = rdma_resolve_route(cma_id, 1000); |
| 595 | if (ret) { | 687 | if (ret) { |
| 596 | iser_err("resolve route failed: %d\n", ret); | 688 | iser_err("resolve route failed: %d\n", ret); |
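Note on the T10-PI gate above (annotation, not patch text): device attributes are cached in iser_device by the iser_create_device_ib_res() hunk, so connection setup can read device_cap_flags without another ib_query_device() call, and the check degrades gracefully by warning and continuing without protection when the HCA lacks IB_DEVICE_SIGNATURE_HANDOVER. Condensed, with iser_pi_enable being the module-level opt-in this series assumes:

    /* sketch of the gate, not patch text */
    bool hca_pi = device->dev_attr.device_cap_flags &
                  IB_DEVICE_SIGNATURE_HANDOVER;

    ib_conn->pi_support = iser_pi_enable && hca_pi;
    if (iser_pi_enable && !hca_pi)
            iser_warn("T10-PI requested but not supported on %s, "
                      "continue without T10-PI\n",
                      ib_conn->device->ib_device->name);

The pi_support flag later selects the deeper send queue and IB_QP_CREATE_SIGNATURE_EN when the QP is created.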
| @@ -636,6 +728,11 @@ failure: | |||
| 636 | static void iser_connected_handler(struct rdma_cm_id *cma_id) | 728 | static void iser_connected_handler(struct rdma_cm_id *cma_id) |
| 637 | { | 729 | { |
| 638 | struct iser_conn *ib_conn; | 730 | struct iser_conn *ib_conn; |
| 731 | struct ib_qp_attr attr; | ||
| 732 | struct ib_qp_init_attr init_attr; | ||
| 733 | |||
| 734 | (void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr); | ||
| 735 | iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num); | ||
| 639 | 736 | ||
| 640 | ib_conn = (struct iser_conn *)cma_id->context; | 737 | ib_conn = (struct iser_conn *)cma_id->context; |
| 641 | ib_conn->state = ISER_CONN_UP; | 738 | ib_conn->state = ISER_CONN_UP; |
| @@ -653,9 +750,8 @@ static int iser_disconnected_handler(struct rdma_cm_id *cma_id) | |||
| 653 | * terminated asynchronously from the iSCSI layer's perspective. */ | 750 | * terminated asynchronously from the iSCSI layer's perspective. */ |
| 654 | if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP, | 751 | if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP, |
| 655 | ISER_CONN_TERMINATING)){ | 752 | ISER_CONN_TERMINATING)){ |
| 656 | if (ib_conn->iser_conn) | 753 | if (ib_conn->iscsi_conn) |
| 657 | iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn, | 754 | iscsi_conn_failure(ib_conn->iscsi_conn, ISCSI_ERR_CONN_FAILED); |
| 658 | ISCSI_ERR_CONN_FAILED); | ||
| 659 | else | 755 | else |
| 660 | iser_err("iscsi_iser connection isn't bound\n"); | 756 | iser_err("iscsi_iser connection isn't bound\n"); |
| 661 | } | 757 | } |
| @@ -801,7 +897,7 @@ int iser_reg_page_vec(struct iser_conn *ib_conn, | |||
| 801 | page_list = page_vec->pages; | 897 | page_list = page_vec->pages; |
| 802 | io_addr = page_list[0]; | 898 | io_addr = page_list[0]; |
| 803 | 899 | ||
| 804 | mem = ib_fmr_pool_map_phys(ib_conn->fastreg.fmr.pool, | 900 | mem = ib_fmr_pool_map_phys(ib_conn->fmr.pool, |
| 805 | page_list, | 901 | page_list, |
| 806 | page_vec->length, | 902 | page_vec->length, |
| 807 | io_addr); | 903 | io_addr); |
| @@ -855,11 +951,11 @@ void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task, | |||
| 855 | reg->mem_h = NULL; | 951 | reg->mem_h = NULL; |
| 856 | } | 952 | } |
| 857 | 953 | ||
| 858 | void iser_unreg_mem_frwr(struct iscsi_iser_task *iser_task, | 954 | void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task, |
| 859 | enum iser_data_dir cmd_dir) | 955 | enum iser_data_dir cmd_dir) |
| 860 | { | 956 | { |
| 861 | struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg; | 957 | struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg; |
| 862 | struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn; | 958 | struct iser_conn *ib_conn = iser_task->ib_conn; |
| 863 | struct fast_reg_descriptor *desc = reg->mem_h; | 959 | struct fast_reg_descriptor *desc = reg->mem_h; |
| 864 | 960 | ||
| 865 | if (!reg->is_mr) | 961 | if (!reg->is_mr) |
| @@ -868,7 +964,7 @@ void iser_unreg_mem_frwr(struct iscsi_iser_task *iser_task, | |||
| 868 | reg->mem_h = NULL; | 964 | reg->mem_h = NULL; |
| 869 | reg->is_mr = 0; | 965 | reg->is_mr = 0; |
| 870 | spin_lock_bh(&ib_conn->lock); | 966 | spin_lock_bh(&ib_conn->lock); |
| 871 | list_add_tail(&desc->list, &ib_conn->fastreg.frwr.pool); | 967 | list_add_tail(&desc->list, &ib_conn->fastreg.pool); |
| 872 | spin_unlock_bh(&ib_conn->lock); | 968 | spin_unlock_bh(&ib_conn->lock); |
| 873 | } | 969 | } |
| 874 | 970 | ||
| @@ -969,7 +1065,7 @@ static void iser_handle_comp_error(struct iser_tx_desc *desc, | |||
| 969 | * perspective. */ | 1065 | * perspective. */ |
| 970 | if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP, | 1066 | if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP, |
| 971 | ISER_CONN_TERMINATING)) | 1067 | ISER_CONN_TERMINATING)) |
| 972 | iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn, | 1068 | iscsi_conn_failure(ib_conn->iscsi_conn, |
| 973 | ISCSI_ERR_CONN_FAILED); | 1069 | ISCSI_ERR_CONN_FAILED); |
| 974 | 1070 | ||
| 975 | /* no more non completed posts to the QP, complete the | 1071 | /* no more non completed posts to the QP, complete the |
| @@ -993,18 +1089,16 @@ static int iser_drain_tx_cq(struct iser_device *device, int cq_index) | |||
| 993 | if (wc.status == IB_WC_SUCCESS) { | 1089 | if (wc.status == IB_WC_SUCCESS) { |
| 994 | if (wc.opcode == IB_WC_SEND) | 1090 | if (wc.opcode == IB_WC_SEND) |
| 995 | iser_snd_completion(tx_desc, ib_conn); | 1091 | iser_snd_completion(tx_desc, ib_conn); |
| 996 | else if (wc.opcode == IB_WC_LOCAL_INV || | 1092 | else |
| 997 | wc.opcode == IB_WC_FAST_REG_MR) { | ||
| 998 | atomic_dec(&ib_conn->post_send_buf_count); | ||
| 999 | continue; | ||
| 1000 | } else | ||
| 1001 | iser_err("expected opcode %d got %d\n", | 1093 | iser_err("expected opcode %d got %d\n", |
| 1002 | IB_WC_SEND, wc.opcode); | 1094 | IB_WC_SEND, wc.opcode); |
| 1003 | } else { | 1095 | } else { |
| 1004 | iser_err("tx id %llx status %d vend_err %x\n", | 1096 | iser_err("tx id %llx status %d vend_err %x\n", |
| 1005 | wc.wr_id, wc.status, wc.vendor_err); | 1097 | wc.wr_id, wc.status, wc.vendor_err); |
| 1006 | atomic_dec(&ib_conn->post_send_buf_count); | 1098 | if (wc.wr_id != ISER_FASTREG_LI_WRID) { |
| 1007 | iser_handle_comp_error(tx_desc, ib_conn); | 1099 | atomic_dec(&ib_conn->post_send_buf_count); |
| 1100 | iser_handle_comp_error(tx_desc, ib_conn); | ||
| 1101 | } | ||
| 1008 | } | 1102 | } |
| 1009 | completed_tx++; | 1103 | completed_tx++; |
| 1010 | } | 1104 | } |
| @@ -1022,8 +1116,12 @@ static void iser_cq_tasklet_fn(unsigned long data) | |||
| 1022 | struct iser_rx_desc *desc; | 1116 | struct iser_rx_desc *desc; |
| 1023 | unsigned long xfer_len; | 1117 | unsigned long xfer_len; |
| 1024 | struct iser_conn *ib_conn; | 1118 | struct iser_conn *ib_conn; |
| 1025 | int completed_tx, completed_rx; | 1119 | int completed_tx, completed_rx = 0; |
| 1026 | completed_tx = completed_rx = 0; | 1120 | |
| 1121 | /* Drain the tx CQ first, so that when rx flushes arrive together with a | ||
| 1122 | * successful tx completion we still go through completion error handling. | ||
| 1123 | */ | ||
| 1124 | completed_tx = iser_drain_tx_cq(device, cq_index); | ||
| 1027 | 1125 | ||
| 1028 | while (ib_poll_cq(cq, 1, &wc) == 1) { | 1126 | while (ib_poll_cq(cq, 1, &wc) == 1) { |
| 1029 | desc = (struct iser_rx_desc *) (unsigned long) wc.wr_id; | 1127 | desc = (struct iser_rx_desc *) (unsigned long) wc.wr_id; |
| @@ -1051,7 +1149,6 @@ static void iser_cq_tasklet_fn(unsigned long data) | |||
| 1051 | * " would not cause interrupts to be missed" */ | 1149 | * " would not cause interrupts to be missed" */ |
| 1052 | ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); | 1150 | ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); |
| 1053 | 1151 | ||
| 1054 | completed_tx += iser_drain_tx_cq(device, cq_index); | ||
| 1055 | iser_dbg("got %d rx %d tx completions\n", completed_rx, completed_tx); | 1152 | iser_dbg("got %d rx %d tx completions\n", completed_rx, completed_tx); |
| 1056 | } | 1153 | } |
| 1057 | 1154 | ||
| @@ -1063,3 +1160,51 @@ static void iser_cq_callback(struct ib_cq *cq, void *cq_context) | |||
| 1063 | 1160 | ||
| 1064 | tasklet_schedule(&device->cq_tasklet[cq_index]); | 1161 | tasklet_schedule(&device->cq_tasklet[cq_index]); |
| 1065 | } | 1162 | } |
| 1163 | |||
| 1164 | u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task, | ||
| 1165 | enum iser_data_dir cmd_dir, sector_t *sector) | ||
| 1166 | { | ||
| 1167 | struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg; | ||
| 1168 | struct fast_reg_descriptor *desc = reg->mem_h; | ||
| 1169 | unsigned long sector_size = iser_task->sc->device->sector_size; | ||
| 1170 | struct ib_mr_status mr_status; | ||
| 1171 | int ret; | ||
| 1172 | |||
| 1173 | if (desc && desc->reg_indicators & ISER_FASTREG_PROTECTED) { | ||
| 1174 | desc->reg_indicators &= ~ISER_FASTREG_PROTECTED; | ||
| 1175 | ret = ib_check_mr_status(desc->pi_ctx->sig_mr, | ||
| 1176 | IB_MR_CHECK_SIG_STATUS, &mr_status); | ||
| 1177 | if (ret) { | ||
| 1178 | pr_err("ib_check_mr_status failed, ret %d\n", ret); | ||
| 1179 | goto err; | ||
| 1180 | } | ||
| 1181 | |||
| 1182 | if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) { | ||
| 1183 | sector_t sector_off = mr_status.sig_err.sig_err_offset; | ||
| 1184 | |||
| 1185 | do_div(sector_off, sector_size + 8); | ||
| 1186 | *sector = scsi_get_lba(iser_task->sc) + sector_off; | ||
| 1187 | |||
| 1188 | pr_err("PI error found type %d at sector %llx " | ||
| 1189 | "expected %x vs actual %x\n", | ||
| 1190 | mr_status.sig_err.err_type, | ||
| 1191 | (unsigned long long)*sector, | ||
| 1192 | mr_status.sig_err.expected, | ||
| 1193 | mr_status.sig_err.actual); | ||
| 1194 | |||
| 1195 | switch (mr_status.sig_err.err_type) { | ||
| 1196 | case IB_SIG_BAD_GUARD: | ||
| 1197 | return 0x1; | ||
| 1198 | case IB_SIG_BAD_REFTAG: | ||
| 1199 | return 0x3; | ||
| 1200 | case IB_SIG_BAD_APPTAG: | ||
| 1201 | return 0x2; | ||
| 1202 | } | ||
| 1203 | } | ||
| 1204 | } | ||
| 1205 | |||
| 1206 | return 0; | ||
| 1207 | err: | ||
| 1208 | /* Not a lot we can do here, return an ambiguous guard error */ | ||
| 1209 | return 0x1; | ||
| 1210 | } | ||
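
The iser_check_task_pi_status() routine added above bridges the verbs-level signature machinery and SCSI's T10-PI error reporting: it clears the PROTECTED indicator, queries the HCA for the MR status, and translates the error type into the ASCQ values the SCSI midlayer expects (0x1 guard, 0x2 application tag, 0x3 reference tag). A minimal, hedged sketch of that pattern for any ULP follows; the helper name and the lba/sector_size parameters are illustrative, only ib_check_mr_status() and the ib_mr_status/ib_sig_err fields come from this series.

/*
 * Illustrative sketch, not part of the patch: translate a signature error
 * reported by ib_check_mr_status() into a T10-PI ASCQ value, mirroring
 * iser_check_task_pi_status() above.  "sig_mr" is assumed to be an MR
 * created with IB_MR_SIGNATURE_EN; lba and sector_size come from the SCSI
 * command being completed.
 */
#include <linux/kernel.h>
#include <asm/div64.h>
#include <rdma/ib_verbs.h>

static u8 example_sig_err_to_ascq(struct ib_mr *sig_mr, sector_t lba,
                                  unsigned int sector_size,
                                  sector_t *bad_sector)
{
    struct ib_mr_status mr_status;
    sector_t off;
    int ret;

    ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
    if (ret)
        return 0x1;    /* query failed: ambiguous guard error */

    if (!(mr_status.fail_status & IB_MR_CHECK_SIG_STATUS))
        return 0;      /* no protection error on this transfer */

    /* every data block on the wire carries an 8-byte DIF tuple */
    off = mr_status.sig_err.sig_err_offset;
    do_div(off, sector_size + 8);
    *bad_sector = lba + off;

    switch (mr_status.sig_err.err_type) {
    case IB_SIG_BAD_GUARD:
        return 0x1;    /* LOGICAL BLOCK GUARD CHECK FAILED */
    case IB_SIG_BAD_APPTAG:
        return 0x2;    /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
    case IB_SIG_BAD_REFTAG:
        return 0x3;    /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
    }
    return 0x1;
}
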
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 529b6bcdca7a..66a908bf3fb9 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c | |||
| @@ -411,6 +411,8 @@ static void srp_path_rec_completion(int status, | |||
| 411 | 411 | ||
| 412 | static int srp_lookup_path(struct srp_target_port *target) | 412 | static int srp_lookup_path(struct srp_target_port *target) |
| 413 | { | 413 | { |
| 414 | int ret; | ||
| 415 | |||
| 414 | target->path.numb_path = 1; | 416 | target->path.numb_path = 1; |
| 415 | 417 | ||
| 416 | init_completion(&target->done); | 418 | init_completion(&target->done); |
| @@ -431,7 +433,9 @@ static int srp_lookup_path(struct srp_target_port *target) | |||
| 431 | if (target->path_query_id < 0) | 433 | if (target->path_query_id < 0) |
| 432 | return target->path_query_id; | 434 | return target->path_query_id; |
| 433 | 435 | ||
| 434 | wait_for_completion(&target->done); | 436 | ret = wait_for_completion_interruptible(&target->done); |
| 437 | if (ret < 0) | ||
| 438 | return ret; | ||
| 435 | 439 | ||
| 436 | if (target->status < 0) | 440 | if (target->status < 0) |
| 437 | shost_printk(KERN_WARNING, target->scsi_host, | 441 | shost_printk(KERN_WARNING, target->scsi_host, |
| @@ -710,7 +714,9 @@ static int srp_connect_target(struct srp_target_port *target) | |||
| 710 | ret = srp_send_req(target); | 714 | ret = srp_send_req(target); |
| 711 | if (ret) | 715 | if (ret) |
| 712 | return ret; | 716 | return ret; |
| 713 | wait_for_completion(&target->done); | 717 | ret = wait_for_completion_interruptible(&target->done); |
| 718 | if (ret < 0) | ||
| 719 | return ret; | ||
| 714 | 720 | ||
| 715 | /* | 721 | /* |
| 716 | * The CM event handling code will set status to | 722 | * The CM event handling code will set status to |
| @@ -777,6 +783,7 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd, | |||
| 777 | * srp_claim_req - Take ownership of the scmnd associated with a request. | 783 | * srp_claim_req - Take ownership of the scmnd associated with a request. |
| 778 | * @target: SRP target port. | 784 | * @target: SRP target port. |
| 779 | * @req: SRP request. | 785 | * @req: SRP request. |
| 786 | * @sdev: If not NULL, only take ownership for this SCSI device. | ||
| 780 | * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take | 787 | * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take |
| 781 | * ownership of @req->scmnd if it equals @scmnd. | 788 | * ownership of @req->scmnd if it equals @scmnd. |
| 782 | * | 789 | * |
| @@ -785,16 +792,17 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd, | |||
| 785 | */ | 792 | */ |
| 786 | static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target, | 793 | static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target, |
| 787 | struct srp_request *req, | 794 | struct srp_request *req, |
| 795 | struct scsi_device *sdev, | ||
| 788 | struct scsi_cmnd *scmnd) | 796 | struct scsi_cmnd *scmnd) |
| 789 | { | 797 | { |
| 790 | unsigned long flags; | 798 | unsigned long flags; |
| 791 | 799 | ||
| 792 | spin_lock_irqsave(&target->lock, flags); | 800 | spin_lock_irqsave(&target->lock, flags); |
| 793 | if (!scmnd) { | 801 | if (req->scmnd && |
| 802 | (!sdev || req->scmnd->device == sdev) && | ||
| 803 | (!scmnd || req->scmnd == scmnd)) { | ||
| 794 | scmnd = req->scmnd; | 804 | scmnd = req->scmnd; |
| 795 | req->scmnd = NULL; | 805 | req->scmnd = NULL; |
| 796 | } else if (req->scmnd == scmnd) { | ||
| 797 | req->scmnd = NULL; | ||
| 798 | } else { | 806 | } else { |
| 799 | scmnd = NULL; | 807 | scmnd = NULL; |
| 800 | } | 808 | } |
| @@ -821,9 +829,10 @@ static void srp_free_req(struct srp_target_port *target, | |||
| 821 | } | 829 | } |
| 822 | 830 | ||
| 823 | static void srp_finish_req(struct srp_target_port *target, | 831 | static void srp_finish_req(struct srp_target_port *target, |
| 824 | struct srp_request *req, int result) | 832 | struct srp_request *req, struct scsi_device *sdev, |
| 833 | int result) | ||
| 825 | { | 834 | { |
| 826 | struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL); | 835 | struct scsi_cmnd *scmnd = srp_claim_req(target, req, sdev, NULL); |
| 827 | 836 | ||
| 828 | if (scmnd) { | 837 | if (scmnd) { |
| 829 | srp_free_req(target, req, scmnd, 0); | 838 | srp_free_req(target, req, scmnd, 0); |
| @@ -835,11 +844,20 @@ static void srp_finish_req(struct srp_target_port *target, | |||
| 835 | static void srp_terminate_io(struct srp_rport *rport) | 844 | static void srp_terminate_io(struct srp_rport *rport) |
| 836 | { | 845 | { |
| 837 | struct srp_target_port *target = rport->lld_data; | 846 | struct srp_target_port *target = rport->lld_data; |
| 847 | struct Scsi_Host *shost = target->scsi_host; | ||
| 848 | struct scsi_device *sdev; | ||
| 838 | int i; | 849 | int i; |
| 839 | 850 | ||
| 851 | /* | ||
| 852 | * Invoking srp_terminate_io() while srp_queuecommand() is running | ||
| 853 | * is not safe. Hence the warning statement below. | ||
| 854 | */ | ||
| 855 | shost_for_each_device(sdev, shost) | ||
| 856 | WARN_ON_ONCE(sdev->request_queue->request_fn_active); | ||
| 857 | |||
| 840 | for (i = 0; i < target->req_ring_size; ++i) { | 858 | for (i = 0; i < target->req_ring_size; ++i) { |
| 841 | struct srp_request *req = &target->req_ring[i]; | 859 | struct srp_request *req = &target->req_ring[i]; |
| 842 | srp_finish_req(target, req, DID_TRANSPORT_FAILFAST << 16); | 860 | srp_finish_req(target, req, NULL, DID_TRANSPORT_FAILFAST << 16); |
| 843 | } | 861 | } |
| 844 | } | 862 | } |
| 845 | 863 | ||
| @@ -876,7 +894,7 @@ static int srp_rport_reconnect(struct srp_rport *rport) | |||
| 876 | 894 | ||
| 877 | for (i = 0; i < target->req_ring_size; ++i) { | 895 | for (i = 0; i < target->req_ring_size; ++i) { |
| 878 | struct srp_request *req = &target->req_ring[i]; | 896 | struct srp_request *req = &target->req_ring[i]; |
| 879 | srp_finish_req(target, req, DID_RESET << 16); | 897 | srp_finish_req(target, req, NULL, DID_RESET << 16); |
| 880 | } | 898 | } |
| 881 | 899 | ||
| 882 | INIT_LIST_HEAD(&target->free_tx); | 900 | INIT_LIST_HEAD(&target->free_tx); |
| @@ -1284,7 +1302,7 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) | |||
| 1284 | complete(&target->tsk_mgmt_done); | 1302 | complete(&target->tsk_mgmt_done); |
| 1285 | } else { | 1303 | } else { |
| 1286 | req = &target->req_ring[rsp->tag]; | 1304 | req = &target->req_ring[rsp->tag]; |
| 1287 | scmnd = srp_claim_req(target, req, NULL); | 1305 | scmnd = srp_claim_req(target, req, NULL, NULL); |
| 1288 | if (!scmnd) { | 1306 | if (!scmnd) { |
| 1289 | shost_printk(KERN_ERR, target->scsi_host, | 1307 | shost_printk(KERN_ERR, target->scsi_host, |
| 1290 | "Null scmnd for RSP w/tag %016llx\n", | 1308 | "Null scmnd for RSP w/tag %016llx\n", |
| @@ -1804,8 +1822,10 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id, | |||
| 1804 | shost_printk(KERN_WARNING, shost, | 1822 | shost_printk(KERN_WARNING, shost, |
| 1805 | PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n"); | 1823 | PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n"); |
| 1806 | else | 1824 | else |
| 1807 | shost_printk(KERN_WARNING, shost, | 1825 | shost_printk(KERN_WARNING, shost, PFX |
| 1808 | PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason); | 1826 | "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n", |
| 1827 | target->path.sgid.raw, | ||
| 1828 | target->orig_dgid, reason); | ||
| 1809 | } else | 1829 | } else |
| 1810 | shost_printk(KERN_WARNING, shost, | 1830 | shost_printk(KERN_WARNING, shost, |
| 1811 | " REJ reason: IB_CM_REJ_CONSUMER_DEFINED," | 1831 | " REJ reason: IB_CM_REJ_CONSUMER_DEFINED," |
| @@ -1863,6 +1883,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) | |||
| 1863 | case IB_CM_TIMEWAIT_EXIT: | 1883 | case IB_CM_TIMEWAIT_EXIT: |
| 1864 | shost_printk(KERN_ERR, target->scsi_host, | 1884 | shost_printk(KERN_ERR, target->scsi_host, |
| 1865 | PFX "connection closed\n"); | 1885 | PFX "connection closed\n"); |
| 1886 | comp = 1; | ||
| 1866 | 1887 | ||
| 1867 | target->status = 0; | 1888 | target->status = 0; |
| 1868 | break; | 1889 | break; |
| @@ -1999,7 +2020,7 @@ static int srp_abort(struct scsi_cmnd *scmnd) | |||
| 1999 | 2020 | ||
| 2000 | shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); | 2021 | shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); |
| 2001 | 2022 | ||
| 2002 | if (!req || !srp_claim_req(target, req, scmnd)) | 2023 | if (!req || !srp_claim_req(target, req, NULL, scmnd)) |
| 2003 | return SUCCESS; | 2024 | return SUCCESS; |
| 2004 | if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun, | 2025 | if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun, |
| 2005 | SRP_TSK_ABORT_TASK) == 0) | 2026 | SRP_TSK_ABORT_TASK) == 0) |
| @@ -2030,8 +2051,7 @@ static int srp_reset_device(struct scsi_cmnd *scmnd) | |||
| 2030 | 2051 | ||
| 2031 | for (i = 0; i < target->req_ring_size; ++i) { | 2052 | for (i = 0; i < target->req_ring_size; ++i) { |
| 2032 | struct srp_request *req = &target->req_ring[i]; | 2053 | struct srp_request *req = &target->req_ring[i]; |
| 2033 | if (req->scmnd && req->scmnd->device == scmnd->device) | 2054 | srp_finish_req(target, req, scmnd->device, DID_RESET << 16); |
| 2034 | srp_finish_req(target, req, DID_RESET << 16); | ||
| 2035 | } | 2055 | } |
| 2036 | 2056 | ||
| 2037 | return SUCCESS; | 2057 | return SUCCESS; |
| @@ -2612,6 +2632,8 @@ static ssize_t srp_create_target(struct device *dev, | |||
| 2612 | target->tl_retry_count = 7; | 2632 | target->tl_retry_count = 7; |
| 2613 | target->queue_size = SRP_DEFAULT_QUEUE_SIZE; | 2633 | target->queue_size = SRP_DEFAULT_QUEUE_SIZE; |
| 2614 | 2634 | ||
| 2635 | mutex_lock(&host->add_target_mutex); | ||
| 2636 | |||
| 2615 | ret = srp_parse_options(buf, target); | 2637 | ret = srp_parse_options(buf, target); |
| 2616 | if (ret) | 2638 | if (ret) |
| 2617 | goto err; | 2639 | goto err; |
| @@ -2649,16 +2671,9 @@ static ssize_t srp_create_target(struct device *dev, | |||
| 2649 | if (ret) | 2671 | if (ret) |
| 2650 | goto err_free_mem; | 2672 | goto err_free_mem; |
| 2651 | 2673 | ||
| 2652 | ib_query_gid(ibdev, host->port, 0, &target->path.sgid); | 2674 | ret = ib_query_gid(ibdev, host->port, 0, &target->path.sgid); |
| 2653 | 2675 | if (ret) | |
| 2654 | shost_printk(KERN_DEBUG, target->scsi_host, PFX | 2676 | goto err_free_mem; |
| 2655 | "new target: id_ext %016llx ioc_guid %016llx pkey %04x " | ||
| 2656 | "service_id %016llx dgid %pI6\n", | ||
| 2657 | (unsigned long long) be64_to_cpu(target->id_ext), | ||
| 2658 | (unsigned long long) be64_to_cpu(target->ioc_guid), | ||
| 2659 | be16_to_cpu(target->path.pkey), | ||
| 2660 | (unsigned long long) be64_to_cpu(target->service_id), | ||
| 2661 | target->path.dgid.raw); | ||
| 2662 | 2677 | ||
| 2663 | ret = srp_create_target_ib(target); | 2678 | ret = srp_create_target_ib(target); |
| 2664 | if (ret) | 2679 | if (ret) |
| @@ -2679,7 +2694,19 @@ static ssize_t srp_create_target(struct device *dev, | |||
| 2679 | if (ret) | 2694 | if (ret) |
| 2680 | goto err_disconnect; | 2695 | goto err_disconnect; |
| 2681 | 2696 | ||
| 2682 | return count; | 2697 | shost_printk(KERN_DEBUG, target->scsi_host, PFX |
| 2698 | "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n", | ||
| 2699 | be64_to_cpu(target->id_ext), | ||
| 2700 | be64_to_cpu(target->ioc_guid), | ||
| 2701 | be16_to_cpu(target->path.pkey), | ||
| 2702 | be64_to_cpu(target->service_id), | ||
| 2703 | target->path.sgid.raw, target->path.dgid.raw); | ||
| 2704 | |||
| 2705 | ret = count; | ||
| 2706 | |||
| 2707 | out: | ||
| 2708 | mutex_unlock(&host->add_target_mutex); | ||
| 2709 | return ret; | ||
| 2683 | 2710 | ||
| 2684 | err_disconnect: | 2711 | err_disconnect: |
| 2685 | srp_disconnect_target(target); | 2712 | srp_disconnect_target(target); |
| @@ -2695,8 +2722,7 @@ err_free_mem: | |||
| 2695 | 2722 | ||
| 2696 | err: | 2723 | err: |
| 2697 | scsi_host_put(target_host); | 2724 | scsi_host_put(target_host); |
| 2698 | 2725 | goto out; | |
| 2699 | return ret; | ||
| 2700 | } | 2726 | } |
| 2701 | 2727 | ||
| 2702 | static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target); | 2728 | static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target); |
| @@ -2732,6 +2758,7 @@ static struct srp_host *srp_add_port(struct srp_device *device, u8 port) | |||
| 2732 | INIT_LIST_HEAD(&host->target_list); | 2758 | INIT_LIST_HEAD(&host->target_list); |
| 2733 | spin_lock_init(&host->target_lock); | 2759 | spin_lock_init(&host->target_lock); |
| 2734 | init_completion(&host->released); | 2760 | init_completion(&host->released); |
| 2761 | mutex_init(&host->add_target_mutex); | ||
| 2735 | host->srp_dev = device; | 2762 | host->srp_dev = device; |
| 2736 | host->port = port; | 2763 | host->port = port; |
| 2737 | 2764 | ||
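
The add_target_mutex introduced above turns srp_create_target() into a single-exit function: both the success path and every error path now funnel through one unlock point, so concurrent writers to the add_target sysfs attribute can no longer race on per-host state. A stripped-down sketch of that shape, with illustrative names only; the real unwinding above also tears down IB resources on the err_* paths.

#include <linux/mutex.h>
#include <linux/types.h>

struct example_host {
    struct mutex add_target_mutex;    /* serializes add_target writes */
};

static ssize_t example_add_target(struct example_host *host,
                                  const char *buf, size_t count)
{
    ssize_t ret;

    mutex_lock(&host->add_target_mutex);

    ret = -EINVAL;
    if (!count)
        goto out;    /* every failure unwinds to the single unlock */

    /* ... parse options, allocate IB resources, log in to the target ... */

    ret = count;     /* success: the sysfs store reports bytes consumed */
out:
    mutex_unlock(&host->add_target_mutex);
    return ret;
}
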
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h index 575681063f38..aad27b7b4a46 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.h +++ b/drivers/infiniband/ulp/srp/ib_srp.h | |||
| @@ -105,6 +105,7 @@ struct srp_host { | |||
| 105 | spinlock_t target_lock; | 105 | spinlock_t target_lock; |
| 106 | struct completion released; | 106 | struct completion released; |
| 107 | struct list_head list; | 107 | struct list_head list; |
| 108 | struct mutex add_target_mutex; | ||
| 108 | }; | 109 | }; |
| 109 | 110 | ||
| 110 | struct srp_request { | 111 | struct srp_request { |
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c index 9cd5415fe017..aa7f94375108 100644 --- a/drivers/net/ethernet/emulex/benet/be_roce.c +++ b/drivers/net/ethernet/emulex/benet/be_roce.c | |||
| @@ -35,6 +35,12 @@ static void _be_roce_dev_add(struct be_adapter *adapter) | |||
| 35 | 35 | ||
| 36 | if (!ocrdma_drv) | 36 | if (!ocrdma_drv) |
| 37 | return; | 37 | return; |
| 38 | |||
| 39 | if (ocrdma_drv->be_abi_version != BE_ROCE_ABI_VERSION) { | ||
| 40 | dev_warn(&pdev->dev, "Cannot initialize RoCE due to ocrdma ABI mismatch\n"); | ||
| 41 | return; | ||
| 42 | } | ||
| 43 | |||
| 38 | if (pdev->device == OC_DEVICE_ID5) { | 44 | if (pdev->device == OC_DEVICE_ID5) { |
| 39 | /* only msix is supported on these devices */ | 45 | /* only msix is supported on these devices */ |
| 40 | if (!msix_enabled(adapter)) | 46 | if (!msix_enabled(adapter)) |
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h index 2cd1129e19af..1bfb16164df8 100644 --- a/drivers/net/ethernet/emulex/benet/be_roce.h +++ b/drivers/net/ethernet/emulex/benet/be_roce.h | |||
| @@ -21,6 +21,8 @@ | |||
| 21 | #include <linux/pci.h> | 21 | #include <linux/pci.h> |
| 22 | #include <linux/netdevice.h> | 22 | #include <linux/netdevice.h> |
| 23 | 23 | ||
| 24 | #define BE_ROCE_ABI_VERSION 1 | ||
| 25 | |||
| 24 | struct ocrdma_dev; | 26 | struct ocrdma_dev; |
| 25 | 27 | ||
| 26 | enum be_interrupt_mode { | 28 | enum be_interrupt_mode { |
| @@ -52,6 +54,7 @@ struct be_dev_info { | |||
| 52 | /* ocrdma driver register's the callback functions with nic driver. */ | 54 | /* ocrdma driver register's the callback functions with nic driver. */ |
| 53 | struct ocrdma_driver { | 55 | struct ocrdma_driver { |
| 54 | unsigned char name[32]; | 56 | unsigned char name[32]; |
| 57 | u32 be_abi_version; | ||
| 55 | struct ocrdma_dev *(*add) (struct be_dev_info *dev_info); | 58 | struct ocrdma_dev *(*add) (struct be_dev_info *dev_info); |
| 56 | void (*remove) (struct ocrdma_dev *); | 59 | void (*remove) (struct ocrdma_dev *); |
| 57 | void (*state_change_handler) (struct ocrdma_dev *, u32 new_state); | 60 | void (*state_change_handler) (struct ocrdma_dev *, u32 new_state); |
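
BE_ROCE_ABI_VERSION and the new be_abi_version field give benet and ocrdma a run-time handshake: the RoCE driver stamps its ocrdma_driver descriptor with the version it was built against, and _be_roce_dev_add() above refuses to bind if the NIC driver disagrees. A sketch of the registering side follows; the callback names are assumed, not taken from this diff.

/* Sketch of assumed usage on the ocrdma side; callback names are illustrative. */
static struct ocrdma_driver example_ocrdma_drv = {
    .name                 = "ocrdma_driver",
    .be_abi_version       = BE_ROCE_ABI_VERSION,    /* must match benet */
    .add                  = example_ocrdma_add,
    .remove               = example_ocrdma_remove,
    .state_change_handler = example_ocrdma_event_handler,
};

static int __init example_ocrdma_init_module(void)
{
    /* the ABI check itself happens in _be_roce_dev_add(), per adapter */
    return be_roce_register_driver(&example_ocrdma_drv);
}
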
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index a064f06e0cb8..96a0617f7609 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c | |||
| @@ -446,6 +446,7 @@ int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) | |||
| 446 | mlx5_init_cq_table(dev); | 446 | mlx5_init_cq_table(dev); |
| 447 | mlx5_init_qp_table(dev); | 447 | mlx5_init_qp_table(dev); |
| 448 | mlx5_init_srq_table(dev); | 448 | mlx5_init_srq_table(dev); |
| 449 | mlx5_init_mr_table(dev); | ||
| 449 | 450 | ||
| 450 | return 0; | 451 | return 0; |
| 451 | 452 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c index 35e514dc7b7d..4cc927649404 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c | |||
| @@ -36,11 +36,24 @@ | |||
| 36 | #include <linux/mlx5/cmd.h> | 36 | #include <linux/mlx5/cmd.h> |
| 37 | #include "mlx5_core.h" | 37 | #include "mlx5_core.h" |
| 38 | 38 | ||
| 39 | void mlx5_init_mr_table(struct mlx5_core_dev *dev) | ||
| 40 | { | ||
| 41 | struct mlx5_mr_table *table = &dev->priv.mr_table; | ||
| 42 | |||
| 43 | rwlock_init(&table->lock); | ||
| 44 | INIT_RADIX_TREE(&table->tree, GFP_ATOMIC); | ||
| 45 | } | ||
| 46 | |||
| 47 | void mlx5_cleanup_mr_table(struct mlx5_core_dev *dev) | ||
| 48 | { | ||
| 49 | } | ||
| 50 | |||
| 39 | int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, | 51 | int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, |
| 40 | struct mlx5_create_mkey_mbox_in *in, int inlen, | 52 | struct mlx5_create_mkey_mbox_in *in, int inlen, |
| 41 | mlx5_cmd_cbk_t callback, void *context, | 53 | mlx5_cmd_cbk_t callback, void *context, |
| 42 | struct mlx5_create_mkey_mbox_out *out) | 54 | struct mlx5_create_mkey_mbox_out *out) |
| 43 | { | 55 | { |
| 56 | struct mlx5_mr_table *table = &dev->priv.mr_table; | ||
| 44 | struct mlx5_create_mkey_mbox_out lout; | 57 | struct mlx5_create_mkey_mbox_out lout; |
| 45 | int err; | 58 | int err; |
| 46 | u8 key; | 59 | u8 key; |
| @@ -73,14 +86,21 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, | |||
| 73 | mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n", | 86 | mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n", |
| 74 | be32_to_cpu(lout.mkey), key, mr->key); | 87 | be32_to_cpu(lout.mkey), key, mr->key); |
| 75 | 88 | ||
| 89 | /* connect to MR tree */ | ||
| 90 | write_lock_irq(&table->lock); | ||
| 91 | err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->key), mr); | ||
| 92 | write_unlock_irq(&table->lock); | ||
| 93 | |||
| 76 | return err; | 94 | return err; |
| 77 | } | 95 | } |
| 78 | EXPORT_SYMBOL(mlx5_core_create_mkey); | 96 | EXPORT_SYMBOL(mlx5_core_create_mkey); |
| 79 | 97 | ||
| 80 | int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr) | 98 | int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr) |
| 81 | { | 99 | { |
| 100 | struct mlx5_mr_table *table = &dev->priv.mr_table; | ||
| 82 | struct mlx5_destroy_mkey_mbox_in in; | 101 | struct mlx5_destroy_mkey_mbox_in in; |
| 83 | struct mlx5_destroy_mkey_mbox_out out; | 102 | struct mlx5_destroy_mkey_mbox_out out; |
| 103 | unsigned long flags; | ||
| 84 | int err; | 104 | int err; |
| 85 | 105 | ||
| 86 | memset(&in, 0, sizeof(in)); | 106 | memset(&in, 0, sizeof(in)); |
| @@ -95,6 +115,10 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr) | |||
| 95 | if (out.hdr.status) | 115 | if (out.hdr.status) |
| 96 | return mlx5_cmd_status_to_err(&out.hdr); | 116 | return mlx5_cmd_status_to_err(&out.hdr); |
| 97 | 117 | ||
| 118 | write_lock_irqsave(&table->lock, flags); | ||
| 119 | radix_tree_delete(&table->tree, mlx5_base_mkey(mr->key)); | ||
| 120 | write_unlock_irqrestore(&table->lock, flags); | ||
| 121 | |||
| 98 | return err; | 122 | return err; |
| 99 | } | 123 | } |
| 100 | EXPORT_SYMBOL(mlx5_core_destroy_mkey); | 124 | EXPORT_SYMBOL(mlx5_core_destroy_mkey); |
| @@ -144,3 +168,64 @@ int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, | |||
| 144 | return err; | 168 | return err; |
| 145 | } | 169 | } |
| 146 | EXPORT_SYMBOL(mlx5_core_dump_fill_mkey); | 170 | EXPORT_SYMBOL(mlx5_core_dump_fill_mkey); |
| 171 | |||
| 172 | int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn, | ||
| 173 | int npsvs, u32 *sig_index) | ||
| 174 | { | ||
| 175 | struct mlx5_allocate_psv_in in; | ||
| 176 | struct mlx5_allocate_psv_out out; | ||
| 177 | int i, err; | ||
| 178 | |||
| 179 | if (npsvs > MLX5_MAX_PSVS) | ||
| 180 | return -EINVAL; | ||
| 181 | |||
| 182 | memset(&in, 0, sizeof(in)); | ||
| 183 | memset(&out, 0, sizeof(out)); | ||
| 184 | |||
| 185 | in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_PSV); | ||
| 186 | in.npsv_pd = cpu_to_be32((npsvs << 28) | pdn); | ||
| 187 | err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); | ||
| 188 | if (err) { | ||
| 189 | mlx5_core_err(dev, "cmd exec failed %d\n", err); | ||
| 190 | return err; | ||
| 191 | } | ||
| 192 | |||
| 193 | if (out.hdr.status) { | ||
| 194 | mlx5_core_err(dev, "create_psv bad status %d\n", out.hdr.status); | ||
| 195 | return mlx5_cmd_status_to_err(&out.hdr); | ||
| 196 | } | ||
| 197 | |||
| 198 | for (i = 0; i < npsvs; i++) | ||
| 199 | sig_index[i] = be32_to_cpu(out.psv_idx[i]) & 0xffffff; | ||
| 200 | |||
| 201 | return err; | ||
| 202 | } | ||
| 203 | EXPORT_SYMBOL(mlx5_core_create_psv); | ||
| 204 | |||
| 205 | int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num) | ||
| 206 | { | ||
| 207 | struct mlx5_destroy_psv_in in; | ||
| 208 | struct mlx5_destroy_psv_out out; | ||
| 209 | int err; | ||
| 210 | |||
| 211 | memset(&in, 0, sizeof(in)); | ||
| 212 | memset(&out, 0, sizeof(out)); | ||
| 213 | |||
| 214 | in.psv_number = cpu_to_be32(psv_num); | ||
| 215 | in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_PSV); | ||
| 216 | err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); | ||
| 217 | if (err) { | ||
| 218 | mlx5_core_err(dev, "destroy_psv cmd exec failed %d\n", err); | ||
| 219 | goto out; | ||
| 220 | } | ||
| 221 | |||
| 222 | if (out.hdr.status) { | ||
| 223 | mlx5_core_err(dev, "destroy_psv bad status %d\n", out.hdr.status); | ||
| 224 | err = mlx5_cmd_status_to_err(&out.hdr); | ||
| 225 | goto out; | ||
| 226 | } | ||
| 227 | |||
| 228 | out: | ||
| 229 | return err; | ||
| 230 | } | ||
| 231 | EXPORT_SYMBOL(mlx5_core_destroy_psv); | ||
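
The new mr_table mirrors what mlx5 already keeps for QPs, CQs and SRQs: every mkey created through mlx5_core_create_mkey() is inserted into a radix tree keyed by mlx5_base_mkey(), so a completion handler holding nothing but the mkey reported in a CQE (for example a signature-error CQE) can find the owning mlx5_core_mr again. A sketch of that lookup, assuming the __mlx5_mr_lookup()/mlx5_base_mkey() helpers added later in this series:

/*
 * Sketch, not part of the patch: recover the mlx5_core_mr that owns the
 * mkey reported in an error CQE.  Readers may run from completion context;
 * the writers above take the lock with interrupts disabled.
 */
static struct mlx5_core_mr *example_mr_from_mkey(struct mlx5_core_dev *dev,
                                                 u32 cqe_mkey)
{
    struct mlx5_mr_table *table = &dev->priv.mr_table;
    struct mlx5_core_mr *mr;

    read_lock(&table->lock);
    mr = radix_tree_lookup(&table->tree, mlx5_base_mkey(cqe_mkey));
    read_unlock(&table->lock);

    return mr;    /* NULL if the mkey was never registered or was destroyed */
}
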
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 40462415291e..3c11acf67849 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
| @@ -395,6 +395,10 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) | |||
| 395 | if (rc) | 395 | if (rc) |
| 396 | return rc; | 396 | return rc; |
| 397 | } | 397 | } |
| 398 | |||
| 399 | if (scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) | ||
| 400 | task->protected = true; | ||
| 401 | |||
| 398 | if (sc->sc_data_direction == DMA_TO_DEVICE) { | 402 | if (sc->sc_data_direction == DMA_TO_DEVICE) { |
| 399 | unsigned out_len = scsi_out(sc)->length; | 403 | unsigned out_len = scsi_out(sc)->length; |
| 400 | struct iscsi_r2t_info *r2t = &task->unsol_r2t; | 404 | struct iscsi_r2t_info *r2t = &task->unsol_r2t; |
| @@ -823,6 +827,33 @@ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr, | |||
| 823 | 827 | ||
| 824 | sc->result = (DID_OK << 16) | rhdr->cmd_status; | 828 | sc->result = (DID_OK << 16) | rhdr->cmd_status; |
| 825 | 829 | ||
| 830 | if (task->protected) { | ||
| 831 | sector_t sector; | ||
| 832 | u8 ascq; | ||
| 833 | |||
| 834 | /** | ||
| 835 | * Transports that didn't implement the check_protection | ||
| 836 | * callback but still published T10-PI support to the SCSI | ||
| 837 | * midlayer deserve this BUG_ON. | ||
| 838 | **/ | ||
| 839 | BUG_ON(!session->tt->check_protection); | ||
| 840 | |||
| 841 | ascq = session->tt->check_protection(task, §or); | ||
| 842 | if (ascq) { | ||
| 843 | sc->result = DRIVER_SENSE << 24 | | ||
| 844 | SAM_STAT_CHECK_CONDITION; | ||
| 845 | scsi_build_sense_buffer(1, sc->sense_buffer, | ||
| 846 | ILLEGAL_REQUEST, 0x10, ascq); | ||
| 847 | sc->sense_buffer[7] = 0xc; /* Additional sense length */ | ||
| 848 | sc->sense_buffer[8] = 0; /* Information desc type */ | ||
| 849 | sc->sense_buffer[9] = 0xa; /* Additional desc length */ | ||
| 850 | sc->sense_buffer[10] = 0x80; /* Validity bit */ | ||
| 851 | |||
| 852 | put_unaligned_be64(sector, &sc->sense_buffer[12]); | ||
| 853 | goto out; | ||
| 854 | } | ||
| 855 | } | ||
| 856 | |||
| 826 | if (rhdr->response != ISCSI_STATUS_CMD_COMPLETED) { | 857 | if (rhdr->response != ISCSI_STATUS_CMD_COMPLETED) { |
| 827 | sc->result = DID_ERROR << 16; | 858 | sc->result = DID_ERROR << 16; |
| 828 | goto out; | 859 | goto out; |
| @@ -1567,6 +1598,7 @@ static inline struct iscsi_task *iscsi_alloc_task(struct iscsi_conn *conn, | |||
| 1567 | task->have_checked_conn = false; | 1598 | task->have_checked_conn = false; |
| 1568 | task->last_timeout = jiffies; | 1599 | task->last_timeout = jiffies; |
| 1569 | task->last_xfer = jiffies; | 1600 | task->last_xfer = jiffies; |
| 1601 | task->protected = false; | ||
| 1570 | INIT_LIST_HEAD(&task->running); | 1602 | INIT_LIST_HEAD(&task->running); |
| 1571 | return task; | 1603 | return task; |
| 1572 | } | 1604 | } |
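
When check_protection() reports a failure, the code above builds descriptor-format sense data (SPC-4): sense key ILLEGAL REQUEST, ASC 0x10, the transport-supplied ASCQ, and an information descriptor carrying the failing LBA. A hedged sketch of how the other end of that contract might decode it; the byte offsets are those written above, while the helper itself is purely illustrative.

#include <linux/types.h>
#include <scsi/scsi.h>
#include <asm/unaligned.h>

/* Sketch only: parse the T10-PI sense data constructed above. */
static bool example_parse_pi_sense(const u8 *sense, u8 *ascq, u64 *bad_lba)
{
    if ((sense[0] & 0x7f) != 0x72)                     /* descriptor format */
        return false;
    if ((sense[1] & 0x0f) != ILLEGAL_REQUEST || sense[2] != 0x10)
        return false;                                  /* not a PI check failure */

    *ascq = sense[3];    /* 0x1 guard, 0x2 app tag, 0x3 ref tag */

    /* information descriptor: type 0, valid bit set, LBA in bytes 12..19 */
    if (sense[8] == 0x00 && (sense[10] & 0x80))
        *bad_lba = get_unaligned_be64(&sense[12]);

    return true;
}
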
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c index d47ffc8d3e43..13e898332e45 100644 --- a/drivers/scsi/scsi_transport_srp.c +++ b/drivers/scsi/scsi_transport_srp.c | |||
| @@ -810,6 +810,7 @@ EXPORT_SYMBOL_GPL(srp_remove_host); | |||
| 810 | 810 | ||
| 811 | /** | 811 | /** |
| 812 | * srp_stop_rport_timers - stop the transport layer recovery timers | 812 | * srp_stop_rport_timers - stop the transport layer recovery timers |
| 813 | * @rport: SRP remote port for which to stop the timers. | ||
| 813 | * | 814 | * |
| 814 | * Must be called after srp_remove_host() and scsi_remove_host(). The caller | 815 | * Must be called after srp_remove_host() and scsi_remove_host(). The caller |
| 815 | * must hold a reference on the rport (rport->dev) and on the SCSI host | 816 | * must hold a reference on the rport (rport->dev) and on the SCSI host |
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h index 2202c7f72b75..f6b17ac601bd 100644 --- a/include/linux/mlx5/cq.h +++ b/include/linux/mlx5/cq.h | |||
| @@ -80,6 +80,7 @@ enum { | |||
| 80 | MLX5_CQE_RESP_SEND_IMM = 3, | 80 | MLX5_CQE_RESP_SEND_IMM = 3, |
| 81 | MLX5_CQE_RESP_SEND_INV = 4, | 81 | MLX5_CQE_RESP_SEND_INV = 4, |
| 82 | MLX5_CQE_RESIZE_CQ = 5, | 82 | MLX5_CQE_RESIZE_CQ = 5, |
| 83 | MLX5_CQE_SIG_ERR = 12, | ||
| 83 | MLX5_CQE_REQ_ERR = 13, | 84 | MLX5_CQE_REQ_ERR = 13, |
| 84 | MLX5_CQE_RESP_ERR = 14, | 85 | MLX5_CQE_RESP_ERR = 14, |
| 85 | MLX5_CQE_INVALID = 15, | 86 | MLX5_CQE_INVALID = 15, |
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 817a6fae6d2c..407bdb67fd4f 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h | |||
| @@ -48,6 +48,8 @@ enum { | |||
| 48 | MLX5_MAX_COMMANDS = 32, | 48 | MLX5_MAX_COMMANDS = 32, |
| 49 | MLX5_CMD_DATA_BLOCK_SIZE = 512, | 49 | MLX5_CMD_DATA_BLOCK_SIZE = 512, |
| 50 | MLX5_PCI_CMD_XPORT = 7, | 50 | MLX5_PCI_CMD_XPORT = 7, |
| 51 | MLX5_MKEY_BSF_OCTO_SIZE = 4, | ||
| 52 | MLX5_MAX_PSVS = 4, | ||
| 51 | }; | 53 | }; |
| 52 | 54 | ||
| 53 | enum { | 55 | enum { |
| @@ -116,6 +118,7 @@ enum { | |||
| 116 | MLX5_MKEY_MASK_START_ADDR = 1ull << 6, | 118 | MLX5_MKEY_MASK_START_ADDR = 1ull << 6, |
| 117 | MLX5_MKEY_MASK_PD = 1ull << 7, | 119 | MLX5_MKEY_MASK_PD = 1ull << 7, |
| 118 | MLX5_MKEY_MASK_EN_RINVAL = 1ull << 8, | 120 | MLX5_MKEY_MASK_EN_RINVAL = 1ull << 8, |
| 121 | MLX5_MKEY_MASK_EN_SIGERR = 1ull << 9, | ||
| 119 | MLX5_MKEY_MASK_BSF_EN = 1ull << 12, | 122 | MLX5_MKEY_MASK_BSF_EN = 1ull << 12, |
| 120 | MLX5_MKEY_MASK_KEY = 1ull << 13, | 123 | MLX5_MKEY_MASK_KEY = 1ull << 13, |
| 121 | MLX5_MKEY_MASK_QPN = 1ull << 14, | 124 | MLX5_MKEY_MASK_QPN = 1ull << 14, |
| @@ -555,6 +558,23 @@ struct mlx5_cqe64 { | |||
| 555 | u8 op_own; | 558 | u8 op_own; |
| 556 | }; | 559 | }; |
| 557 | 560 | ||
| 561 | struct mlx5_sig_err_cqe { | ||
| 562 | u8 rsvd0[16]; | ||
| 563 | __be32 expected_trans_sig; | ||
| 564 | __be32 actual_trans_sig; | ||
| 565 | __be32 expected_reftag; | ||
| 566 | __be32 actual_reftag; | ||
| 567 | __be16 syndrome; | ||
| 568 | u8 rsvd22[2]; | ||
| 569 | __be32 mkey; | ||
| 570 | __be64 err_offset; | ||
| 571 | u8 rsvd30[8]; | ||
| 572 | __be32 qpn; | ||
| 573 | u8 rsvd38[2]; | ||
| 574 | u8 signature; | ||
| 575 | u8 op_own; | ||
| 576 | }; | ||
| 577 | |||
| 558 | struct mlx5_wqe_srq_next_seg { | 578 | struct mlx5_wqe_srq_next_seg { |
| 559 | u8 rsvd0[2]; | 579 | u8 rsvd0[2]; |
| 560 | __be16 next_wqe_index; | 580 | __be16 next_wqe_index; |
| @@ -936,4 +956,27 @@ enum { | |||
| 936 | MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO = 1 << 0 | 956 | MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO = 1 << 0 |
| 937 | }; | 957 | }; |
| 938 | 958 | ||
| 959 | struct mlx5_allocate_psv_in { | ||
| 960 | struct mlx5_inbox_hdr hdr; | ||
| 961 | __be32 npsv_pd; | ||
| 962 | __be32 rsvd_psv0; | ||
| 963 | }; | ||
| 964 | |||
| 965 | struct mlx5_allocate_psv_out { | ||
| 966 | struct mlx5_outbox_hdr hdr; | ||
| 967 | u8 rsvd[8]; | ||
| 968 | __be32 psv_idx[4]; | ||
| 969 | }; | ||
| 970 | |||
| 971 | struct mlx5_destroy_psv_in { | ||
| 972 | struct mlx5_inbox_hdr hdr; | ||
| 973 | __be32 psv_number; | ||
| 974 | u8 rsvd[4]; | ||
| 975 | }; | ||
| 976 | |||
| 977 | struct mlx5_destroy_psv_out { | ||
| 978 | struct mlx5_outbox_hdr hdr; | ||
| 979 | u8 rsvd[8]; | ||
| 980 | }; | ||
| 981 | |||
| 939 | #endif /* MLX5_DEVICE_H */ | 982 | #endif /* MLX5_DEVICE_H */ |
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 130bc8d77fa5..93cef6313e72 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h | |||
| @@ -401,6 +401,26 @@ struct mlx5_eq { | |||
| 401 | struct mlx5_rsc_debug *dbg; | 401 | struct mlx5_rsc_debug *dbg; |
| 402 | }; | 402 | }; |
| 403 | 403 | ||
| 404 | struct mlx5_core_psv { | ||
| 405 | u32 psv_idx; | ||
| 406 | struct psv_layout { | ||
| 407 | u32 pd; | ||
| 408 | u16 syndrome; | ||
| 409 | u16 reserved; | ||
| 410 | u16 bg; | ||
| 411 | u16 app_tag; | ||
| 412 | u32 ref_tag; | ||
| 413 | } psv; | ||
| 414 | }; | ||
| 415 | |||
| 416 | struct mlx5_core_sig_ctx { | ||
| 417 | struct mlx5_core_psv psv_memory; | ||
| 418 | struct mlx5_core_psv psv_wire; | ||
| 419 | struct ib_sig_err err_item; | ||
| 420 | bool sig_status_checked; | ||
| 421 | bool sig_err_exists; | ||
| 422 | u32 sigerr_count; | ||
| 423 | }; | ||
| 404 | 424 | ||
| 405 | struct mlx5_core_mr { | 425 | struct mlx5_core_mr { |
| 406 | u64 iova; | 426 | u64 iova; |
| @@ -475,6 +495,13 @@ struct mlx5_srq_table { | |||
| 475 | struct radix_tree_root tree; | 495 | struct radix_tree_root tree; |
| 476 | }; | 496 | }; |
| 477 | 497 | ||
| 498 | struct mlx5_mr_table { | ||
| 499 | /* protect radix tree | ||
| 500 | */ | ||
| 501 | rwlock_t lock; | ||
| 502 | struct radix_tree_root tree; | ||
| 503 | }; | ||
| 504 | |||
| 478 | struct mlx5_priv { | 505 | struct mlx5_priv { |
| 479 | char name[MLX5_MAX_NAME_LEN]; | 506 | char name[MLX5_MAX_NAME_LEN]; |
| 480 | struct mlx5_eq_table eq_table; | 507 | struct mlx5_eq_table eq_table; |
| @@ -504,6 +531,10 @@ struct mlx5_priv { | |||
| 504 | struct mlx5_cq_table cq_table; | 531 | struct mlx5_cq_table cq_table; |
| 505 | /* end: cq staff */ | 532 | /* end: cq staff */ |
| 506 | 533 | ||
| 534 | /* start: mr staff */ | ||
| 535 | struct mlx5_mr_table mr_table; | ||
| 536 | /* end: mr staff */ | ||
| 537 | |||
| 507 | /* start: alloc staff */ | 538 | /* start: alloc staff */ |
| 508 | struct mutex pgdir_mutex; | 539 | struct mutex pgdir_mutex; |
| 509 | struct list_head pgdir_list; | 540 | struct list_head pgdir_list; |
| @@ -651,6 +682,11 @@ static inline void mlx5_vfree(const void *addr) | |||
| 651 | kfree(addr); | 682 | kfree(addr); |
| 652 | } | 683 | } |
| 653 | 684 | ||
| 685 | static inline u32 mlx5_base_mkey(const u32 key) | ||
| 686 | { | ||
| 687 | return key & 0xffffff00u; | ||
| 688 | } | ||
| 689 | |||
| 654 | int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev); | 690 | int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev); |
| 655 | void mlx5_dev_cleanup(struct mlx5_core_dev *dev); | 691 | void mlx5_dev_cleanup(struct mlx5_core_dev *dev); |
| 656 | int mlx5_cmd_init(struct mlx5_core_dev *dev); | 692 | int mlx5_cmd_init(struct mlx5_core_dev *dev); |
| @@ -685,6 +721,8 @@ int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, | |||
| 685 | struct mlx5_query_srq_mbox_out *out); | 721 | struct mlx5_query_srq_mbox_out *out); |
| 686 | int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, | 722 | int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, |
| 687 | u16 lwm, int is_srq); | 723 | u16 lwm, int is_srq); |
| 724 | void mlx5_init_mr_table(struct mlx5_core_dev *dev); | ||
| 725 | void mlx5_cleanup_mr_table(struct mlx5_core_dev *dev); | ||
| 688 | int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, | 726 | int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, |
| 689 | struct mlx5_create_mkey_mbox_in *in, int inlen, | 727 | struct mlx5_create_mkey_mbox_in *in, int inlen, |
| 690 | mlx5_cmd_cbk_t callback, void *context, | 728 | mlx5_cmd_cbk_t callback, void *context, |
| @@ -746,6 +784,9 @@ void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db); | |||
| 746 | const char *mlx5_command_str(int command); | 784 | const char *mlx5_command_str(int command); |
| 747 | int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev); | 785 | int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev); |
| 748 | void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev); | 786 | void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev); |
| 787 | int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn, | ||
| 788 | int npsvs, u32 *sig_index); | ||
| 789 | int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num); | ||
| 749 | 790 | ||
| 750 | static inline u32 mlx5_mkey_to_idx(u32 mkey) | 791 | static inline u32 mlx5_mkey_to_idx(u32 mkey) |
| 751 | { | 792 | { |
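
driver.h now carries everything a signature-enabled memory region needs on the mlx5 side: two PSVs (one per domain) grouped in mlx5_core_sig_ctx, the PSV create/destroy commands, and mlx5_base_mkey() for masking off the variant byte of an mkey. A sketch of how an MR-creation path might populate the context, assuming a pdn already allocated from the device; the helper name is illustrative and error handling around it is elided.

/* Sketch only; the surrounding MR creation and teardown are not shown. */
static int example_init_sig_ctx(struct mlx5_core_dev *dev, u32 pdn,
                                struct mlx5_core_sig_ctx *sig)
{
    u32 psv_index[2];
    int err;

    /* one PSV tracks the memory domain, the other the wire domain */
    err = mlx5_core_create_psv(dev, pdn, 2, psv_index);
    if (err)
        return err;

    sig->psv_memory.psv_idx = psv_index[0];
    sig->psv_wire.psv_idx   = psv_index[1];

    sig->sig_status_checked = true;    /* nothing pending yet */
    sig->sig_err_exists     = false;
    sig->sigerr_count       = 0;

    return 0;
}
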
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index d51eff713549..f829ad80ff28 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h | |||
| @@ -37,6 +37,9 @@ | |||
| 37 | #include <linux/mlx5/driver.h> | 37 | #include <linux/mlx5/driver.h> |
| 38 | 38 | ||
| 39 | #define MLX5_INVALID_LKEY 0x100 | 39 | #define MLX5_INVALID_LKEY 0x100 |
| 40 | #define MLX5_SIG_WQE_SIZE (MLX5_SEND_WQE_BB * 5) | ||
| 41 | #define MLX5_DIF_SIZE 8 | ||
| 42 | #define MLX5_STRIDE_BLOCK_OP 0x400 | ||
| 40 | 43 | ||
| 41 | enum mlx5_qp_optpar { | 44 | enum mlx5_qp_optpar { |
| 42 | MLX5_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0, | 45 | MLX5_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0, |
| @@ -151,6 +154,11 @@ enum { | |||
| 151 | MLX5_SND_DBR = 1, | 154 | MLX5_SND_DBR = 1, |
| 152 | }; | 155 | }; |
| 153 | 156 | ||
| 157 | enum { | ||
| 158 | MLX5_FLAGS_INLINE = 1<<7, | ||
| 159 | MLX5_FLAGS_CHECK_FREE = 1<<5, | ||
| 160 | }; | ||
| 161 | |||
| 154 | struct mlx5_wqe_fmr_seg { | 162 | struct mlx5_wqe_fmr_seg { |
| 155 | __be32 flags; | 163 | __be32 flags; |
| 156 | __be32 mem_key; | 164 | __be32 mem_key; |
| @@ -278,6 +286,60 @@ struct mlx5_wqe_inline_seg { | |||
| 278 | __be32 byte_count; | 286 | __be32 byte_count; |
| 279 | }; | 287 | }; |
| 280 | 288 | ||
| 289 | struct mlx5_bsf { | ||
| 290 | struct mlx5_bsf_basic { | ||
| 291 | u8 bsf_size_sbs; | ||
| 292 | u8 check_byte_mask; | ||
| 293 | union { | ||
| 294 | u8 copy_byte_mask; | ||
| 295 | u8 bs_selector; | ||
| 296 | u8 rsvd_wflags; | ||
| 297 | } wire; | ||
| 298 | union { | ||
| 299 | u8 bs_selector; | ||
| 300 | u8 rsvd_mflags; | ||
| 301 | } mem; | ||
| 302 | __be32 raw_data_size; | ||
| 303 | __be32 w_bfs_psv; | ||
| 304 | __be32 m_bfs_psv; | ||
| 305 | } basic; | ||
| 306 | struct mlx5_bsf_ext { | ||
| 307 | __be32 t_init_gen_pro_size; | ||
| 308 | __be32 rsvd_epi_size; | ||
| 309 | __be32 w_tfs_psv; | ||
| 310 | __be32 m_tfs_psv; | ||
| 311 | } ext; | ||
| 312 | struct mlx5_bsf_inl { | ||
| 313 | __be32 w_inl_vld; | ||
| 314 | __be32 w_rsvd; | ||
| 315 | __be64 w_block_format; | ||
| 316 | __be32 m_inl_vld; | ||
| 317 | __be32 m_rsvd; | ||
| 318 | __be64 m_block_format; | ||
| 319 | } inl; | ||
| 320 | }; | ||
| 321 | |||
| 322 | struct mlx5_klm { | ||
| 323 | __be32 bcount; | ||
| 324 | __be32 key; | ||
| 325 | __be64 va; | ||
| 326 | }; | ||
| 327 | |||
| 328 | struct mlx5_stride_block_entry { | ||
| 329 | __be16 stride; | ||
| 330 | __be16 bcount; | ||
| 331 | __be32 key; | ||
| 332 | __be64 va; | ||
| 333 | }; | ||
| 334 | |||
| 335 | struct mlx5_stride_block_ctrl_seg { | ||
| 336 | __be32 bcount_per_cycle; | ||
| 337 | __be32 op; | ||
| 338 | __be32 repeat_count; | ||
| 339 | u16 rsvd; | ||
| 340 | __be16 num_entries; | ||
| 341 | }; | ||
| 342 | |||
| 281 | struct mlx5_core_qp { | 343 | struct mlx5_core_qp { |
| 282 | void (*event) (struct mlx5_core_qp *, int); | 344 | void (*event) (struct mlx5_core_qp *, int); |
| 283 | int qpn; | 345 | int qpn; |
| @@ -444,6 +506,11 @@ static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u | |||
| 444 | return radix_tree_lookup(&dev->priv.qp_table.tree, qpn); | 506 | return radix_tree_lookup(&dev->priv.qp_table.tree, qpn); |
| 445 | } | 507 | } |
| 446 | 508 | ||
| 509 | static inline struct mlx5_core_mr *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key) | ||
| 510 | { | ||
| 511 | return radix_tree_lookup(&dev->priv.mr_table.tree, key); | ||
| 512 | } | ||
| 513 | |||
| 447 | int mlx5_core_create_qp(struct mlx5_core_dev *dev, | 514 | int mlx5_core_create_qp(struct mlx5_core_dev *dev, |
| 448 | struct mlx5_core_qp *qp, | 515 | struct mlx5_core_qp *qp, |
| 449 | struct mlx5_create_qp_mbox_in *in, | 516 | struct mlx5_create_qp_mbox_in *in, |
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h index f29e3a27c2cc..0e3ff30647d5 100644 --- a/include/rdma/ib_cm.h +++ b/include/rdma/ib_cm.h | |||
| @@ -601,5 +601,4 @@ struct ib_cm_sidr_rep_param { | |||
| 601 | int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id, | 601 | int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id, |
| 602 | struct ib_cm_sidr_rep_param *param); | 602 | struct ib_cm_sidr_rep_param *param); |
| 603 | 603 | ||
| 604 | int ib_update_cm_av(struct ib_cm_id *id, const u8 *smac, const u8 *alt_smac); | ||
| 605 | #endif /* IB_CM_H */ | 604 | #endif /* IB_CM_H */ |
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h index 9ee0d2e51b16..1ea0b65c4cfb 100644 --- a/include/rdma/ib_umem.h +++ b/include/rdma/ib_umem.h | |||
| @@ -46,17 +46,12 @@ struct ib_umem { | |||
| 46 | int page_size; | 46 | int page_size; |
| 47 | int writable; | 47 | int writable; |
| 48 | int hugetlb; | 48 | int hugetlb; |
| 49 | struct list_head chunk_list; | ||
| 50 | struct work_struct work; | 49 | struct work_struct work; |
| 51 | struct mm_struct *mm; | 50 | struct mm_struct *mm; |
| 52 | unsigned long diff; | 51 | unsigned long diff; |
| 53 | }; | 52 | struct sg_table sg_head; |
| 54 | 53 | int nmap; | |
| 55 | struct ib_umem_chunk { | 54 | int npages; |
| 56 | struct list_head list; | ||
| 57 | int nents; | ||
| 58 | int nmap; | ||
| 59 | struct scatterlist page_list[0]; | ||
| 60 | }; | 55 | }; |
| 61 | 56 | ||
| 62 | #ifdef CONFIG_INFINIBAND_USER_MEM | 57 | #ifdef CONFIG_INFINIBAND_USER_MEM |
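
ib_umem no longer exposes a list of ib_umem_chunk objects; the pinned pages now live in a single sg_table (sg_head) with nmap mapped entries. Drivers that used to walk chunk->page_list have to switch to the scatterlist iterators. A hedged sketch of the new-style walk, assuming a umem obtained from ib_umem_get():

#include <linux/scatterlist.h>
#include <rdma/ib_umem.h>

/* Sketch only: visit every DMA-mapped segment of a pinned umem. */
static void example_walk_umem(struct ib_umem *umem)
{
    struct scatterlist *sg;
    int i;

    for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
        dma_addr_t addr = sg_dma_address(sg);
        unsigned int len = sg_dma_len(sg);

        /* ... program addr/len into the HCA's translation tables ... */
        (void)addr;
        (void)len;
    }
}
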
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 57777167dea7..acd825182977 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h | |||
| @@ -122,7 +122,19 @@ enum ib_device_cap_flags { | |||
| 122 | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22), | 122 | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22), |
| 123 | IB_DEVICE_MEM_WINDOW_TYPE_2A = (1<<23), | 123 | IB_DEVICE_MEM_WINDOW_TYPE_2A = (1<<23), |
| 124 | IB_DEVICE_MEM_WINDOW_TYPE_2B = (1<<24), | 124 | IB_DEVICE_MEM_WINDOW_TYPE_2B = (1<<24), |
| 125 | IB_DEVICE_MANAGED_FLOW_STEERING = (1<<29) | 125 | IB_DEVICE_MANAGED_FLOW_STEERING = (1<<29), |
| 126 | IB_DEVICE_SIGNATURE_HANDOVER = (1<<30) | ||
| 127 | }; | ||
| 128 | |||
| 129 | enum ib_signature_prot_cap { | ||
| 130 | IB_PROT_T10DIF_TYPE_1 = 1, | ||
| 131 | IB_PROT_T10DIF_TYPE_2 = 1 << 1, | ||
| 132 | IB_PROT_T10DIF_TYPE_3 = 1 << 2, | ||
| 133 | }; | ||
| 134 | |||
| 135 | enum ib_signature_guard_cap { | ||
| 136 | IB_GUARD_T10DIF_CRC = 1, | ||
| 137 | IB_GUARD_T10DIF_CSUM = 1 << 1, | ||
| 126 | }; | 138 | }; |
| 127 | 139 | ||
| 128 | enum ib_atomic_cap { | 140 | enum ib_atomic_cap { |
| @@ -172,6 +184,8 @@ struct ib_device_attr { | |||
| 172 | unsigned int max_fast_reg_page_list_len; | 184 | unsigned int max_fast_reg_page_list_len; |
| 173 | u16 max_pkeys; | 185 | u16 max_pkeys; |
| 174 | u8 local_ca_ack_delay; | 186 | u8 local_ca_ack_delay; |
| 187 | int sig_prot_cap; | ||
| 188 | int sig_guard_cap; | ||
| 175 | }; | 189 | }; |
| 176 | 190 | ||
| 177 | enum ib_mtu { | 191 | enum ib_mtu { |
| @@ -461,6 +475,130 @@ int ib_rate_to_mult(enum ib_rate rate) __attribute_const__; | |||
| 461 | */ | 475 | */ |
| 462 | int ib_rate_to_mbps(enum ib_rate rate) __attribute_const__; | 476 | int ib_rate_to_mbps(enum ib_rate rate) __attribute_const__; |
| 463 | 477 | ||
| 478 | enum ib_mr_create_flags { | ||
| 479 | IB_MR_SIGNATURE_EN = 1, | ||
| 480 | }; | ||
| 481 | |||
| 482 | /** | ||
| 483 | * ib_mr_init_attr - Memory region init attributes passed to routine | ||
| 484 | * ib_create_mr. | ||
| 485 | * @max_reg_descriptors: max number of registration descriptors that | ||
| 486 | * may be used with registration work requests. | ||
| 487 | * @flags: MR creation flags bit mask. | ||
| 488 | */ | ||
| 489 | struct ib_mr_init_attr { | ||
| 490 | int max_reg_descriptors; | ||
| 491 | u32 flags; | ||
| 492 | }; | ||
| 493 | |||
| 494 | enum ib_signature_type { | ||
| 495 | IB_SIG_TYPE_T10_DIF, | ||
| 496 | }; | ||
| 497 | |||
| 498 | /** | ||
| 499 | * T10-DIF Signature types | ||
| 500 | * T10-DIF types are defined by SCSI | ||
| 501 | * specifications. | ||
| 502 | */ | ||
| 503 | enum ib_t10_dif_type { | ||
| 504 | IB_T10DIF_NONE, | ||
| 505 | IB_T10DIF_TYPE1, | ||
| 506 | IB_T10DIF_TYPE2, | ||
| 507 | IB_T10DIF_TYPE3 | ||
| 508 | }; | ||
| 509 | |||
| 510 | /** | ||
| 511 | * Signature T10-DIF block-guard types | ||
| 512 | * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules. | ||
| 513 | * IB_T10DIF_CSUM: Corresponds to IP checksum rules. | ||
| 514 | */ | ||
| 515 | enum ib_t10_dif_bg_type { | ||
| 516 | IB_T10DIF_CRC, | ||
| 517 | IB_T10DIF_CSUM | ||
| 518 | }; | ||
| 519 | |||
| 520 | /** | ||
| 521 | * struct ib_t10_dif_domain - Parameters specific for T10-DIF | ||
| 522 | * domain. | ||
| 523 | * @type: T10-DIF type (0|1|2|3) | ||
| 524 | * @bg_type: T10-DIF block guard type (CRC|CSUM) | ||
| 525 | * @pi_interval: protection information interval. | ||
| 526 | * @bg: seed of guard computation. | ||
| 527 | * @app_tag: application tag of guard block | ||
| 528 | * @ref_tag: initial guard block reference tag. | ||
| 529 | * @type3_inc_reftag: T10-DIF type 3 does not state | ||
| 530 | * about the reference tag, it is the user | ||
| 531 | * choice to increment it or not. | ||
| 532 | */ | ||
| 533 | struct ib_t10_dif_domain { | ||
| 534 | enum ib_t10_dif_type type; | ||
| 535 | enum ib_t10_dif_bg_type bg_type; | ||
| 536 | u16 pi_interval; | ||
| 537 | u16 bg; | ||
| 538 | u16 app_tag; | ||
| 539 | u32 ref_tag; | ||
| 540 | bool type3_inc_reftag; | ||
| 541 | }; | ||
| 542 | |||
| 543 | /** | ||
| 544 | * struct ib_sig_domain - Parameters for signature domain | ||
| 545 | * @sig_type: specific signauture type | ||
| 546 | * @sig: union of all signature domain attributes that may | ||
| 547 | * be used to set domain layout. | ||
| 548 | */ | ||
| 549 | struct ib_sig_domain { | ||
| 550 | enum ib_signature_type sig_type; | ||
| 551 | union { | ||
| 552 | struct ib_t10_dif_domain dif; | ||
| 553 | } sig; | ||
| 554 | }; | ||
| 555 | |||
| 556 | /** | ||
| 557 | * struct ib_sig_attrs - Parameters for signature handover operation | ||
| 558 | * @check_mask: bitmask for signature byte check (8 bytes) | ||
| 559 | * @mem: memory domain layout descriptor. | ||
| 560 | * @wire: wire domain layout descriptor. | ||
| 561 | */ | ||
| 562 | struct ib_sig_attrs { | ||
| 563 | u8 check_mask; | ||
| 564 | struct ib_sig_domain mem; | ||
| 565 | struct ib_sig_domain wire; | ||
| 566 | }; | ||
| 567 | |||
| 568 | enum ib_sig_err_type { | ||
| 569 | IB_SIG_BAD_GUARD, | ||
| 570 | IB_SIG_BAD_REFTAG, | ||
| 571 | IB_SIG_BAD_APPTAG, | ||
| 572 | }; | ||
| 573 | |||
| 574 | /** | ||
| 575 | * struct ib_sig_err - signature error descriptor | ||
| 576 | */ | ||
| 577 | struct ib_sig_err { | ||
| 578 | enum ib_sig_err_type err_type; | ||
| 579 | u32 expected; | ||
| 580 | u32 actual; | ||
| 581 | u64 sig_err_offset; | ||
| 582 | u32 key; | ||
| 583 | }; | ||
| 584 | |||
| 585 | enum ib_mr_status_check { | ||
| 586 | IB_MR_CHECK_SIG_STATUS = 1, | ||
| 587 | }; | ||
| 588 | |||
| 589 | /** | ||
| 590 | * struct ib_mr_status - Memory region status container | ||
| 591 | * | ||
| 592 | * @fail_status: Bitmask of MR checks status. For each | ||
| 593 | * failed check a corresponding status bit is set. | ||
| 594 | * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS | ||
| 595 | * failure. | ||
| 596 | */ | ||
| 597 | struct ib_mr_status { | ||
| 598 | u32 fail_status; | ||
| 599 | struct ib_sig_err sig_err; | ||
| 600 | }; | ||
| 601 | |||
| 464 | /** | 602 | /** |
| 465 | * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate | 603 | * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate |
| 466 | * enum. | 604 | * enum. |
| @@ -644,6 +782,7 @@ enum ib_qp_create_flags { | |||
| 644 | IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0, | 782 | IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0, |
| 645 | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1, | 783 | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1, |
| 646 | IB_QP_CREATE_NETIF_QP = 1 << 5, | 784 | IB_QP_CREATE_NETIF_QP = 1 << 5, |
| 785 | IB_QP_CREATE_SIGNATURE_EN = 1 << 6, | ||
| 647 | /* reserve bits 26-31 for low level drivers' internal use */ | 786 | /* reserve bits 26-31 for low level drivers' internal use */ |
| 648 | IB_QP_CREATE_RESERVED_START = 1 << 26, | 787 | IB_QP_CREATE_RESERVED_START = 1 << 26, |
| 649 | IB_QP_CREATE_RESERVED_END = 1 << 31, | 788 | IB_QP_CREATE_RESERVED_END = 1 << 31, |
| @@ -808,6 +947,7 @@ enum ib_wr_opcode { | |||
| 808 | IB_WR_MASKED_ATOMIC_CMP_AND_SWP, | 947 | IB_WR_MASKED_ATOMIC_CMP_AND_SWP, |
| 809 | IB_WR_MASKED_ATOMIC_FETCH_AND_ADD, | 948 | IB_WR_MASKED_ATOMIC_FETCH_AND_ADD, |
| 810 | IB_WR_BIND_MW, | 949 | IB_WR_BIND_MW, |
| 950 | IB_WR_REG_SIG_MR, | ||
| 811 | /* reserve values for low level drivers' internal use. | 951 | /* reserve values for low level drivers' internal use. |
| 812 | * These values will not be used at all in the ib core layer. | 952 | * These values will not be used at all in the ib core layer. |
| 813 | */ | 953 | */ |
| @@ -913,6 +1053,12 @@ struct ib_send_wr { | |||
| 913 | u32 rkey; | 1053 | u32 rkey; |
| 914 | struct ib_mw_bind_info bind_info; | 1054 | struct ib_mw_bind_info bind_info; |
| 915 | } bind_mw; | 1055 | } bind_mw; |
| 1056 | struct { | ||
| 1057 | struct ib_sig_attrs *sig_attrs; | ||
| 1058 | struct ib_mr *sig_mr; | ||
| 1059 | int access_flags; | ||
| 1060 | struct ib_sge *prot; | ||
| 1061 | } sig_handover; | ||
| 916 | } wr; | 1062 | } wr; |
| 917 | u32 xrc_remote_srq_num; /* XRC TGT QPs only */ | 1063 | u32 xrc_remote_srq_num; /* XRC TGT QPs only */ |
| 918 | }; | 1064 | }; |
| @@ -1403,6 +1549,9 @@ struct ib_device { | |||
| 1403 | int (*query_mr)(struct ib_mr *mr, | 1549 | int (*query_mr)(struct ib_mr *mr, |
| 1404 | struct ib_mr_attr *mr_attr); | 1550 | struct ib_mr_attr *mr_attr); |
| 1405 | int (*dereg_mr)(struct ib_mr *mr); | 1551 | int (*dereg_mr)(struct ib_mr *mr); |
| 1552 | int (*destroy_mr)(struct ib_mr *mr); | ||
| 1553 | struct ib_mr * (*create_mr)(struct ib_pd *pd, | ||
| 1554 | struct ib_mr_init_attr *mr_init_attr); | ||
| 1406 | struct ib_mr * (*alloc_fast_reg_mr)(struct ib_pd *pd, | 1555 | struct ib_mr * (*alloc_fast_reg_mr)(struct ib_pd *pd, |
| 1407 | int max_page_list_len); | 1556 | int max_page_list_len); |
| 1408 | struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device, | 1557 | struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device, |
| @@ -1451,6 +1600,8 @@ struct ib_device { | |||
| 1451 | *flow_attr, | 1600 | *flow_attr, |
| 1452 | int domain); | 1601 | int domain); |
| 1453 | int (*destroy_flow)(struct ib_flow *flow_id); | 1602 | int (*destroy_flow)(struct ib_flow *flow_id); |
| 1603 | int (*check_mr_status)(struct ib_mr *mr, u32 check_mask, | ||
| 1604 | struct ib_mr_status *mr_status); | ||
| 1454 | 1605 | ||
| 1455 | struct ib_dma_mapping_ops *dma_ops; | 1606 | struct ib_dma_mapping_ops *dma_ops; |
| 1456 | 1607 | ||
| @@ -2248,6 +2399,25 @@ int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr); | |||
| 2248 | */ | 2399 | */ |
| 2249 | int ib_dereg_mr(struct ib_mr *mr); | 2400 | int ib_dereg_mr(struct ib_mr *mr); |
| 2250 | 2401 | ||
| 2402 | |||
| 2403 | /** | ||
| 2404 | * ib_create_mr - Allocates a memory region that may be used for | ||
| 2405 | * signature handover operations. | ||
| 2406 | * @pd: The protection domain associated with the region. | ||
| 2407 | * @mr_init_attr: memory region init attributes. | ||
| 2408 | */ | ||
| 2409 | struct ib_mr *ib_create_mr(struct ib_pd *pd, | ||
| 2410 | struct ib_mr_init_attr *mr_init_attr); | ||
| 2411 | |||
| 2412 | /** | ||
| 2413 | * ib_destroy_mr - Destroys a memory region that was created using | ||
| 2414 | * ib_create_mr and removes it from HW translation tables. | ||
| 2415 | * @mr: The memory region to destroy. | ||
| 2416 | * | ||
| 2417 | * This function can fail if the memory region has memory windows bound to it. | ||
| 2418 | */ | ||
| 2419 | int ib_destroy_mr(struct ib_mr *mr); | ||
| 2420 | |||
| 2251 | /** | 2421 | /** |
| 2252 | * ib_alloc_fast_reg_mr - Allocates memory region usable with the | 2422 | * ib_alloc_fast_reg_mr - Allocates memory region usable with the |
| 2253 | * IB_WR_FAST_REG_MR send work request. | 2423 | * IB_WR_FAST_REG_MR send work request. |
| @@ -2433,4 +2603,19 @@ static inline int ib_check_mr_access(int flags) | |||
| 2433 | return 0; | 2603 | return 0; |
| 2434 | } | 2604 | } |
| 2435 | 2605 | ||
| 2606 | /** | ||
| 2607 | * ib_check_mr_status - lightweight check of MR status. | ||
| 2608 | * This routine may provide status checks on a selected | ||
| 2609 | * ib_mr. First use is for signature status check. | ||
| 2610 | * | ||
| 2611 | * @mr: A memory region. | ||
| 2612 | * @check_mask: Bitmask of which checks to perform from | ||
| 2613 | * ib_mr_status_check enumeration. | ||
| 2614 | * @mr_status: The container of relevant status checks. | ||
| 2615 | * Failed checks will be indicated in the status bitmask | ||
| 2616 | * and the relevant info will be in the error item. | ||
| 2617 | */ | ||
| 2618 | int ib_check_mr_status(struct ib_mr *mr, u32 check_mask, | ||
| 2619 | struct ib_mr_status *mr_status); | ||
| 2620 | |||
| 2436 | #endif /* IB_VERBS_H */ | 2621 | #endif /* IB_VERBS_H */ |
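Taken together, the ib_verbs.h hunks above add a signature-MR allocation path (ib_create_mr/ib_destroy_mr), a new IB_WR_REG_SIG_MR opcode with a sig_handover work-request member, and a status query (ib_check_mr_status) backed by the new check_mr_status device method. The following is a minimal consumer sketch, not part of the patch: the ib_mr_init_attr field names (max_reg_descriptors, IB_MR_SIGNATURE_EN) and the ib_mr_status/ib_sig_err layout are assumed from the rest of the series, and example_sig_handover itself is hypothetical.

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/string.h>
#include <rdma/ib_verbs.h>

/*
 * Illustrative only: allocate a signature-enabled MR, post an
 * IB_WR_REG_SIG_MR work request and, once the transfer has completed,
 * query the signature status before tearing the MR down.
 */
static int example_sig_handover(struct ib_pd *pd, struct ib_qp *qp,
				struct ib_sig_attrs *sig_attrs,
				struct ib_sge *data_sge, struct ib_sge *prot_sge)
{
	struct ib_mr_init_attr mr_attr = {
		.max_reg_descriptors = 2,	/* data + protection (assumed field name) */
		.flags = IB_MR_SIGNATURE_EN,	/* assumed flag name */
	};
	struct ib_send_wr wr, *bad_wr;
	struct ib_mr_status mr_status;
	struct ib_mr *sig_mr;
	int ret;

	sig_mr = ib_create_mr(pd, &mr_attr);
	if (IS_ERR(sig_mr))
		return PTR_ERR(sig_mr);

	memset(&wr, 0, sizeof(wr));
	wr.opcode = IB_WR_REG_SIG_MR;
	wr.sg_list = data_sge;			/* data buffer */
	wr.num_sge = 1;
	wr.wr.sig_handover.sig_attrs = sig_attrs;
	wr.wr.sig_handover.sig_mr = sig_mr;
	wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
	wr.wr.sig_handover.prot = prot_sge;	/* NULL if no protection buffer */

	ret = ib_post_send(qp, &wr, &bad_wr);
	if (ret)
		goto out;

	/* ... wait for the data transfer to complete ... */

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (!ret && (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS))
		pr_err("signature error %d at offset %llu\n",
		       mr_status.sig_err.err_type,
		       (unsigned long long)mr_status.sig_err.sig_err_offset);
out:
	ib_destroy_mr(sig_mr);
	return ret;
}

The check_mr_status device method added to struct ib_device is the hook a low-level driver implements to back ib_check_mr_status(); consumers only see the verb.
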
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h index 309f51336fb9..1457c26dfc58 100644 --- a/include/scsi/libiscsi.h +++ b/include/scsi/libiscsi.h | |||
| @@ -133,6 +133,10 @@ struct iscsi_task { | |||
| 133 | unsigned long last_xfer; | 133 | unsigned long last_xfer; |
| 134 | unsigned long last_timeout; | 134 | unsigned long last_timeout; |
| 135 | bool have_checked_conn; | 135 | bool have_checked_conn; |
| 136 | |||
| 137 | /* T10 protection information */ | ||
| 138 | bool protected; | ||
| 139 | |||
| 136 | /* state set/tested under session->lock */ | 140 | /* state set/tested under session->lock */ |
| 137 | int state; | 141 | int state; |
| 138 | atomic_t refcount; | 142 | atomic_t refcount; |
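The new task->protected flag marks commands that carried T10 protection information. A hedged sketch of how a libiscsi response path might react to it follows; example_handle_pi is hypothetical, the sense-buffer construction is elided, and the returned ascq values follow the T10 PI additional sense codes under asc 0x10 (0x1 guard, 0x2 application tag, 0x3 reference tag).

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/libiscsi.h>

/*
 * Hypothetical helper: consult the transport once a protected command
 * completes. check_protection() is the callback added to struct
 * iscsi_transport in the next file; everything else is illustrative.
 */
static void example_handle_pi(struct iscsi_task *task, struct scsi_cmnd *sc)
{
	struct iscsi_session *session = task->conn->session;
	sector_t sector;
	u8 ascq;

	if (!task->protected)
		return;

	ascq = session->tt->check_protection(task, &sector);
	if (ascq) {
		sc->result = DRIVER_SENSE << 24 | DID_OK << 16 |
			     SAM_STAT_CHECK_CONDITION;
		/* fill sc->sense_buffer here: sense key, asc 0x10, the returned ascq */
	}
}
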
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h index 88640a47216c..2555ee5343fd 100644 --- a/include/scsi/scsi_transport_iscsi.h +++ b/include/scsi/scsi_transport_iscsi.h | |||
| @@ -167,6 +167,7 @@ struct iscsi_transport { | |||
| 167 | struct iscsi_bus_flash_conn *fnode_conn); | 167 | struct iscsi_bus_flash_conn *fnode_conn); |
| 168 | int (*logout_flashnode_sid) (struct iscsi_cls_session *cls_sess); | 168 | int (*logout_flashnode_sid) (struct iscsi_cls_session *cls_sess); |
| 169 | int (*get_host_stats) (struct Scsi_Host *shost, char *buf, int len); | 169 | int (*get_host_stats) (struct Scsi_Host *shost, char *buf, int len); |
| 170 | u8 (*check_protection)(struct iscsi_task *task, sector_t *sector); | ||
| 170 | }; | 171 | }; |
| 171 | 172 | ||
| 172 | /* | 173 | /* |
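On the transport side, the new check_protection() callback lets a driver that offloads T10 PI report which check failed and at which sector. An implementation sketch layered on the ib_check_mr_status() verb added above might look like the following; the per-task context, its lookup through task->dd_data, and the 512-byte block size are assumptions, and example_iscsi_transport only shows where the callback would be wired up.

#include <rdma/ib_verbs.h>
#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>

struct example_task_ctx {			/* hypothetical per-task state */
	struct ib_mr *sig_mr;
};

static u8 example_check_protection(struct iscsi_task *task, sector_t *sector)
{
	struct example_task_ctx *ctx = task->dd_data;
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(ctx->sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret || !(mr_status.fail_status & IB_MR_CHECK_SIG_STATUS))
		return 0;			/* no signature error detected */

	/* 512-byte logical blocks assumed for the failing-sector report */
	*sector = mr_status.sig_err.sig_err_offset >> 9;

	switch (mr_status.sig_err.err_type) {
	case IB_SIG_BAD_GUARD:
		return 0x1;			/* guard check failed */
	case IB_SIG_BAD_APPTAG:
		return 0x2;			/* application tag check failed */
	case IB_SIG_BAD_REFTAG:
	default:
		return 0x3;			/* reference tag check failed */
	}
}

static struct iscsi_transport example_iscsi_transport = {
	/* ... usual transport ops ... */
	.check_protection	= example_check_protection,
};
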
diff --git a/include/scsi/scsi_transport_srp.h b/include/scsi/scsi_transport_srp.h index b11da5c1331e..cdb05dd1d440 100644 --- a/include/scsi/scsi_transport_srp.h +++ b/include/scsi/scsi_transport_srp.h | |||
| @@ -41,7 +41,6 @@ enum srp_rport_state { | |||
| 41 | * @mutex: Protects against concurrent rport reconnect / | 41 | * @mutex: Protects against concurrent rport reconnect / |
| 42 | * fast_io_fail / dev_loss_tmo activity. | 42 | * fast_io_fail / dev_loss_tmo activity. |
| 43 | * @state: rport state. | 43 | * @state: rport state. |
| 44 | * @deleted: Whether or not srp_rport_del() has already been invoked. | ||
| 45 | * @reconnect_delay: Reconnect delay in seconds. | 44 | * @reconnect_delay: Reconnect delay in seconds. |
| 46 | * @failed_reconnects: Number of failed reconnect attempts. | 45 | * @failed_reconnects: Number of failed reconnect attempts. |
| 47 | * @reconnect_work: Work structure used for scheduling reconnect attempts. | 46 | * @reconnect_work: Work structure used for scheduling reconnect attempts. |
