21 files changed, 232 insertions(+), 56 deletions(-)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index f1c279fabe64..7c0f9535fb7d 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -423,7 +423,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
 	struct sockaddr_ib *addr;
 	union ib_gid gid, sgid, *dgid;
 	u16 pkey, index;
-	u8 port, p;
+	u8 p;
 	int i;
 
 	cma_dev = NULL;
@@ -443,7 +443,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
 				if (!memcmp(&gid, dgid, sizeof(gid))) {
 					cma_dev = cur_dev;
 					sgid = gid;
-					port = p;
+					id_priv->id.port_num = p;
 					goto found;
 				}
 
@@ -451,7 +451,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
 						 dgid->global.subnet_prefix)) {
 					cma_dev = cur_dev;
 					sgid = gid;
-					port = p;
+					id_priv->id.port_num = p;
 				}
 			}
 		}
@@ -462,7 +462,6 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
 
 found:
 	cma_attach_to_dev(id_priv, cma_dev);
-	id_priv->id.port_num = port;
 	addr = (struct sockaddr_ib *) cma_src_addr(id_priv);
 	memcpy(&addr->sib_addr, &sgid, sizeof sgid);
 	cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
@@ -880,7 +879,8 @@ static int cma_save_net_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id
 {
 	struct cma_hdr *hdr;
 
-	if (listen_id->route.addr.src_addr.ss_family == AF_IB) {
+	if ((listen_id->route.addr.src_addr.ss_family == AF_IB) &&
+	    (ib_event->event == IB_CM_REQ_RECEIVED)) {
 		cma_save_ib_info(id, listen_id, ib_event->param.req_rcvd.primary_path);
 		return 0;
 	}
@@ -2677,29 +2677,32 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
 {
 	struct ib_cm_sidr_req_param req;
 	struct ib_cm_id *id;
+	void *private_data;
 	int offset, ret;
 
+	memset(&req, 0, sizeof req);
 	offset = cma_user_data_offset(id_priv);
 	req.private_data_len = offset + conn_param->private_data_len;
 	if (req.private_data_len < conn_param->private_data_len)
 		return -EINVAL;
 
 	if (req.private_data_len) {
-		req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
-		if (!req.private_data)
+		private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
+		if (!private_data)
 			return -ENOMEM;
 	} else {
-		req.private_data = NULL;
+		private_data = NULL;
 	}
 
 	if (conn_param->private_data && conn_param->private_data_len)
-		memcpy((void *) req.private_data + offset,
-		       conn_param->private_data, conn_param->private_data_len);
+		memcpy(private_data + offset, conn_param->private_data,
+		       conn_param->private_data_len);
 
-	if (req.private_data) {
-		ret = cma_format_hdr((void *) req.private_data, id_priv);
+	if (private_data) {
+		ret = cma_format_hdr(private_data, id_priv);
 		if (ret)
 			goto out;
+		req.private_data = private_data;
 	}
 
 	id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
@@ -2721,7 +2724,7 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
 		id_priv->cm_id.ib = NULL;
 	}
 out:
-	kfree(req.private_data);
+	kfree(private_data);
 	return ret;
 }
 
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index dc3fd1e8af07..4c837e66516b 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -2663,6 +2663,7 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
 	int ret, i;
 	struct ib_qp_attr *attr;
 	struct ib_qp *qp;
+	u16 pkey_index;
 
 	attr = kmalloc(sizeof *attr, GFP_KERNEL);
 	if (!attr) {
@@ -2670,6 +2671,11 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
 		return -ENOMEM;
 	}
 
+	ret = ib_find_pkey(port_priv->device, port_priv->port_num,
+			   IB_DEFAULT_PKEY_FULL, &pkey_index);
+	if (ret)
+		pkey_index = 0;
+
 	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
 		qp = port_priv->qp_info[i].qp;
 		if (!qp)
@@ -2680,7 +2686,7 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
 		 * one is needed for the Reset to Init transition
 		 */
 		attr->qp_state = IB_QPS_INIT;
-		attr->pkey_index = 0;
+		attr->pkey_index = pkey_index;
 		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
 		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
 					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index e87f2201b220..d2283837d451 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -226,6 +226,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
 		mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
 				     sizeof(struct t3_cqe));
 		uresp.memsize = mm->len;
+		uresp.reserved = 0;
 		resplen = sizeof uresp;
 	}
 	if (ib_copy_to_udata(udata, &uresp, resplen)) {
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 232040447e8a..a4975e1654a6 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1657,6 +1657,8 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 		if (mm5) {
 			uresp.ma_sync_key = ucontext->key;
 			ucontext->key += PAGE_SIZE;
+		} else {
+			uresp.ma_sync_key = 0;
 		}
 		uresp.sq_key = ucontext->key;
 		ucontext->key += PAGE_SIZE;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 4d599cedbb0b..f2a3f48107e7 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1511,8 +1511,14 @@ static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
 
 	memset(&attr, 0, sizeof attr);
 	attr.qp_state = IB_QPS_INIT;
-	attr.pkey_index =
-		to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
+	ret = 0;
+	if (create_tun)
+		ret = find_slave_port_pkey_ix(to_mdev(ctx->ib_dev), ctx->slave,
+					      ctx->port, IB_DEFAULT_PKEY_FULL,
+					      &attr.pkey_index);
+	if (ret || !create_tun)
+		attr.pkey_index =
+			to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
 	attr.qkey = IB_QP1_QKEY;
 	attr.port_num = ctx->port;
 	ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 8000fff4d444..3f831de9a4d8 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -619,7 +619,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 
 	resp.tot_uuars = req.total_num_uuars;
 	resp.num_ports = dev->mdev.caps.num_ports;
-	err = ib_copy_to_udata(udata, &resp, sizeof(resp));
+	err = ib_copy_to_udata(udata, &resp,
+			       sizeof(resp) - sizeof(resp.reserved));
 	if (err)
 		goto out_uars;
 
@@ -1426,7 +1427,8 @@ static int init_one(struct pci_dev *pdev,
 	if (err)
 		goto err_eqs;
 
-	if (ib_register_device(&dev->ib_dev, NULL))
+	err = ib_register_device(&dev->ib_dev, NULL);
+	if (err)
 		goto err_rsrc;
 
 	err = create_umr_res(dev);
@@ -1434,8 +1436,9 @@ static int init_one(struct pci_dev *pdev,
 		goto err_dev;
 
 	for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
-		if (device_create_file(&dev->ib_dev.dev,
-				       mlx5_class_attributes[i]))
+		err = device_create_file(&dev->ib_dev.dev,
+					 mlx5_class_attributes[i]);
+		if (err)
 			goto err_umrc;
 	}
 
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 16ac54c9819f..045f8cdbd303 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -199,7 +199,7 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
 
 static int sq_overhead(enum ib_qp_type qp_type)
 {
-	int size;
+	int size = 0;
 
 	switch (qp_type) {
 	case IB_QPT_XRC_INI:
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 418004c93feb..90200245c5eb 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -3570,10 +3570,10 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
 	tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT;
 	iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT;
 	nes_debug(NES_DBG_AEQ, "aeid = 0x%04X, qp-cq id = %d, aeqe = %p,"
-			" Tcp state = %d, iWARP state = %d\n",
+			" Tcp state = %s, iWARP state = %s\n",
 			async_event_id,
 			le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]), aeqe,
-			tcp_state, iwarp_state);
+			nes_tcp_state_str[tcp_state], nes_iwarp_state_str[iwarp_state]);
 
 	aeqe_cq_id = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]);
 	if (aeq_info & NES_AEQE_QP) {
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 8f67fe2e91e6..5b53ca5a2284 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1384,6 +1384,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
 
 		if (ibpd->uobject) {
 			uresp.mmap_sq_db_index = nesqp->mmap_sq_db_index;
+			uresp.mmap_rq_db_index = 0;
 			uresp.actual_sq_size = sq_size;
 			uresp.actual_rq_size = rq_size;
 			uresp.qp_id = nesqp->hwqp.qp_id;
@@ -1767,7 +1768,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
 			resp.cq_id = nescq->hw_cq.cq_number;
 			resp.cq_size = nescq->hw_cq.cq_size;
 			resp.mmap_db_index = 0;
-			if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
+			if (ib_copy_to_udata(udata, &resp, sizeof resp - sizeof resp.reserved)) {
 				nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
 				kfree(nescq);
 				return ERR_PTR(-EFAULT);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index a877a8ed7907..f4c587c68f64 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -29,7 +29,6 @@
 #include <net/netevent.h>
 
 #include <rdma/ib_addr.h>
-#include <rdma/ib_cache.h>
 
 #include "ocrdma.h"
 #include "ocrdma_verbs.h"
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index dcfbab177faa..f36630e4b6be 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -242,6 +242,7 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
 	memset(ctx->ah_tbl.va, 0, map_len);
 	ctx->ah_tbl.len = map_len;
 
+	memset(&resp, 0, sizeof(resp));
 	resp.ah_tbl_len = ctx->ah_tbl.len;
 	resp.ah_tbl_page = ctx->ah_tbl.pa;
 
@@ -253,7 +254,6 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
 	resp.wqe_size = dev->attr.wqe_size;
 	resp.rqe_size = dev->attr.rqe_size;
 	resp.dpp_wqe_size = dev->attr.wqe_size;
-	resp.rsvd = 0;
 
 	memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
 	status = ib_copy_to_udata(udata, &resp, sizeof(resp));
@@ -338,6 +338,7 @@ static int ocrdma_copy_pd_uresp(struct ocrdma_pd *pd,
 	struct ocrdma_alloc_pd_uresp rsp;
 	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
 
+	memset(&rsp, 0, sizeof(rsp));
 	rsp.id = pd->id;
 	rsp.dpp_enabled = pd->dpp_enabled;
 	db_page_addr = pd->dev->nic_info.unmapped_db +
@@ -692,6 +693,7 @@ static int ocrdma_copy_cq_uresp(struct ocrdma_cq *cq, struct ib_udata *udata,
 	struct ocrdma_ucontext *uctx;
 	struct ocrdma_create_cq_uresp uresp;
 
+	memset(&uresp, 0, sizeof(uresp));
 	uresp.cq_id = cq->id;
 	uresp.page_size = cq->len;
 	uresp.num_pages = 1;
@@ -1460,6 +1462,7 @@ static int ocrdma_copy_srq_uresp(struct ocrdma_srq *srq, struct ib_udata *udata)
 	int status;
 	struct ocrdma_create_srq_uresp uresp;
 
+	memset(&uresp, 0, sizeof(uresp));
 	uresp.rq_dbid = srq->rq.dbid;
 	uresp.num_rq_pages = 1;
 	uresp.rq_page_addr[0] = srq->rq.pa;
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 21e8b09d4bf8..016e7429adf6 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -1596,6 +1596,8 @@ static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
 	struct qib_devdata *dd = ppd->dd;
 
 	errs &= QIB_E_P_SDMAERRS;
+	err_decode(ppd->cpspec->sdmamsgbuf, sizeof(ppd->cpspec->sdmamsgbuf),
+		   errs, qib_7322p_error_msgs);
 
 	if (errs & QIB_E_P_SDMAUNEXPDATA)
 		qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c
index 32162d355370..9b5322d8cd5a 100644
--- a/drivers/infiniband/hw/qib/qib_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_sdma.c
@@ -717,7 +717,7 @@ void dump_sdma_state(struct qib_pportdata *ppd)
 	struct qib_sdma_txreq *txp, *txpnext;
 	__le64 *descqp;
 	u64 desc[2];
-	dma_addr_t addr;
+	u64 addr;
 	u16 gen, dwlen, dwoffset;
 	u16 head, tail, cnt;
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 2cfa76f5d99e..196b1d13cbcb 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -932,12 +932,47 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
 	return 0;
 }
 
+/*
+ * Takes whatever value which is in pkey index 0 and updates priv->pkey
+ * returns 0 if the pkey value was changed.
+ */
+static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
+{
+	int result;
+	u16 prev_pkey;
+
+	prev_pkey = priv->pkey;
+	result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
+	if (result) {
+		ipoib_warn(priv, "ib_query_pkey port %d failed (ret = %d)\n",
+			   priv->port, result);
+		return result;
+	}
+
+	priv->pkey |= 0x8000;
+
+	if (prev_pkey != priv->pkey) {
+		ipoib_dbg(priv, "pkey changed from 0x%x to 0x%x\n",
+			  prev_pkey, priv->pkey);
+		/*
+		 * Update the pkey in the broadcast address, while making sure to set
+		 * the full membership bit, so that we join the right broadcast group.
+		 */
+		priv->dev->broadcast[8] = priv->pkey >> 8;
+		priv->dev->broadcast[9] = priv->pkey & 0xff;
+		return 0;
+	}
+
+	return 1;
+}
+
 static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
 				enum ipoib_flush_level level)
 {
 	struct ipoib_dev_priv *cpriv;
 	struct net_device *dev = priv->dev;
 	u16 new_index;
+	int result;
 
 	mutex_lock(&priv->vlan_mutex);
 
@@ -951,6 +986,10 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
 	mutex_unlock(&priv->vlan_mutex);
 
 	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
+		/* for non-child devices must check/update the pkey value here */
+		if (level == IPOIB_FLUSH_HEAVY &&
+		    !test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
+			update_parent_pkey(priv);
 		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
 		return;
 	}
@@ -961,21 +1000,32 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
 	}
 
 	if (level == IPOIB_FLUSH_HEAVY) {
-		if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) {
-			clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
-			ipoib_ib_dev_down(dev, 0);
-			ipoib_ib_dev_stop(dev, 0);
-			if (ipoib_pkey_dev_delay_open(dev))
+		/* child devices chase their origin pkey value, while non-child
+		 * (parent) devices should always takes what present in pkey index 0
+		 */
+		if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
+			if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) {
+				clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
+				ipoib_ib_dev_down(dev, 0);
+				ipoib_ib_dev_stop(dev, 0);
+				if (ipoib_pkey_dev_delay_open(dev))
+					return;
+			}
+			/* restart QP only if P_Key index is changed */
+			if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
+			    new_index == priv->pkey_index) {
+				ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
 				return;
+			}
+			priv->pkey_index = new_index;
+		} else {
+			result = update_parent_pkey(priv);
+			/* restart QP only if P_Key value changed */
+			if (result) {
+				ipoib_dbg(priv, "Not flushing - P_Key value not changed.\n");
+				return;
+			}
 		}
-
-		/* restart QP only if P_Key index is changed */
-		if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
-		    new_index == priv->pkey_index) {
-			ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
-			return;
-		}
-		priv->pkey_index = new_index;
 	}
 
 	if (level == IPOIB_FLUSH_LIGHT) {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index b6e049a3c7a8..c6f71a88c55c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1461,7 +1461,7 @@ static ssize_t create_child(struct device *dev,
 	if (sscanf(buf, "%i", &pkey) != 1)
 		return -EINVAL;
 
-	if (pkey < 0 || pkey > 0xffff)
+	if (pkey <= 0 || pkey > 0xffff || pkey == 0x8000)
 		return -EINVAL;
 
 	/*
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
index 74685936c948..f81abe16cf09 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -119,6 +119,15 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
 	} else
 		child_pkey = nla_get_u16(data[IFLA_IPOIB_PKEY]);
 
+	if (child_pkey == 0 || child_pkey == 0x8000)
+		return -EINVAL;
+
+	/*
+	 * Set the full membership bit, so that we join the right
+	 * broadcast group, etc.
+	 */
+	child_pkey |= 0x8000;
+
 	err = __ipoib_vlan_add(ppriv, netdev_priv(dev), child_pkey, IPOIB_RTNL_CHILD);
 
 	if (!err && data)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 40374063c01e..c571de85d0f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -46,7 +46,7 @@
 #include "mlx5_core.h"
 
 enum {
-	CMD_IF_REV = 3,
+	CMD_IF_REV = 4,
 };
 
 enum {
@@ -282,6 +282,12 @@ const char *mlx5_command_str(int command)
 	case MLX5_CMD_OP_TEARDOWN_HCA:
 		return "TEARDOWN_HCA";
 
+	case MLX5_CMD_OP_ENABLE_HCA:
+		return "MLX5_CMD_OP_ENABLE_HCA";
+
+	case MLX5_CMD_OP_DISABLE_HCA:
+		return "MLX5_CMD_OP_DISABLE_HCA";
+
 	case MLX5_CMD_OP_QUERY_PAGES:
 		return "QUERY_PAGES";
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 12242de2b0e3..b47739b0b5f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -249,6 +249,44 @@ static int set_hca_ctrl(struct mlx5_core_dev *dev)
 	return err;
 }
 
+static int mlx5_core_enable_hca(struct mlx5_core_dev *dev)
+{
+	int err;
+	struct mlx5_enable_hca_mbox_in in;
+	struct mlx5_enable_hca_mbox_out out;
+
+	memset(&in, 0, sizeof(in));
+	memset(&out, 0, sizeof(out));
+	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ENABLE_HCA);
+	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+	if (err)
+		return err;
+
+	if (out.hdr.status)
+		return mlx5_cmd_status_to_err(&out.hdr);
+
+	return 0;
+}
+
+static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
+{
+	int err;
+	struct mlx5_disable_hca_mbox_in in;
+	struct mlx5_disable_hca_mbox_out out;
+
+	memset(&in, 0, sizeof(in));
+	memset(&out, 0, sizeof(out));
+	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DISABLE_HCA);
+	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+	if (err)
+		return err;
+
+	if (out.hdr.status)
+		return mlx5_cmd_status_to_err(&out.hdr);
+
+	return 0;
+}
+
 int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 {
 	struct mlx5_priv *priv = &dev->priv;
@@ -304,28 +342,41 @@ int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 	}
 
 	mlx5_pagealloc_init(dev);
+
+	err = mlx5_core_enable_hca(dev);
+	if (err) {
+		dev_err(&pdev->dev, "enable hca failed\n");
+		goto err_pagealloc_cleanup;
+	}
+
+	err = mlx5_satisfy_startup_pages(dev, 1);
+	if (err) {
+		dev_err(&pdev->dev, "failed to allocate boot pages\n");
+		goto err_disable_hca;
+	}
+
 	err = set_hca_ctrl(dev);
 	if (err) {
 		dev_err(&pdev->dev, "set_hca_ctrl failed\n");
-		goto err_pagealloc_cleanup;
+		goto reclaim_boot_pages;
 	}
 
 	err = handle_hca_cap(dev);
 	if (err) {
 		dev_err(&pdev->dev, "handle_hca_cap failed\n");
-		goto err_pagealloc_cleanup;
+		goto reclaim_boot_pages;
 	}
 
-	err = mlx5_satisfy_startup_pages(dev);
+	err = mlx5_satisfy_startup_pages(dev, 0);
 	if (err) {
-		dev_err(&pdev->dev, "failed to allocate startup pages\n");
-		goto err_pagealloc_cleanup;
+		dev_err(&pdev->dev, "failed to allocate init pages\n");
+		goto reclaim_boot_pages;
 	}
 
 	err = mlx5_pagealloc_start(dev);
 	if (err) {
 		dev_err(&pdev->dev, "mlx5_pagealloc_start failed\n");
-		goto err_reclaim_pages;
+		goto reclaim_boot_pages;
 	}
 
 	err = mlx5_cmd_init_hca(dev);
@@ -396,9 +447,12 @@ err_stop_poll:
 err_pagealloc_stop:
 	mlx5_pagealloc_stop(dev);
 
-err_reclaim_pages:
+reclaim_boot_pages:
 	mlx5_reclaim_startup_pages(dev);
 
+err_disable_hca:
+	mlx5_core_disable_hca(dev);
+
 err_pagealloc_cleanup:
 	mlx5_pagealloc_cleanup(dev);
 	mlx5_cmd_cleanup(dev);
@@ -434,6 +488,7 @@ void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
 	mlx5_cmd_teardown_hca(dev);
 	mlx5_pagealloc_stop(dev);
 	mlx5_reclaim_startup_pages(dev);
+	mlx5_core_disable_hca(dev);
 	mlx5_pagealloc_cleanup(dev);
 	mlx5_cmd_cleanup(dev);
 	iounmap(dev->iseg);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index f0bf46339b28..4a3e137931a3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -64,7 +64,7 @@ struct mlx5_query_pages_inbox {
 
 struct mlx5_query_pages_outbox {
 	struct mlx5_outbox_hdr	hdr;
-	u8			reserved[2];
+	__be16			num_boot_pages;
 	__be16			func_id;
 	__be16			init_pages;
 	__be16			num_pages;
@@ -146,7 +146,7 @@ static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr)
 }
 
 static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
-				s16 *pages, s16 *init_pages)
+				s16 *pages, s16 *init_pages, u16 *boot_pages)
 {
 	struct mlx5_query_pages_inbox	in;
 	struct mlx5_query_pages_outbox	out;
@@ -164,8 +164,13 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
 
 	if (pages)
 		*pages = be16_to_cpu(out.num_pages);
+
 	if (init_pages)
 		*init_pages = be16_to_cpu(out.init_pages);
+
+	if (boot_pages)
+		*boot_pages = be16_to_cpu(out.num_boot_pages);
+
 	*func_id = be16_to_cpu(out.func_id);
 
 	return err;
@@ -357,19 +362,22 @@ void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
 	queue_work(dev->priv.pg_wq, &req->work);
 }
 
-int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev)
+int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
 {
+	u16 uninitialized_var(boot_pages);
 	s16 uninitialized_var(init_pages);
 	u16 uninitialized_var(func_id);
 	int err;
 
-	err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages);
+	err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages,
+				   &boot_pages);
 	if (err)
 		return err;
 
-	mlx5_core_dbg(dev, "requested %d init pages for func_id 0x%x\n", init_pages, func_id);
 
-	return give_pages(dev, func_id, init_pages, 0);
+	mlx5_core_dbg(dev, "requested %d init pages and %d boot pages for func_id 0x%x\n",
+		      init_pages, boot_pages, func_id);
+	return give_pages(dev, func_id, boot ? boot_pages : init_pages, 0);
 }
 
 static int optimal_reclaimed_pages(void)
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 8de8d8f22384..737685e9e852 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -690,6 +690,26 @@ struct mlx5_query_cq_mbox_out {
 	__be64 pas[0];
 };
 
+struct mlx5_enable_hca_mbox_in {
+	struct mlx5_inbox_hdr	hdr;
+	u8			rsvd[8];
+};
+
+struct mlx5_enable_hca_mbox_out {
+	struct mlx5_outbox_hdr	hdr;
+	u8			rsvd[8];
+};
+
+struct mlx5_disable_hca_mbox_in {
+	struct mlx5_inbox_hdr	hdr;
+	u8			rsvd[8];
+};
+
+struct mlx5_disable_hca_mbox_out {
+	struct mlx5_outbox_hdr	hdr;
+	u8			rsvd[8];
+};
+
 struct mlx5_eq_context {
 	u8			status;
 	u8			ec_oi;
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index f22e4419839b..2aa258b0ced1 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -101,6 +101,8 @@ enum {
 	MLX5_CMD_OP_QUERY_ADAPTER		= 0x101,
 	MLX5_CMD_OP_INIT_HCA			= 0x102,
 	MLX5_CMD_OP_TEARDOWN_HCA		= 0x103,
+	MLX5_CMD_OP_ENABLE_HCA			= 0x104,
+	MLX5_CMD_OP_DISABLE_HCA			= 0x105,
 	MLX5_CMD_OP_QUERY_PAGES			= 0x107,
 	MLX5_CMD_OP_MANAGE_PAGES		= 0x108,
 	MLX5_CMD_OP_SET_HCA_CAP		= 0x109,
@@ -690,7 +692,7 @@ int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
 void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
 void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
 				 s16 npages);
-int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev);
+int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
 int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
 void mlx5_register_debugfs(void);
 void mlx5_unregister_debugfs(void);
