author     Linus Torvalds <torvalds@linux-foundation.org>   2013-05-08 18:29:48 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-05-08 18:29:48 -0400
commit     e0fd9affeb64088eff407dfc98bbd3a5c17ea479
tree       94ee7e3410fffb305aa6901053b85245686444a2
parent     3d15b798eafd3b6b3cc25f20747008ab9401a57f
parent     ea9627c800e99a902e2668ac8e6377f02d6f720a
Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
Pull InfiniBand/RDMA changes from Roland Dreier:
- XRC transport fixes
- Fix DHCP on IPoIB
- mlx4 preparations for flow steering
- iSER fixes
- miscellaneous other fixes
* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (23 commits)
IB/iser: Add support for iser CM REQ additional info
IB/iser: Return error to upper layers on EAGAIN registration failures
IB/iser: Move informational messages from error to info level
IB/iser: Add module version
mlx4_core: Expose a few helpers to fill DMFS HW strucutures
mlx4_core: Directly expose fields of DMFS HW rule control segment
mlx4_core: Change a few DMFS fields names to match firmare spec
mlx4: Match DMFS promiscuous field names to firmware spec
mlx4_core: Move DMFS HW structs to common header file
IB/mlx4: Set link type for RAW PACKET QPs in the QP context
IB/mlx4: Disable VLAN stripping for RAW PACKET QPs
mlx4_core: Reduce warning message for SRQ_LIMIT event to debug level
RDMA/iwcm: Don't touch cmid after dropping reference
IB/qib: Correct qib_verbs_register_sysfs() error handling
IB/ipath: Correct ipath_verbs_register_sysfs() error handling
RDMA/cxgb4: Fix SQ allocation when on-chip SQ is disabled
SRPT: Fix odd use of WARN_ON()
IPoIB: Fix ipoib_hard_header() return value
RDMA: Rename random32() to prandom_u32()
RDMA/cxgb3: Fix uninitialized variable
...
23 files changed, 322 insertions, 195 deletions
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 0bb99bb38809..c47c2034ca71 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -878,6 +878,8 @@ static void cm_work_handler(struct work_struct *_work)
 			}
 			return;
 		}
+		if (empty)
+			return;
 		spin_lock_irqsave(&cm_id_priv->lock, flags);
 	}
 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
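
The iwcm hunk above closes a use-after-free window: once cm_work_handler() has dropped its reference, the cm_id may already be gone, so whether to loop back and retake the lock has to be decided from state captured earlier (the empty flag). A stand-alone sketch of that rule, with hypothetical names rather than the driver's types:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Hypothetical illustration (not the driver's code): decide what to do next
 * from state captured *before* the reference is dropped, and never touch the
 * object afterwards unless that earlier state says more work is queued.
 */
struct session {
	int refcount;
	bool queue_empty;
};

static bool session_put(struct session *s)	/* true: last ref, s freed */
{
	if (--s->refcount == 0) {
		free(s);
		return true;
	}
	return false;
}

static void handle_work(struct session *s)
{
	bool empty = s->queue_empty;	/* capture before the put */

	if (session_put(s))
		return;			/* s has been freed */
	if (empty)
		return;			/* nothing left: avoid re-locking s */

	printf("would re-lock and process the next queued item\n");
}

int main(void)
{
	struct session *s = calloc(1, sizeof(*s));

	s->refcount = 2;		/* caller + worker reference */
	s->queue_empty = true;
	handle_work(s);			/* drops the worker reference */
	session_put(s);			/* drops the caller reference */
	return 0;
}
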
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index a8fdd3381405..22192deb8828 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -348,7 +348,8 @@ static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
 	struct ib_qp *qp = context;
 
 	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
-		event->element.qp->event_handler(event, event->element.qp->qp_context);
+		if (event->element.qp->event_handler)
+			event->element.qp->event_handler(event, event->element.qp->qp_context);
 }
 
 static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 9c12da0cbd32..e87f2201b220 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -559,7 +559,7 @@ static int iwch_reregister_phys_mem(struct ib_mr *mr,
 	__be64 *page_list = NULL;
 	int shift = 0;
 	u64 total_size;
-	int npages;
+	int npages = 0;
 	int ret;
 
 	PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 5b059e2d80cc..232040447e8a 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -111,6 +111,16 @@ static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
 	return 0;
 }
 
+static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
+{
+	int ret = -ENOSYS;
+	if (user)
+		ret = alloc_oc_sq(rdev, sq);
+	if (ret)
+		ret = alloc_host_sq(rdev, sq);
+	return ret;
+}
+
 static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 		      struct c4iw_dev_ucontext *uctx)
 {
@@ -179,15 +189,9 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 		goto free_sw_rq;
 	}
 
-	if (user) {
-		if (alloc_oc_sq(rdev, &wq->sq) && alloc_host_sq(rdev, &wq->sq))
-			goto free_hwaddr;
-	} else {
-		ret = alloc_host_sq(rdev, &wq->sq);
-		if (ret)
-			goto free_hwaddr;
-	}
-
+	ret = alloc_sq(rdev, &wq->sq, user);
+	if (ret)
+		goto free_hwaddr;
 	memset(wq->sq.queue, 0, wq->sq.memsize);
 	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
 
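
The new alloc_sq() helper above turns SQ allocation into a plain try-then-fall-back sequence, so a failed or disabled on-chip allocation for a user QP degrades to a host-memory SQ instead of failing QP creation. A minimal stand-alone sketch of that fallback shape, with stand-in functions rather than the cxgb4 ones:

#include <errno.h>
#include <stdio.h>

/* Stand-ins for alloc_oc_sq()/alloc_host_sq(); here on-chip memory is
 * assumed to be unavailable, so the fallback path is always taken. */
static int alloc_onchip_sq(void) { return -ENOSYS; }
static int alloc_host_sq(void)   { return 0; }

static int alloc_sq(int user)
{
	int ret = -ENOSYS;	/* "not attempted" also triggers the fallback */

	if (user)
		ret = alloc_onchip_sq();	/* preferred for user QPs */
	if (ret)
		ret = alloc_host_sq();		/* fallback for everyone */
	return ret;
}

int main(void)
{
	printf("kernel QP: %d, user QP: %d\n", alloc_sq(0), alloc_sq(1));
	return 0;
}
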
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index ea93870266eb..44ea9390417c 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -2187,7 +2187,8 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
 	if (ret)
 		goto err_reg;
 
-	if (ipath_verbs_register_sysfs(dev))
+	ret = ipath_verbs_register_sysfs(dev);
+	if (ret)
 		goto err_class;
 
 	enable_timer(dd);
@@ -2327,15 +2328,15 @@ static int ipath_verbs_register_sysfs(struct ib_device *dev)
 	int i;
 	int ret;
 
-	for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i)
-		if (device_create_file(&dev->dev,
-				       ipath_class_attributes[i])) {
-			ret = 1;
+	for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i) {
+		ret = device_create_file(&dev->dev,
+					 ipath_class_attributes[i]);
+		if (ret)
 			goto bail;
-		}
-
-	ret = 0;
-
+	}
+	return 0;
 bail:
+	for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i)
+		device_remove_file(&dev->dev, ipath_class_attributes[i]);
 	return ret;
 }
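
Both this ipath change and the qib change further down adopt the same shape for sysfs registration: propagate the real error from device_create_file() instead of a made-up one, and on failure remove every attribute in the array before returning (removing one that was never created is harmless in sysfs). A stand-alone sketch of that create-then-unwind pattern, with hypothetical helpers:

#include <stdio.h>

#define N_ATTRS 5

/* Hypothetical stand-ins for device_create_file()/device_remove_file(). */
static int create_attr(int i)  { return (i == 3) ? -1 : 0; }	/* fail on #3 */
static void remove_attr(int i) { printf("remove attr %d\n", i); }

static int register_sysfs(void)
{
	int i, ret;

	for (i = 0; i < N_ATTRS; ++i) {
		ret = create_attr(i);
		if (ret)
			goto bail;
	}
	return 0;
bail:
	/* unwind with a full sweep; removing a never-created attribute is
	 * effectively a no-op, which keeps the error path simple */
	for (i = 0; i < N_ATTRS; ++i)
		remove_attr(i);
	return ret;
}

int main(void)
{
	printf("register_sysfs() = %d\n", register_sysfs());
	return 0;
}
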
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 73b3a7132587..d5e60f44ba5a 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -33,6 +33,7 @@
 
 #include <linux/mlx4/cq.h>
 #include <linux/mlx4/qp.h>
+#include <linux/mlx4/srq.h>
 #include <linux/slab.h>
 
 #include "mlx4_ib.h"
@@ -585,6 +586,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 	struct mlx4_qp *mqp;
 	struct mlx4_ib_wq *wq;
 	struct mlx4_ib_srq *srq;
+	struct mlx4_srq *msrq = NULL;
 	int is_send;
 	int is_error;
 	u32 g_mlpath_rqpn;
@@ -653,6 +655,20 @@ repoll:
 
 	wc->qp = &(*cur_qp)->ibqp;
 
+	if (wc->qp->qp_type == IB_QPT_XRC_TGT) {
+		u32 srq_num;
+		g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
+		srq_num       = g_mlpath_rqpn & 0xffffff;
+		/* SRQ is also in the radix tree */
+		msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
+				       srq_num);
+		if (unlikely(!msrq)) {
+			pr_warn("CQ %06x with entry for unknown SRQN %06x\n",
+				cq->mcq.cqn, srq_num);
+			return -EINVAL;
+		}
+	}
+
 	if (is_send) {
 		wq = &(*cur_qp)->sq;
 		if (!(*cur_qp)->sq_signal_bits) {
@@ -666,6 +682,11 @@ repoll:
 		wqe_ctr = be16_to_cpu(cqe->wqe_index);
 		wc->wr_id = srq->wrid[wqe_ctr];
 		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
+	} else if (msrq) {
+		srq = to_mibsrq(msrq);
+		wqe_ctr = be16_to_cpu(cqe->wqe_index);
+		wc->wr_id = srq->wrid[wqe_ctr];
+		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
 	} else {
 		wq = &(*cur_qp)->rq;
 		tail = wq->tail & (wq->wqe_cnt - 1);
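
For XRC target QPs the receive work request lives on a shared receive queue that is identified only by the completion itself: the hunk above recovers the SRQ number from the low 24 bits of the CQE's g_mlpath_rqpn word and looks the SRQ up before completing the work request. A small stand-alone sketch of that field extraction (assuming a little-endian host, so the byte swap plays the role of be32_to_cpu()):

#include <stdint.h>
#include <stdio.h>

/* Extract the SRQ number carried in a big-endian g_mlpath_rqpn CQE word:
 * the upper bits hold path/flag information, the low 24 bits hold the SRQN. */
static uint32_t srqn_from_cqe(uint32_t g_mlpath_rqpn_be)
{
	uint32_t v = __builtin_bswap32(g_mlpath_rqpn_be); /* be32 -> host (LE) */

	return v & 0xffffff;
}

int main(void)
{
	/* example word: flag bits in the top byte, SRQN 0x000123 below them */
	uint32_t word_be = __builtin_bswap32(0x8a000123u);

	printf("srqn = 0x%06x\n", srqn_from_cqe(word_be));
	return 0;
}
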
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 35cced2a4da8..4f10af2905b5 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1292,6 +1292,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
 		context->sq_size_stride |= !!qp->sq_no_prefetch << 7;
 		context->xrcd = cpu_to_be32((u32) qp->xrcdn);
+		if (ibqp->qp_type == IB_QPT_RAW_PACKET)
+			context->param3 |= cpu_to_be32(1 << 30);
 	}
 
 	if (qp->ibqp.uobject)
@@ -1458,6 +1460,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		}
 	}
 
+	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET)
+		context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
+					  MLX4_IB_LINK_TYPE_ETH;
+
 	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
 	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
 		sqd_event = 1;
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index 034cc821de5c..3c8e4e3caca6 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -808,10 +808,14 @@ int qib_verbs_register_sysfs(struct qib_devdata *dd)
 	for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i) {
 		ret = device_create_file(&dev->dev, qib_attributes[i]);
 		if (ret)
-			return ret;
+			goto bail;
 	}
 
 	return 0;
+bail:
+	for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i)
+		device_remove_file(&dev->dev, qib_attributes[i]);
+	return ret;
 }
 
 /*
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 7c0ab16a2fe2..904c384aa361 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -2234,7 +2234,8 @@ int qib_register_ib_device(struct qib_devdata *dd)
 	if (ret)
 		goto err_agents;
 
-	if (qib_verbs_register_sysfs(dd))
+	ret = qib_verbs_register_sysfs(dd);
+	if (ret)
 		goto err_class;
 
 	goto bail;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 554b9063da54..b6e049a3c7a8 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -830,7 +830,7 @@ static int ipoib_hard_header(struct sk_buff *skb,
 	 */
 	memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);
 
-	return 0;
+	return sizeof *header;
 }
 
 static void ipoib_set_mcast_list(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 0ab8c9cc3a78..f19b0998a53c 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -82,10 +82,10 @@ module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
 
 int iser_debug_level = 0;
 
-MODULE_DESCRIPTION("iSER (iSCSI Extensions for RDMA) Datamover "
-		   "v" DRV_VER " (" DRV_DATE ")");
+MODULE_DESCRIPTION("iSER (iSCSI Extensions for RDMA) Datamover");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Alex Nezhinsky, Dan Bar Dov, Or Gerlitz");
+MODULE_VERSION(DRV_VER);
 
 module_param_named(debug_level, iser_debug_level, int, 0644);
 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:disabled)");
@@ -370,8 +370,8 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
 	/* binds the iSER connection retrieved from the previously
 	 * connected ep_handle to the iSCSI layer connection. exchanges
 	 * connection pointers */
-	iser_err("binding iscsi/iser conn %p %p to ib_conn %p\n",
-		 conn, conn->dd_data, ib_conn);
+	iser_info("binding iscsi/iser conn %p %p to ib_conn %p\n",
+		  conn, conn->dd_data, ib_conn);
 	iser_conn = conn->dd_data;
 	ib_conn->iser_conn = iser_conn;
 	iser_conn->ib_conn = ib_conn;
@@ -475,28 +475,28 @@ iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn,
 	case ISCSI_PARAM_HDRDGST_EN:
 		sscanf(buf, "%d", &value);
 		if (value) {
-			printk(KERN_ERR "DataDigest wasn't negotiated to None");
+			iser_err("DataDigest wasn't negotiated to None");
 			return -EPROTO;
 		}
 		break;
 	case ISCSI_PARAM_DATADGST_EN:
 		sscanf(buf, "%d", &value);
 		if (value) {
-			printk(KERN_ERR "DataDigest wasn't negotiated to None");
+			iser_err("DataDigest wasn't negotiated to None");
 			return -EPROTO;
 		}
 		break;
 	case ISCSI_PARAM_IFMARKER_EN:
 		sscanf(buf, "%d", &value);
 		if (value) {
-			printk(KERN_ERR "IFMarker wasn't negotiated to No");
+			iser_err("IFMarker wasn't negotiated to No");
 			return -EPROTO;
 		}
 		break;
	case ISCSI_PARAM_OFMARKER_EN:
 		sscanf(buf, "%d", &value);
 		if (value) {
-			printk(KERN_ERR "OFMarker wasn't negotiated to No");
+			iser_err("OFMarker wasn't negotiated to No");
 			return -EPROTO;
 		}
 		break;
@@ -596,7 +596,7 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
 		  ib_conn->state == ISER_CONN_DOWN))
 		rc = -1;
 
-	iser_err("ib conn %p rc = %d\n", ib_conn, rc);
+	iser_info("ib conn %p rc = %d\n", ib_conn, rc);
 
 	if (rc > 0)
 		return 1; /* success, this is the equivalent of POLLOUT */
@@ -623,7 +623,7 @@ iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
 	iscsi_suspend_tx(ib_conn->iser_conn->iscsi_conn);
 
 
-	iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state);
+	iser_info("ib conn %p state %d\n", ib_conn, ib_conn->state);
 	iser_conn_terminate(ib_conn);
 }
 
@@ -682,7 +682,7 @@ static umode_t iser_attr_is_visible(int param_type, int param)
 
 static struct scsi_host_template iscsi_iser_sht = {
 	.module                 = THIS_MODULE,
-	.name                   = "iSCSI Initiator over iSER, v." DRV_VER,
+	.name                   = "iSCSI Initiator over iSER",
 	.queuecommand           = iscsi_queuecommand,
 	.change_queue_depth	= iscsi_change_queue_depth,
 	.sg_tablesize           = ISCSI_ISER_SG_TABLESIZE,
@@ -740,7 +740,7 @@ static int __init iser_init(void)
 	iser_dbg("Starting iSER datamover...\n");
 
 	if (iscsi_max_lun < 1) {
-		printk(KERN_ERR "Invalid max_lun value of %u\n", iscsi_max_lun);
+		iser_err("Invalid max_lun value of %u\n", iscsi_max_lun);
 		return -EINVAL;
 	}
 
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 5babdb35bda7..06f578cde75b 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -42,6 +42,7 @@
 
 #include <linux/types.h>
 #include <linux/net.h>
+#include <linux/printk.h>
 #include <scsi/libiscsi.h>
 #include <scsi/scsi_transport_iscsi.h>
 
@@ -65,20 +66,26 @@
 
 #define DRV_NAME	"iser"
 #define PFX		DRV_NAME ": "
-#define DRV_VER		"0.1"
-#define DRV_DATE	"May 7th, 2006"
+#define DRV_VER		"1.1"
 
 #define iser_dbg(fmt, arg...)				\
 	do {						\
-		if (iser_debug_level > 1)		\
+		if (iser_debug_level > 2)		\
 			printk(KERN_DEBUG PFX "%s:" fmt,\
 				__func__ , ## arg);	\
 	} while (0)
 
 #define iser_warn(fmt, arg...)				\
 	do {						\
+		if (iser_debug_level > 1)		\
+			pr_warn(PFX "%s:" fmt,		\
+				__func__ , ## arg);	\
+	} while (0)
+
+#define iser_info(fmt, arg...)				\
+	do {						\
 		if (iser_debug_level > 0)		\
-			printk(KERN_DEBUG PFX "%s:" fmt,\
+			pr_info(PFX "%s:" fmt,		\
 				__func__ , ## arg);	\
 	} while (0)
 
@@ -133,6 +140,15 @@ struct iser_hdr {
 	__be64  read_va;
 } __attribute__((packed));
 
+
+#define ISER_ZBVA_NOT_SUPPORTED		0x80
+#define ISER_SEND_W_INV_NOT_SUPPORTED	0x40
+
+struct iser_cm_hdr {
+	u8      flags;
+	u8      rsvd[3];
+} __packed;
+
 /* Constant PDU lengths calculations */
 #define ISER_HEADERS_LEN  (sizeof(struct iser_hdr) + sizeof(struct iscsi_hdr))
 
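
The header change above introduces a three-level scheme: iser_info() prints when debug_level > 0, iser_warn() when > 1, and iser_dbg() when > 2, which is what lets the .c hunks demote chatty iser_err() calls without losing them entirely. A user-space sketch of the same level-gated macro idea (thresholds mirror the hunk; the GNU-style ## __VA_ARGS__ matches what the kernel macros themselves rely on, everything else is hypothetical):

#include <stdio.h>

static int debug_level = 1;	/* a module parameter in the real driver */

#define log_at(threshold, fmt, ...)					\
	do {								\
		if (debug_level > (threshold))				\
			printf("iser: %s: " fmt "\n",			\
			       __func__, ##__VA_ARGS__);		\
	} while (0)

#define log_info(fmt, ...)	log_at(0, fmt, ##__VA_ARGS__)
#define log_warn(fmt, ...)	log_at(1, fmt, ##__VA_ARGS__)
#define log_dbg(fmt, ...)	log_at(2, fmt, ##__VA_ARGS__)

int main(void)
{
	log_info("printed at debug_level=%d", debug_level);
	log_warn("suppressed until debug_level > 1");
	log_dbg("suppressed until debug_level > 2");
	return 0;
}
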
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index be1edb04b085..68ebb7fe072a 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -416,8 +416,9 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
 			for (i=0 ; i<ib_conn->page_vec->length ; i++)
 				iser_err("page_vec[%d] = 0x%llx\n", i,
 					 (unsigned long long) ib_conn->page_vec->pages[i]);
-			return err;
 		}
+		if (err)
+			return err;
 	}
 	return 0;
 }
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 4debadc53106..5278916c3103 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -74,8 +74,9 @@ static int iser_create_device_ib_res(struct iser_device *device)
 	struct iser_cq_desc *cq_desc;
 
 	device->cqs_used = min(ISER_MAX_CQ, device->ib_device->num_comp_vectors);
-	iser_err("using %d CQs, device %s supports %d vectors\n", device->cqs_used,
-		 device->ib_device->name, device->ib_device->num_comp_vectors);
+	iser_info("using %d CQs, device %s supports %d vectors\n",
+		  device->cqs_used, device->ib_device->name,
+		  device->ib_device->num_comp_vectors);
 
 	device->cq_desc = kmalloc(sizeof(struct iser_cq_desc) * device->cqs_used,
 				  GFP_KERNEL);
@@ -262,7 +263,7 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
 			min_index = index;
 	device->cq_active_qps[min_index]++;
 	mutex_unlock(&ig.connlist_mutex);
-	iser_err("cq index %d used for ib_conn %p\n", min_index, ib_conn);
+	iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn);
 
 	init_attr.event_handler = iser_qp_event_callback;
 	init_attr.qp_context	= (void *)ib_conn;
@@ -280,9 +281,9 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
 		goto out_err;
 
 	ib_conn->qp = ib_conn->cma_id->qp;
-	iser_err("setting conn %p cma_id %p: fmr_pool %p qp %p\n",
-		 ib_conn, ib_conn->cma_id,
-		 ib_conn->fmr_pool, ib_conn->cma_id->qp);
+	iser_info("setting conn %p cma_id %p: fmr_pool %p qp %p\n",
+		  ib_conn, ib_conn->cma_id,
+		  ib_conn->fmr_pool, ib_conn->cma_id->qp);
 	return ret;
 
 out_err:
@@ -299,9 +300,9 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
 	int cq_index;
 	BUG_ON(ib_conn == NULL);
 
-	iser_err("freeing conn %p cma_id %p fmr pool %p qp %p\n",
-		 ib_conn, ib_conn->cma_id,
-		 ib_conn->fmr_pool, ib_conn->qp);
+	iser_info("freeing conn %p cma_id %p fmr pool %p qp %p\n",
+		  ib_conn, ib_conn->cma_id,
+		  ib_conn->fmr_pool, ib_conn->qp);
 
 	/* qp is created only once both addr & route are resolved */
 	if (ib_conn->fmr_pool != NULL)
@@ -379,7 +380,7 @@ static void iser_device_try_release(struct iser_device *device)
 {
 	mutex_lock(&ig.device_list_mutex);
 	device->refcount--;
-	iser_err("device %p refcount %d\n",device,device->refcount);
+	iser_info("device %p refcount %d\n", device, device->refcount);
 	if (!device->refcount) {
 		iser_free_device_ib_res(device);
 		list_del(&device->ig_list);
@@ -498,6 +499,7 @@ static int iser_route_handler(struct rdma_cm_id *cma_id)
 {
 	struct rdma_conn_param conn_param;
 	int    ret;
+	struct iser_cm_hdr req_hdr;
 
 	ret = iser_create_ib_conn_res((struct iser_conn *)cma_id->context);
 	if (ret)
@@ -509,6 +511,12 @@ static int iser_route_handler(struct rdma_cm_id *cma_id)
 	conn_param.retry_count	       = 7;
 	conn_param.rnr_retry_count     = 6;
 
+	memset(&req_hdr, 0, sizeof(req_hdr));
+	req_hdr.flags = (ISER_ZBVA_NOT_SUPPORTED |
+			 ISER_SEND_W_INV_NOT_SUPPORTED);
+	conn_param.private_data		= (void *)&req_hdr;
+	conn_param.private_data_len	= sizeof(struct iser_cm_hdr);
+
 	ret = rdma_connect(cma_id, &conn_param);
 	if (ret) {
 		iser_err("failure connecting: %d\n", ret);
@@ -558,8 +566,8 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 {
 	int ret = 0;
 
-	iser_err("event %d status %d conn %p id %p\n",
-		event->event, event->status, cma_id->context, cma_id);
+	iser_info("event %d status %d conn %p id %p\n",
+		  event->event, event->status, cma_id->context, cma_id);
 
 	switch (event->event) {
 	case RDMA_CM_EVENT_ADDR_RESOLVED:
@@ -619,8 +627,8 @@ int iser_connect(struct iser_conn *ib_conn,
 	/* the device is known only --after-- address resolution */
 	ib_conn->device = NULL;
 
-	iser_err("connecting to: %pI4, port 0x%x\n",
-		 &dst_addr->sin_addr, dst_addr->sin_port);
+	iser_info("connecting to: %pI4, port 0x%x\n",
+		  &dst_addr->sin_addr, dst_addr->sin_port);
 
 	ib_conn->state = ISER_CONN_PENDING;
 
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index c09d41b1a2ff..b08ca7a9f76b 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1374,7 +1374,7 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
 		target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
 		break;
 	default:
-		WARN_ON("ERROR: unexpected command state");
+		WARN(1, "Unexpected command state (%d)", state);
 		break;
 	}
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index bcf4d118e98c..c9e6b62dd000 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -889,7 +889,7 @@ static int mlx4_en_flow_replace(struct net_device *dev,
 		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
 		.exclusive = 0,
 		.allow_loopback = 1,
-		.promisc_mode = MLX4_FS_PROMISC_NONE,
+		.promisc_mode = MLX4_FS_REGULAR,
 	};
 
 	rule.port = priv->port;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index a69a908614e6..b35f94700093 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -127,7 +127,7 @@ static void mlx4_en_filter_work(struct work_struct *work)
 		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
 		.exclusive = 1,
 		.allow_loopback = 1,
-		.promisc_mode = MLX4_FS_PROMISC_NONE,
+		.promisc_mode = MLX4_FS_REGULAR,
 		.port = priv->port,
 		.priority = MLX4_DOMAIN_RFS,
 	};
@@ -448,7 +448,7 @@ static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
 		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
 		.exclusive = 0,
 		.allow_loopback = 1,
-		.promisc_mode = MLX4_FS_PROMISC_NONE,
+		.promisc_mode = MLX4_FS_REGULAR,
 		.priority = MLX4_DOMAIN_NIC,
 	};
 
@@ -795,7 +795,7 @@ static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
 			err = mlx4_flow_steer_promisc_add(mdev->dev,
 							  priv->port,
 							  priv->base_qpn,
-							  MLX4_FS_PROMISC_UPLINK);
+							  MLX4_FS_ALL_DEFAULT);
 			if (err)
 				en_err(priv, "Failed enabling promiscuous mode\n");
 			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
@@ -858,7 +858,7 @@ static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
 	case MLX4_STEERING_MODE_DEVICE_MANAGED:
 		err = mlx4_flow_steer_promisc_remove(mdev->dev,
 						     priv->port,
-						     MLX4_FS_PROMISC_UPLINK);
+						     MLX4_FS_ALL_DEFAULT);
 		if (err)
 			en_err(priv, "Failed disabling promiscuous mode\n");
 		priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
@@ -919,7 +919,7 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
 			err = mlx4_flow_steer_promisc_add(mdev->dev,
 							  priv->port,
 							  priv->base_qpn,
-							  MLX4_FS_PROMISC_ALL_MULTI);
+							  MLX4_FS_MC_DEFAULT);
 			break;
 
 		case MLX4_STEERING_MODE_B0:
@@ -942,7 +942,7 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
 		case MLX4_STEERING_MODE_DEVICE_MANAGED:
 			err = mlx4_flow_steer_promisc_remove(mdev->dev,
 							     priv->port,
-							     MLX4_FS_PROMISC_ALL_MULTI);
+							     MLX4_FS_MC_DEFAULT);
 			break;
 
 		case MLX4_STEERING_MODE_B0:
@@ -1621,10 +1621,10 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
 			       MLX4_EN_FLAG_MC_PROMISC);
 		mlx4_flow_steer_promisc_remove(mdev->dev,
 					       priv->port,
-					       MLX4_FS_PROMISC_UPLINK);
+					       MLX4_FS_ALL_DEFAULT);
 		mlx4_flow_steer_promisc_remove(mdev->dev,
 					       priv->port,
-					       MLX4_FS_PROMISC_ALL_MULTI);
+					       MLX4_FS_MC_DEFAULT);
 	} else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
 		priv->flags &= ~MLX4_EN_FLAG_PROMISC;
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 8e3123a1df88..6000342f9725 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -497,8 +497,8 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 			break;
 
 		case MLX4_EVENT_TYPE_SRQ_LIMIT:
-			mlx4_warn(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
-				  __func__);
+			mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
+				 __func__);
 		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
 			if (mlx4_is_master(dev)) {
 				/* forward only to slave owning the SRQ */
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index ffc78d2cb0cf..f3e804f2a35f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -645,25 +645,37 @@ static int find_entry(struct mlx4_dev *dev, u8 port,
 	return err;
 }
 
+static const u8 __promisc_mode[] = {
+	[MLX4_FS_REGULAR]	= 0x0,
+	[MLX4_FS_ALL_DEFAULT]	= 0x1,
+	[MLX4_FS_MC_DEFAULT]	= 0x3,
+	[MLX4_FS_UC_SNIFFER]	= 0x4,
+	[MLX4_FS_MC_SNIFFER]	= 0x5,
+};
+
+int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
+				    enum mlx4_net_trans_promisc_mode flow_type)
+{
+	if (flow_type >= MLX4_FS_MODE_NUM || flow_type < 0) {
+		mlx4_err(dev, "Invalid flow type. type = %d\n", flow_type);
+		return -EINVAL;
+	}
+	return __promisc_mode[flow_type];
+}
+EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_mode);
+
 static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl,
 				  struct mlx4_net_trans_rule_hw_ctrl *hw)
 {
-	static const u8 __promisc_mode[] = {
-		[MLX4_FS_PROMISC_NONE]   = 0x0,
-		[MLX4_FS_PROMISC_UPLINK] = 0x1,
-		[MLX4_FS_PROMISC_FUNCTION_PORT] = 0x2,
-		[MLX4_FS_PROMISC_ALL_MULTI] = 0x3,
-	};
-
-	u32 dw = 0;
-
-	dw  = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0;
-	dw |= ctrl->exclusive ? (1 << 2) : 0;
-	dw |= ctrl->allow_loopback ? (1 << 3) : 0;
-	dw |= __promisc_mode[ctrl->promisc_mode] << 8;
-	dw |= ctrl->priority << 16;
-
-	hw->ctrl = cpu_to_be32(dw);
+	u8 flags = 0;
+
+	flags = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0;
+	flags |= ctrl->exclusive ? (1 << 2) : 0;
+	flags |= ctrl->allow_loopback ? (1 << 3) : 0;
+
+	hw->flags = flags;
+	hw->type = __promisc_mode[ctrl->promisc_mode];
+	hw->prio = cpu_to_be16(ctrl->priority);
 	hw->port = ctrl->port;
 	hw->qpn = cpu_to_be32(ctrl->qpn);
 }
@@ -677,29 +689,51 @@ const u16 __sw_id_hw[] = {
 	[MLX4_NET_TRANS_RULE_ID_UDP]     = 0xE006
 };
 
+int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
+				  enum mlx4_net_trans_rule_id id)
+{
+	if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) {
+		mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
+		return -EINVAL;
+	}
+	return __sw_id_hw[id];
+}
+EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_id);
+
+static const int __rule_hw_sz[] = {
+	[MLX4_NET_TRANS_RULE_ID_ETH] =
+		sizeof(struct mlx4_net_trans_rule_hw_eth),
+	[MLX4_NET_TRANS_RULE_ID_IB] =
+		sizeof(struct mlx4_net_trans_rule_hw_ib),
+	[MLX4_NET_TRANS_RULE_ID_IPV6] = 0,
+	[MLX4_NET_TRANS_RULE_ID_IPV4] =
+		sizeof(struct mlx4_net_trans_rule_hw_ipv4),
+	[MLX4_NET_TRANS_RULE_ID_TCP] =
+		sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
+	[MLX4_NET_TRANS_RULE_ID_UDP] =
+		sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
+};
+
+int mlx4_hw_rule_sz(struct mlx4_dev *dev,
+		    enum mlx4_net_trans_rule_id id)
+{
+	if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) {
+		mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
+		return -EINVAL;
+	}
+
+	return __rule_hw_sz[id];
+}
+EXPORT_SYMBOL_GPL(mlx4_hw_rule_sz);
+
 static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
 			    struct _rule_hw *rule_hw)
 {
-	static const size_t __rule_hw_sz[] = {
-		[MLX4_NET_TRANS_RULE_ID_ETH] =
-			sizeof(struct mlx4_net_trans_rule_hw_eth),
-		[MLX4_NET_TRANS_RULE_ID_IB] =
-			sizeof(struct mlx4_net_trans_rule_hw_ib),
-		[MLX4_NET_TRANS_RULE_ID_IPV6] = 0,
-		[MLX4_NET_TRANS_RULE_ID_IPV4] =
-			sizeof(struct mlx4_net_trans_rule_hw_ipv4),
-		[MLX4_NET_TRANS_RULE_ID_TCP] =
-			sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
-		[MLX4_NET_TRANS_RULE_ID_UDP] =
-			sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
-	};
-	if (spec->id >= MLX4_NET_TRANS_RULE_NUM) {
-		mlx4_err(dev, "Invalid network rule id. id = %d\n", spec->id);
+	if (mlx4_hw_rule_sz(dev, spec->id) < 0)
 		return -EINVAL;
-	}
-	memset(rule_hw, 0, __rule_hw_sz[spec->id]);
+	memset(rule_hw, 0, mlx4_hw_rule_sz(dev, spec->id));
 	rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]);
-	rule_hw->size = __rule_hw_sz[spec->id] >> 2;
+	rule_hw->size = mlx4_hw_rule_sz(dev, spec->id) >> 2;
 
 	switch (spec->id) {
 	case MLX4_NET_TRANS_RULE_ID_ETH:
@@ -713,12 +747,12 @@ static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
 			rule_hw->eth.ether_type_enable = 1;
 			rule_hw->eth.ether_type = spec->eth.ether_type;
 		}
-		rule_hw->eth.vlan_id = spec->eth.vlan_id;
-		rule_hw->eth.vlan_id_msk = spec->eth.vlan_id_msk;
+		rule_hw->eth.vlan_tag = spec->eth.vlan_id;
+		rule_hw->eth.vlan_tag_msk = spec->eth.vlan_id_msk;
 		break;
 
 	case MLX4_NET_TRANS_RULE_ID_IB:
-		rule_hw->ib.qpn = spec->ib.r_qpn;
+		rule_hw->ib.l3_qpn = spec->ib.l3_qpn;
 		rule_hw->ib.qpn_mask = spec->ib.qpn_msk;
 		memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16);
 		memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16);
@@ -1136,7 +1170,7 @@ int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp,
 	struct mlx4_net_trans_rule rule = {
 		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
 		.exclusive = 0,
-		.promisc_mode = MLX4_FS_PROMISC_NONE,
+		.promisc_mode = MLX4_FS_REGULAR,
 		.priority = MLX4_DOMAIN_NIC,
 	};
 
@@ -1229,11 +1263,10 @@ int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port,
 	u64 *regid_p;
 
 	switch (mode) {
-	case MLX4_FS_PROMISC_UPLINK:
-	case MLX4_FS_PROMISC_FUNCTION_PORT:
+	case MLX4_FS_ALL_DEFAULT:
 		regid_p = &dev->regid_promisc_array[port];
 		break;
-	case MLX4_FS_PROMISC_ALL_MULTI:
+	case MLX4_FS_MC_DEFAULT:
 		regid_p = &dev->regid_allmulti_array[port];
 		break;
 	default:
@@ -1260,11 +1293,10 @@ int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
 	u64 *regid_p;
 
 	switch (mode) {
-	case MLX4_FS_PROMISC_UPLINK:
-	case MLX4_FS_PROMISC_FUNCTION_PORT:
+	case MLX4_FS_ALL_DEFAULT:
 		regid_p = &dev->regid_promisc_array[port];
 		break;
-	case MLX4_FS_PROMISC_ALL_MULTI:
+	case MLX4_FS_MC_DEFAULT:
 		regid_p = &dev->regid_allmulti_array[port];
 		break;
 	default:
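
The exported helpers added above (mlx4_map_sw_to_hw_steering_mode(), mlx4_map_sw_to_hw_steering_id(), mlx4_hw_rule_sz()) all follow one pattern: a designated-initializer table indexed by the software enum, guarded by a range check so callers get -EINVAL instead of an out-of-bounds read. A stand-alone sketch of that pattern using the promiscuous-mode values from the hunk (everything else is illustrative):

#include <stdio.h>

enum fs_mode {
	FS_REGULAR = 1,
	FS_ALL_DEFAULT,
	FS_MC_DEFAULT,
	FS_UC_SNIFFER,
	FS_MC_SNIFFER,
	FS_MODE_NUM,		/* should be last */
};

/* software enum -> hardware encoding, mirroring __promisc_mode[] above */
static const unsigned char hw_mode[] = {
	[FS_REGULAR]	 = 0x0,
	[FS_ALL_DEFAULT] = 0x1,
	[FS_MC_DEFAULT]	 = 0x3,
	[FS_UC_SNIFFER]	 = 0x4,
	[FS_MC_SNIFFER]	 = 0x5,
};

static int map_sw_to_hw_mode(int mode)
{
	if (mode < 0 || mode >= FS_MODE_NUM)
		return -1;	/* stands in for -EINVAL */
	return hw_mode[mode];
}

int main(void)
{
	printf("FS_MC_DEFAULT -> 0x%x\n", map_sw_to_hw_mode(FS_MC_DEFAULT));
	printf("out of range  -> %d\n", map_sw_to_hw_mode(42));
	return 0;
}
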
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index eac3dae10efe..df15bb6631cc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -730,85 +730,6 @@ struct mlx4_steer {
 	struct list_head steer_entries[MLX4_NUM_STEERS];
 };
 
-struct mlx4_net_trans_rule_hw_ctrl {
-	__be32 ctrl;
-	u8 rsvd1;
-	u8 funcid;
-	u8 vep;
-	u8 port;
-	__be32 qpn;
-	__be32 rsvd2;
-};
-
-struct mlx4_net_trans_rule_hw_ib {
-	u8 size;
-	u8 rsvd1;
-	__be16 id;
-	u32 rsvd2;
-	__be32 qpn;
-	__be32 qpn_mask;
-	u8 dst_gid[16];
-	u8 dst_gid_msk[16];
-} __packed;
-
-struct mlx4_net_trans_rule_hw_eth {
-	u8	size;
-	u8	rsvd;
-	__be16	id;
-	u8	rsvd1[6];
-	u8	dst_mac[6];
-	u16	rsvd2;
-	u8	dst_mac_msk[6];
-	u16	rsvd3;
-	u8	src_mac[6];
-	u16	rsvd4;
-	u8	src_mac_msk[6];
-	u8	rsvd5;
-	u8	ether_type_enable;
-	__be16	ether_type;
-	__be16	vlan_id_msk;
-	__be16	vlan_id;
-} __packed;
-
-struct mlx4_net_trans_rule_hw_tcp_udp {
-	u8	size;
-	u8	rsvd;
-	__be16	id;
-	__be16	rsvd1[3];
-	__be16	dst_port;
-	__be16	rsvd2;
-	__be16	dst_port_msk;
-	__be16	rsvd3;
-	__be16	src_port;
-	__be16	rsvd4;
-	__be16	src_port_msk;
-} __packed;
-
-struct mlx4_net_trans_rule_hw_ipv4 {
-	u8	size;
-	u8	rsvd;
-	__be16	id;
-	__be32	rsvd1;
-	__be32	dst_ip;
-	__be32	dst_ip_msk;
-	__be32	src_ip;
-	__be32	src_ip_msk;
-} __packed;
-
-struct _rule_hw {
-	union {
-		struct {
-			u8 size;
-			u8 rsvd;
-			__be16 id;
-		};
-		struct mlx4_net_trans_rule_hw_eth eth;
-		struct mlx4_net_trans_rule_hw_ib ib;
-		struct mlx4_net_trans_rule_hw_ipv4 ipv4;
-		struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp;
-	};
-};
-
 enum {
 	MLX4_PCI_DEV_IS_VF		= 1 << 0,
 	MLX4_PCI_DEV_FORCE_SENSE_PORT	= 1 << 1,
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index e329fe1f11b7..79fd269e2c54 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -298,3 +298,18 @@ void mlx4_cleanup_srq_table(struct mlx4_dev *dev)
 		return;
 	mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap);
 }
+
+struct mlx4_srq *mlx4_srq_lookup(struct mlx4_dev *dev, u32 srqn)
+{
+	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+	struct mlx4_srq *srq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&srq_table->lock, flags);
+	srq = radix_tree_lookup(&srq_table->tree,
+				srqn & (dev->caps.num_srqs - 1));
+	spin_unlock_irqrestore(&srq_table->lock, flags);
+
+	return srq;
+}
+EXPORT_SYMBOL_GPL(mlx4_srq_lookup);
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 53acaf64189f..a51b0134ce18 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -903,11 +903,12 @@ static inline int map_hw_to_sw_id(u16 header_id)
 }
 
 enum mlx4_net_trans_promisc_mode {
-	MLX4_FS_PROMISC_NONE = 0,
-	MLX4_FS_PROMISC_UPLINK,
-	/* For future use. Not implemented yet */
-	MLX4_FS_PROMISC_FUNCTION_PORT,
-	MLX4_FS_PROMISC_ALL_MULTI,
+	MLX4_FS_REGULAR = 1,
+	MLX4_FS_ALL_DEFAULT,
+	MLX4_FS_MC_DEFAULT,
+	MLX4_FS_UC_SNIFFER,
+	MLX4_FS_MC_SNIFFER,
+	MLX4_FS_MODE_NUM, /* should be last */
 };
 
 struct mlx4_spec_eth {
@@ -936,7 +937,7 @@ struct mlx4_spec_ipv4 {
 };
 
 struct mlx4_spec_ib {
-	__be32	r_qpn;
+	__be32	l3_qpn;
 	__be32	qpn_msk;
 	u8	dst_gid[16];
 	u8	dst_gid_msk[16];
@@ -969,6 +970,92 @@ struct mlx4_net_trans_rule {
 	u32	qpn;
 };
 
+struct mlx4_net_trans_rule_hw_ctrl {
+	__be16 prio;
+	u8 type;
+	u8 flags;
+	u8 rsvd1;
+	u8 funcid;
+	u8 vep;
+	u8 port;
+	__be32 qpn;
+	__be32 rsvd2;
+};
+
+struct mlx4_net_trans_rule_hw_ib {
+	u8 size;
+	u8 rsvd1;
+	__be16 id;
+	u32 rsvd2;
+	__be32 l3_qpn;
+	__be32 qpn_mask;
+	u8 dst_gid[16];
+	u8 dst_gid_msk[16];
+} __packed;
+
+struct mlx4_net_trans_rule_hw_eth {
+	u8	size;
+	u8	rsvd;
+	__be16	id;
+	u8	rsvd1[6];
+	u8	dst_mac[6];
+	u16	rsvd2;
+	u8	dst_mac_msk[6];
+	u16	rsvd3;
+	u8	src_mac[6];
+	u16	rsvd4;
+	u8	src_mac_msk[6];
+	u8	rsvd5;
+	u8	ether_type_enable;
+	__be16	ether_type;
+	__be16	vlan_tag_msk;
+	__be16	vlan_tag;
+} __packed;
+
+struct mlx4_net_trans_rule_hw_tcp_udp {
+	u8	size;
+	u8	rsvd;
+	__be16	id;
+	__be16	rsvd1[3];
+	__be16	dst_port;
+	__be16	rsvd2;
+	__be16	dst_port_msk;
+	__be16	rsvd3;
+	__be16	src_port;
+	__be16	rsvd4;
+	__be16	src_port_msk;
+} __packed;
+
+struct mlx4_net_trans_rule_hw_ipv4 {
+	u8	size;
+	u8	rsvd;
+	__be16	id;
+	__be32	rsvd1;
+	__be32	dst_ip;
+	__be32	dst_ip_msk;
+	__be32	src_ip;
+	__be32	src_ip_msk;
+} __packed;
+
+struct _rule_hw {
+	union {
+		struct {
+			u8 size;
+			u8 rsvd;
+			__be16 id;
+		};
+		struct mlx4_net_trans_rule_hw_eth eth;
+		struct mlx4_net_trans_rule_hw_ib ib;
+		struct mlx4_net_trans_rule_hw_ipv4 ipv4;
+		struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp;
+	};
+};
+
+/* translating DMFS verbs sniffer rule to the FW API would need two reg IDs */
+struct mlx4_flow_handle {
+	u64 reg_id[2];
+};
+
 int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn,
 				enum mlx4_net_trans_promisc_mode mode);
 int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
@@ -1018,6 +1105,11 @@ void mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
 int mlx4_flow_attach(struct mlx4_dev *dev,
 		     struct mlx4_net_trans_rule *rule, u64 *reg_id);
 int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id);
+int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
+				    enum mlx4_net_trans_promisc_mode flow_type);
+int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
+				  enum mlx4_net_trans_rule_id id);
+int mlx4_hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id);
 
 void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port,
 			  int i, int val);
diff --git a/include/linux/mlx4/srq.h b/include/linux/mlx4/srq.h
index 799a0697a383..192e0f7784f2 100644
--- a/include/linux/mlx4/srq.h
+++ b/include/linux/mlx4/srq.h
@@ -39,4 +39,6 @@ struct mlx4_wqe_srq_next_seg {
 	u32			reserved2[3];
 };
 
+struct mlx4_srq *mlx4_srq_lookup(struct mlx4_dev *dev, u32 srqn);
+
 #endif /* MLX4_SRQ_H */