-rw-r--r--  drivers/infiniband/core/iwcm.c                  |   2
-rw-r--r--  drivers/infiniband/core/verbs.c                 |   3
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_resource.c     |   4
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c     |   2
-rw-r--r--  drivers/infiniband/hw/cxgb4/id_table.c          |   4
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c                |  25
-rw-r--r--  drivers/infiniband/hw/mlx4/cq.c                 |  21
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c                |   2
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c                 |   6
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c         |   2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c       |   2
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.c        |  24
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h        |  24
-rw-r--r--  drivers/infiniband/ulp/iser/iser_memory.c       |   3
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c        |  36
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c           |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c  |  16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c         |   4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mcg.c        | 120
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h       |  79
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/srq.c        |  15
-rw-r--r--  include/linux/mlx4/device.h                     | 104
-rw-r--r--  include/linux/mlx4/srq.h                        |   2
24 files changed, 311 insertions(+), 193 deletions(-)
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 0bb99bb38809..c47c2034ca71 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -878,6 +878,8 @@ static void cm_work_handler(struct work_struct *_work)
 			}
 			return;
 		}
+		if (empty)
+			return;
 		spin_lock_irqsave(&cm_id_priv->lock, flags);
 	}
 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index a8fdd3381405..22192deb8828 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -348,7 +348,8 @@ static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
 	struct ib_qp *qp = context;
 
 	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
-		event->element.qp->event_handler(event, event->element.qp->qp_context);
+		if (event->element.qp->event_handler)
+			event->element.qp->event_handler(event, event->element.qp->qp_context);
 }
 
 static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
diff --git a/drivers/infiniband/hw/cxgb3/cxio_resource.c b/drivers/infiniband/hw/cxgb3/cxio_resource.c
index 31f9201b2980..c40088ecf9f3 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_resource.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_resource.c
@@ -62,13 +62,13 @@ static int __cxio_init_resource_fifo(struct kfifo *fifo,
 		kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32));
 	if (random) {
 		j = 0;
-		random_bytes = random32();
+		random_bytes = prandom_u32();
 		for (i = 0; i < RANDOM_SIZE; i++)
 			rarray[i] = i + skip_low;
 		for (i = skip_low + RANDOM_SIZE; i < nr - skip_high; i++) {
 			if (j >= RANDOM_SIZE) {
 				j = 0;
-				random_bytes = random32();
+				random_bytes = prandom_u32();
 			}
 			idx = (random_bytes >> (j * 2)) & 0xF;
 			kfifo_in(fifo,
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 9c12da0cbd32..e87f2201b220 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -559,7 +559,7 @@ static int iwch_reregister_phys_mem(struct ib_mr *mr,
 	__be64 *page_list = NULL;
 	int shift = 0;
 	u64 total_size;
-	int npages;
+	int npages = 0;
 	int ret;
 
 	PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);
diff --git a/drivers/infiniband/hw/cxgb4/id_table.c b/drivers/infiniband/hw/cxgb4/id_table.c
index f95e5df30db2..0161ae6ad629 100644
--- a/drivers/infiniband/hw/cxgb4/id_table.c
+++ b/drivers/infiniband/hw/cxgb4/id_table.c
@@ -54,7 +54,7 @@ u32 c4iw_id_alloc(struct c4iw_id_table *alloc)
 
 	if (obj < alloc->max) {
 		if (alloc->flags & C4IW_ID_TABLE_F_RANDOM)
-			alloc->last += random32() % RANDOM_SKIP;
+			alloc->last += prandom_u32() % RANDOM_SKIP;
 		else
 			alloc->last = obj + 1;
 		if (alloc->last >= alloc->max)
@@ -88,7 +88,7 @@ int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
 	alloc->start = start;
 	alloc->flags = flags;
 	if (flags & C4IW_ID_TABLE_F_RANDOM)
-		alloc->last = random32() % RANDOM_SKIP;
+		alloc->last = prandom_u32() % RANDOM_SKIP;
 	else
 		alloc->last = 0;
 	alloc->max = num;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 70b1808a08f4..ed49ab345b6e 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -100,6 +100,16 @@ static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
 	return 0;
 }
 
+static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
+{
+	int ret = -ENOSYS;
+	if (user)
+		ret = alloc_oc_sq(rdev, sq);
+	if (ret)
+		ret = alloc_host_sq(rdev, sq);
+	return ret;
+}
+
 static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 		      struct c4iw_dev_ucontext *uctx)
 {
@@ -168,18 +178,9 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 		goto free_sw_rq;
 	}
 
-	if (user) {
-		ret = alloc_oc_sq(rdev, &wq->sq);
-		if (ret)
-			goto free_hwaddr;
-
-		ret = alloc_host_sq(rdev, &wq->sq);
-		if (ret)
-			goto free_sq;
-	} else
-		ret = alloc_host_sq(rdev, &wq->sq);
-	if (ret)
-		goto free_hwaddr;
+	ret = alloc_sq(rdev, &wq->sq, user);
+	if (ret)
+		goto free_hwaddr;
 	memset(wq->sq.queue, 0, wq->sq.memsize);
 	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
 
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index ae67df35dd4d..dab4b5188a27 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -33,6 +33,7 @@
 
 #include <linux/mlx4/cq.h>
 #include <linux/mlx4/qp.h>
+#include <linux/mlx4/srq.h>
 #include <linux/slab.h>
 
 #include "mlx4_ib.h"
@@ -585,6 +586,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 	struct mlx4_qp *mqp;
 	struct mlx4_ib_wq *wq;
 	struct mlx4_ib_srq *srq;
+	struct mlx4_srq *msrq = NULL;
 	int is_send;
 	int is_error;
 	u32 g_mlpath_rqpn;
@@ -653,6 +655,20 @@ repoll:
 
 	wc->qp = &(*cur_qp)->ibqp;
 
+	if (wc->qp->qp_type == IB_QPT_XRC_TGT) {
+		u32 srq_num;
+		g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
+		srq_num = g_mlpath_rqpn & 0xffffff;
+		/* SRQ is also in the radix tree */
+		msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
+				       srq_num);
+		if (unlikely(!msrq)) {
+			pr_warn("CQ %06x with entry for unknown SRQN %06x\n",
+				cq->mcq.cqn, srq_num);
+			return -EINVAL;
+		}
+	}
+
 	if (is_send) {
 		wq = &(*cur_qp)->sq;
 		if (!(*cur_qp)->sq_signal_bits) {
@@ -666,6 +682,11 @@ repoll:
 		wqe_ctr = be16_to_cpu(cqe->wqe_index);
 		wc->wr_id = srq->wrid[wqe_ctr];
 		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
+	} else if (msrq) {
+		srq = to_mibsrq(msrq);
+		wqe_ctr = be16_to_cpu(cqe->wqe_index);
+		wc->wr_id = srq->wrid[wqe_ctr];
+		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
 	} else {
 		wq = &(*cur_qp)->rq;
 		tail = wq->tail & (wq->wqe_cnt - 1);
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 934792c477bc..4d599cedbb0b 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -93,7 +93,7 @@ static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
 __be64 mlx4_ib_gen_node_guid(void)
 {
 #define NODE_GUID_HI	((u64) (((u64)IB_OPENIB_OUI) << 40))
-	return cpu_to_be64(NODE_GUID_HI | random32());
+	return cpu_to_be64(NODE_GUID_HI | prandom_u32());
 }
 
 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 35cced2a4da8..4f10af2905b5 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1292,6 +1292,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
 		context->sq_size_stride |= !!qp->sq_no_prefetch << 7;
 		context->xrcd = cpu_to_be32((u32) qp->xrcdn);
+		if (ibqp->qp_type == IB_QPT_RAW_PACKET)
+			context->param3 |= cpu_to_be32(1 << 30);
 	}
 
 	if (qp->ibqp.uobject)
@@ -1458,6 +1460,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		}
 	}
 
+	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET)
+		context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
+					MLX4_IB_LINK_TYPE_ETH;
+
 	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
 	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
 		sqd_event = 1;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 1ef880de3a41..3eceb61e3532 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -460,7 +460,7 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
 		goto err_qp;
 	}
 
-	psn = random32() & 0xffffff;
+	psn = prandom_u32() & 0xffffff;
 	ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
 	if (ret)
 		goto err_modify;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 8534afd04e7c..31dd2a7a880f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -828,7 +828,7 @@ static int ipoib_hard_header(struct sk_buff *skb,
 	 */
 	memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);
 
-	return 0;
+	return sizeof *header;
 }
 
 static void ipoib_set_mcast_list(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 0ab8c9cc3a78..f19b0998a53c 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -82,10 +82,10 @@ module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
 
 int iser_debug_level = 0;
 
-MODULE_DESCRIPTION("iSER (iSCSI Extensions for RDMA) Datamover "
-		   "v" DRV_VER " (" DRV_DATE ")");
+MODULE_DESCRIPTION("iSER (iSCSI Extensions for RDMA) Datamover");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Alex Nezhinsky, Dan Bar Dov, Or Gerlitz");
+MODULE_VERSION(DRV_VER);
 
 module_param_named(debug_level, iser_debug_level, int, 0644);
 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:disabled)");
@@ -370,8 +370,8 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
 	/* binds the iSER connection retrieved from the previously
 	 * connected ep_handle to the iSCSI layer connection. exchanges
 	 * connection pointers */
-	iser_err("binding iscsi/iser conn %p %p to ib_conn %p\n",
+	iser_info("binding iscsi/iser conn %p %p to ib_conn %p\n",
 		  conn, conn->dd_data, ib_conn);
 	iser_conn = conn->dd_data;
 	ib_conn->iser_conn = iser_conn;
 	iser_conn->ib_conn = ib_conn;
@@ -475,28 +475,28 @@ iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn,
 	case ISCSI_PARAM_HDRDGST_EN:
 		sscanf(buf, "%d", &value);
 		if (value) {
-			printk(KERN_ERR "DataDigest wasn't negotiated to None");
+			iser_err("DataDigest wasn't negotiated to None");
 			return -EPROTO;
 		}
 		break;
 	case ISCSI_PARAM_DATADGST_EN:
 		sscanf(buf, "%d", &value);
 		if (value) {
-			printk(KERN_ERR "DataDigest wasn't negotiated to None");
+			iser_err("DataDigest wasn't negotiated to None");
 			return -EPROTO;
 		}
 		break;
 	case ISCSI_PARAM_IFMARKER_EN:
 		sscanf(buf, "%d", &value);
 		if (value) {
-			printk(KERN_ERR "IFMarker wasn't negotiated to No");
+			iser_err("IFMarker wasn't negotiated to No");
 			return -EPROTO;
 		}
 		break;
 	case ISCSI_PARAM_OFMARKER_EN:
 		sscanf(buf, "%d", &value);
 		if (value) {
-			printk(KERN_ERR "OFMarker wasn't negotiated to No");
+			iser_err("OFMarker wasn't negotiated to No");
 			return -EPROTO;
 		}
 		break;
@@ -596,7 +596,7 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
 		     ib_conn->state == ISER_CONN_DOWN))
 		rc = -1;
 
-	iser_err("ib conn %p rc = %d\n", ib_conn, rc);
+	iser_info("ib conn %p rc = %d\n", ib_conn, rc);
 
 	if (rc > 0)
 		return 1; /* success, this is the equivalent of POLLOUT */
@@ -623,7 +623,7 @@ iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
 	iscsi_suspend_tx(ib_conn->iser_conn->iscsi_conn);
 
 
-	iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state);
+	iser_info("ib conn %p state %d\n", ib_conn, ib_conn->state);
 	iser_conn_terminate(ib_conn);
 }
 
@@ -682,7 +682,7 @@ static umode_t iser_attr_is_visible(int param_type, int param)
 
 static struct scsi_host_template iscsi_iser_sht = {
 	.module                 = THIS_MODULE,
-	.name                   = "iSCSI Initiator over iSER, v." DRV_VER,
+	.name                   = "iSCSI Initiator over iSER",
 	.queuecommand           = iscsi_queuecommand,
 	.change_queue_depth	= iscsi_change_queue_depth,
 	.sg_tablesize           = ISCSI_ISER_SG_TABLESIZE,
@@ -740,7 +740,7 @@ static int __init iser_init(void)
 	iser_dbg("Starting iSER datamover...\n");
 
 	if (iscsi_max_lun < 1) {
-		printk(KERN_ERR "Invalid max_lun value of %u\n", iscsi_max_lun);
+		iser_err("Invalid max_lun value of %u\n", iscsi_max_lun);
 		return -EINVAL;
 	}
 
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 5babdb35bda7..06f578cde75b 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -42,6 +42,7 @@
 
 #include <linux/types.h>
 #include <linux/net.h>
+#include <linux/printk.h>
 #include <scsi/libiscsi.h>
 #include <scsi/scsi_transport_iscsi.h>
 
@@ -65,20 +66,26 @@
 
 #define DRV_NAME	"iser"
 #define PFX		DRV_NAME ": "
-#define DRV_VER		"0.1"
-#define DRV_DATE	"May 7th, 2006"
+#define DRV_VER		"1.1"
 
 #define iser_dbg(fmt, arg...)				\
 	do {						\
-		if (iser_debug_level > 1)		\
+		if (iser_debug_level > 2)		\
 			printk(KERN_DEBUG PFX "%s:" fmt,\
 				__func__ , ## arg);	\
 	} while (0)
 
 #define iser_warn(fmt, arg...)				\
 	do {						\
+		if (iser_debug_level > 1)		\
+			pr_warn(PFX "%s:" fmt,		\
+				__func__ , ## arg);	\
+	} while (0)
+
+#define iser_info(fmt, arg...)				\
+	do {						\
 		if (iser_debug_level > 0)		\
-			printk(KERN_DEBUG PFX "%s:" fmt,\
+			pr_info(PFX "%s:" fmt,		\
 				__func__ , ## arg);	\
 	} while (0)
 
@@ -133,6 +140,15 @@ struct iser_hdr {
 	__be64  read_va;
 } __attribute__((packed));
 
+
+#define ISER_ZBVA_NOT_SUPPORTED		0x80
+#define ISER_SEND_W_INV_NOT_SUPPORTED	0x40
+
+struct iser_cm_hdr {
+	u8      flags;
+	u8      rsvd[3];
+} __packed;
+
 /* Constant PDU lengths calculations */
 #define ISER_HEADERS_LEN  (sizeof(struct iser_hdr) + sizeof(struct iscsi_hdr))
 
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index be1edb04b085..68ebb7fe072a 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -416,8 +416,9 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
 			for (i=0 ; i<ib_conn->page_vec->length ; i++)
 				iser_err("page_vec[%d] = 0x%llx\n", i,
 					 (unsigned long long) ib_conn->page_vec->pages[i]);
-			return err;
 		}
+		if (err)
+			return err;
 	}
 	return 0;
 }
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 4debadc53106..5278916c3103 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -74,8 +74,9 @@ static int iser_create_device_ib_res(struct iser_device *device)
 	struct iser_cq_desc *cq_desc;
 
 	device->cqs_used = min(ISER_MAX_CQ, device->ib_device->num_comp_vectors);
-	iser_err("using %d CQs, device %s supports %d vectors\n", device->cqs_used,
-		 device->ib_device->name, device->ib_device->num_comp_vectors);
+	iser_info("using %d CQs, device %s supports %d vectors\n",
+		  device->cqs_used, device->ib_device->name,
+		  device->ib_device->num_comp_vectors);
 
 	device->cq_desc = kmalloc(sizeof(struct iser_cq_desc) * device->cqs_used,
 				  GFP_KERNEL);
@@ -262,7 +263,7 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
 			min_index = index;
 	device->cq_active_qps[min_index]++;
 	mutex_unlock(&ig.connlist_mutex);
-	iser_err("cq index %d used for ib_conn %p\n", min_index, ib_conn);
+	iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn);
 
 	init_attr.event_handler = iser_qp_event_callback;
 	init_attr.qp_context	= (void *)ib_conn;
@@ -280,9 +281,9 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
 		goto out_err;
 
 	ib_conn->qp = ib_conn->cma_id->qp;
-	iser_err("setting conn %p cma_id %p: fmr_pool %p qp %p\n",
-		 ib_conn, ib_conn->cma_id,
-		 ib_conn->fmr_pool, ib_conn->cma_id->qp);
+	iser_info("setting conn %p cma_id %p: fmr_pool %p qp %p\n",
+		  ib_conn, ib_conn->cma_id,
+		  ib_conn->fmr_pool, ib_conn->cma_id->qp);
 	return ret;
 
 out_err:
@@ -299,9 +300,9 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
 	int cq_index;
 	BUG_ON(ib_conn == NULL);
 
-	iser_err("freeing conn %p cma_id %p fmr pool %p qp %p\n",
-		 ib_conn, ib_conn->cma_id,
-		 ib_conn->fmr_pool, ib_conn->qp);
+	iser_info("freeing conn %p cma_id %p fmr pool %p qp %p\n",
+		  ib_conn, ib_conn->cma_id,
+		  ib_conn->fmr_pool, ib_conn->qp);
 
 	/* qp is created only once both addr & route are resolved */
 	if (ib_conn->fmr_pool != NULL)
@@ -379,7 +380,7 @@ static void iser_device_try_release(struct iser_device *device)
 {
 	mutex_lock(&ig.device_list_mutex);
 	device->refcount--;
-	iser_err("device %p refcount %d\n",device,device->refcount);
+	iser_info("device %p refcount %d\n", device, device->refcount);
 	if (!device->refcount) {
 		iser_free_device_ib_res(device);
 		list_del(&device->ig_list);
@@ -498,6 +499,7 @@ static int iser_route_handler(struct rdma_cm_id *cma_id)
 {
 	struct rdma_conn_param conn_param;
 	int    ret;
+	struct iser_cm_hdr req_hdr;
 
 	ret = iser_create_ib_conn_res((struct iser_conn *)cma_id->context);
 	if (ret)
@@ -509,6 +511,12 @@ static int iser_route_handler(struct rdma_cm_id *cma_id)
 	conn_param.retry_count	   = 7;
 	conn_param.rnr_retry_count = 6;
 
+	memset(&req_hdr, 0, sizeof(req_hdr));
+	req_hdr.flags = (ISER_ZBVA_NOT_SUPPORTED |
+			ISER_SEND_W_INV_NOT_SUPPORTED);
+	conn_param.private_data		= (void *)&req_hdr;
+	conn_param.private_data_len	= sizeof(struct iser_cm_hdr);
+
 	ret = rdma_connect(cma_id, &conn_param);
 	if (ret) {
 		iser_err("failure connecting: %d\n", ret);
@@ -558,8 +566,8 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
 {
 	int ret = 0;
 
-	iser_err("event %d status %d conn %p id %p\n",
-		event->event, event->status, cma_id->context, cma_id);
+	iser_info("event %d status %d conn %p id %p\n",
+		  event->event, event->status, cma_id->context, cma_id);
 
 	switch (event->event) {
 	case RDMA_CM_EVENT_ADDR_RESOLVED:
@@ -619,8 +627,8 @@ int iser_connect(struct iser_conn *ib_conn,
 	/* the device is known only --after-- address resolution */
 	ib_conn->device = NULL;
 
-	iser_err("connecting to: %pI4, port 0x%x\n",
-		 &dst_addr->sin_addr, dst_addr->sin_port);
+	iser_info("connecting to: %pI4, port 0x%x\n",
+		  &dst_addr->sin_addr, dst_addr->sin_port);
 
 	ib_conn->state = ISER_CONN_PENDING;
 
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index c09d41b1a2ff..b08ca7a9f76b 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1374,7 +1374,7 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
 		target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
 		break;
 	default:
-		WARN_ON("ERROR: unexpected command state");
+		WARN(1, "Unexpected command state (%d)", state);
 		break;
 	}
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 00f25b5f297f..20476844fb20 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -889,7 +889,7 @@ static int mlx4_en_flow_replace(struct net_device *dev,
 		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
 		.exclusive = 0,
 		.allow_loopback = 1,
-		.promisc_mode = MLX4_FS_PROMISC_NONE,
+		.promisc_mode = MLX4_FS_REGULAR,
 	};
 
 	rule.port = priv->port;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 30d78f806dc3..0860130f2b17 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -127,7 +127,7 @@ static void mlx4_en_filter_work(struct work_struct *work)
 		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
 		.exclusive = 1,
 		.allow_loopback = 1,
-		.promisc_mode = MLX4_FS_PROMISC_NONE,
+		.promisc_mode = MLX4_FS_REGULAR,
 		.port = priv->port,
 		.priority = MLX4_DOMAIN_RFS,
 	};
@@ -446,7 +446,7 @@ static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
 		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
 		.exclusive = 0,
 		.allow_loopback = 1,
-		.promisc_mode = MLX4_FS_PROMISC_NONE,
+		.promisc_mode = MLX4_FS_REGULAR,
 		.priority = MLX4_DOMAIN_NIC,
 	};
 
@@ -793,7 +793,7 @@ static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
 			err = mlx4_flow_steer_promisc_add(mdev->dev,
 							  priv->port,
 							  priv->base_qpn,
-							  MLX4_FS_PROMISC_UPLINK);
+							  MLX4_FS_ALL_DEFAULT);
 			if (err)
 				en_err(priv, "Failed enabling promiscuous mode\n");
 			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
@@ -856,7 +856,7 @@ static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
 	case MLX4_STEERING_MODE_DEVICE_MANAGED:
 		err = mlx4_flow_steer_promisc_remove(mdev->dev,
 						     priv->port,
-						     MLX4_FS_PROMISC_UPLINK);
+						     MLX4_FS_ALL_DEFAULT);
 		if (err)
 			en_err(priv, "Failed disabling promiscuous mode\n");
 		priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
@@ -917,7 +917,7 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
 			err = mlx4_flow_steer_promisc_add(mdev->dev,
 							  priv->port,
 							  priv->base_qpn,
-							  MLX4_FS_PROMISC_ALL_MULTI);
+							  MLX4_FS_MC_DEFAULT);
 			break;
 
 		case MLX4_STEERING_MODE_B0:
@@ -940,7 +940,7 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
 		case MLX4_STEERING_MODE_DEVICE_MANAGED:
 			err = mlx4_flow_steer_promisc_remove(mdev->dev,
 							     priv->port,
-							     MLX4_FS_PROMISC_ALL_MULTI);
+							     MLX4_FS_MC_DEFAULT);
 			break;
 
 		case MLX4_STEERING_MODE_B0:
@@ -1598,10 +1598,10 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
 			       MLX4_EN_FLAG_MC_PROMISC);
 		mlx4_flow_steer_promisc_remove(mdev->dev,
 					       priv->port,
-					       MLX4_FS_PROMISC_UPLINK);
+					       MLX4_FS_ALL_DEFAULT);
 		mlx4_flow_steer_promisc_remove(mdev->dev,
 					       priv->port,
-					       MLX4_FS_PROMISC_ALL_MULTI);
+					       MLX4_FS_MC_DEFAULT);
 	} else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
 		priv->flags &= ~MLX4_EN_FLAG_PROMISC;
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 8e3123a1df88..6000342f9725 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -497,8 +497,8 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 			break;
 
 		case MLX4_EVENT_TYPE_SRQ_LIMIT:
-			mlx4_warn(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
-				  __func__);
+			mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
+				 __func__);
 		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
 			if (mlx4_is_master(dev)) {
 				/* forward only to slave owning the SRQ */
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 52685524708d..00b4e7be7c7e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -645,25 +645,37 @@ static int find_entry(struct mlx4_dev *dev, u8 port,
 	return err;
 }
 
+static const u8 __promisc_mode[] = {
+	[MLX4_FS_REGULAR]	= 0x0,
+	[MLX4_FS_ALL_DEFAULT]	= 0x1,
+	[MLX4_FS_MC_DEFAULT]	= 0x3,
+	[MLX4_FS_UC_SNIFFER]	= 0x4,
+	[MLX4_FS_MC_SNIFFER]	= 0x5,
+};
+
+int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
+				    enum mlx4_net_trans_promisc_mode flow_type)
+{
+	if (flow_type >= MLX4_FS_MODE_NUM || flow_type < 0) {
+		mlx4_err(dev, "Invalid flow type. type = %d\n", flow_type);
+		return -EINVAL;
+	}
+	return __promisc_mode[flow_type];
+}
+EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_mode);
+
 static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl,
 				  struct mlx4_net_trans_rule_hw_ctrl *hw)
 {
-	static const u8 __promisc_mode[] = {
-		[MLX4_FS_PROMISC_NONE]      = 0x0,
-		[MLX4_FS_PROMISC_UPLINK]    = 0x1,
-		[MLX4_FS_PROMISC_FUNCTION_PORT] = 0x2,
-		[MLX4_FS_PROMISC_ALL_MULTI] = 0x3,
-	};
-
-	u32 dw = 0;
-
-	dw  = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0;
-	dw |= ctrl->exclusive ? (1 << 2) : 0;
-	dw |= ctrl->allow_loopback ? (1 << 3) : 0;
-	dw |= __promisc_mode[ctrl->promisc_mode] << 8;
-	dw |= ctrl->priority << 16;
-
-	hw->ctrl = cpu_to_be32(dw);
+	u8 flags = 0;
+
+	flags = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0;
+	flags |= ctrl->exclusive ? (1 << 2) : 0;
+	flags |= ctrl->allow_loopback ? (1 << 3) : 0;
+
+	hw->flags = flags;
+	hw->type = __promisc_mode[ctrl->promisc_mode];
+	hw->prio = cpu_to_be16(ctrl->priority);
 	hw->port = ctrl->port;
 	hw->qpn = cpu_to_be32(ctrl->qpn);
 }
@@ -677,29 +689,51 @@ const u16 __sw_id_hw[] = {
 	[MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006
 };
 
+int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
+				  enum mlx4_net_trans_rule_id id)
+{
+	if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) {
+		mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
+		return -EINVAL;
+	}
+	return __sw_id_hw[id];
+}
+EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_id);
+
+static const int __rule_hw_sz[] = {
+	[MLX4_NET_TRANS_RULE_ID_ETH] =
+		sizeof(struct mlx4_net_trans_rule_hw_eth),
+	[MLX4_NET_TRANS_RULE_ID_IB] =
+		sizeof(struct mlx4_net_trans_rule_hw_ib),
+	[MLX4_NET_TRANS_RULE_ID_IPV6] = 0,
+	[MLX4_NET_TRANS_RULE_ID_IPV4] =
+		sizeof(struct mlx4_net_trans_rule_hw_ipv4),
+	[MLX4_NET_TRANS_RULE_ID_TCP] =
+		sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
+	[MLX4_NET_TRANS_RULE_ID_UDP] =
+		sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
+};
+
+int mlx4_hw_rule_sz(struct mlx4_dev *dev,
+	       enum mlx4_net_trans_rule_id id)
+{
+	if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) {
+		mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
+		return -EINVAL;
+	}
+
+	return __rule_hw_sz[id];
+}
+EXPORT_SYMBOL_GPL(mlx4_hw_rule_sz);
+
 static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
 			    struct _rule_hw *rule_hw)
 {
-	static const size_t __rule_hw_sz[] = {
-		[MLX4_NET_TRANS_RULE_ID_ETH] =
-			sizeof(struct mlx4_net_trans_rule_hw_eth),
-		[MLX4_NET_TRANS_RULE_ID_IB] =
-			sizeof(struct mlx4_net_trans_rule_hw_ib),
-		[MLX4_NET_TRANS_RULE_ID_IPV6] = 0,
-		[MLX4_NET_TRANS_RULE_ID_IPV4] =
-			sizeof(struct mlx4_net_trans_rule_hw_ipv4),
-		[MLX4_NET_TRANS_RULE_ID_TCP] =
-			sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
-		[MLX4_NET_TRANS_RULE_ID_UDP] =
-			sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
-	};
-	if (spec->id >= MLX4_NET_TRANS_RULE_NUM) {
-		mlx4_err(dev, "Invalid network rule id. id = %d\n", spec->id);
+	if (mlx4_hw_rule_sz(dev, spec->id) < 0)
 		return -EINVAL;
-	}
-	memset(rule_hw, 0, __rule_hw_sz[spec->id]);
+	memset(rule_hw, 0, mlx4_hw_rule_sz(dev, spec->id));
 	rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]);
-	rule_hw->size = __rule_hw_sz[spec->id] >> 2;
+	rule_hw->size = mlx4_hw_rule_sz(dev, spec->id) >> 2;
 
 	switch (spec->id) {
 	case MLX4_NET_TRANS_RULE_ID_ETH:
@@ -713,12 +747,12 @@ static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
 			rule_hw->eth.ether_type_enable = 1;
 			rule_hw->eth.ether_type = spec->eth.ether_type;
 		}
-		rule_hw->eth.vlan_id = spec->eth.vlan_id;
-		rule_hw->eth.vlan_id_msk = spec->eth.vlan_id_msk;
+		rule_hw->eth.vlan_tag = spec->eth.vlan_id;
+		rule_hw->eth.vlan_tag_msk = spec->eth.vlan_id_msk;
 		break;
 
 	case MLX4_NET_TRANS_RULE_ID_IB:
-		rule_hw->ib.qpn = spec->ib.r_qpn;
+		rule_hw->ib.l3_qpn = spec->ib.l3_qpn;
 		rule_hw->ib.qpn_mask = spec->ib.qpn_msk;
 		memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16);
 		memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16);
@@ -1153,7 +1187,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 	struct mlx4_net_trans_rule rule = {
 		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
 		.exclusive = 0,
-		.promisc_mode = MLX4_FS_PROMISC_NONE,
+		.promisc_mode = MLX4_FS_REGULAR,
 		.priority = MLX4_DOMAIN_NIC,
 	};
 
@@ -1222,11 +1256,10 @@ int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port,
 	u64 *regid_p;
 
 	switch (mode) {
-	case MLX4_FS_PROMISC_UPLINK:
-	case MLX4_FS_PROMISC_FUNCTION_PORT:
+	case MLX4_FS_ALL_DEFAULT:
 		regid_p = &dev->regid_promisc_array[port];
 		break;
-	case MLX4_FS_PROMISC_ALL_MULTI:
+	case MLX4_FS_MC_DEFAULT:
 		regid_p = &dev->regid_allmulti_array[port];
 		break;
 	default:
@@ -1253,11 +1286,10 @@ int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
 	u64 *regid_p;
 
 	switch (mode) {
-	case MLX4_FS_PROMISC_UPLINK:
-	case MLX4_FS_PROMISC_FUNCTION_PORT:
+	case MLX4_FS_ALL_DEFAULT:
 		regid_p = &dev->regid_promisc_array[port];
 		break;
-	case MLX4_FS_PROMISC_ALL_MULTI:
+	case MLX4_FS_MC_DEFAULT:
 		regid_p = &dev->regid_allmulti_array[port];
 		break;
 	default:
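Note: the three helpers exported from mcg.c above let other mlx4 consumers translate software rule descriptions into the firmware encoding. A minimal sketch of how a prospective caller might size and tag a rule buffer with them, mirroring what parse_trans_rule() does internally (the wrapper function itself is illustrative, not part of this patch):

	/* Illustrative sketch: size a firmware rule entry and fill its header. */
	static int example_build_rule_hdr(struct mlx4_dev *dev,
					  enum mlx4_net_trans_rule_id id,
					  struct _rule_hw *rule_hw)
	{
		int sz = mlx4_hw_rule_sz(dev, id);		/* bytes for this rule type */
		int hw_id = mlx4_map_sw_to_hw_steering_id(dev, id);

		if (sz < 0 || hw_id < 0)
			return -EINVAL;
		memset(rule_hw, 0, sz);
		rule_hw->id = cpu_to_be16(hw_id);
		rule_hw->size = sz >> 2;			/* firmware expects dwords */
		return sz;
	}
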
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index d738454116a0..d5fdb19771e2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -701,85 +701,6 @@ struct mlx4_steer {
 	struct list_head steer_entries[MLX4_NUM_STEERS];
 };
 
-struct mlx4_net_trans_rule_hw_ctrl {
-	__be32 ctrl;
-	u8 rsvd1;
-	u8 funcid;
-	u8 vep;
-	u8 port;
-	__be32 qpn;
-	__be32 rsvd2;
-};
-
-struct mlx4_net_trans_rule_hw_ib {
-	u8 size;
-	u8 rsvd1;
-	__be16 id;
-	u32 rsvd2;
-	__be32 qpn;
-	__be32 qpn_mask;
-	u8 dst_gid[16];
-	u8 dst_gid_msk[16];
-} __packed;
-
-struct mlx4_net_trans_rule_hw_eth {
-	u8	size;
-	u8	rsvd;
-	__be16	id;
-	u8	rsvd1[6];
-	u8	dst_mac[6];
-	u16	rsvd2;
-	u8	dst_mac_msk[6];
-	u16	rsvd3;
-	u8	src_mac[6];
-	u16	rsvd4;
-	u8	src_mac_msk[6];
-	u8	rsvd5;
-	u8	ether_type_enable;
-	__be16	ether_type;
-	__be16	vlan_id_msk;
-	__be16	vlan_id;
-} __packed;
-
-struct mlx4_net_trans_rule_hw_tcp_udp {
-	u8	size;
-	u8	rsvd;
-	__be16	id;
-	__be16	rsvd1[3];
-	__be16	dst_port;
-	__be16	rsvd2;
-	__be16	dst_port_msk;
-	__be16	rsvd3;
-	__be16	src_port;
-	__be16	rsvd4;
-	__be16	src_port_msk;
-} __packed;
-
-struct mlx4_net_trans_rule_hw_ipv4 {
-	u8	size;
-	u8	rsvd;
-	__be16	id;
-	__be32	rsvd1;
-	__be32	dst_ip;
-	__be32	dst_ip_msk;
-	__be32	src_ip;
-	__be32	src_ip_msk;
-} __packed;
-
-struct _rule_hw {
-	union {
-		struct {
-			u8 size;
-			u8 rsvd;
-			__be16 id;
-		};
-		struct mlx4_net_trans_rule_hw_eth eth;
-		struct mlx4_net_trans_rule_hw_ib ib;
-		struct mlx4_net_trans_rule_hw_ipv4 ipv4;
-		struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp;
-	};
-};
-
 enum {
 	MLX4_PCI_DEV_IS_VF		= 1 << 0,
 	MLX4_PCI_DEV_FORCE_SENSE_PORT	= 1 << 1,
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index e329fe1f11b7..79fd269e2c54 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -298,3 +298,18 @@ void mlx4_cleanup_srq_table(struct mlx4_dev *dev)
 		return;
 	mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap);
 }
+
+struct mlx4_srq *mlx4_srq_lookup(struct mlx4_dev *dev, u32 srqn)
+{
+	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+	struct mlx4_srq *srq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&srq_table->lock, flags);
+	srq = radix_tree_lookup(&srq_table->tree,
+				srqn & (dev->caps.num_srqs - 1));
+	spin_unlock_irqrestore(&srq_table->lock, flags);
+
+	return srq;
+}
+EXPORT_SYMBOL_GPL(mlx4_srq_lookup);
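Note: mlx4_srq_lookup() resolves a 24-bit SRQ number through the SRQ radix tree under the table lock, which is how the mlx4_ib CQ poll path earlier in this patch finds the SRQ behind an XRC target completion. A minimal sketch of a caller, mirroring the mlx4_ib_poll_one() hunk above (the wrapper function is illustrative, not part of this patch):

	/* Illustrative sketch: resolve the SRQ that a CQE points at. */
	static struct mlx4_srq *example_find_srq(struct mlx4_dev *dev, u32 srq_num)
	{
		struct mlx4_srq *msrq = mlx4_srq_lookup(dev, srq_num);

		if (unlikely(!msrq))
			pr_warn("completion for unknown SRQN 0x%06x\n", srq_num);
		return msrq;
	}
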
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 811f91cf5e8c..ad4a53fbdddf 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -896,11 +896,12 @@ static inline int map_hw_to_sw_id(u16 header_id)
 }
 
 enum mlx4_net_trans_promisc_mode {
-	MLX4_FS_PROMISC_NONE = 0,
-	MLX4_FS_PROMISC_UPLINK,
-	/* For future use. Not implemented yet */
-	MLX4_FS_PROMISC_FUNCTION_PORT,
-	MLX4_FS_PROMISC_ALL_MULTI,
+	MLX4_FS_REGULAR = 1,
+	MLX4_FS_ALL_DEFAULT,
+	MLX4_FS_MC_DEFAULT,
+	MLX4_FS_UC_SNIFFER,
+	MLX4_FS_MC_SNIFFER,
+	MLX4_FS_MODE_NUM, /* should be last */
 };
 
 struct mlx4_spec_eth {
@@ -929,7 +930,7 @@ struct mlx4_spec_ipv4 {
 };
 
 struct mlx4_spec_ib {
-	__be32	r_qpn;
+	__be32	l3_qpn;
 	__be32	qpn_msk;
 	u8	dst_gid[16];
 	u8	dst_gid_msk[16];
@@ -962,6 +963,92 @@ struct mlx4_net_trans_rule {
 	u32	qpn;
 };
 
+struct mlx4_net_trans_rule_hw_ctrl {
+	__be16 prio;
+	u8 type;
+	u8 flags;
+	u8 rsvd1;
+	u8 funcid;
+	u8 vep;
+	u8 port;
+	__be32 qpn;
+	__be32 rsvd2;
+};
+
+struct mlx4_net_trans_rule_hw_ib {
+	u8 size;
+	u8 rsvd1;
+	__be16 id;
+	u32 rsvd2;
+	__be32 l3_qpn;
+	__be32 qpn_mask;
+	u8 dst_gid[16];
+	u8 dst_gid_msk[16];
+} __packed;
+
+struct mlx4_net_trans_rule_hw_eth {
+	u8	size;
+	u8	rsvd;
+	__be16	id;
+	u8	rsvd1[6];
+	u8	dst_mac[6];
+	u16	rsvd2;
+	u8	dst_mac_msk[6];
+	u16	rsvd3;
+	u8	src_mac[6];
+	u16	rsvd4;
+	u8	src_mac_msk[6];
+	u8	rsvd5;
+	u8	ether_type_enable;
+	__be16	ether_type;
+	__be16	vlan_tag_msk;
+	__be16	vlan_tag;
+} __packed;
+
+struct mlx4_net_trans_rule_hw_tcp_udp {
+	u8	size;
+	u8	rsvd;
+	__be16	id;
+	__be16	rsvd1[3];
+	__be16	dst_port;
+	__be16	rsvd2;
+	__be16	dst_port_msk;
+	__be16	rsvd3;
+	__be16	src_port;
+	__be16	rsvd4;
+	__be16	src_port_msk;
+} __packed;
+
+struct mlx4_net_trans_rule_hw_ipv4 {
+	u8	size;
+	u8	rsvd;
+	__be16	id;
+	__be32	rsvd1;
+	__be32	dst_ip;
+	__be32	dst_ip_msk;
+	__be32	src_ip;
+	__be32	src_ip_msk;
+} __packed;
+
+struct _rule_hw {
+	union {
+		struct {
+			u8 size;
+			u8 rsvd;
+			__be16 id;
+		};
+		struct mlx4_net_trans_rule_hw_eth eth;
+		struct mlx4_net_trans_rule_hw_ib ib;
+		struct mlx4_net_trans_rule_hw_ipv4 ipv4;
+		struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp;
+	};
+};
+
+/* translating DMFS verbs sniffer rule to the FW API would need two reg IDs */
+struct mlx4_flow_handle {
+	u64 reg_id[2];
+};
+
 int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn,
 				enum mlx4_net_trans_promisc_mode mode);
 int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
@@ -1011,6 +1098,11 @@ void mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
 int mlx4_flow_attach(struct mlx4_dev *dev,
 		     struct mlx4_net_trans_rule *rule, u64 *reg_id);
 int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id);
+int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
+				    enum mlx4_net_trans_promisc_mode flow_type);
+int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
+				  enum mlx4_net_trans_rule_id id);
+int mlx4_hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id);
 
 void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port,
 			  int i, int val);
diff --git a/include/linux/mlx4/srq.h b/include/linux/mlx4/srq.h
index 799a0697a383..192e0f7784f2 100644
--- a/include/linux/mlx4/srq.h
+++ b/include/linux/mlx4/srq.h
@@ -39,4 +39,6 @@ struct mlx4_wqe_srq_next_seg {
 	u32			reserved2[3];
 };
 
+struct mlx4_srq *mlx4_srq_lookup(struct mlx4_dev *dev, u32 srqn);
+
 #endif /* MLX4_SRQ_H */