author     David S. Miller <davem@davemloft.net>  2013-11-07 19:22:54 -0500
committer  David S. Miller <davem@davemloft.net>  2013-11-07 19:22:54 -0500
commit     e21dd863acec8d3bc5166fb2a0c680a9982b37db (patch)
tree       8d328edce3828c60e80989267eb7594d2d7e24d6
parent     3cdcf1334cd76bbcabd0f273ee9a13e4cc7816bc (diff)
parent     163561a4e2f8af44e96453bc10c7a4f9bcc736e1 (diff)
Merge branch 'mlx4'
Amir Vadai says:

====================
net/mlx4: Mellanox driver update 07-11-2013

This patchset contains some enhancements and bug fixes for the
mlx4_* drivers.

Patchset was applied and tested against commit:
"9bb8ca8 virtio-net: switch to use XPS to choose txq"
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
 drivers/infiniband/hw/mlx4/main.c                     |   6
 drivers/net/ethernet/mellanox/mlx4/cmd.c              |  36
 drivers/net/ethernet/mellanox/mlx4/cq.c               |   6
 drivers/net/ethernet/mellanox/mlx4/en_cq.c            |  45
 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c       |  36
 drivers/net/ethernet/mellanox/mlx4/en_netdev.c        | 144
 drivers/net/ethernet/mellanox/mlx4/en_port.c          |  17
 drivers/net/ethernet/mellanox/mlx4/en_rx.c            |  68
 drivers/net/ethernet/mellanox/mlx4/en_selftest.c      |   2
 drivers/net/ethernet/mellanox/mlx4/en_tx.c            |  54
 drivers/net/ethernet/mellanox/mlx4/eq.c               |   1
 drivers/net/ethernet/mellanox/mlx4/fw.c               |   7
 drivers/net/ethernet/mellanox/mlx4/icm.c              |  42
 drivers/net/ethernet/mellanox/mlx4/main.c             |   1
 drivers/net/ethernet/mellanox/mlx4/mcg.c              |   2
 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h          |  33
 drivers/net/ethernet/mellanox/mlx4/mr.c               |   5
 drivers/net/ethernet/mellanox/mlx4/pd.c               |  11
 drivers/net/ethernet/mellanox/mlx4/port.c             |  11
 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c |  79
 drivers/net/ethernet/mellanox/mlx4/srq.c              |   2
 include/linux/mlx4/device.h                           |   3
22 files changed, 383 insertions, 228 deletions
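
Most of the churn in the body of this merge comes from two related changes. First, the per-ring data structures (mlx4_en_tx_ring, mlx4_en_rx_ring, mlx4_en_cq) stop being embedded in arrays inside mlx4_en_priv and become individually allocated objects, which is why '.' field accesses turn into '->' dereferences throughout the Ethernet driver. Second, each of those objects is allocated preferably on the NUMA node that will service the ring, falling back to any node rather than failing. A minimal standalone sketch of that fallback idiom, assuming kernel context (illustrative only, not code from this series):

#include <linux/slab.h>

static void *demo_alloc_pref_node(size_t size, int node)
{
        /* try the NUMA node closest to the ring's CPU first ... */
        void *p = kzalloc_node(size, GFP_KERNEL, node);

        /* ... but an off-node allocation beats failing outright */
        if (!p)
                p = kzalloc(size, GFP_KERNEL);
        return p;
}
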
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 7567437dbd34..6a0a0d29660d 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -526,7 +526,6 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
         if (IS_ERR(mailbox))
                 return 0;
 
-        memset(mailbox->buf, 0, 256);
         memcpy(mailbox->buf, props->node_desc, 64);
         mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
                  MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
@@ -547,8 +546,6 @@ static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
         if (IS_ERR(mailbox))
                 return PTR_ERR(mailbox);
 
-        memset(mailbox->buf, 0, 256);
-
         if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
                 *(u8 *) mailbox->buf = !!reset_qkey_viols << 6;
                 ((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
@@ -879,8 +876,6 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
         struct mlx4_ib_dev *mdev = to_mdev(qp->device);
         struct mlx4_cmd_mailbox *mailbox;
         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
-        size_t rule_size = sizeof(struct mlx4_net_trans_rule_hw_ctrl) +
-                           (sizeof(struct _rule_hw) * flow_attr->num_of_specs);
 
         static const u16 __mlx4_domain[] = {
                 [IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
@@ -905,7 +900,6 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
         mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
         if (IS_ERR(mailbox))
                 return PTR_ERR(mailbox);
-        memset(mailbox->buf, 0, rule_size);
         ctrl = mailbox->buf;
 
         ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 65d41b76fa2c..1e9970d2f0f3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1539,11 +1539,6 @@ out:
         return ret;
 }
 
-static int calculate_transition(u16 oper_vlan, u16 admin_vlan)
-{
-        return (2 * (oper_vlan == MLX4_VGT) + (admin_vlan == MLX4_VGT));
-}
-
 static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
                                                    int slave, int port)
 {
@@ -1553,7 +1548,6 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
         struct mlx4_dev *dev = &(priv->dev);
         int err;
         int admin_vlan_ix = NO_INDX;
-        enum mlx4_vlan_transition vlan_trans;
 
         vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
         vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
@@ -1563,12 +1557,8 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
             vp_oper->state.link_state == vp_admin->link_state)
                 return 0;
 
-        vlan_trans = calculate_transition(vp_oper->state.default_vlan,
-                                          vp_admin->default_vlan);
-
         if (!(priv->mfunc.master.slave_state[slave].active &&
-              dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP &&
-              vlan_trans == MLX4_VLAN_TRANSITION_VST_VST)) {
+              dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)) {
                 /* even if the UPDATE_QP command isn't supported, we still want
                  * to set this VF link according to the admin directive
                  */
@@ -1586,15 +1576,19 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
                 return -ENOMEM;
 
         if (vp_oper->state.default_vlan != vp_admin->default_vlan) {
-                err = __mlx4_register_vlan(&priv->dev, port,
-                                           vp_admin->default_vlan,
-                                           &admin_vlan_ix);
-                if (err) {
-                        kfree(work);
-                        mlx4_warn((&priv->dev),
-                                  "No vlan resources slave %d, port %d\n",
-                                  slave, port);
-                        return err;
+                if (MLX4_VGT != vp_admin->default_vlan) {
+                        err = __mlx4_register_vlan(&priv->dev, port,
+                                                   vp_admin->default_vlan,
+                                                   &admin_vlan_ix);
+                        if (err) {
+                                kfree(work);
+                                mlx4_warn((&priv->dev),
+                                          "No vlan resources slave %d, port %d\n",
+                                          slave, port);
+                                return err;
+                        }
+                } else {
+                        admin_vlan_ix = NO_INDX;
                 }
                 work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
                 mlx4_dbg((&(priv->dev)),
@@ -2199,6 +2193,8 @@ struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
                 return ERR_PTR(-ENOMEM);
         }
 
+        memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
+
         return mailbox;
 }
 EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
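
The last hunk above is what makes the memset() removals across this series safe: mlx4_alloc_cmd_mailbox() now returns a buffer that is already zeroed over the full MLX4_MAILBOX_SIZE, so individual call sites no longer need their own (often partial) memset. A hedged sketch of what a typical caller reduces to; demo_mod_stat_cfg is a hypothetical function, not part of the driver:

static int demo_mod_stat_cfg(struct mlx4_dev *dev)
{
        struct mlx4_cmd_mailbox *mailbox;
        int err;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);

        /* no memset(mailbox->buf, ...) needed: the buffer arrives zeroed */
        ((u8 *)mailbox->buf)[0] = 1;    /* set only the fields that matter */

        err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
                       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
}
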
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 004e4231af67..22fcbe78311c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -128,8 +128,6 @@ int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
                 return PTR_ERR(mailbox);
 
         cq_context = mailbox->buf;
-        memset(cq_context, 0, sizeof *cq_context);
-
         cq_context->cq_max_count = cpu_to_be16(count);
         cq_context->cq_period = cpu_to_be16(period);
 
@@ -153,8 +151,6 @@ int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
                 return PTR_ERR(mailbox);
 
         cq_context = mailbox->buf;
-        memset(cq_context, 0, sizeof *cq_context);
-
         cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24);
         cq_context->log_page_size = mtt->page_shift - 12;
         mtt_addr = mlx4_mtt_addr(dev, mtt);
@@ -274,8 +270,6 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
         }
 
         cq_context = mailbox->buf;
-        memset(cq_context, 0, sizeof *cq_context);
-
         cq_context->flags = cpu_to_be32(!!collapsed << 18);
         if (timestamp_en)
                 cq_context->flags |= cpu_to_be32(1 << 19);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 3e2d5047cdb3..3a098cc4d349 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -44,12 +44,23 @@ static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event)
 
 
 int mlx4_en_create_cq(struct mlx4_en_priv *priv,
-                      struct mlx4_en_cq *cq,
-                      int entries, int ring, enum cq_type mode)
+                      struct mlx4_en_cq **pcq,
+                      int entries, int ring, enum cq_type mode,
+                      int node)
 {
         struct mlx4_en_dev *mdev = priv->mdev;
+        struct mlx4_en_cq *cq;
         int err;
 
+        cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, node);
+        if (!cq) {
+                cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+                if (!cq) {
+                        en_err(priv, "Failed to allocate CQ structure\n");
+                        return -ENOMEM;
+                }
+        }
+
         cq->size = entries;
         cq->buf_size = cq->size * mdev->dev->caps.cqe_size;
 
@@ -57,17 +68,30 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
         cq->is_tx = mode;
         spin_lock_init(&cq->lock);
 
+        /* Allocate HW buffers on provided NUMA node.
+         * dev->numa_node is used in mtt range allocation flow.
+         */
+        set_dev_node(&mdev->dev->pdev->dev, node);
         err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
                                  cq->buf_size, 2 * PAGE_SIZE);
+        set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
         if (err)
-                return err;
+                goto err_cq;
 
         err = mlx4_en_map_buffer(&cq->wqres.buf);
         if (err)
-                mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
-        else
-                cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf;
+                goto err_res;
 
+        cq->buf = (struct mlx4_cqe *)cq->wqres.buf.direct.buf;
+        *pcq = cq;
+
+        return 0;
+
+err_res:
+        mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
+err_cq:
+        kfree(cq);
+        *pcq = NULL;
         return err;
 }
 
@@ -117,12 +141,12 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
                 struct mlx4_en_cq *rx_cq;
 
                 cq_idx = cq_idx % priv->rx_ring_num;
-                rx_cq = &priv->rx_cq[cq_idx];
+                rx_cq = priv->rx_cq[cq_idx];
                 cq->vector = rx_cq->vector;
         }
 
         if (!cq->is_tx)
-                cq->size = priv->rx_ring[cq->ring].actual_size;
+                cq->size = priv->rx_ring[cq->ring]->actual_size;
 
         if ((cq->is_tx && priv->hwtstamp_config.tx_type) ||
             (!cq->is_tx && priv->hwtstamp_config.rx_filter))
@@ -146,9 +170,10 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
         return 0;
 }
 
-void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
 {
         struct mlx4_en_dev *mdev = priv->mdev;
+        struct mlx4_en_cq *cq = *pcq;
 
         mlx4_en_unmap_buffer(&cq->wqres.buf);
         mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
@@ -157,6 +182,8 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
         cq->vector = 0;
         cq->buf_size = 0;
         cq->buf = NULL;
+        kfree(cq);
+        *pcq = NULL;
 }
 
 void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
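
Two idioms in the en_cq.c conversion recur in the en_rx.c and en_tx.c hunks below. The CQ structure itself is placed with the kzalloc_node()-then-kzalloc() fallback, and the HW queue pages are steered by temporarily retargeting the PCI device's NUMA node around mlx4_alloc_hwq_res() before restoring it. A hedged sketch of that bracketing idiom in isolation; alloc_hw_queue stands in for mlx4_alloc_hwq_res(), and the assumption the driver relies on is that these setup paths are serialized, so nothing else allocates against the device in between:

#include <linux/device.h>

static int demo_alloc_on_node(struct device *dev, int node, int home_node,
                              int (*alloc_hw_queue)(struct device *dev))
{
        int err;

        set_dev_node(dev, node);        /* dev-based allocations now prefer 'node' */
        err = alloc_hw_queue(dev);
        set_dev_node(dev, home_node);   /* restore the device's home node */
        return err;
}
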
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 0c750985f47e..0596f9f85a0e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -51,10 +51,10 @@ static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
         int err = 0;
 
         for (i = 0; i < priv->tx_ring_num; i++) {
-                priv->tx_cq[i].moder_cnt = priv->tx_frames;
-                priv->tx_cq[i].moder_time = priv->tx_usecs;
+                priv->tx_cq[i]->moder_cnt = priv->tx_frames;
+                priv->tx_cq[i]->moder_time = priv->tx_usecs;
                 if (priv->port_up) {
-                        err = mlx4_en_set_cq_moder(priv, &priv->tx_cq[i]);
+                        err = mlx4_en_set_cq_moder(priv, priv->tx_cq[i]);
                         if (err)
                                 return err;
                 }
@@ -64,11 +64,11 @@ static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
                 return 0;
 
         for (i = 0; i < priv->rx_ring_num; i++) {
-                priv->rx_cq[i].moder_cnt = priv->rx_frames;
-                priv->rx_cq[i].moder_time = priv->rx_usecs;
+                priv->rx_cq[i]->moder_cnt = priv->rx_frames;
+                priv->rx_cq[i]->moder_time = priv->rx_usecs;
                 priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
                 if (priv->port_up) {
-                        err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
+                        err = mlx4_en_set_cq_moder(priv, priv->rx_cq[i]);
                         if (err)
                                 return err;
                 }
@@ -274,16 +274,16 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
                 }
         }
         for (i = 0; i < priv->tx_ring_num; i++) {
-                data[index++] = priv->tx_ring[i].packets;
-                data[index++] = priv->tx_ring[i].bytes;
+                data[index++] = priv->tx_ring[i]->packets;
+                data[index++] = priv->tx_ring[i]->bytes;
         }
         for (i = 0; i < priv->rx_ring_num; i++) {
-                data[index++] = priv->rx_ring[i].packets;
-                data[index++] = priv->rx_ring[i].bytes;
+                data[index++] = priv->rx_ring[i]->packets;
+                data[index++] = priv->rx_ring[i]->bytes;
 #ifdef CONFIG_NET_RX_BUSY_POLL
-                data[index++] = priv->rx_ring[i].yields;
-                data[index++] = priv->rx_ring[i].misses;
-                data[index++] = priv->rx_ring[i].cleaned;
+                data[index++] = priv->rx_ring[i]->yields;
+                data[index++] = priv->rx_ring[i]->misses;
+                data[index++] = priv->rx_ring[i]->cleaned;
 #endif
         }
         spin_unlock_bh(&priv->stats_lock);
@@ -510,9 +510,9 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
         tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
         tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
 
-        if (rx_size == (priv->port_up ? priv->rx_ring[0].actual_size :
-                                        priv->rx_ring[0].size) &&
-            tx_size == priv->tx_ring[0].size)
+        if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size :
+                                        priv->rx_ring[0]->size) &&
+            tx_size == priv->tx_ring[0]->size)
                 return 0;
 
         mutex_lock(&mdev->state_lock);
@@ -553,8 +553,8 @@ static void mlx4_en_get_ringparam(struct net_device *dev,
         param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
         param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
         param->rx_pending = priv->port_up ?
-                priv->rx_ring[0].actual_size : priv->rx_ring[0].size;
-        param->tx_pending = priv->tx_ring[0].size;
+                priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size;
+        param->tx_pending = priv->tx_ring[0]->size;
 }
 
 static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index b5554121aca4..e72d8a112a6b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -75,7 +75,7 @@ static int mlx4_en_low_latency_recv(struct napi_struct *napi)
         struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
         struct net_device *dev = cq->dev;
         struct mlx4_en_priv *priv = netdev_priv(dev);
-        struct mlx4_en_rx_ring *rx_ring = &priv->rx_ring[cq->ring];
+        struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
         int done;
 
         if (!priv->port_up)
@@ -102,6 +102,7 @@ struct mlx4_en_filter {
         struct list_head next;
         struct work_struct work;
 
+        u8 ip_proto;
         __be32 src_ip;
         __be32 dst_ip;
         __be16 src_port;
@@ -120,14 +121,26 @@ struct mlx4_en_filter {
 
 static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);
 
+static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
+{
+        switch (ip_proto) {
+        case IPPROTO_UDP:
+                return MLX4_NET_TRANS_RULE_ID_UDP;
+        case IPPROTO_TCP:
+                return MLX4_NET_TRANS_RULE_ID_TCP;
+        default:
+                return -EPROTONOSUPPORT;
+        }
+};
+
 static void mlx4_en_filter_work(struct work_struct *work)
 {
         struct mlx4_en_filter *filter = container_of(work,
                                                      struct mlx4_en_filter,
                                                      work);
         struct mlx4_en_priv *priv = filter->priv;
-        struct mlx4_spec_list spec_tcp = {
-                .id = MLX4_NET_TRANS_RULE_ID_TCP,
+        struct mlx4_spec_list spec_tcp_udp = {
+                .id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
                 {
                         .tcp_udp = {
                                 .dst_port = filter->dst_port,
@@ -163,9 +176,14 @@ static void mlx4_en_filter_work(struct work_struct *work)
         int rc;
         __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
 
+        if (spec_tcp_udp.id < 0) {
+                en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
+                        filter->ip_proto);
+                goto ignore;
+        }
         list_add_tail(&spec_eth.list, &rule.list);
         list_add_tail(&spec_ip.list, &rule.list);
-        list_add_tail(&spec_tcp.list, &rule.list);
+        list_add_tail(&spec_tcp_udp.list, &rule.list);
 
         rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
         memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
@@ -183,6 +201,7 @@ static void mlx4_en_filter_work(struct work_struct *work)
         if (rc)
                 en_err(priv, "Error attaching flow. err = %d\n", rc);
 
+ignore:
         mlx4_en_filter_rfs_expire(priv);
 
         filter->activated = 1;
@@ -206,8 +225,8 @@ filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
 
 static struct mlx4_en_filter *
 mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
-                     __be32 dst_ip, __be16 src_port, __be16 dst_port,
-                     u32 flow_id)
+                     __be32 dst_ip, u8 ip_proto, __be16 src_port,
+                     __be16 dst_port, u32 flow_id)
 {
         struct mlx4_en_filter *filter = NULL;
 
@@ -221,6 +240,7 @@ mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
 
         filter->src_ip = src_ip;
         filter->dst_ip = dst_ip;
+        filter->ip_proto = ip_proto;
         filter->src_port = src_port;
         filter->dst_port = dst_port;
 
@@ -252,7 +272,7 @@ static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
 
 static inline struct mlx4_en_filter *
 mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
-                    __be16 src_port, __be16 dst_port)
+                    u8 ip_proto, __be16 src_port, __be16 dst_port)
 {
         struct mlx4_en_filter *filter;
         struct mlx4_en_filter *ret = NULL;
@@ -263,6 +283,7 @@ mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
                             filter_chain) {
                 if (filter->src_ip == src_ip &&
                     filter->dst_ip == dst_ip &&
+                    filter->ip_proto == ip_proto &&
                     filter->src_port == src_port &&
                     filter->dst_port == dst_port) {
                         ret = filter;
@@ -281,6 +302,7 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
         struct mlx4_en_filter *filter;
         const struct iphdr *ip;
         const __be16 *ports;
+        u8 ip_proto;
         __be32 src_ip;
         __be32 dst_ip;
         __be16 src_port;
@@ -295,18 +317,19 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
         if (ip_is_fragment(ip))
                 return -EPROTONOSUPPORT;
 
+        if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
+                return -EPROTONOSUPPORT;
         ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
 
+        ip_proto = ip->protocol;
         src_ip = ip->saddr;
         dst_ip = ip->daddr;
         src_port = ports[0];
         dst_port = ports[1];
 
-        if (ip->protocol != IPPROTO_TCP)
-                return -EPROTONOSUPPORT;
-
         spin_lock_bh(&priv->filters_lock);
-        filter = mlx4_en_filter_find(priv, src_ip, dst_ip, src_port, dst_port);
+        filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
+                                     src_port, dst_port);
         if (filter) {
                 if (filter->rxq_index == rxq_index)
                         goto out;
@@ -314,7 +337,7 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
                 filter->rxq_index = rxq_index;
         } else {
                 filter = mlx4_en_filter_alloc(priv, rxq_index,
-                                              src_ip, dst_ip,
+                                              src_ip, dst_ip, ip_proto,
                                               src_port, dst_port, flow_id);
                 if (!filter) {
                         ret = -ENOMEM;
@@ -332,8 +355,7 @@ err:
         return ret;
 }
 
-void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
-                             struct mlx4_en_rx_ring *rx_ring)
+void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
 {
         struct mlx4_en_filter *filter, *tmp;
         LIST_HEAD(del_list);
@@ -1219,7 +1241,7 @@ static void mlx4_en_netpoll(struct net_device *dev)
         int i;
 
         for (i = 0; i < priv->rx_ring_num; i++) {
-                cq = &priv->rx_cq[i];
+                cq = priv->rx_cq[i];
                 spin_lock_irqsave(&cq->lock, flags);
                 napi_synchronize(&cq->napi);
                 mlx4_en_process_rx_cq(dev, cq, 0);
@@ -1241,8 +1263,8 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
                         continue;
                 en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
-                        i, priv->tx_ring[i].qpn, priv->tx_ring[i].cqn,
-                        priv->tx_ring[i].cons, priv->tx_ring[i].prod);
+                        i, priv->tx_ring[i]->qpn, priv->tx_ring[i]->cqn,
+                        priv->tx_ring[i]->cons, priv->tx_ring[i]->prod);
         }
 
         priv->port_stats.tx_timeout++;
@@ -1282,7 +1304,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
 
         /* Setup cq moderation params */
         for (i = 0; i < priv->rx_ring_num; i++) {
-                cq = &priv->rx_cq[i];
+                cq = priv->rx_cq[i];
                 cq->moder_cnt = priv->rx_frames;
                 cq->moder_time = priv->rx_usecs;
                 priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
@@ -1291,7 +1313,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
         }
 
         for (i = 0; i < priv->tx_ring_num; i++) {
-                cq = &priv->tx_cq[i];
+                cq = priv->tx_cq[i];
                 cq->moder_cnt = priv->tx_frames;
                 cq->moder_time = priv->tx_usecs;
         }
@@ -1325,8 +1347,8 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 
         for (ring = 0; ring < priv->rx_ring_num; ring++) {
                 spin_lock_bh(&priv->stats_lock);
-                rx_packets = priv->rx_ring[ring].packets;
-                rx_bytes = priv->rx_ring[ring].bytes;
+                rx_packets = priv->rx_ring[ring]->packets;
+                rx_bytes = priv->rx_ring[ring]->bytes;
                 spin_unlock_bh(&priv->stats_lock);
 
                 rx_pkt_diff = ((unsigned long) (rx_packets -
@@ -1355,7 +1377,7 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 
                 if (moder_time != priv->last_moder_time[ring]) {
                         priv->last_moder_time[ring] = moder_time;
-                        cq = &priv->rx_cq[ring];
+                        cq = priv->rx_cq[ring];
                         cq->moder_time = moder_time;
                         cq->moder_cnt = priv->rx_frames;
                         err = mlx4_en_set_cq_moder(priv, cq);
@@ -1478,7 +1500,7 @@ int mlx4_en_start_port(struct net_device *dev)
                 return err;
         }
         for (i = 0; i < priv->rx_ring_num; i++) {
-                cq = &priv->rx_cq[i];
+                cq = priv->rx_cq[i];
 
                 mlx4_en_cq_init_lock(cq);
 
@@ -1496,7 +1518,7 @@ int mlx4_en_start_port(struct net_device *dev)
                         goto cq_err;
                 }
                 mlx4_en_arm_cq(priv, cq);
-                priv->rx_ring[i].cqn = cq->mcq.cqn;
+                priv->rx_ring[i]->cqn = cq->mcq.cqn;
                 ++rx_index;
         }
 
@@ -1522,7 +1544,7 @@ int mlx4_en_start_port(struct net_device *dev)
         /* Configure tx cq's and rings */
         for (i = 0; i < priv->tx_ring_num; i++) {
                 /* Configure cq */
-                cq = &priv->tx_cq[i];
+                cq = priv->tx_cq[i];
                 err = mlx4_en_activate_cq(priv, cq, i);
                 if (err) {
                         en_err(priv, "Failed allocating Tx CQ\n");
@@ -1538,7 +1560,7 @@ int mlx4_en_start_port(struct net_device *dev)
                 cq->buf->wqe_index = cpu_to_be16(0xffff);
 
                 /* Configure ring */
-                tx_ring = &priv->tx_ring[i];
+                tx_ring = priv->tx_ring[i];
                 err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
                                                i / priv->num_tx_rings_p_up);
                 if (err) {
@@ -1608,8 +1630,8 @@ int mlx4_en_start_port(struct net_device *dev)
 
 tx_err:
         while (tx_index--) {
-                mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
-                mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
+                mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
+                mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
         }
         mlx4_en_destroy_drop_qp(priv);
 rss_err:
@@ -1618,9 +1640,9 @@ mac_err:
         mlx4_en_put_qp(priv);
 cq_err:
         while (rx_index--)
-                mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
+                mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
         for (i = 0; i < priv->rx_ring_num; i++)
-                mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
+                mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
 
         return err; /* need to close devices */
 }
@@ -1716,13 +1738,13 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
 
         /* Free TX Rings */
         for (i = 0; i < priv->tx_ring_num; i++) {
-                mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
-                mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
+                mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
+                mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
         }
         msleep(10);
 
         for (i = 0; i < priv->tx_ring_num; i++)
-                mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);
+                mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);
 
         /* Free RSS qps */
         mlx4_en_release_rss_steer(priv);
@@ -1734,7 +1756,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
 
         /* Free RX Rings */
         for (i = 0; i < priv->rx_ring_num; i++) {
-                struct mlx4_en_cq *cq = &priv->rx_cq[i];
+                struct mlx4_en_cq *cq = priv->rx_cq[i];
 
                 local_bh_disable();
                 while (!mlx4_en_cq_lock_napi(cq)) {
@@ -1745,7 +1767,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
 
                 while (test_bit(NAPI_STATE_SCHED, &cq->napi.state))
                         msleep(1);
-                mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
+                mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
                 mlx4_en_deactivate_cq(priv, cq);
         }
 }
@@ -1783,15 +1805,15 @@ static void mlx4_en_clear_stats(struct net_device *dev)
         memset(&priv->port_stats, 0, sizeof(priv->port_stats));
 
         for (i = 0; i < priv->tx_ring_num; i++) {
-                priv->tx_ring[i].bytes = 0;
-                priv->tx_ring[i].packets = 0;
-                priv->tx_ring[i].tx_csum = 0;
+                priv->tx_ring[i]->bytes = 0;
+                priv->tx_ring[i]->packets = 0;
+                priv->tx_ring[i]->tx_csum = 0;
         }
         for (i = 0; i < priv->rx_ring_num; i++) {
-                priv->rx_ring[i].bytes = 0;
-                priv->rx_ring[i].packets = 0;
-                priv->rx_ring[i].csum_ok = 0;
-                priv->rx_ring[i].csum_none = 0;
+                priv->rx_ring[i]->bytes = 0;
+                priv->rx_ring[i]->packets = 0;
+                priv->rx_ring[i]->csum_ok = 0;
+                priv->rx_ring[i]->csum_none = 0;
         }
 }
 
@@ -1848,17 +1870,17 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 #endif
 
         for (i = 0; i < priv->tx_ring_num; i++) {
-                if (priv->tx_ring[i].tx_info)
+                if (priv->tx_ring && priv->tx_ring[i])
                         mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
-                if (priv->tx_cq[i].buf)
+                if (priv->tx_cq && priv->tx_cq[i])
                         mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
         }
 
         for (i = 0; i < priv->rx_ring_num; i++) {
-                if (priv->rx_ring[i].rx_info)
+                if (priv->rx_ring[i])
                         mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
                                 priv->prof->rx_ring_size, priv->stride);
-                if (priv->rx_cq[i].buf)
+                if (priv->rx_cq[i])
                         mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
         }
 
@@ -1873,6 +1895,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
         struct mlx4_en_port_profile *prof = priv->prof;
         int i;
         int err;
+        int node;
 
         err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn);
         if (err) {
@@ -1882,23 +1905,26 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 
         /* Create tx Rings */
         for (i = 0; i < priv->tx_ring_num; i++) {
+                node = cpu_to_node(i % num_online_cpus());
                 if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
-                                      prof->tx_ring_size, i, TX))
+                                      prof->tx_ring_size, i, TX, node))
                         goto err;
 
                 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
-                                           prof->tx_ring_size, TXBB_SIZE))
+                                           prof->tx_ring_size, TXBB_SIZE, node))
                         goto err;
         }
 
         /* Create rx Rings */
         for (i = 0; i < priv->rx_ring_num; i++) {
+                node = cpu_to_node(i % num_online_cpus());
                 if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
-                                      prof->rx_ring_size, i, RX))
+                                      prof->rx_ring_size, i, RX, node))
                         goto err;
 
                 if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
-                                           prof->rx_ring_size, priv->stride))
+                                           prof->rx_ring_size, priv->stride,
+                                           node))
                         goto err;
         }
 
@@ -1914,6 +1940,20 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 
 err:
         en_err(priv, "Failed to allocate NIC resources\n");
+        for (i = 0; i < priv->rx_ring_num; i++) {
+                if (priv->rx_ring[i])
+                        mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
+                                                prof->rx_ring_size,
+                                                priv->stride);
+                if (priv->rx_cq[i])
+                        mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
+        }
+        for (i = 0; i < priv->tx_ring_num; i++) {
+                if (priv->tx_ring[i])
+                        mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
+                if (priv->tx_cq[i])
+                        mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
+        }
         return -ENOMEM;
 }
 
@@ -2207,13 +2247,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
         priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
         priv->tx_ring_num = prof->tx_ring_num;
 
-        priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring) * MAX_TX_RINGS,
+        priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
                                 GFP_KERNEL);
         if (!priv->tx_ring) {
                 err = -ENOMEM;
                 goto out;
         }
-        priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * MAX_TX_RINGS,
+        priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
                               GFP_KERNEL);
         if (!priv->tx_cq) {
                 err = -ENOMEM;
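
One detail in the final en_netdev.c hunk is easy to misread: because priv->tx_ring and priv->tx_cq now hold pointers rather than embedded structures, the kzalloc() sizes must switch to sizeof(pointer); keeping the old sizeof(struct ...) would merely waste memory, while forgetting the conversion elsewhere would under-allocate. A minimal sketch of the shape of the conversion, using hypothetical demo_* types rather than the driver's own:

#include <linux/slab.h>
#include <linux/types.h>

struct demo_ring {
        u32 size;
};

struct demo_priv {
        struct demo_ring **tx_ring;     /* was: struct demo_ring *tx_ring */
};

static int demo_alloc_ring_array(struct demo_priv *priv, int nrings)
{
        /* note sizeof(struct demo_ring *), matching the hunk above */
        priv->tx_ring = kzalloc(sizeof(struct demo_ring *) * nrings,
                                GFP_KERNEL);
        return priv->tx_ring ? 0 : -ENOMEM;
}
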
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 331791467a22..dae1a1f4ae55 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -56,7 +56,6 @@ int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv)
                 return PTR_ERR(mailbox);
 
         filter = mailbox->buf;
-        memset(filter, 0, sizeof(*filter));
         for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) {
                 entry = 0;
                 for (j = 0; j < 32; j++)
@@ -81,7 +80,6 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
         mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
         if (IS_ERR(mailbox))
                 return PTR_ERR(mailbox);
-        memset(mailbox->buf, 0, sizeof(*qport_context));
         err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
                            MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
                            MLX4_CMD_WRAPPED);
@@ -127,7 +125,6 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
         mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
         if (IS_ERR(mailbox))
                 return PTR_ERR(mailbox);
-        memset(mailbox->buf, 0, sizeof(*mlx4_en_stats));
         err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
                            MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
                            MLX4_CMD_WRAPPED);
@@ -143,18 +140,18 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
         priv->port_stats.rx_chksum_good = 0;
         priv->port_stats.rx_chksum_none = 0;
         for (i = 0; i < priv->rx_ring_num; i++) {
-                stats->rx_packets += priv->rx_ring[i].packets;
-                stats->rx_bytes += priv->rx_ring[i].bytes;
-                priv->port_stats.rx_chksum_good += priv->rx_ring[i].csum_ok;
-                priv->port_stats.rx_chksum_none += priv->rx_ring[i].csum_none;
+                stats->rx_packets += priv->rx_ring[i]->packets;
+                stats->rx_bytes += priv->rx_ring[i]->bytes;
+                priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok;
+                priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none;
         }
         stats->tx_packets = 0;
         stats->tx_bytes = 0;
         priv->port_stats.tx_chksum_offload = 0;
         for (i = 0; i < priv->tx_ring_num; i++) {
-                stats->tx_packets += priv->tx_ring[i].packets;
-                stats->tx_bytes += priv->tx_ring[i].bytes;
-                priv->port_stats.tx_chksum_offload += priv->tx_ring[i].tx_csum;
+                stats->tx_packets += priv->tx_ring[i]->packets;
+                stats->tx_bytes += priv->tx_ring[i]->bytes;
+                priv->port_stats.tx_chksum_offload += priv->tx_ring[i]->tx_csum;
         }
 
         stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index afe2efa69c86..07a1d0fbae47 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -264,7 +264,7 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
 
         for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
                 for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
-                        ring = &priv->rx_ring[ring_ind];
+                        ring = priv->rx_ring[ring_ind];
 
                         if (mlx4_en_prepare_rx_desc(priv, ring,
                                                     ring->actual_size,
@@ -289,7 +289,7 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
 
 reduce_rings:
         for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
-                ring = &priv->rx_ring[ring_ind];
+                ring = priv->rx_ring[ring_ind];
                 while (ring->actual_size > new_size) {
                         ring->actual_size--;
                         ring->prod--;
@@ -319,12 +319,23 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
 }
 
 int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
-                           struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
+                           struct mlx4_en_rx_ring **pring,
+                           u32 size, u16 stride, int node)
 {
         struct mlx4_en_dev *mdev = priv->mdev;
+        struct mlx4_en_rx_ring *ring;
         int err = -ENOMEM;
         int tmp;
 
+        ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
+        if (!ring) {
+                ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+                if (!ring) {
+                        en_err(priv, "Failed to allocate RX ring structure\n");
+                        return -ENOMEM;
+                }
+        }
+
         ring->prod = 0;
         ring->cons = 0;
         ring->size = size;
@@ -335,17 +346,25 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 
         tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
                                         sizeof(struct mlx4_en_rx_alloc));
-        ring->rx_info = vmalloc(tmp);
-        if (!ring->rx_info)
-                return -ENOMEM;
+        ring->rx_info = vmalloc_node(tmp, node);
+        if (!ring->rx_info) {
+                ring->rx_info = vmalloc(tmp);
+                if (!ring->rx_info) {
+                        err = -ENOMEM;
+                        goto err_ring;
+                }
+        }
 
         en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
                ring->rx_info, tmp);
 
+        /* Allocate HW buffers on provided NUMA node */
+        set_dev_node(&mdev->dev->pdev->dev, node);
         err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
                                  ring->buf_size, 2 * PAGE_SIZE);
+        set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
         if (err)
-                goto err_ring;
+                goto err_info;
 
         err = mlx4_en_map_buffer(&ring->wqres.buf);
         if (err) {
@@ -356,13 +375,18 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 
         ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;
 
+        *pring = ring;
         return 0;
 
 err_hwq:
         mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
-err_ring:
+err_info:
         vfree(ring->rx_info);
         ring->rx_info = NULL;
+err_ring:
+        kfree(ring);
+        *pring = NULL;
+
         return err;
 }
 
@@ -376,12 +400,12 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
                                         DS_SIZE * priv->num_frags);
 
         for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
-                ring = &priv->rx_ring[ring_ind];
+                ring = priv->rx_ring[ring_ind];
 
                 ring->prod = 0;
                 ring->cons = 0;
                 ring->actual_size = 0;
-                ring->cqn = priv->rx_cq[ring_ind].mcq.cqn;
+                ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;
 
                 ring->stride = stride;
                 if (ring->stride <= TXBB_SIZE)
@@ -412,7 +436,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
                 goto err_buffers;
 
         for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
-                ring = &priv->rx_ring[ring_ind];
+                ring = priv->rx_ring[ring_ind];
 
                 ring->size_mask = ring->actual_size - 1;
                 mlx4_en_update_rx_prod_db(ring);
@@ -422,30 +446,34 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 
 err_buffers:
         for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
-                mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]);
+                mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);
 
         ring_ind = priv->rx_ring_num - 1;
 err_allocator:
         while (ring_ind >= 0) {
-                if (priv->rx_ring[ring_ind].stride <= TXBB_SIZE)
-                        priv->rx_ring[ring_ind].buf -= TXBB_SIZE;
-                mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]);
+                if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE)
+                        priv->rx_ring[ring_ind]->buf -= TXBB_SIZE;
+                mlx4_en_destroy_allocator(priv, priv->rx_ring[ring_ind]);
                 ring_ind--;
         }
         return err;
 }
 
 void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
-                             struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
+                             struct mlx4_en_rx_ring **pring,
+                             u32 size, u16 stride)
 {
         struct mlx4_en_dev *mdev = priv->mdev;
+        struct mlx4_en_rx_ring *ring = *pring;
 
         mlx4_en_unmap_buffer(&ring->wqres.buf);
         mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
         vfree(ring->rx_info);
         ring->rx_info = NULL;
+        kfree(ring);
+        *pring = NULL;
 #ifdef CONFIG_RFS_ACCEL
-        mlx4_en_cleanup_filters(priv, ring);
+        mlx4_en_cleanup_filters(priv);
 #endif
 }
 
@@ -592,7 +620,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
         struct mlx4_en_priv *priv = netdev_priv(dev);
         struct mlx4_en_dev *mdev = priv->mdev;
         struct mlx4_cqe *cqe;
-        struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
+        struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
         struct mlx4_en_rx_alloc *frags;
         struct mlx4_en_rx_desc *rx_desc;
         struct sk_buff *skb;
@@ -991,7 +1019,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 
         for (i = 0; i < priv->rx_ring_num; i++) {
                 qpn = rss_map->base_qpn + i;
-                err = mlx4_en_config_rss_qp(priv, qpn, &priv->rx_ring[i],
+                err = mlx4_en_config_rss_qp(priv, qpn, priv->rx_ring[i],
                                             &rss_map->state[i],
                                             &rss_map->qps[i]);
                 if (err)
@@ -1008,7 +1036,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
         }
         rss_map->indir_qp.event = mlx4_en_sqp_event;
         mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
-                                priv->rx_ring[0].cqn, -1, &context);
+                                priv->rx_ring[0]->cqn, -1, &context);
 
         if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
                 rss_rings = priv->rx_ring_num;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 2448f0d669e6..40626690e8a8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -156,7 +156,7 @@ retry_tx:
          * since we turned the carrier off */
         msleep(200);
         for (i = 0; i < priv->tx_ring_num && carrier_ok; i++) {
-                tx_ring = &priv->tx_ring[i];
+                tx_ring = priv->tx_ring[i];
                 if (tx_ring->prod != (tx_ring->cons + tx_ring->last_nr_txbb))
                         goto retry_tx;
         }
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 0698c82d6ff1..f54ebd5a1702 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -54,13 +54,23 @@ module_param_named(inline_thold, inline_thold, int, 0444);
 MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
 
 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
-                           struct mlx4_en_tx_ring *ring, int qpn, u32 size,
-                           u16 stride)
+                           struct mlx4_en_tx_ring **pring, int qpn, u32 size,
+                           u16 stride, int node)
 {
         struct mlx4_en_dev *mdev = priv->mdev;
+        struct mlx4_en_tx_ring *ring;
         int tmp;
         int err;
 
+        ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
+        if (!ring) {
+                ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+                if (!ring) {
+                        en_err(priv, "Failed allocating TX ring\n");
+                        return -ENOMEM;
+                }
+        }
+
         ring->size = size;
         ring->size_mask = size - 1;
         ring->stride = stride;
@@ -68,22 +78,33 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
         inline_thold = min(inline_thold, MAX_INLINE);
 
         tmp = size * sizeof(struct mlx4_en_tx_info);
-        ring->tx_info = vmalloc(tmp);
-        if (!ring->tx_info)
-                return -ENOMEM;
+        ring->tx_info = vmalloc_node(tmp, node);
+        if (!ring->tx_info) {
+                ring->tx_info = vmalloc(tmp);
+                if (!ring->tx_info) {
+                        err = -ENOMEM;
+                        goto err_ring;
+                }
+        }
 
         en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
                ring->tx_info, tmp);
 
-        ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
+        ring->bounce_buf = kmalloc_node(MAX_DESC_SIZE, GFP_KERNEL, node);
         if (!ring->bounce_buf) {
-                err = -ENOMEM;
-                goto err_tx;
+                ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
+                if (!ring->bounce_buf) {
+                        err = -ENOMEM;
+                        goto err_info;
+                }
         }
         ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);
 
+        /* Allocate HW buffers on provided NUMA node */
+        set_dev_node(&mdev->dev->pdev->dev, node);
         err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
                                  2 * PAGE_SIZE);
+        set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
         if (err) {
                 en_err(priv, "Failed allocating hwq resources\n");
                 goto err_bounce;
@@ -109,7 +130,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
         }
         ring->qp.event = mlx4_en_sqp_event;
 
-        err = mlx4_bf_alloc(mdev->dev, &ring->bf);
+        err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
         if (err) {
                 en_dbg(DRV, priv, "working without blueflame (%d)", err);
                 ring->bf.uar = &mdev->priv_uar;
@@ -120,6 +141,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 
         ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
 
+        *pring = ring;
         return 0;
 
 err_map:
@@ -129,16 +151,20 @@ err_hwq_res:
129err_bounce: 151err_bounce:
130 kfree(ring->bounce_buf); 152 kfree(ring->bounce_buf);
131 ring->bounce_buf = NULL; 153 ring->bounce_buf = NULL;
132err_tx: 154err_info:
133 vfree(ring->tx_info); 155 vfree(ring->tx_info);
134 ring->tx_info = NULL; 156 ring->tx_info = NULL;
157err_ring:
158 kfree(ring);
159 *pring = NULL;
135 return err; 160 return err;
136} 161}
137 162
138void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, 163void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
139 struct mlx4_en_tx_ring *ring) 164 struct mlx4_en_tx_ring **pring)
140{ 165{
141 struct mlx4_en_dev *mdev = priv->mdev; 166 struct mlx4_en_dev *mdev = priv->mdev;
167 struct mlx4_en_tx_ring *ring = *pring;
142 en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn); 168 en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
143 169
144 if (ring->bf_enabled) 170 if (ring->bf_enabled)
@@ -151,6 +177,8 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
151 ring->bounce_buf = NULL; 177 ring->bounce_buf = NULL;
152 vfree(ring->tx_info); 178 vfree(ring->tx_info);
153 ring->tx_info = NULL; 179 ring->tx_info = NULL;
180 kfree(ring);
181 *pring = NULL;
154} 182}
155 183
156int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, 184int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
@@ -330,7 +358,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
330{ 358{
331 struct mlx4_en_priv *priv = netdev_priv(dev); 359 struct mlx4_en_priv *priv = netdev_priv(dev);
332 struct mlx4_cq *mcq = &cq->mcq; 360 struct mlx4_cq *mcq = &cq->mcq;
333 struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring]; 361 struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
334 struct mlx4_cqe *cqe; 362 struct mlx4_cqe *cqe;
335 u16 index; 363 u16 index;
336 u16 new_index, ring_index, stamp_index; 364 u16 new_index, ring_index, stamp_index;
@@ -622,7 +650,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
622 } 650 }
623 651
624 tx_ind = skb->queue_mapping; 652 tx_ind = skb->queue_mapping;
625 ring = &priv->tx_ring[tx_ind]; 653 ring = priv->tx_ring[tx_ind];
626 if (vlan_tx_tag_present(skb)) 654 if (vlan_tx_tag_present(skb))
627 vlan_tag = vlan_tx_tag_get(skb); 655 vlan_tag = vlan_tx_tag_get(skb);
628 656
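The allocation idiom this hunk introduces (try the preferred NUMA node first, and only fall back to an unconstrained allocation if node-local memory is exhausted, rather than failing) recurs throughout this series, in en_tx.c, icm.c and pd.c. A minimal standalone sketch of the pattern follows; the helper name is hypothetical and is not part of the driver:

/* requires <linux/slab.h>; hypothetical helper, not driver code */
static void *mlx4_alloc_pref_node(size_t size, gfp_t gfp, int node)
{
	/* prefer memory local to "node" for locality with the NIC ... */
	void *buf = kzalloc_node(size, gfp, node);

	/* ... but never fail solely because that node is out of memory */
	if (!buf)
		buf = kzalloc(size, gfp);
	return buf;
}

DMA-coherent buffers have no *_node allocator variant, which is why the hunk above instead retargets the struct device with set_dev_node() around mlx4_alloc_hwq_res() and then restores mdev->dev->numa_node immediately afterwards.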
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 0416c5b3b35c..c9cdb2a2c596 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -936,7 +936,6 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
 	if (err)
 		goto err_out_free_mtt;
 
-	memset(eq_context, 0, sizeof *eq_context);
 	eq_context->flags = cpu_to_be32(MLX4_EQ_STATUS_OK |
 					MLX4_EQ_STATE_ARMED);
 	eq_context->log_eq_size = ilog2(eq->nent);
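The memset() removals here and in the files below are safe only if every command mailbox is handed out already zeroed. The cmd.c change in this series (visible in the diffstat but not in this excerpt) presumably performs that zeroing once at allocation time; a sketch of what mlx4_alloc_cmd_mailbox() would look like under that assumption:

struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
{
	struct mlx4_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), GFP_KERNEL);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
				      &mailbox->dma);
	if (!mailbox->buf) {
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}

	/* zero once here so callers no longer memset before each command */
	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);

	return mailbox;
}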
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index c3e70bc2d875..fda26679f7d5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -159,8 +159,6 @@ int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
 		return PTR_ERR(mailbox);
 	inbox = mailbox->buf;
 
-	memset(inbox, 0, MOD_STAT_CFG_IN_SIZE);
-
 	MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET);
 	MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);
 
@@ -967,7 +965,6 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
-	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
 	pages = mailbox->buf;
 
 	for (mlx4_icm_first(icm, &iter);
@@ -1316,8 +1313,6 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 		return PTR_ERR(mailbox);
 	inbox = mailbox->buf;
 
-	memset(inbox, 0, INIT_HCA_IN_SIZE);
-
 	*((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;
 
 	*((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
@@ -1616,8 +1611,6 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
 		return PTR_ERR(mailbox);
 	inbox = mailbox->buf;
 
-	memset(inbox, 0, INIT_PORT_IN_SIZE);
-
 	flags = 0;
 	flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
 	flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index 31d02649be41..5fbf4924c272 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -93,13 +93,17 @@ void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
 	kfree(icm);
 }
 
-static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
+static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order,
+				gfp_t gfp_mask, int node)
 {
 	struct page *page;
 
-	page = alloc_pages(gfp_mask, order);
-	if (!page)
-		return -ENOMEM;
+	page = alloc_pages_node(node, gfp_mask, order);
+	if (!page) {
+		page = alloc_pages(gfp_mask, order);
+		if (!page)
+			return -ENOMEM;
+	}
 
 	sg_set_page(mem, page, PAGE_SIZE << order, 0);
 	return 0;
@@ -130,9 +134,15 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
 	/* We use sg_set_buf for coherent allocs, which assumes low memory */
 	BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));
 
-	icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
-	if (!icm)
-		return NULL;
+	icm = kmalloc_node(sizeof(*icm),
+			   gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN),
+			   dev->numa_node);
+	if (!icm) {
+		icm = kmalloc(sizeof(*icm),
+			      gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
+		if (!icm)
+			return NULL;
+	}
 
 	icm->refcount = 0;
 	INIT_LIST_HEAD(&icm->chunk_list);
@@ -141,10 +151,17 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
 
 	while (npages > 0) {
 		if (!chunk) {
-			chunk = kmalloc(sizeof *chunk,
-					gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
-			if (!chunk)
-				goto fail;
+			chunk = kmalloc_node(sizeof(*chunk),
+					     gfp_mask & ~(__GFP_HIGHMEM |
							  __GFP_NOWARN),
+					     dev->numa_node);
+			if (!chunk) {
+				chunk = kmalloc(sizeof(*chunk),
+						gfp_mask & ~(__GFP_HIGHMEM |
							     __GFP_NOWARN));
+				if (!chunk)
+					goto fail;
+			}
 
 			sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
 			chunk->npages = 0;
@@ -161,7 +178,8 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
 					   cur_order, gfp_mask);
 		else
 			ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
-						   cur_order, gfp_mask);
+						   cur_order, gfp_mask,
+						   dev->numa_node);
 
 		if (ret) {
 			if (--cur_order < 0)
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 7d2628dfdc29..5789ea2c934d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2191,6 +2191,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
 	mutex_init(&priv->bf_mutex);
 
 	dev->rev_id = pdev->revision;
+	dev->numa_node = dev_to_node(&pdev->dev);
 	/* Detect if this device is a virtual function */
 	if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
 		/* When acting as pf, we normally skip vfs unless explicitly
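dev_to_node() may return NUMA_NO_NODE (-1) when the platform exposes no affinity for the PCI device; the *_node allocators accept that value and treat it as "no preference", so the consumers of dev->numa_node added in this series need no special casing. An illustrative fragment (not driver code):

/* illustration only: allocate near a PCI device, degrading
 * gracefully on non-NUMA systems where dev_to_node() is -1 */
static void *alloc_near_pci_dev(struct pci_dev *pdev, size_t size)
{
	int node = dev_to_node(&pdev->dev);	/* may be NUMA_NO_NODE */

	return kzalloc_node(size, GFP_KERNEL, node);
}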
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 70f0213d68c4..acf9d5f1f922 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -506,7 +506,6 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
 		goto out_list;
 	}
 	mgm = mailbox->buf;
-	memset(mgm, 0, sizeof *mgm);
 	members_count = 0;
 	list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
 		mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
@@ -857,7 +856,6 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
 
-	memset(mailbox->buf, 0, sizeof(struct mlx4_net_trans_rule_hw_ctrl));
 	trans_rule_ctrl_to_hw(rule, mailbox->buf);
 
 	size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index bf06e3610d27..f3758de59c05 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -530,10 +530,10 @@ struct mlx4_en_priv {
 	u16 num_frags;
 	u16 log_rx_info;
 
-	struct mlx4_en_tx_ring *tx_ring;
-	struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS];
-	struct mlx4_en_cq *tx_cq;
-	struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
+	struct mlx4_en_tx_ring **tx_ring;
+	struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS];
+	struct mlx4_en_cq **tx_cq;
+	struct mlx4_en_cq *rx_cq[MAX_RX_RINGS];
 	struct mlx4_qp drop_qp;
 	struct work_struct rx_mode_task;
 	struct work_struct watchdog_task;
@@ -626,7 +626,7 @@ static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq)
 	if ((cq->state & MLX4_CQ_LOCKED)) {
 		struct net_device *dev = cq->dev;
 		struct mlx4_en_priv *priv = netdev_priv(dev);
-		struct mlx4_en_rx_ring *rx_ring = &priv->rx_ring[cq->ring];
+		struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
 
 		cq->state |= MLX4_EN_CQ_STATE_POLL_YIELD;
 		rc = false;
@@ -704,9 +704,9 @@ void mlx4_en_stop_port(struct net_device *dev, int detach);
 void mlx4_en_free_resources(struct mlx4_en_priv *priv);
 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
 
-int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
-		      int entries, int ring, enum cq_type mode);
-void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq,
+		      int entries, int ring, enum cq_type mode, int node);
+void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq);
 int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
 			int cq_idx);
 void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
@@ -717,9 +717,11 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
 netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
 
-int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
-			   int qpn, u32 size, u16 stride);
-void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring);
+int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
+			   struct mlx4_en_tx_ring **pring,
+			   int qpn, u32 size, u16 stride, int node);
+void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
+			     struct mlx4_en_tx_ring **pring);
 int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
 			     struct mlx4_en_tx_ring *ring,
 			     int cq, int user_prio);
@@ -727,10 +729,10 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
 			       struct mlx4_en_tx_ring *ring);
 
 int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
-			   struct mlx4_en_rx_ring *ring,
-			   u32 size, u16 stride);
+			   struct mlx4_en_rx_ring **pring,
+			   u32 size, u16 stride, int node);
 void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
-			     struct mlx4_en_rx_ring *ring,
+			     struct mlx4_en_rx_ring **pring,
 			     u32 size, u16 stride);
 int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv);
 void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
@@ -768,8 +770,7 @@ extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops;
 int mlx4_en_setup_tc(struct net_device *dev, u8 up);
 
 #ifdef CONFIG_RFS_ACCEL
-void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
-			     struct mlx4_en_rx_ring *rx_ring);
+void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv);
 #endif
 
 #define MLX4_EN_NUM_SELF_TEST	5
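Since the create() routines now allocate the ring object themselves and return it through a double pointer, callers keep an array of pointers and pass the address of the slot. The matching call sites live in en_netdev.c (changed in this series but not shown in this excerpt); they presumably look roughly like the sketch below, with the per-ring node choice simplified to cpu_to_node() and driver locals (i, err, prof) assumed in scope:

	/* illustrative caller; error handling and the exact node
	 * selection are simplified relative to the real driver code */
	for (i = 0; i < priv->tx_ring_num; i++) {
		int node = cpu_to_node(i % num_online_cpus());

		err = mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
					     priv->base_tx_qpn + i,
					     prof->tx_ring_size,
					     TXBB_SIZE, node);
		if (err)
			goto err;
	}

Destruction mirrors this: mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]) frees the ring object and NULLs the caller's slot in one step.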
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 63391a1a7f8c..b3ee9bafff5e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -480,9 +480,6 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
 		goto err_table;
 	}
 	mpt_entry = mailbox->buf;
-
-	memset(mpt_entry, 0, sizeof *mpt_entry);
-
 	mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO |
 				       MLX4_MPT_FLAG_REGION |
 				       mr->access);
@@ -695,8 +692,6 @@ int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw)
 	}
 	mpt_entry = mailbox->buf;
 
-	memset(mpt_entry, 0, sizeof(*mpt_entry));
-
 	/* Note that the MLX4_MPT_FLAG_REGION bit in mpt_entry->flags is turned
 	 * off, thus creating a memory window and not a memory region.
 	 */
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index 00f223acada7..84cfb40bf451 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -168,7 +168,7 @@ void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar)
 }
 EXPORT_SYMBOL_GPL(mlx4_uar_free);
 
-int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf)
+int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_uar *uar;
@@ -186,10 +186,13 @@ int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf)
 		err = -ENOMEM;
 		goto out;
 	}
-	uar = kmalloc(sizeof *uar, GFP_KERNEL);
+	uar = kmalloc_node(sizeof(*uar), GFP_KERNEL, node);
 	if (!uar) {
-		err = -ENOMEM;
-		goto out;
+		uar = kmalloc(sizeof(*uar), GFP_KERNEL);
+		if (!uar) {
+			err = -ENOMEM;
+			goto out;
+		}
 	}
 	err = mlx4_uar_alloc(dev, uar);
 	if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index caaa15470395..97d342fa5032 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -469,8 +469,6 @@ int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
 
 	inbuf = inmailbox->buf;
 	outbuf = outmailbox->buf;
-	memset(inbuf, 0, 256);
-	memset(outbuf, 0, 256);
 	inbuf[0] = 1;
 	inbuf[1] = 1;
 	inbuf[2] = 1;
@@ -653,8 +651,6 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz)
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
 
-	memset(mailbox->buf, 0, 256);
-
 	((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
 
 	if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) {
@@ -692,8 +688,6 @@ int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
 	context = mailbox->buf;
-	memset(context, 0, sizeof *context);
-
 	context->flags = SET_PORT_GEN_ALL_VALID;
 	context->mtu = cpu_to_be16(mtu);
 	context->pptx = (pptx * (!pfctx)) << 7;
@@ -727,8 +721,6 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
 	context = mailbox->buf;
-	memset(context, 0, sizeof *context);
-
 	context->base_qpn = cpu_to_be32(base_qpn);
 	context->n_mac = dev->caps.log_num_macs;
 	context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
@@ -761,8 +753,6 @@ int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc)
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
 	context = mailbox->buf;
-	memset(context, 0, sizeof *context);
-
 	for (i = 0; i < MLX4_NUM_UP; i += 2)
 		context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1];
 
@@ -788,7 +778,6 @@ int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
 	context = mailbox->buf;
-	memset(context, 0, sizeof *context);
 
 	for (i = 0; i < MLX4_NUM_TC; i++) {
 		struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i];
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index b1603e2287a7..2f3f2bc7f283 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -110,7 +110,14 @@ struct res_qp {
 	int			local_qpn;
 	atomic_t		ref_count;
 	u32			qpc_flags;
+	/* saved qp params before VST enforcement in order to restore on VGT */
 	u8			sched_queue;
+	__be32			param3;
+	u8			vlan_control;
+	u8			fvl_rx;
+	u8			pri_path_fl;
+	u8			vlan_index;
+	u8			feup;
 };
 
 enum res_mtt_states {
@@ -2568,6 +2575,12 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
 		return err;
 	qp->local_qpn = local_qpn;
 	qp->sched_queue = 0;
+	qp->param3 = 0;
+	qp->vlan_control = 0;
+	qp->fvl_rx = 0;
+	qp->pri_path_fl = 0;
+	qp->vlan_index = 0;
+	qp->feup = 0;
 	qp->qpc_flags = be32_to_cpu(qpc->flags);
 
 	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
@@ -3294,6 +3307,12 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
 	int qpn = vhcr->in_modifier & 0x7fffff;
 	struct res_qp *qp;
 	u8 orig_sched_queue;
+	__be32 orig_param3 = qpc->param3;
+	u8 orig_vlan_control = qpc->pri_path.vlan_control;
+	u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
+	u8 orig_pri_path_fl = qpc->pri_path.fl;
+	u8 orig_vlan_index = qpc->pri_path.vlan_index;
+	u8 orig_feup = qpc->pri_path.feup;
 
 	err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
 	if (err)
@@ -3321,9 +3340,15 @@ out:
 	 * essentially the QOS value provided by the VF. This will be useful
 	 * if we allow dynamic changes from VST back to VGT
 	 */
-	if (!err)
+	if (!err) {
 		qp->sched_queue = orig_sched_queue;
-
+		qp->param3 = orig_param3;
+		qp->vlan_control = orig_vlan_control;
+		qp->fvl_rx = orig_fvl_rx;
+		qp->pri_path_fl = orig_pri_path_fl;
+		qp->vlan_index = orig_vlan_index;
+		qp->feup = orig_feup;
+	}
 	put_res(dev, slave, qpn, RES_QP);
 	return err;
 }
@@ -4437,13 +4462,20 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
 		&tracker->slave_list[work->slave].res_list[RES_QP];
 	struct res_qp *qp;
 	struct res_qp *tmp;
-	u64 qp_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
+	u64 qp_path_mask_vlan_ctrl =
+		       ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
 		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
 		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
 		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
 		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
-		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED) |
-		       (1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
+		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
+
+	u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
+		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
+		       (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
+		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
+		       (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
+		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
 		       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
 
 	int err;
@@ -4475,9 +4507,7 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
 			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
 
 	upd_context = mailbox->buf;
-	upd_context->primary_addr_path_mask = cpu_to_be64(qp_mask);
-	upd_context->qp_context.pri_path.vlan_control = vlan_control;
-	upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
+	upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD);
 
 	spin_lock_irq(mlx4_tlock(dev));
 	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
@@ -4495,10 +4525,35 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
 			spin_lock_irq(mlx4_tlock(dev));
 			continue;
 		}
-		upd_context->qp_context.pri_path.sched_queue =
-			qp->sched_queue & 0xC7;
-		upd_context->qp_context.pri_path.sched_queue |=
-			((work->qos & 0x7) << 3);
+		if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
+			upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
+		else
+			upd_context->primary_addr_path_mask =
+				cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
+		if (work->vlan_id == MLX4_VGT) {
+			upd_context->qp_context.param3 = qp->param3;
+			upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
+			upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
+			upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
+			upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
+			upd_context->qp_context.pri_path.feup = qp->feup;
+			upd_context->qp_context.pri_path.sched_queue =
+				qp->sched_queue;
+		} else {
+			upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
+			upd_context->qp_context.pri_path.vlan_control = vlan_control;
+			upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
+			upd_context->qp_context.pri_path.fvl_rx =
+				qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
+			upd_context->qp_context.pri_path.fl =
+				qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
+			upd_context->qp_context.pri_path.feup =
+				qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
+			upd_context->qp_context.pri_path.sched_queue =
+				qp->sched_queue & 0xC7;
+			upd_context->qp_context.pri_path.sched_queue |=
+				((work->qos & 0x7) << 3);
+		}
 
 		err = mlx4_cmd(dev, mailbox->dma,
 			       qp->local_qpn & 0xffffff,
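Two details in the work-handler hunk above are worth spelling out. First, qpc_flags was captured at RST2INIT as be32_to_cpu(qpc->flags), and by construction of the test the service type sits in bits 16..23 of that word, so the RC check is equivalent to this hypothetical helper:

/* hypothetical helper, equivalent to the inline test above */
static bool qp_is_rc(const struct res_qp *qp)
{
	u8 srv_type = (qp->qpc_flags >> 16) & 0xff;

	return srv_type == MLX4_QP_ST_RC;
}

Second, RC QPs get only qp_path_mask in the primary address path mask, while all other transports additionally get qp_path_mask_vlan_ctrl; and when the VF returns to VGT the handler restores exactly the fields that were saved in struct res_qp rather than forcing the VST values.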
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index 9e08e35ce351..8fdf23753779 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -189,8 +189,6 @@ int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
 	}
 
 	srq_context = mailbox->buf;
-	memset(srq_context, 0, sizeof *srq_context);
-
 	srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) |
 						      srq->srqn);
 	srq_context->logstride = srq->wqe_shift - 4;
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index f6f59271f857..7d3a523160ba 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -662,6 +662,7 @@ struct mlx4_dev {
 	u8			rev_id;
 	char			board_id[MLX4_BOARD_ID_LEN];
 	int			num_vfs;
+	int			numa_node;
 	int			oper_log_mgm_entry_size;
 	u64			regid_promisc_array[MLX4_MAX_PORTS + 1];
 	u64			regid_allmulti_array[MLX4_MAX_PORTS + 1];
@@ -834,7 +835,7 @@ void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn);
 
 int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar);
 void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar);
-int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf);
+int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node);
 void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf);
 
 int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,