Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx4')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c              |  41
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cq.c               |   6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_cq.c            |  45
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c       |  36
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_main.c          |   8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c        | 152
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_port.c          |  17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c            |  68
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_selftest.c      |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c            |  54
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c               |   1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c               | 110
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/icm.c              |  42
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c             |  62
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mcg.c              |   8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h             |  21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h          |  33
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mr.c               |  11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/pd.c               |  11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/port.c             |  98
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c               |   3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 589
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/srq.c              |   3
23 files changed, 1072 insertions(+), 349 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index bb11624a1f39..1e9970d2f0f3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1539,11 +1539,6 @@ out:
 	return ret;
 }
 
-static int calculate_transition(u16 oper_vlan, u16 admin_vlan)
-{
-	return (2 * (oper_vlan == MLX4_VGT) + (admin_vlan == MLX4_VGT));
-}
-
 static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
 						   int slave, int port)
 {
@@ -1553,7 +1548,6 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
 	struct mlx4_dev *dev = &(priv->dev);
 	int err;
 	int admin_vlan_ix = NO_INDX;
-	enum mlx4_vlan_transition vlan_trans;
 
 	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
 	vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
@@ -1563,12 +1557,8 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
 	    vp_oper->state.link_state == vp_admin->link_state)
 		return 0;
 
-	vlan_trans = calculate_transition(vp_oper->state.default_vlan,
-					  vp_admin->default_vlan);
-
 	if (!(priv->mfunc.master.slave_state[slave].active &&
-	      dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP &&
-	      vlan_trans == MLX4_VLAN_TRANSITION_VST_VST)) {
+	      dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)) {
 		/* even if the UPDATE_QP command isn't supported, we still want
 		 * to set this VF link according to the admin directive
 		 */
@@ -1586,15 +1576,19 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
 		return -ENOMEM;
 
 	if (vp_oper->state.default_vlan != vp_admin->default_vlan) {
-		err = __mlx4_register_vlan(&priv->dev, port,
-					   vp_admin->default_vlan,
-					   &admin_vlan_ix);
-		if (err) {
-			kfree(work);
-			mlx4_warn((&priv->dev),
-				  "No vlan resources slave %d, port %d\n",
-				  slave, port);
-			return err;
+		if (MLX4_VGT != vp_admin->default_vlan) {
+			err = __mlx4_register_vlan(&priv->dev, port,
+						   vp_admin->default_vlan,
+						   &admin_vlan_ix);
+			if (err) {
+				kfree(work);
+				mlx4_warn((&priv->dev),
+					  "No vlan resources slave %d, port %d\n",
+					  slave, port);
+				return err;
+			}
+		} else {
+			admin_vlan_ix = NO_INDX;
 		}
 		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
 		mlx4_dbg((&(priv->dev)),
@@ -1687,7 +1681,7 @@ static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave
 	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
 	if (NO_INDX != vp_oper->vlan_idx) {
 		__mlx4_unregister_vlan(&priv->dev,
-				       port, vp_oper->vlan_idx);
+				       port, vp_oper->state.default_vlan);
 		vp_oper->vlan_idx = NO_INDX;
 	}
 	if (NO_INDX != vp_oper->mac_idx) {
@@ -1718,6 +1712,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
 	if (cmd == MLX4_COMM_CMD_RESET) {
 		mlx4_warn(dev, "Received reset from slave:%d\n", slave);
 		slave_state[slave].active = false;
+		slave_state[slave].old_vlan_api = false;
 		mlx4_master_deactivate_admin_state(priv, slave);
 		for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
 			slave_state[slave].event_eq[i].eqn = -1;
@@ -2198,6 +2193,8 @@ struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
 		return ERR_PTR(-ENOMEM);
 	}
 
+	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
+
 	return mailbox;
 }
 EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
@@ -2253,7 +2250,6 @@ EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
 int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
-	struct mlx4_vport_oper_state *vf_oper;
 	struct mlx4_vport_state *vf_admin;
 	int slave;
 
@@ -2269,7 +2265,6 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
 		return -EINVAL;
 
 	vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
-	vf_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
 
 	if ((0 == vlan) && (0 == qos))
 		vf_admin->default_vlan = MLX4_VGT;
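
The mlx4_alloc_cmd_mailbox() hunk above centralizes buffer zeroing: the mailbox buffer is now memset to zero once at allocation time, which is what lets callers in cq.c and en_port.c below drop their own per-call memset()s. A minimal user-space sketch of the same centralize-the-zeroing pattern (hypothetical names, not the driver's code):

	#include <stdlib.h>

	#define MAILBOX_SIZE 4096

	struct mailbox {
		void *buf;
	};

	/* Zero once here so callers never need their own memset(). */
	static struct mailbox *alloc_mailbox(void)
	{
		struct mailbox *mb = malloc(sizeof(*mb));

		if (!mb)
			return NULL;
		mb->buf = calloc(1, MAILBOX_SIZE);	/* calloc returns zeroed memory */
		if (!mb->buf) {
			free(mb);
			return NULL;
		}
		return mb;
	}
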
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 004e4231af67..22fcbe78311c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -128,8 +128,6 @@ int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
 		return PTR_ERR(mailbox);
 
 	cq_context = mailbox->buf;
-	memset(cq_context, 0, sizeof *cq_context);
-
 	cq_context->cq_max_count = cpu_to_be16(count);
 	cq_context->cq_period = cpu_to_be16(period);
 
@@ -153,8 +151,6 @@ int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
 		return PTR_ERR(mailbox);
 
 	cq_context = mailbox->buf;
-	memset(cq_context, 0, sizeof *cq_context);
-
 	cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24);
 	cq_context->log_page_size = mtt->page_shift - 12;
 	mtt_addr = mlx4_mtt_addr(dev, mtt);
@@ -274,8 +270,6 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
 	}
 
 	cq_context = mailbox->buf;
-	memset(cq_context, 0, sizeof *cq_context);
-
 	cq_context->flags = cpu_to_be32(!!collapsed << 18);
 	if (timestamp_en)
 		cq_context->flags |= cpu_to_be32(1 << 19);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 3e2d5047cdb3..3a098cc4d349 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -44,12 +44,23 @@ static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event)
 
 
 int mlx4_en_create_cq(struct mlx4_en_priv *priv,
-		      struct mlx4_en_cq *cq,
-		      int entries, int ring, enum cq_type mode)
+		      struct mlx4_en_cq **pcq,
+		      int entries, int ring, enum cq_type mode,
+		      int node)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_cq *cq;
 	int err;
 
+	cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, node);
+	if (!cq) {
+		cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+		if (!cq) {
+			en_err(priv, "Failed to allocate CQ structure\n");
+			return -ENOMEM;
+		}
+	}
+
 	cq->size = entries;
 	cq->buf_size = cq->size * mdev->dev->caps.cqe_size;
 
@@ -57,17 +68,30 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
 	cq->is_tx = mode;
 	spin_lock_init(&cq->lock);
 
+	/* Allocate HW buffers on provided NUMA node.
+	 * dev->numa_node is used in mtt range allocation flow.
+	 */
+	set_dev_node(&mdev->dev->pdev->dev, node);
 	err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
 				cq->buf_size, 2 * PAGE_SIZE);
+	set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
 	if (err)
-		return err;
+		goto err_cq;
 
 	err = mlx4_en_map_buffer(&cq->wqres.buf);
 	if (err)
-		mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
-	else
-		cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf;
+		goto err_res;
 
+	cq->buf = (struct mlx4_cqe *)cq->wqres.buf.direct.buf;
+	*pcq = cq;
+
+	return 0;
+
+err_res:
+	mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
+err_cq:
+	kfree(cq);
+	*pcq = NULL;
 	return err;
 }
 
@@ -117,12 +141,12 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
 		struct mlx4_en_cq *rx_cq;
 
 		cq_idx = cq_idx % priv->rx_ring_num;
-		rx_cq = &priv->rx_cq[cq_idx];
+		rx_cq = priv->rx_cq[cq_idx];
 		cq->vector = rx_cq->vector;
 	}
 
 	if (!cq->is_tx)
-		cq->size = priv->rx_ring[cq->ring].actual_size;
+		cq->size = priv->rx_ring[cq->ring]->actual_size;
 
 	if ((cq->is_tx && priv->hwtstamp_config.tx_type) ||
 	    (!cq->is_tx && priv->hwtstamp_config.rx_filter))
@@ -146,9 +170,10 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
 	return 0;
 }
 
-void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_cq *cq = *pcq;
 
 	mlx4_en_unmap_buffer(&cq->wqres.buf);
 	mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
@@ -157,6 +182,8 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 	cq->vector = 0;
 	cq->buf_size = 0;
 	cq->buf = NULL;
+	kfree(cq);
+	*pcq = NULL;
 }
 
 void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
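
The kzalloc_node()/kzalloc() pair introduced in mlx4_en_create_cq() is the recurring allocation strategy of this series: ask for memory on the NUMA node that will service the ring, and fall back to any node rather than fail. The same shape reappears for the RX and TX ring structures in en_rx.c and en_tx.c below. A condensed kernel-style sketch of the pattern (the helper name is illustrative, not a driver symbol):

	/* Prefer node-local memory; any-node memory still beats -ENOMEM. */
	static void *zalloc_pref_node(size_t size, int node)
	{
		void *obj = kzalloc_node(size, GFP_KERNEL, node);

		if (!obj)
			obj = kzalloc(size, GFP_KERNEL);
		return obj;
	}
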
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 0c750985f47e..0596f9f85a0e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -51,10 +51,10 @@ static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
 	int err = 0;
 
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		priv->tx_cq[i].moder_cnt = priv->tx_frames;
-		priv->tx_cq[i].moder_time = priv->tx_usecs;
+		priv->tx_cq[i]->moder_cnt = priv->tx_frames;
+		priv->tx_cq[i]->moder_time = priv->tx_usecs;
 		if (priv->port_up) {
-			err = mlx4_en_set_cq_moder(priv, &priv->tx_cq[i]);
+			err = mlx4_en_set_cq_moder(priv, priv->tx_cq[i]);
 			if (err)
 				return err;
 		}
@@ -64,11 +64,11 @@ static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
 	return 0;
 
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		priv->rx_cq[i].moder_cnt = priv->rx_frames;
-		priv->rx_cq[i].moder_time = priv->rx_usecs;
+		priv->rx_cq[i]->moder_cnt = priv->rx_frames;
+		priv->rx_cq[i]->moder_time = priv->rx_usecs;
 		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
 		if (priv->port_up) {
-			err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
+			err = mlx4_en_set_cq_moder(priv, priv->rx_cq[i]);
 			if (err)
 				return err;
 		}
@@ -274,16 +274,16 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
 		}
 	}
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		data[index++] = priv->tx_ring[i].packets;
-		data[index++] = priv->tx_ring[i].bytes;
+		data[index++] = priv->tx_ring[i]->packets;
+		data[index++] = priv->tx_ring[i]->bytes;
 	}
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		data[index++] = priv->rx_ring[i].packets;
-		data[index++] = priv->rx_ring[i].bytes;
+		data[index++] = priv->rx_ring[i]->packets;
+		data[index++] = priv->rx_ring[i]->bytes;
 #ifdef CONFIG_NET_RX_BUSY_POLL
-		data[index++] = priv->rx_ring[i].yields;
-		data[index++] = priv->rx_ring[i].misses;
-		data[index++] = priv->rx_ring[i].cleaned;
+		data[index++] = priv->rx_ring[i]->yields;
+		data[index++] = priv->rx_ring[i]->misses;
+		data[index++] = priv->rx_ring[i]->cleaned;
 #endif
 	}
 	spin_unlock_bh(&priv->stats_lock);
@@ -510,9 +510,9 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
 	tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
 	tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
 
-	if (rx_size == (priv->port_up ? priv->rx_ring[0].actual_size :
-			priv->rx_ring[0].size) &&
-	    tx_size == priv->tx_ring[0].size)
+	if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size :
+			priv->rx_ring[0]->size) &&
+	    tx_size == priv->tx_ring[0]->size)
 		return 0;
 
 	mutex_lock(&mdev->state_lock);
@@ -553,8 +553,8 @@ static void mlx4_en_get_ringparam(struct net_device *dev,
 	param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
 	param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
 	param->rx_pending = priv->port_up ?
-		priv->rx_ring[0].actual_size : priv->rx_ring[0].size;
-	param->tx_pending = priv->tx_ring[0].size;
+		priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size;
+	param->tx_pending = priv->tx_ring[0]->size;
 }
 
 static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index a071cda2dd04..0d087b03a7b0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -264,6 +264,10 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
 		mdev->port_cnt++;
 
+	/* Initialize time stamp mechanism */
+	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
+		mlx4_en_init_timestamp(mdev);
+
 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
 		if (!dev->caps.comp_pool) {
 			mdev->profile.prof[i].rx_ring_num =
@@ -301,10 +305,6 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
 		mdev->pndev[i] = NULL;
 	}
 
-	/* Initialize time stamp mechanism */
-	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
-		mlx4_en_init_timestamp(mdev);
-
 	return mdev;
 
 err_mr:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index fa37b7a61213..e72d8a112a6b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -75,7 +75,7 @@ static int mlx4_en_low_latency_recv(struct napi_struct *napi)
 	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
 	struct net_device *dev = cq->dev;
 	struct mlx4_en_priv *priv = netdev_priv(dev);
-	struct mlx4_en_rx_ring *rx_ring = &priv->rx_ring[cq->ring];
+	struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
 	int done;
 
 	if (!priv->port_up)
@@ -102,6 +102,7 @@ struct mlx4_en_filter {
 	struct list_head next;
 	struct work_struct work;
 
+	u8     ip_proto;
 	__be32 src_ip;
 	__be32 dst_ip;
 	__be16 src_port;
@@ -120,14 +121,26 @@ struct mlx4_en_filter {
 
 static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);
 
+static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
+{
+	switch (ip_proto) {
+	case IPPROTO_UDP:
+		return MLX4_NET_TRANS_RULE_ID_UDP;
+	case IPPROTO_TCP:
+		return MLX4_NET_TRANS_RULE_ID_TCP;
+	default:
+		return -EPROTONOSUPPORT;
+	}
+};
+
 static void mlx4_en_filter_work(struct work_struct *work)
 {
 	struct mlx4_en_filter *filter = container_of(work,
 						     struct mlx4_en_filter,
 						     work);
 	struct mlx4_en_priv *priv = filter->priv;
-	struct mlx4_spec_list spec_tcp = {
-		.id = MLX4_NET_TRANS_RULE_ID_TCP,
+	struct mlx4_spec_list spec_tcp_udp = {
+		.id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
 		{
 			.tcp_udp = {
 				.dst_port = filter->dst_port,
@@ -163,9 +176,14 @@ static void mlx4_en_filter_work(struct work_struct *work)
 	int rc;
 	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
 
+	if (spec_tcp_udp.id < 0) {
+		en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
+			filter->ip_proto);
+		goto ignore;
+	}
 	list_add_tail(&spec_eth.list, &rule.list);
 	list_add_tail(&spec_ip.list, &rule.list);
-	list_add_tail(&spec_tcp.list, &rule.list);
+	list_add_tail(&spec_tcp_udp.list, &rule.list);
 
 	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
 	memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
@@ -183,6 +201,7 @@ static void mlx4_en_filter_work(struct work_struct *work)
 	if (rc)
 		en_err(priv, "Error attaching flow. err = %d\n", rc);
 
+ignore:
 	mlx4_en_filter_rfs_expire(priv);
 
 	filter->activated = 1;
@@ -206,8 +225,8 @@ filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
 
 static struct mlx4_en_filter *
 mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
-		     __be32 dst_ip, __be16 src_port, __be16 dst_port,
-		     u32 flow_id)
+		     __be32 dst_ip, u8 ip_proto, __be16 src_port,
+		     __be16 dst_port, u32 flow_id)
 {
 	struct mlx4_en_filter *filter = NULL;
 
@@ -221,6 +240,7 @@ mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
 
 	filter->src_ip = src_ip;
 	filter->dst_ip = dst_ip;
+	filter->ip_proto = ip_proto;
 	filter->src_port = src_port;
 	filter->dst_port = dst_port;
 
@@ -252,7 +272,7 @@ static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
 
 static inline struct mlx4_en_filter *
 mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
-		    __be16 src_port, __be16 dst_port)
+		    u8 ip_proto, __be16 src_port, __be16 dst_port)
 {
 	struct mlx4_en_filter *filter;
 	struct mlx4_en_filter *ret = NULL;
@@ -263,6 +283,7 @@ mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
 			    filter_chain) {
 		if (filter->src_ip == src_ip &&
 		    filter->dst_ip == dst_ip &&
+		    filter->ip_proto == ip_proto &&
 		    filter->src_port == src_port &&
 		    filter->dst_port == dst_port) {
 			ret = filter;
@@ -281,6 +302,7 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
 	struct mlx4_en_filter *filter;
 	const struct iphdr *ip;
 	const __be16 *ports;
+	u8 ip_proto;
 	__be32 src_ip;
 	__be32 dst_ip;
 	__be16 src_port;
@@ -295,18 +317,19 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
 	if (ip_is_fragment(ip))
 		return -EPROTONOSUPPORT;
 
+	if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
+		return -EPROTONOSUPPORT;
 	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
 
+	ip_proto = ip->protocol;
 	src_ip = ip->saddr;
 	dst_ip = ip->daddr;
 	src_port = ports[0];
 	dst_port = ports[1];
 
-	if (ip->protocol != IPPROTO_TCP)
-		return -EPROTONOSUPPORT;
-
 	spin_lock_bh(&priv->filters_lock);
-	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, src_port, dst_port);
+	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
+				     src_port, dst_port);
 	if (filter) {
 		if (filter->rxq_index == rxq_index)
 			goto out;
@@ -314,7 +337,7 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
 		filter->rxq_index = rxq_index;
 	} else {
 		filter = mlx4_en_filter_alloc(priv, rxq_index,
-					      src_ip, dst_ip,
+					      src_ip, dst_ip, ip_proto,
 					      src_port, dst_port, flow_id);
 		if (!filter) {
 			ret = -ENOMEM;
@@ -332,8 +355,7 @@ err:
 	return ret;
 }
 
-void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
-			     struct mlx4_en_rx_ring *rx_ring)
+void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
 {
 	struct mlx4_en_filter *filter, *tmp;
 	LIST_HEAD(del_list);
@@ -417,7 +439,6 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
 	int err;
-	int idx;
 
 	en_dbg(HW, priv, "Killing VID:%d\n", vid);
 
@@ -425,10 +446,7 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
 
 	/* Remove VID from port VLAN filter */
 	mutex_lock(&mdev->state_lock);
-	if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
-		mlx4_unregister_vlan(mdev->dev, priv->port, idx);
-	else
-		en_dbg(HW, priv, "could not find vid %d in cache\n", vid);
+	mlx4_unregister_vlan(mdev->dev, priv->port, vid);
 
 	if (mdev->device_up && priv->port_up) {
 		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
@@ -1223,7 +1241,7 @@ static void mlx4_en_netpoll(struct net_device *dev)
 	int i;
 
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		cq = &priv->rx_cq[i];
+		cq = priv->rx_cq[i];
 		spin_lock_irqsave(&cq->lock, flags);
 		napi_synchronize(&cq->napi);
 		mlx4_en_process_rx_cq(dev, cq, 0);
@@ -1245,8 +1263,8 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
 			continue;
 		en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
-			i, priv->tx_ring[i].qpn, priv->tx_ring[i].cqn,
-			priv->tx_ring[i].cons, priv->tx_ring[i].prod);
+			i, priv->tx_ring[i]->qpn, priv->tx_ring[i]->cqn,
+			priv->tx_ring[i]->cons, priv->tx_ring[i]->prod);
 	}
 
 	priv->port_stats.tx_timeout++;
@@ -1286,7 +1304,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
 
 	/* Setup cq moderation params */
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		cq = &priv->rx_cq[i];
+		cq = priv->rx_cq[i];
 		cq->moder_cnt = priv->rx_frames;
 		cq->moder_time = priv->rx_usecs;
 		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
@@ -1295,7 +1313,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
 	}
 
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		cq = &priv->tx_cq[i];
+		cq = priv->tx_cq[i];
 		cq->moder_cnt = priv->tx_frames;
 		cq->moder_time = priv->tx_usecs;
 	}
@@ -1329,8 +1347,8 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 
 	for (ring = 0; ring < priv->rx_ring_num; ring++) {
 		spin_lock_bh(&priv->stats_lock);
-		rx_packets = priv->rx_ring[ring].packets;
-		rx_bytes = priv->rx_ring[ring].bytes;
+		rx_packets = priv->rx_ring[ring]->packets;
+		rx_bytes = priv->rx_ring[ring]->bytes;
 		spin_unlock_bh(&priv->stats_lock);
 
 		rx_pkt_diff = ((unsigned long) (rx_packets -
@@ -1359,7 +1377,7 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 
 		if (moder_time != priv->last_moder_time[ring]) {
 			priv->last_moder_time[ring] = moder_time;
-			cq = &priv->rx_cq[ring];
+			cq = priv->rx_cq[ring];
 			cq->moder_time = moder_time;
 			cq->moder_cnt = priv->rx_frames;
 			err = mlx4_en_set_cq_moder(priv, cq);
@@ -1482,7 +1500,7 @@ int mlx4_en_start_port(struct net_device *dev)
 		return err;
 	}
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		cq = &priv->rx_cq[i];
+		cq = priv->rx_cq[i];
 
 		mlx4_en_cq_init_lock(cq);
 
@@ -1500,7 +1518,7 @@ int mlx4_en_start_port(struct net_device *dev)
 			goto cq_err;
 		}
 		mlx4_en_arm_cq(priv, cq);
-		priv->rx_ring[i].cqn = cq->mcq.cqn;
+		priv->rx_ring[i]->cqn = cq->mcq.cqn;
 		++rx_index;
 	}
 
@@ -1526,7 +1544,7 @@ int mlx4_en_start_port(struct net_device *dev)
 	/* Configure tx cq's and rings */
 	for (i = 0; i < priv->tx_ring_num; i++) {
 		/* Configure cq */
-		cq = &priv->tx_cq[i];
+		cq = priv->tx_cq[i];
 		err = mlx4_en_activate_cq(priv, cq, i);
 		if (err) {
 			en_err(priv, "Failed allocating Tx CQ\n");
@@ -1542,7 +1560,7 @@ int mlx4_en_start_port(struct net_device *dev)
 		cq->buf->wqe_index = cpu_to_be16(0xffff);
 
 		/* Configure ring */
-		tx_ring = &priv->tx_ring[i];
+		tx_ring = priv->tx_ring[i];
 		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
 					       i / priv->num_tx_rings_p_up);
 		if (err) {
@@ -1612,8 +1630,8 @@ int mlx4_en_start_port(struct net_device *dev)
 
 tx_err:
 	while (tx_index--) {
-		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
-		mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
+		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
+		mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
 	}
 	mlx4_en_destroy_drop_qp(priv);
 rss_err:
@@ -1622,9 +1640,9 @@ mac_err:
 	mlx4_en_put_qp(priv);
 cq_err:
 	while (rx_index--)
-		mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
+		mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
 	for (i = 0; i < priv->rx_ring_num; i++)
-		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
+		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
 
 	return err; /* need to close devices */
 }
@@ -1720,25 +1738,25 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
 
 	/* Free TX Rings */
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
-		mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
+		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
+		mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
 	}
 	msleep(10);
 
 	for (i = 0; i < priv->tx_ring_num; i++)
-		mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);
+		mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);
 
 	/* Free RSS qps */
 	mlx4_en_release_rss_steer(priv);
 
 	/* Unregister Mac address for the port */
 	mlx4_en_put_qp(priv);
-	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN))
+	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN))
 		mdev->mac_removed[priv->port] = 1;
 
 	/* Free RX Rings */
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		struct mlx4_en_cq *cq = &priv->rx_cq[i];
+		struct mlx4_en_cq *cq = priv->rx_cq[i];
 
 		local_bh_disable();
 		while (!mlx4_en_cq_lock_napi(cq)) {
@@ -1749,7 +1767,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
 
 		while (test_bit(NAPI_STATE_SCHED, &cq->napi.state))
 			msleep(1);
-		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
+		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
 		mlx4_en_deactivate_cq(priv, cq);
 	}
 }
@@ -1787,15 +1805,15 @@ static void mlx4_en_clear_stats(struct net_device *dev)
 	memset(&priv->port_stats, 0, sizeof(priv->port_stats));
 
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		priv->tx_ring[i].bytes = 0;
-		priv->tx_ring[i].packets = 0;
-		priv->tx_ring[i].tx_csum = 0;
+		priv->tx_ring[i]->bytes = 0;
+		priv->tx_ring[i]->packets = 0;
+		priv->tx_ring[i]->tx_csum = 0;
 	}
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		priv->rx_ring[i].bytes = 0;
-		priv->rx_ring[i].packets = 0;
-		priv->rx_ring[i].csum_ok = 0;
-		priv->rx_ring[i].csum_none = 0;
+		priv->rx_ring[i]->bytes = 0;
+		priv->rx_ring[i]->packets = 0;
+		priv->rx_ring[i]->csum_ok = 0;
+		priv->rx_ring[i]->csum_none = 0;
 	}
 }
 
@@ -1852,17 +1870,17 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 #endif
 
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		if (priv->tx_ring[i].tx_info)
+		if (priv->tx_ring && priv->tx_ring[i])
 			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
-		if (priv->tx_cq[i].buf)
+		if (priv->tx_cq && priv->tx_cq[i])
 			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
 	}
 
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		if (priv->rx_ring[i].rx_info)
+		if (priv->rx_ring[i])
 			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
 				priv->prof->rx_ring_size, priv->stride);
-		if (priv->rx_cq[i].buf)
+		if (priv->rx_cq[i])
 			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
 	}
 
@@ -1877,6 +1895,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 	struct mlx4_en_port_profile *prof = priv->prof;
 	int i;
 	int err;
+	int node;
 
 	err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn);
 	if (err) {
@@ -1886,23 +1905,26 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 
 	/* Create tx Rings */
 	for (i = 0; i < priv->tx_ring_num; i++) {
+		node = cpu_to_node(i % num_online_cpus());
 		if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
-				      prof->tx_ring_size, i, TX))
+				      prof->tx_ring_size, i, TX, node))
 			goto err;
 
 		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
-					   prof->tx_ring_size, TXBB_SIZE))
+					   prof->tx_ring_size, TXBB_SIZE, node))
 			goto err;
 	}
 
 	/* Create rx Rings */
 	for (i = 0; i < priv->rx_ring_num; i++) {
+		node = cpu_to_node(i % num_online_cpus());
 		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
-				      prof->rx_ring_size, i, RX))
+				      prof->rx_ring_size, i, RX, node))
 			goto err;
 
 		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
-					   prof->rx_ring_size, priv->stride))
+					   prof->rx_ring_size, priv->stride,
+					   node))
 			goto err;
 	}
 
@@ -1918,6 +1940,20 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 
 err:
 	en_err(priv, "Failed to allocate NIC resources\n");
+	for (i = 0; i < priv->rx_ring_num; i++) {
+		if (priv->rx_ring[i])
+			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
+						prof->rx_ring_size,
+						priv->stride);
+		if (priv->rx_cq[i])
+			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
+	}
+	for (i = 0; i < priv->tx_ring_num; i++) {
+		if (priv->tx_ring[i])
+			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
+		if (priv->tx_cq[i])
+			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
+	}
 	return -ENOMEM;
 }
 
@@ -2211,13 +2247,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
 	priv->tx_ring_num = prof->tx_ring_num;
 
-	priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring) * MAX_TX_RINGS,
+	priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
 			GFP_KERNEL);
 	if (!priv->tx_ring) {
 		err = -ENOMEM;
 		goto out;
 	}
-	priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * MAX_TX_RINGS,
+	priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
 			      GFP_KERNEL);
 	if (!priv->tx_cq) {
 		err = -ENOMEM;
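
Most of the `.` to `->` churn in this file, and in en_ethtool.c, en_port.c, en_rx.c, en_selftest.c and en_tx.c, falls out of one structural change visible in the two kzalloc() hunks above: priv->tx_ring, priv->rx_ring, priv->tx_cq and priv->rx_cq become arrays of pointers, so each ring or CQ can be allocated separately on its own NUMA node. A reconstructed diff-style sketch of the corresponding mlx4_en.h change (which this patch set also touches; not the literal hunk):

	-	struct mlx4_en_tx_ring *tx_ring;	/* one contiguous array of structs */
	-	struct mlx4_en_rx_ring *rx_ring;
	+	struct mlx4_en_tx_ring **tx_ring;	/* array of pointers; each entry is */
	+	struct mlx4_en_rx_ring **rx_ring;	/* kzalloc_node()'d near its CPU */
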
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 331791467a22..dae1a1f4ae55 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -56,7 +56,6 @@ int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv)
 		return PTR_ERR(mailbox);
 
 	filter = mailbox->buf;
-	memset(filter, 0, sizeof(*filter));
 	for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) {
 		entry = 0;
 		for (j = 0; j < 32; j++)
@@ -81,7 +80,6 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
 	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
-	memset(mailbox->buf, 0, sizeof(*qport_context));
 	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
 			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
 			   MLX4_CMD_WRAPPED);
@@ -127,7 +125,6 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
-	memset(mailbox->buf, 0, sizeof(*mlx4_en_stats));
 	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
 			   MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
 			   MLX4_CMD_WRAPPED);
@@ -143,18 +140,18 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 	priv->port_stats.rx_chksum_good = 0;
 	priv->port_stats.rx_chksum_none = 0;
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		stats->rx_packets += priv->rx_ring[i].packets;
-		stats->rx_bytes += priv->rx_ring[i].bytes;
-		priv->port_stats.rx_chksum_good += priv->rx_ring[i].csum_ok;
-		priv->port_stats.rx_chksum_none += priv->rx_ring[i].csum_none;
+		stats->rx_packets += priv->rx_ring[i]->packets;
+		stats->rx_bytes += priv->rx_ring[i]->bytes;
+		priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok;
+		priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none;
 	}
 	stats->tx_packets = 0;
 	stats->tx_bytes = 0;
 	priv->port_stats.tx_chksum_offload = 0;
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		stats->tx_packets += priv->tx_ring[i].packets;
-		stats->tx_bytes += priv->tx_ring[i].bytes;
-		priv->port_stats.tx_chksum_offload += priv->tx_ring[i].tx_csum;
+		stats->tx_packets += priv->tx_ring[i]->packets;
+		stats->tx_bytes += priv->tx_ring[i]->bytes;
+		priv->port_stats.tx_chksum_offload += priv->tx_ring[i]->tx_csum;
 	}
 
 	stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index afe2efa69c86..07a1d0fbae47 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -264,7 +264,7 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
 
 	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
 		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
-			ring = &priv->rx_ring[ring_ind];
+			ring = priv->rx_ring[ring_ind];
 
 			if (mlx4_en_prepare_rx_desc(priv, ring,
 						    ring->actual_size,
@@ -289,7 +289,7 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
 
 reduce_rings:
 	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
-		ring = &priv->rx_ring[ring_ind];
+		ring = priv->rx_ring[ring_ind];
 		while (ring->actual_size > new_size) {
 			ring->actual_size--;
 			ring->prod--;
@@ -319,12 +319,23 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
 }
 
 int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
-			   struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
+			   struct mlx4_en_rx_ring **pring,
+			   u32 size, u16 stride, int node)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_rx_ring *ring;
 	int err = -ENOMEM;
 	int tmp;
 
+	ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
+	if (!ring) {
+		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+		if (!ring) {
+			en_err(priv, "Failed to allocate RX ring structure\n");
+			return -ENOMEM;
+		}
+	}
+
 	ring->prod = 0;
 	ring->cons = 0;
 	ring->size = size;
@@ -335,17 +346,25 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 
 	tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
 					sizeof(struct mlx4_en_rx_alloc));
-	ring->rx_info = vmalloc(tmp);
-	if (!ring->rx_info)
-		return -ENOMEM;
+	ring->rx_info = vmalloc_node(tmp, node);
+	if (!ring->rx_info) {
+		ring->rx_info = vmalloc(tmp);
+		if (!ring->rx_info) {
+			err = -ENOMEM;
+			goto err_ring;
+		}
+	}
 
 	en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
 		 ring->rx_info, tmp);
 
+	/* Allocate HW buffers on provided NUMA node */
+	set_dev_node(&mdev->dev->pdev->dev, node);
 	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
 				 ring->buf_size, 2 * PAGE_SIZE);
+	set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
 	if (err)
-		goto err_ring;
+		goto err_info;
 
 	err = mlx4_en_map_buffer(&ring->wqres.buf);
 	if (err) {
@@ -356,13 +375,18 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 
 	ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;
 
+	*pring = ring;
 	return 0;
 
 err_hwq:
 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
-err_ring:
+err_info:
 	vfree(ring->rx_info);
 	ring->rx_info = NULL;
+err_ring:
+	kfree(ring);
+	*pring = NULL;
+
 	return err;
 }
 
@@ -376,12 +400,12 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 			DS_SIZE * priv->num_frags);
 
 	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
-		ring = &priv->rx_ring[ring_ind];
+		ring = priv->rx_ring[ring_ind];
 
 		ring->prod = 0;
 		ring->cons = 0;
 		ring->actual_size = 0;
-		ring->cqn = priv->rx_cq[ring_ind].mcq.cqn;
+		ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;
 
 		ring->stride = stride;
 		if (ring->stride <= TXBB_SIZE)
@@ -412,7 +436,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 		goto err_buffers;
 
 	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
-		ring = &priv->rx_ring[ring_ind];
+		ring = priv->rx_ring[ring_ind];
 
 		ring->size_mask = ring->actual_size - 1;
 		mlx4_en_update_rx_prod_db(ring);
@@ -422,30 +446,34 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 
 err_buffers:
 	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
-		mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]);
+		mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);
 
 	ring_ind = priv->rx_ring_num - 1;
 err_allocator:
 	while (ring_ind >= 0) {
-		if (priv->rx_ring[ring_ind].stride <= TXBB_SIZE)
-			priv->rx_ring[ring_ind].buf -= TXBB_SIZE;
-		mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]);
+		if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE)
+			priv->rx_ring[ring_ind]->buf -= TXBB_SIZE;
+		mlx4_en_destroy_allocator(priv, priv->rx_ring[ring_ind]);
 		ring_ind--;
 	}
 	return err;
 }
 
 void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
-			     struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
+			     struct mlx4_en_rx_ring **pring,
+			     u32 size, u16 stride)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_rx_ring *ring = *pring;
 
 	mlx4_en_unmap_buffer(&ring->wqres.buf);
 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
 	vfree(ring->rx_info);
 	ring->rx_info = NULL;
+	kfree(ring);
+	*pring = NULL;
 #ifdef CONFIG_RFS_ACCEL
-	mlx4_en_cleanup_filters(priv, ring);
+	mlx4_en_cleanup_filters(priv);
 #endif
 }
 
@@ -592,7 +620,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_cqe *cqe;
-	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
+	struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
 	struct mlx4_en_rx_alloc *frags;
 	struct mlx4_en_rx_desc *rx_desc;
 	struct sk_buff *skb;
@@ -991,7 +1019,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 
 	for (i = 0; i < priv->rx_ring_num; i++) {
 		qpn = rss_map->base_qpn + i;
-		err = mlx4_en_config_rss_qp(priv, qpn, &priv->rx_ring[i],
+		err = mlx4_en_config_rss_qp(priv, qpn, priv->rx_ring[i],
 					    &rss_map->state[i],
 					    &rss_map->qps[i]);
 		if (err)
@@ -1008,7 +1036,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 	}
 	rss_map->indir_qp.event = mlx4_en_sqp_event;
 	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
-				priv->rx_ring[0].cqn, -1, &context);
+				priv->rx_ring[0]->cqn, -1, &context);
 
 	if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
 		rss_rings = priv->rx_ring_num;
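
The set_dev_node() bracketing in en_rx.c above (and in en_cq.c earlier and en_tx.c below) exists because the DMA API generally allocates coherent memory near dev_to_node() of the allocating device. Temporarily pointing the PCI device at the ring's node steers just that one hardware-queue allocation, after which the device's own node is restored. The pattern, condensed from the hunks (error handling omitted):

	set_dev_node(&mdev->dev->pdev->dev, node);	/* steer this allocation */
	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
				 ring->buf_size, 2 * PAGE_SIZE);
	set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node); /* restore */
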
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 2448f0d669e6..40626690e8a8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -156,7 +156,7 @@ retry_tx:
 	 * since we turned the carrier off */
 	msleep(200);
 	for (i = 0; i < priv->tx_ring_num && carrier_ok; i++) {
-		tx_ring = &priv->tx_ring[i];
+		tx_ring = priv->tx_ring[i];
 		if (tx_ring->prod != (tx_ring->cons + tx_ring->last_nr_txbb))
 			goto retry_tx;
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 0698c82d6ff1..f54ebd5a1702 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -54,13 +54,23 @@ module_param_named(inline_thold, inline_thold, int, 0444);
 MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
 
 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
-			   struct mlx4_en_tx_ring *ring, int qpn, u32 size,
-			   u16 stride)
+			   struct mlx4_en_tx_ring **pring, int qpn, u32 size,
+			   u16 stride, int node)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_tx_ring *ring;
 	int tmp;
 	int err;
 
+	ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
+	if (!ring) {
+		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+		if (!ring) {
+			en_err(priv, "Failed allocating TX ring\n");
+			return -ENOMEM;
+		}
+	}
+
 	ring->size = size;
 	ring->size_mask = size - 1;
 	ring->stride = stride;
@@ -68,22 +78,33 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 	inline_thold = min(inline_thold, MAX_INLINE);
 
 	tmp = size * sizeof(struct mlx4_en_tx_info);
-	ring->tx_info = vmalloc(tmp);
-	if (!ring->tx_info)
-		return -ENOMEM;
+	ring->tx_info = vmalloc_node(tmp, node);
+	if (!ring->tx_info) {
+		ring->tx_info = vmalloc(tmp);
+		if (!ring->tx_info) {
+			err = -ENOMEM;
+			goto err_ring;
+		}
+	}
 
 	en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
 		 ring->tx_info, tmp);
 
-	ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
+	ring->bounce_buf = kmalloc_node(MAX_DESC_SIZE, GFP_KERNEL, node);
 	if (!ring->bounce_buf) {
-		err = -ENOMEM;
-		goto err_tx;
+		ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
+		if (!ring->bounce_buf) {
+			err = -ENOMEM;
+			goto err_info;
+		}
 	}
 	ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);
 
+	/* Allocate HW buffers on provided NUMA node */
+	set_dev_node(&mdev->dev->pdev->dev, node);
 	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
 				 2 * PAGE_SIZE);
+	set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
 	if (err) {
 		en_err(priv, "Failed allocating hwq resources\n");
 		goto err_bounce;
@@ -109,7 +130,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
109 } 130 }
110 ring->qp.event = mlx4_en_sqp_event; 131 ring->qp.event = mlx4_en_sqp_event;
111 132
112 err = mlx4_bf_alloc(mdev->dev, &ring->bf); 133 err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
113 if (err) { 134 if (err) {
114 en_dbg(DRV, priv, "working without blueflame (%d)", err); 135 en_dbg(DRV, priv, "working without blueflame (%d)", err);
115 ring->bf.uar = &mdev->priv_uar; 136 ring->bf.uar = &mdev->priv_uar;
@@ -120,6 +141,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
120 141
121 ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type; 142 ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
122 143
144 *pring = ring;
123 return 0; 145 return 0;
124 146
125err_map: 147err_map:
@@ -129,16 +151,20 @@ err_hwq_res:
129err_bounce: 151err_bounce:
130 kfree(ring->bounce_buf); 152 kfree(ring->bounce_buf);
131 ring->bounce_buf = NULL; 153 ring->bounce_buf = NULL;
132err_tx: 154err_info:
133 vfree(ring->tx_info); 155 vfree(ring->tx_info);
134 ring->tx_info = NULL; 156 ring->tx_info = NULL;
157err_ring:
158 kfree(ring);
159 *pring = NULL;
135 return err; 160 return err;
136} 161}
137 162
138void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, 163void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
139 struct mlx4_en_tx_ring *ring) 164 struct mlx4_en_tx_ring **pring)
140{ 165{
141 struct mlx4_en_dev *mdev = priv->mdev; 166 struct mlx4_en_dev *mdev = priv->mdev;
167 struct mlx4_en_tx_ring *ring = *pring;
142 en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn); 168 en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
143 169
144 if (ring->bf_enabled) 170 if (ring->bf_enabled)
@@ -151,6 +177,8 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
151 ring->bounce_buf = NULL; 177 ring->bounce_buf = NULL;
152 vfree(ring->tx_info); 178 vfree(ring->tx_info);
153 ring->tx_info = NULL; 179 ring->tx_info = NULL;
180 kfree(ring);
181 *pring = NULL;
154} 182}
155 183
156int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, 184int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
@@ -330,7 +358,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
330{ 358{
331 struct mlx4_en_priv *priv = netdev_priv(dev); 359 struct mlx4_en_priv *priv = netdev_priv(dev);
332 struct mlx4_cq *mcq = &cq->mcq; 360 struct mlx4_cq *mcq = &cq->mcq;
333 struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring]; 361 struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
334 struct mlx4_cqe *cqe; 362 struct mlx4_cqe *cqe;
335 u16 index; 363 u16 index;
336 u16 new_index, ring_index, stamp_index; 364 u16 new_index, ring_index, stamp_index;
@@ -622,7 +650,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
622 } 650 }
623 651
624 tx_ind = skb->queue_mapping; 652 tx_ind = skb->queue_mapping;
625 ring = &priv->tx_ring[tx_ind]; 653 ring = priv->tx_ring[tx_ind];
626 if (vlan_tx_tag_present(skb)) 654 if (vlan_tx_tag_present(skb))
627 vlan_tag = vlan_tx_tag_get(skb); 655 vlan_tag = vlan_tx_tag_get(skb);
628 656
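
The en_tx.c hunks above repeat one pattern three times (the ring struct, tx_info, the bounce buffer): try the allocation on the ring's home NUMA node first, then fall back to any node, because node-local placement is a performance preference, not a correctness requirement. A minimal sketch of the pattern outside the driver (the helper name is invented; kzalloc_node()/kzalloc() are the real kernel APIs):

#include <linux/slab.h>

/* Prefer memory on 'node', but never fail just because that node is
 * memoryless or exhausted -- mirrors the kzalloc_node()/kzalloc() and
 * vmalloc_node()/vmalloc() pairs in mlx4_en_create_tx_ring() above.
 */
static void *alloc_pref_node(size_t size, gfp_t gfp, int node)
{
        void *p = kzalloc_node(size, gfp, node);

        if (!p)
                p = kzalloc(size, gfp);
        return p;
}

The set_dev_node() bracketing around mlx4_alloc_hwq_res() applies the same idea to DMA memory: the PCI device's node is temporarily retargeted so the hardware queue lands near the ring, then restored.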
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 0416c5b3b35c..c9cdb2a2c596 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -936,7 +936,6 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
936 if (err) 936 if (err)
937 goto err_out_free_mtt; 937 goto err_out_free_mtt;
938 938
939 memset(eq_context, 0, sizeof *eq_context);
940 eq_context->flags = cpu_to_be32(MLX4_EQ_STATUS_OK | 939 eq_context->flags = cpu_to_be32(MLX4_EQ_STATUS_OK |
941 MLX4_EQ_STATE_ARMED); 940 MLX4_EQ_STATE_ARMED);
942 eq_context->log_eq_size = ilog2(eq->nent); 941 eq_context->log_eq_size = ilog2(eq->nent);
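
This memset removal, like the ones dropped from fw.c, mcg.c, mr.c and port.c below, is only safe if the command mailbox buffer arrives pre-zeroed from its allocator. A sketch of that assumption (cmd_pool() is invented for illustration; struct mlx4_cmd_mailbox and the error-pointer convention are the driver's):

#include <linux/slab.h>
#include <linux/dmapool.h>

struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
{
        struct mlx4_cmd_mailbox *mailbox;

        mailbox = kmalloc(sizeof(*mailbox), GFP_KERNEL);
        if (!mailbox)
                return ERR_PTR(-ENOMEM);

        /* zeroing here is what lets every caller drop its own memset() */
        mailbox->buf = dma_pool_zalloc(cmd_pool(dev), GFP_KERNEL,
                                       &mailbox->dma);
        if (!mailbox->buf) {
                kfree(mailbox);
                return ERR_PTR(-ENOMEM);
        }
        return mailbox;
}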
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 0d63daa2f422..fda26679f7d5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -159,8 +159,6 @@ int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
159 return PTR_ERR(mailbox); 159 return PTR_ERR(mailbox);
160 inbox = mailbox->buf; 160 inbox = mailbox->buf;
161 161
162 memset(inbox, 0, MOD_STAT_CFG_IN_SIZE);
163
164 MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET); 162 MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET);
165 MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET); 163 MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);
166 164
@@ -177,6 +175,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
177 struct mlx4_cmd_mailbox *outbox, 175 struct mlx4_cmd_mailbox *outbox,
178 struct mlx4_cmd_info *cmd) 176 struct mlx4_cmd_info *cmd)
179{ 177{
178 struct mlx4_priv *priv = mlx4_priv(dev);
180 u8 field; 179 u8 field;
181 u32 size; 180 u32 size;
182 int err = 0; 181 int err = 0;
@@ -185,18 +184,26 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
185#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1 184#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1
186#define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4 185#define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4
187#define QUERY_FUNC_CAP_FMR_OFFSET 0x8 186#define QUERY_FUNC_CAP_FMR_OFFSET 0x8
188#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x10 187#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP 0x10
189#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x14 188#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP 0x14
190#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x18 189#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP 0x18
191#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET 0x20 190#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP 0x20
192#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x24 191#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP 0x24
193#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x28 192#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP 0x28
194#define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c 193#define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c
195#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0x30 194#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0x30
196 195
196#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x50
197#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x54
198#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x58
199#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET 0x60
200#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x64
201#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x68
202
197#define QUERY_FUNC_CAP_FMR_FLAG 0x80 203#define QUERY_FUNC_CAP_FMR_FLAG 0x80
198#define QUERY_FUNC_CAP_FLAG_RDMA 0x40 204#define QUERY_FUNC_CAP_FLAG_RDMA 0x40
199#define QUERY_FUNC_CAP_FLAG_ETH 0x80 205#define QUERY_FUNC_CAP_FLAG_ETH 0x80
206#define QUERY_FUNC_CAP_FLAG_QUOTAS 0x10
200 207
201/* when opcode modifier = 1 */ 208/* when opcode modifier = 1 */
202#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3 209#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3
@@ -237,8 +244,9 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
237 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_PROXY); 244 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_PROXY);
238 245
239 } else if (vhcr->op_modifier == 0) { 246 } else if (vhcr->op_modifier == 0) {
240 /* enable rdma and ethernet interfaces */ 247 /* enable rdma and ethernet interfaces, and new quota locations */
241 field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA); 248 field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
249 QUERY_FUNC_CAP_FLAG_QUOTAS);
242 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET); 250 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
243 251
244 field = dev->caps.num_ports; 252 field = dev->caps.num_ports;
@@ -250,14 +258,20 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
250 field = 0; /* protected FMR support not available as yet */ 258 field = 0; /* protected FMR support not available as yet */
251 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET); 259 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET);
252 260
253 size = dev->caps.num_qps; 261 size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave];
254 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET); 262 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
263 size = dev->caps.num_qps;
264 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
255 265
256 size = dev->caps.num_srqs; 266 size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave];
257 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET); 267 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
268 size = dev->caps.num_srqs;
269 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
258 270
259 size = dev->caps.num_cqs; 271 size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave];
260 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET); 272 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
273 size = dev->caps.num_cqs;
274 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
261 275
262 size = dev->caps.num_eqs; 276 size = dev->caps.num_eqs;
263 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET); 277 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
@@ -265,14 +279,19 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
265 size = dev->caps.reserved_eqs; 279 size = dev->caps.reserved_eqs;
266 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); 280 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
267 281
268 size = dev->caps.num_mpts; 282 size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave];
269 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET); 283 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
284 size = dev->caps.num_mpts;
285 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
270 286
271 size = dev->caps.num_mtts; 287 size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave];
272 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET); 288 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
289 size = dev->caps.num_mtts;
290 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
273 291
274 size = dev->caps.num_mgms + dev->caps.num_amgms; 292 size = dev->caps.num_mgms + dev->caps.num_amgms;
275 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET); 293 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
294 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
276 295
277 } else 296 } else
278 err = -EINVAL; 297 err = -EINVAL;
@@ -287,7 +306,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
287 u32 *outbox; 306 u32 *outbox;
288 u8 field, op_modifier; 307 u8 field, op_modifier;
289 u32 size; 308 u32 size;
290 int err = 0; 309 int err = 0, quotas = 0;
291 310
292 op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */ 311 op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */
293 312
@@ -311,6 +330,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
311 goto out; 330 goto out;
312 } 331 }
313 func_cap->flags = field; 332 func_cap->flags = field;
333 quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS);
314 334
315 MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); 335 MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
316 func_cap->num_ports = field; 336 func_cap->num_ports = field;
@@ -318,29 +338,50 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
318 MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET); 338 MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
319 func_cap->pf_context_behaviour = size; 339 func_cap->pf_context_behaviour = size;
320 340
321 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET); 341 if (quotas) {
322 func_cap->qp_quota = size & 0xFFFFFF; 342 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
343 func_cap->qp_quota = size & 0xFFFFFF;
323 344
324 MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET); 345 MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
325 func_cap->srq_quota = size & 0xFFFFFF; 346 func_cap->srq_quota = size & 0xFFFFFF;
326 347
327 MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET); 348 MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
328 func_cap->cq_quota = size & 0xFFFFFF; 349 func_cap->cq_quota = size & 0xFFFFFF;
329 350
351 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
352 func_cap->mpt_quota = size & 0xFFFFFF;
353
354 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
355 func_cap->mtt_quota = size & 0xFFFFFF;
356
357 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
358 func_cap->mcg_quota = size & 0xFFFFFF;
359
360 } else {
361 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
362 func_cap->qp_quota = size & 0xFFFFFF;
363
364 MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
365 func_cap->srq_quota = size & 0xFFFFFF;
366
367 MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
368 func_cap->cq_quota = size & 0xFFFFFF;
369
370 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
371 func_cap->mpt_quota = size & 0xFFFFFF;
372
373 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
374 func_cap->mtt_quota = size & 0xFFFFFF;
375
376 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
377 func_cap->mcg_quota = size & 0xFFFFFF;
378 }
330 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET); 379 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
331 func_cap->max_eq = size & 0xFFFFFF; 380 func_cap->max_eq = size & 0xFFFFFF;
332 381
333 MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); 382 MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
334 func_cap->reserved_eq = size & 0xFFFFFF; 383 func_cap->reserved_eq = size & 0xFFFFFF;
335 384
336 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
337 func_cap->mpt_quota = size & 0xFFFFFF;
338
339 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
340 func_cap->mtt_quota = size & 0xFFFFFF;
341
342 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
343 func_cap->mcg_quota = size & 0xFFFFFF;
344 goto out; 385 goto out;
345 } 386 }
346 387
@@ -652,7 +693,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
652 QUERY_DEV_CAP_RSVD_LKEY_OFFSET); 693 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
653 MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC); 694 MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
654 if (field & 1<<6) 695 if (field & 1<<6)
655 dev_cap->flags2 |= MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN; 696 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
656 MLX4_GET(dev_cap->max_icm_sz, outbox, 697 MLX4_GET(dev_cap->max_icm_sz, outbox,
657 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET); 698 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
658 if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS) 699 if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
@@ -924,7 +965,6 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
924 mailbox = mlx4_alloc_cmd_mailbox(dev); 965 mailbox = mlx4_alloc_cmd_mailbox(dev);
925 if (IS_ERR(mailbox)) 966 if (IS_ERR(mailbox))
926 return PTR_ERR(mailbox); 967 return PTR_ERR(mailbox);
927 memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
928 pages = mailbox->buf; 968 pages = mailbox->buf;
929 969
930 for (mlx4_icm_first(icm, &iter); 970 for (mlx4_icm_first(icm, &iter);
@@ -1273,8 +1313,6 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
1273 return PTR_ERR(mailbox); 1313 return PTR_ERR(mailbox);
1274 inbox = mailbox->buf; 1314 inbox = mailbox->buf;
1275 1315
1276 memset(inbox, 0, INIT_HCA_IN_SIZE);
1277
1278 *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION; 1316 *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;
1279 1317
1280 *((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) = 1318 *((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
@@ -1573,8 +1611,6 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
1573 return PTR_ERR(mailbox); 1611 return PTR_ERR(mailbox);
1574 inbox = mailbox->buf; 1612 inbox = mailbox->buf;
1575 1613
1576 memset(inbox, 0, INIT_PORT_IN_SIZE);
1577
1578 flags = 0; 1614 flags = 0;
1579 flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT; 1615 flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
1580 flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT; 1616 flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
@@ -1713,7 +1749,6 @@ void mlx4_opreq_action(struct work_struct *work)
1713 u32 *outbox; 1749 u32 *outbox;
1714 u32 modifier; 1750 u32 modifier;
1715 u16 token; 1751 u16 token;
1716 u16 type_m;
1717 u16 type; 1752 u16 type;
1718 int err; 1753 int err;
1719 u32 num_qps; 1754 u32 num_qps;
@@ -1746,7 +1781,6 @@ void mlx4_opreq_action(struct work_struct *work)
1746 MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET); 1781 MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
1747 MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET); 1782 MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
1748 MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET); 1783 MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET);
1749 type_m = type >> 12;
1750 type &= 0xfff; 1784 type &= 0xfff;
1751 1785
1752 switch (type) { 1786 switch (type) {
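
The QUERY_FUNC_CAP rework above is a two-dialect protocol: the PF advertises QUERY_FUNC_CAP_FLAG_QUOTAS and reports per-slave quotas at the new offsets (0x50-0x68) while still writing the raw device maxima at the old offsets, now suffixed _DEP, so a new slave reads real quotas and an old slave keeps working off the deprecated fields. The flag-gated read reduced to one field (macros as defined in this file; surrounding declarations elided):

MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
quotas = !!(field & QUERY_FUNC_CAP_FLAG_QUOTAS);

MLX4_GET(size, outbox, quotas ? QUERY_FUNC_CAP_QP_QUOTA_OFFSET
                              : QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
func_cap->qp_quota = size & 0xFFFFFF;   /* quota fields are 24 bits wide */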
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index 31d02649be41..5fbf4924c272 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -93,13 +93,17 @@ void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
93 kfree(icm); 93 kfree(icm);
94} 94}
95 95
96static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask) 96static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order,
97 gfp_t gfp_mask, int node)
97{ 98{
98 struct page *page; 99 struct page *page;
99 100
100 page = alloc_pages(gfp_mask, order); 101 page = alloc_pages_node(node, gfp_mask, order);
101 if (!page) 102 if (!page) {
102 return -ENOMEM; 103 page = alloc_pages(gfp_mask, order);
104 if (!page)
105 return -ENOMEM;
106 }
103 107
104 sg_set_page(mem, page, PAGE_SIZE << order, 0); 108 sg_set_page(mem, page, PAGE_SIZE << order, 0);
105 return 0; 109 return 0;
@@ -130,9 +134,15 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
130 /* We use sg_set_buf for coherent allocs, which assumes low memory */ 134 /* We use sg_set_buf for coherent allocs, which assumes low memory */
131 BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM)); 135 BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));
132 136
133 icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); 137 icm = kmalloc_node(sizeof(*icm),
134 if (!icm) 138 gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN),
135 return NULL; 139 dev->numa_node);
140 if (!icm) {
141 icm = kmalloc(sizeof(*icm),
142 gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
143 if (!icm)
144 return NULL;
145 }
136 146
137 icm->refcount = 0; 147 icm->refcount = 0;
138 INIT_LIST_HEAD(&icm->chunk_list); 148 INIT_LIST_HEAD(&icm->chunk_list);
@@ -141,10 +151,17 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
141 151
142 while (npages > 0) { 152 while (npages > 0) {
143 if (!chunk) { 153 if (!chunk) {
144 chunk = kmalloc(sizeof *chunk, 154 chunk = kmalloc_node(sizeof(*chunk),
145 gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); 155 gfp_mask & ~(__GFP_HIGHMEM |
146 if (!chunk) 156 __GFP_NOWARN),
147 goto fail; 157 dev->numa_node);
158 if (!chunk) {
159 chunk = kmalloc(sizeof(*chunk),
160 gfp_mask & ~(__GFP_HIGHMEM |
161 __GFP_NOWARN));
162 if (!chunk)
163 goto fail;
164 }
148 165
149 sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN); 166 sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
150 chunk->npages = 0; 167 chunk->npages = 0;
@@ -161,7 +178,8 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
161 cur_order, gfp_mask); 178 cur_order, gfp_mask);
162 else 179 else
163 ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages], 180 ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
164 cur_order, gfp_mask); 181 cur_order, gfp_mask,
182 dev->numa_node);
165 183
166 if (ret) { 184 if (ret) {
167 if (--cur_order < 0) 185 if (--cur_order < 0)
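
The ICM allocator around this hunk (context truncated above) already retries a failed high-order allocation at the next lower order; the NUMA preference added here nests inside that loop. A compact sketch of the combined strategy, with an invented function boundary:

#include <linux/gfp.h>

/* Prefer big contiguous chunks on the home node, degrade gracefully to
 * any node and then to smaller orders.  The driver open-codes this via
 * alloc_pages_node()/alloc_pages() and the --cur_order retry above.
 */
static struct page *icm_alloc_order(int *cur_order, gfp_t gfp, int node)
{
        struct page *page;

        while (*cur_order >= 0) {
                page = alloc_pages_node(node, gfp, *cur_order);
                if (!page)
                        page = alloc_pages(gfp, *cur_order);
                if (page)
                        return page;
                --*cur_order;   /* shrink the request and retry */
        }
        return NULL;
}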
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 60c9f4f103fc..5789ea2c934d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -42,6 +42,7 @@
42#include <linux/io-mapping.h> 42#include <linux/io-mapping.h>
43#include <linux/delay.h> 43#include <linux/delay.h>
44#include <linux/netdevice.h> 44#include <linux/netdevice.h>
45#include <linux/kmod.h>
45 46
46#include <linux/mlx4/device.h> 47#include <linux/mlx4/device.h>
47#include <linux/mlx4/doorbell.h> 48#include <linux/mlx4/doorbell.h>
@@ -561,13 +562,17 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
561 } 562 }
562 563
563 dev->caps.num_ports = func_cap.num_ports; 564 dev->caps.num_ports = func_cap.num_ports;
564 dev->caps.num_qps = func_cap.qp_quota; 565 dev->quotas.qp = func_cap.qp_quota;
565 dev->caps.num_srqs = func_cap.srq_quota; 566 dev->quotas.srq = func_cap.srq_quota;
566 dev->caps.num_cqs = func_cap.cq_quota; 567 dev->quotas.cq = func_cap.cq_quota;
567 dev->caps.num_eqs = func_cap.max_eq; 568 dev->quotas.mpt = func_cap.mpt_quota;
568 dev->caps.reserved_eqs = func_cap.reserved_eq; 569 dev->quotas.mtt = func_cap.mtt_quota;
569 dev->caps.num_mpts = func_cap.mpt_quota; 570 dev->caps.num_qps = 1 << hca_param.log_num_qps;
570 dev->caps.num_mtts = func_cap.mtt_quota; 571 dev->caps.num_srqs = 1 << hca_param.log_num_srqs;
572 dev->caps.num_cqs = 1 << hca_param.log_num_cqs;
573 dev->caps.num_mpts = 1 << hca_param.log_mpt_sz;
574 dev->caps.num_eqs = func_cap.max_eq;
575 dev->caps.reserved_eqs = func_cap.reserved_eq;
571 dev->caps.num_pds = MLX4_NUM_PDS; 576 dev->caps.num_pds = MLX4_NUM_PDS;
572 dev->caps.num_mgms = 0; 577 dev->caps.num_mgms = 0;
573 dev->caps.num_amgms = 0; 578 dev->caps.num_amgms = 0;
@@ -650,6 +655,27 @@ err_mem:
650 return err; 655 return err;
651} 656}
652 657
658static void mlx4_request_modules(struct mlx4_dev *dev)
659{
660 int port;
661 int has_ib_port = false;
662 int has_eth_port = false;
663#define EN_DRV_NAME "mlx4_en"
664#define IB_DRV_NAME "mlx4_ib"
665
666 for (port = 1; port <= dev->caps.num_ports; port++) {
667 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
668 has_ib_port = true;
669 else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
670 has_eth_port = true;
671 }
672
673 if (has_ib_port)
674 request_module_nowait(IB_DRV_NAME);
675 if (has_eth_port)
676 request_module_nowait(EN_DRV_NAME);
677}
678
653/* 679/*
654 * Change the port configuration of the device. 680 * Change the port configuration of the device.
655 * Every user of this function must hold the port mutex. 681 * Every user of this function must hold the port mutex.
@@ -681,6 +707,11 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
681 } 707 }
682 mlx4_set_port_mask(dev); 708 mlx4_set_port_mask(dev);
683 err = mlx4_register_device(dev); 709 err = mlx4_register_device(dev);
710 if (err) {
711 mlx4_err(dev, "Failed to register device\n");
712 goto out;
713 }
714 mlx4_request_modules(dev);
684 } 715 }
685 716
686out: 717out:
@@ -2075,9 +2106,15 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2075 "aborting.\n"); 2106 "aborting.\n");
2076 return err; 2107 return err;
2077 } 2108 }
2078 if (num_vfs > MLX4_MAX_NUM_VF) { 2109
2079 printk(KERN_ERR "There are more VF's (%d) than allowed(%d)\n", 2110 /* Due to requirement that all VFs and the PF are *guaranteed* 2 MACs
2080 num_vfs, MLX4_MAX_NUM_VF); 2111 * per port, we must limit the number of VFs to 63 (since there are
2112 * 128 MACs)
2113 */
2114 if (num_vfs >= MLX4_MAX_NUM_VF) {
2115 dev_err(&pdev->dev,
2116 "Requested more VF's (%d) than allowed (%d)\n",
2117 num_vfs, MLX4_MAX_NUM_VF - 1);
2081 return -EINVAL; 2118 return -EINVAL;
2082 } 2119 }
2083 2120
@@ -2154,6 +2191,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2154 mutex_init(&priv->bf_mutex); 2191 mutex_init(&priv->bf_mutex);
2155 2192
2156 dev->rev_id = pdev->revision; 2193 dev->rev_id = pdev->revision;
2194 dev->numa_node = dev_to_node(&pdev->dev);
2157 /* Detect if this device is a virtual function */ 2195 /* Detect if this device is a virtual function */
2158 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { 2196 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
2159 /* When acting as pf, we normally skip vfs unless explicitly 2197 /* When acting as pf, we normally skip vfs unless explicitly
@@ -2295,6 +2333,8 @@ slave_start:
2295 if (err) 2333 if (err)
2296 goto err_steer; 2334 goto err_steer;
2297 2335
2336 mlx4_init_quotas(dev);
2337
2298 for (port = 1; port <= dev->caps.num_ports; port++) { 2338 for (port = 1; port <= dev->caps.num_ports; port++) {
2299 err = mlx4_init_port_info(dev, port); 2339 err = mlx4_init_port_info(dev, port);
2300 if (err) 2340 if (err)
@@ -2305,6 +2345,8 @@ slave_start:
2305 if (err) 2345 if (err)
2306 goto err_port; 2346 goto err_port;
2307 2347
2348 mlx4_request_modules(dev);
2349
2308 mlx4_sense_init(dev); 2350 mlx4_sense_init(dev);
2309 mlx4_start_sense(dev); 2351 mlx4_start_sense(dev);
2310 2352
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 55f6245efb6c..acf9d5f1f922 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -506,7 +506,6 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
506 goto out_list; 506 goto out_list;
507 } 507 }
508 mgm = mailbox->buf; 508 mgm = mailbox->buf;
509 memset(mgm, 0, sizeof *mgm);
510 members_count = 0; 509 members_count = 0;
511 list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) 510 list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
512 mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); 511 mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
@@ -645,7 +644,7 @@ static const u8 __promisc_mode[] = {
645int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev, 644int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
646 enum mlx4_net_trans_promisc_mode flow_type) 645 enum mlx4_net_trans_promisc_mode flow_type)
647{ 646{
648 if (flow_type >= MLX4_FS_MODE_NUM || flow_type < 0) { 647 if (flow_type >= MLX4_FS_MODE_NUM) {
649 mlx4_err(dev, "Invalid flow type. type = %d\n", flow_type); 648 mlx4_err(dev, "Invalid flow type. type = %d\n", flow_type);
650 return -EINVAL; 649 return -EINVAL;
651 } 650 }
@@ -681,7 +680,7 @@ const u16 __sw_id_hw[] = {
681int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev, 680int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
682 enum mlx4_net_trans_rule_id id) 681 enum mlx4_net_trans_rule_id id)
683{ 682{
684 if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) { 683 if (id >= MLX4_NET_TRANS_RULE_NUM) {
685 mlx4_err(dev, "Invalid network rule id. id = %d\n", id); 684 mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
686 return -EINVAL; 685 return -EINVAL;
687 } 686 }
@@ -706,7 +705,7 @@ static const int __rule_hw_sz[] = {
706int mlx4_hw_rule_sz(struct mlx4_dev *dev, 705int mlx4_hw_rule_sz(struct mlx4_dev *dev,
707 enum mlx4_net_trans_rule_id id) 706 enum mlx4_net_trans_rule_id id)
708{ 707{
709 if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) { 708 if (id >= MLX4_NET_TRANS_RULE_NUM) {
710 mlx4_err(dev, "Invalid network rule id. id = %d\n", id); 709 mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
711 return -EINVAL; 710 return -EINVAL;
712 } 711 }
@@ -857,7 +856,6 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
857 if (IS_ERR(mailbox)) 856 if (IS_ERR(mailbox))
858 return PTR_ERR(mailbox); 857 return PTR_ERR(mailbox);
859 858
860 memset(mailbox->buf, 0, sizeof(struct mlx4_net_trans_rule_hw_ctrl));
861 trans_rule_ctrl_to_hw(rule, mailbox->buf); 859 trans_rule_ctrl_to_hw(rule, mailbox->buf);
862 860
863 size += sizeof(struct mlx4_net_trans_rule_hw_ctrl); 861 size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
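
The three mcg.c hunks drop "< 0" tests on enum-typed parameters: when the compiler picks an unsigned underlying type the comparison is always false (and draws warnings), and with an unsigned type the upper-bound check alone already rejects any negative value smuggled in through a cast. The resulting validation shape, as a standalone sketch:

/* With an unsigned underlying type, a negative argument wraps to a huge
 * value and fails the single upper-bound test; no 'id >= 0' is needed.
 */
enum demo_rule_id { DEMO_RULE_A, DEMO_RULE_B, DEMO_RULE_NUM };

static int demo_rule_valid(enum demo_rule_id id)
{
        return id < DEMO_RULE_NUM;
}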
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 348bb8c7d9a7..e582a41a802b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -455,6 +455,7 @@ struct mlx4_slave_state {
455 u8 last_cmd; 455 u8 last_cmd;
456 u8 init_port_mask; 456 u8 init_port_mask;
457 bool active; 457 bool active;
458 bool old_vlan_api;
458 u8 function; 459 u8 function;
459 dma_addr_t vhcr_dma; 460 dma_addr_t vhcr_dma;
460 u16 mtu[MLX4_MAX_PORTS + 1]; 461 u16 mtu[MLX4_MAX_PORTS + 1];
@@ -503,12 +504,28 @@ struct slave_list {
503 struct list_head res_list[MLX4_NUM_OF_RESOURCE_TYPE]; 504 struct list_head res_list[MLX4_NUM_OF_RESOURCE_TYPE];
504}; 505};
505 506
507struct resource_allocator {
508 spinlock_t alloc_lock; /* protect quotas */
509 union {
510 int res_reserved;
511 int res_port_rsvd[MLX4_MAX_PORTS];
512 };
513 union {
514 int res_free;
515 int res_port_free[MLX4_MAX_PORTS];
516 };
517 int *quota;
518 int *allocated;
519 int *guaranteed;
520};
521
506struct mlx4_resource_tracker { 522struct mlx4_resource_tracker {
507 spinlock_t lock; 523 spinlock_t lock;
508 /* one tree per resource type */ 524 /* one tree per resource type */
509 struct rb_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE]; 525 struct rb_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE];
510 /* one resource list per slave */ 526 /* one resource list per slave */
511 struct slave_list *slave_list; 527 struct slave_list *slave_list;
528 struct resource_allocator res_alloc[MLX4_NUM_OF_RESOURCE_TYPE];
512}; 529};
513 530
514#define SLAVE_EVENT_EQ_SIZE 128 531#define SLAVE_EVENT_EQ_SIZE 128
@@ -1111,7 +1128,7 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
1111 1128
1112void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table); 1129void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
1113void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table); 1130void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
1114void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index); 1131void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
1115int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); 1132int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
1116 1133
1117int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz); 1134int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz);
@@ -1252,4 +1269,6 @@ static inline spinlock_t *mlx4_tlock(struct mlx4_dev *dev)
1252 1269
1253void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work); 1270void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work);
1254 1271
1272void mlx4_init_quotas(struct mlx4_dev *dev);
1273
1255#endif /* MLX4_H */ 1274#endif /* MLX4_H */
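
The new struct resource_allocator overlays two accounting modes in its unions: port-scoped resources (MAC, VLAN) keep per-port free/reserved counters while everything else keeps a single scalar pair, and allocated[] is laid out as one row per port for the port-scoped types. A sketch of the indexing convention, matching its use in resource_tracker.c below:

/* allocated[] indexing:
 *   global resource:    allocated[slave]
 *   per-port resource:  allocated[(port - 1) * (num_vfs + 1) + slave]
 * i.e. a num_ports x num_functions matrix flattened row by row, where
 * the function index covers the PF and all VFs.
 */
static int alloc_index(int port, int num_vfs, int slave)
{
        return port > 0 ? (port - 1) * (num_vfs + 1) + slave : slave;
}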
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index bf06e3610d27..f3758de59c05 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -530,10 +530,10 @@ struct mlx4_en_priv {
530 u16 num_frags; 530 u16 num_frags;
531 u16 log_rx_info; 531 u16 log_rx_info;
532 532
533 struct mlx4_en_tx_ring *tx_ring; 533 struct mlx4_en_tx_ring **tx_ring;
534 struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS]; 534 struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS];
535 struct mlx4_en_cq *tx_cq; 535 struct mlx4_en_cq **tx_cq;
536 struct mlx4_en_cq rx_cq[MAX_RX_RINGS]; 536 struct mlx4_en_cq *rx_cq[MAX_RX_RINGS];
537 struct mlx4_qp drop_qp; 537 struct mlx4_qp drop_qp;
538 struct work_struct rx_mode_task; 538 struct work_struct rx_mode_task;
539 struct work_struct watchdog_task; 539 struct work_struct watchdog_task;
@@ -626,7 +626,7 @@ static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq)
626 if ((cq->state & MLX4_CQ_LOCKED)) { 626 if ((cq->state & MLX4_CQ_LOCKED)) {
627 struct net_device *dev = cq->dev; 627 struct net_device *dev = cq->dev;
628 struct mlx4_en_priv *priv = netdev_priv(dev); 628 struct mlx4_en_priv *priv = netdev_priv(dev);
629 struct mlx4_en_rx_ring *rx_ring = &priv->rx_ring[cq->ring]; 629 struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
630 630
631 cq->state |= MLX4_EN_CQ_STATE_POLL_YIELD; 631 cq->state |= MLX4_EN_CQ_STATE_POLL_YIELD;
632 rc = false; 632 rc = false;
@@ -704,9 +704,9 @@ void mlx4_en_stop_port(struct net_device *dev, int detach);
704void mlx4_en_free_resources(struct mlx4_en_priv *priv); 704void mlx4_en_free_resources(struct mlx4_en_priv *priv);
705int mlx4_en_alloc_resources(struct mlx4_en_priv *priv); 705int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
706 706
707int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, 707int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq,
708 int entries, int ring, enum cq_type mode); 708 int entries, int ring, enum cq_type mode, int node);
709void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); 709void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq);
710int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, 710int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
711 int cq_idx); 711 int cq_idx);
712void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); 712void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
@@ -717,9 +717,11 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
717u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb); 717u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
718netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); 718netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
719 719
720int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, 720int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
721 int qpn, u32 size, u16 stride); 721 struct mlx4_en_tx_ring **pring,
722void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring); 722 int qpn, u32 size, u16 stride, int node);
723void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
724 struct mlx4_en_tx_ring **pring);
723int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, 725int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
724 struct mlx4_en_tx_ring *ring, 726 struct mlx4_en_tx_ring *ring,
725 int cq, int user_prio); 727 int cq, int user_prio);
@@ -727,10 +729,10 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
727 struct mlx4_en_tx_ring *ring); 729 struct mlx4_en_tx_ring *ring);
728 730
729int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, 731int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
730 struct mlx4_en_rx_ring *ring, 732 struct mlx4_en_rx_ring **pring,
731 u32 size, u16 stride); 733 u32 size, u16 stride, int node);
732void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, 734void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
733 struct mlx4_en_rx_ring *ring, 735 struct mlx4_en_rx_ring **pring,
734 u32 size, u16 stride); 736 u32 size, u16 stride);
735int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv); 737int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv);
736void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv, 738void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
@@ -768,8 +770,7 @@ extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops;
768int mlx4_en_setup_tc(struct net_device *dev, u8 up); 770int mlx4_en_setup_tc(struct net_device *dev, u8 up);
769 771
770#ifdef CONFIG_RFS_ACCEL 772#ifdef CONFIG_RFS_ACCEL
771void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv, 773void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv);
772 struct mlx4_en_rx_ring *rx_ring);
773#endif 774#endif
774 775
775#define MLX4_EN_NUM_SELF_TEST 5 776#define MLX4_EN_NUM_SELF_TEST 5
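
Throughout mlx4_en.h the ring and CQ members become arrays of pointers and the create/destroy prototypes take a double pointer: create() now allocates the object itself (on a caller-chosen NUMA node) and publishes it through the out-parameter, while destroy() frees it and clears the caller's slot. A self-contained sketch of that contract with an invented demo type:

#include <linux/slab.h>

struct demo_ring { int size; };

static int demo_ring_create(struct demo_ring **pring, int size, int node)
{
        struct demo_ring *ring = kzalloc_node(sizeof(*ring), GFP_KERNEL,
                                              node);

        if (!ring)
                return -ENOMEM;
        ring->size = size;
        *pring = ring;          /* publish only on full success */
        return 0;
}

static void demo_ring_destroy(struct demo_ring **pring)
{
        kfree(*pring);
        *pring = NULL;          /* teardown loops can skip NULL holes */
}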
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index f91719a08cba..b3ee9bafff5e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -480,9 +480,6 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
480 goto err_table; 480 goto err_table;
481 } 481 }
482 mpt_entry = mailbox->buf; 482 mpt_entry = mailbox->buf;
483
484 memset(mpt_entry, 0, sizeof *mpt_entry);
485
486 mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO | 483 mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO |
487 MLX4_MPT_FLAG_REGION | 484 MLX4_MPT_FLAG_REGION |
488 mr->access); 485 mr->access);
@@ -695,8 +692,6 @@ int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw)
695 } 692 }
696 mpt_entry = mailbox->buf; 693 mpt_entry = mailbox->buf;
697 694
698 memset(mpt_entry, 0, sizeof(*mpt_entry));
699
700 /* Note that the MLX4_MPT_FLAG_REGION bit in mpt_entry->flags is turned 695 /* Note that the MLX4_MPT_FLAG_REGION bit in mpt_entry->flags is turned
701 * off, thus creating a memory window and not a memory region. 696 * off, thus creating a memory window and not a memory region.
702 */ 697 */
@@ -755,14 +750,14 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)
755 struct mlx4_mr_table *mr_table = &priv->mr_table; 750 struct mlx4_mr_table *mr_table = &priv->mr_table;
756 int err; 751 int err;
757 752
758 if (!is_power_of_2(dev->caps.num_mpts))
759 return -EINVAL;
760
761 /* Nothing to do for slaves - all MR handling is forwarded 753 /* Nothing to do for slaves - all MR handling is forwarded
762 * to the master */ 754 * to the master */
763 if (mlx4_is_slave(dev)) 755 if (mlx4_is_slave(dev))
764 return 0; 756 return 0;
765 757
758 if (!is_power_of_2(dev->caps.num_mpts))
759 return -EINVAL;
760
766 err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts, 761 err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
767 ~0, dev->caps.reserved_mrws, 0); 762 ~0, dev->caps.reserved_mrws, 0);
768 if (err) 763 if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index 00f223acada7..84cfb40bf451 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -168,7 +168,7 @@ void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar)
168} 168}
169EXPORT_SYMBOL_GPL(mlx4_uar_free); 169EXPORT_SYMBOL_GPL(mlx4_uar_free);
170 170
171int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf) 171int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node)
172{ 172{
173 struct mlx4_priv *priv = mlx4_priv(dev); 173 struct mlx4_priv *priv = mlx4_priv(dev);
174 struct mlx4_uar *uar; 174 struct mlx4_uar *uar;
@@ -186,10 +186,13 @@ int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf)
186 err = -ENOMEM; 186 err = -ENOMEM;
187 goto out; 187 goto out;
188 } 188 }
189 uar = kmalloc(sizeof *uar, GFP_KERNEL); 189 uar = kmalloc_node(sizeof(*uar), GFP_KERNEL, node);
190 if (!uar) { 190 if (!uar) {
191 err = -ENOMEM; 191 uar = kmalloc(sizeof(*uar), GFP_KERNEL);
192 goto out; 192 if (!uar) {
193 err = -ENOMEM;
194 goto out;
195 }
193 } 196 }
194 err = mlx4_uar_alloc(dev, uar); 197 err = mlx4_uar_alloc(dev, uar);
195 if (err) 198 if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 946e0af5faef..97d342fa5032 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -178,13 +178,24 @@ EXPORT_SYMBOL_GPL(__mlx4_register_mac);
178int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) 178int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
179{ 179{
180 u64 out_param = 0; 180 u64 out_param = 0;
181 int err; 181 int err = -EINVAL;
182 182
183 if (mlx4_is_mfunc(dev)) { 183 if (mlx4_is_mfunc(dev)) {
184 set_param_l(&out_param, port); 184 if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
185 err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC, 185 err = mlx4_cmd_imm(dev, mac, &out_param,
186 RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES, 186 ((u32) port) << 8 | (u32) RES_MAC,
187 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 187 RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
188 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
189 }
190 if (err && err == -EINVAL && mlx4_is_slave(dev)) {
191 /* retry using old REG_MAC format */
192 set_param_l(&out_param, port);
193 err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
194 RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
195 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
196 if (!err)
197 dev->flags |= MLX4_FLAG_OLD_REG_MAC;
198 }
188 if (err) 199 if (err)
189 return err; 200 return err;
190 201
@@ -231,10 +242,18 @@ void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
231 u64 out_param = 0; 242 u64 out_param = 0;
232 243
233 if (mlx4_is_mfunc(dev)) { 244 if (mlx4_is_mfunc(dev)) {
234 set_param_l(&out_param, port); 245 if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
235 (void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC, 246 (void) mlx4_cmd_imm(dev, mac, &out_param,
236 RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES, 247 ((u32) port) << 8 | (u32) RES_MAC,
237 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 248 RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
249 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
250 } else {
251 /* use old unregister mac format */
252 set_param_l(&out_param, port);
253 (void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
254 RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
255 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
256 }
238 return; 257 return;
239 } 258 }
240 __mlx4_unregister_mac(dev, port, mac); 259 __mlx4_unregister_mac(dev, port, mac);
@@ -284,7 +303,7 @@ static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
284 memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE); 303 memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
285 in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port; 304 in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
286 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, 305 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
287 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); 306 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
288 307
289 mlx4_free_cmd_mailbox(dev, mailbox); 308 mlx4_free_cmd_mailbox(dev, mailbox);
290 309
@@ -370,9 +389,12 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
370 u64 out_param = 0; 389 u64 out_param = 0;
371 int err; 390 int err;
372 391
392 if (vlan > 4095)
393 return -EINVAL;
394
373 if (mlx4_is_mfunc(dev)) { 395 if (mlx4_is_mfunc(dev)) {
374 set_param_l(&out_param, port); 396 err = mlx4_cmd_imm(dev, vlan, &out_param,
375 err = mlx4_cmd_imm(dev, vlan, &out_param, RES_VLAN, 397 ((u32) port) << 8 | (u32) RES_VLAN,
376 RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES, 398 RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
377 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 399 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
378 if (!err) 400 if (!err)
@@ -384,23 +406,26 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
384} 406}
385EXPORT_SYMBOL_GPL(mlx4_register_vlan); 407EXPORT_SYMBOL_GPL(mlx4_register_vlan);
386 408
387void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index) 409void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
388{ 410{
389 struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; 411 struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
412 int index;
390 413
391 if (index < MLX4_VLAN_REGULAR) { 414 mutex_lock(&table->mutex);
392 mlx4_warn(dev, "Trying to free special vlan index %d\n", index); 415 if (mlx4_find_cached_vlan(dev, port, vlan, &index)) {
393 return; 416 mlx4_warn(dev, "vlan 0x%x is not in the vlan table\n", vlan);
417 goto out;
394 } 418 }
395 419
396 mutex_lock(&table->mutex); 420 if (index < MLX4_VLAN_REGULAR) {
397 if (!table->refs[index]) { 421 mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
398 mlx4_warn(dev, "No vlan entry for index %d\n", index);
399 goto out; 422 goto out;
400 } 423 }
424
401 if (--table->refs[index]) { 425 if (--table->refs[index]) {
402 mlx4_dbg(dev, "Have more references for index %d," 426 mlx4_dbg(dev, "Have %d more references for index %d,"
403 "no need to modify vlan table\n", index); 427 "no need to modify vlan table\n", table->refs[index],
428 index);
404 goto out; 429 goto out;
405 } 430 }
406 table->entries[index] = 0; 431 table->entries[index] = 0;
@@ -410,23 +435,19 @@ out:
410 mutex_unlock(&table->mutex); 435 mutex_unlock(&table->mutex);
411} 436}
412 437
413void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index) 438void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
414{ 439{
415 u64 in_param = 0; 440 u64 out_param = 0;
416 int err;
417 441
418 if (mlx4_is_mfunc(dev)) { 442 if (mlx4_is_mfunc(dev)) {
419 set_param_l(&in_param, port); 443 (void) mlx4_cmd_imm(dev, vlan, &out_param,
420 err = mlx4_cmd(dev, in_param, RES_VLAN, RES_OP_RESERVE_AND_MAP, 444 ((u32) port) << 8 | (u32) RES_VLAN,
421 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, 445 RES_OP_RESERVE_AND_MAP,
422 MLX4_CMD_WRAPPED); 446 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
423 if (!err) 447 MLX4_CMD_WRAPPED);
424 mlx4_warn(dev, "Failed freeing vlan at index:%d\n",
425 index);
426
427 return; 448 return;
428 } 449 }
429 __mlx4_unregister_vlan(dev, port, index); 450 __mlx4_unregister_vlan(dev, port, vlan);
430} 451}
431EXPORT_SYMBOL_GPL(mlx4_unregister_vlan); 452EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
432 453
@@ -448,8 +469,6 @@ int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
448 469
449 inbuf = inmailbox->buf; 470 inbuf = inmailbox->buf;
450 outbuf = outmailbox->buf; 471 outbuf = outmailbox->buf;
451 memset(inbuf, 0, 256);
452 memset(outbuf, 0, 256);
453 inbuf[0] = 1; 472 inbuf[0] = 1;
454 inbuf[1] = 1; 473 inbuf[1] = 1;
455 inbuf[2] = 1; 474 inbuf[2] = 1;
@@ -632,8 +651,6 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz)
632 if (IS_ERR(mailbox)) 651 if (IS_ERR(mailbox))
633 return PTR_ERR(mailbox); 652 return PTR_ERR(mailbox);
634 653
635 memset(mailbox->buf, 0, 256);
636
637 ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port]; 654 ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
638 655
639 if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) { 656 if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) {
@@ -671,8 +688,6 @@ int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
671 if (IS_ERR(mailbox)) 688 if (IS_ERR(mailbox))
672 return PTR_ERR(mailbox); 689 return PTR_ERR(mailbox);
673 context = mailbox->buf; 690 context = mailbox->buf;
674 memset(context, 0, sizeof *context);
675
676 context->flags = SET_PORT_GEN_ALL_VALID; 691 context->flags = SET_PORT_GEN_ALL_VALID;
677 context->mtu = cpu_to_be16(mtu); 692 context->mtu = cpu_to_be16(mtu);
678 context->pptx = (pptx * (!pfctx)) << 7; 693 context->pptx = (pptx * (!pfctx)) << 7;
@@ -706,8 +721,6 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
706 if (IS_ERR(mailbox)) 721 if (IS_ERR(mailbox))
707 return PTR_ERR(mailbox); 722 return PTR_ERR(mailbox);
708 context = mailbox->buf; 723 context = mailbox->buf;
709 memset(context, 0, sizeof *context);
710
711 context->base_qpn = cpu_to_be32(base_qpn); 724 context->base_qpn = cpu_to_be32(base_qpn);
712 context->n_mac = dev->caps.log_num_macs; 725 context->n_mac = dev->caps.log_num_macs;
713 context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | 726 context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
@@ -740,8 +753,6 @@ int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc)
740 if (IS_ERR(mailbox)) 753 if (IS_ERR(mailbox))
741 return PTR_ERR(mailbox); 754 return PTR_ERR(mailbox);
742 context = mailbox->buf; 755 context = mailbox->buf;
743 memset(context, 0, sizeof *context);
744
745 for (i = 0; i < MLX4_NUM_UP; i += 2) 756 for (i = 0; i < MLX4_NUM_UP; i += 2)
746 context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1]; 757 context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1];
747 758
@@ -767,7 +778,6 @@ int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
767 if (IS_ERR(mailbox)) 778 if (IS_ERR(mailbox))
768 return PTR_ERR(mailbox); 779 return PTR_ERR(mailbox);
769 context = mailbox->buf; 780 context = mailbox->buf;
770 memset(context, 0, sizeof *context);
771 781
772 for (i = 0; i < MLX4_NUM_TC; i++) { 782 for (i = 0; i < MLX4_NUM_TC; i++) {
773 struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i]; 783 struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i];
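
The port.c changes move the wrapped ALLOC_RES/FREE_RES calls to a new in_modifier encoding, (port << 8) | resource, with a runtime downgrade path: if a slave's new-format registration fails with -EINVAL (an old PF that cannot parse the encoding), it retries in the old format and latches MLX4_FLAG_OLD_REG_MAC so unregistration speaks the same dialect. The handshake in miniature (the helper boundary is invented; the calls and flags are the driver's):

static int reg_mac_compat(struct mlx4_dev *dev, u8 port, u64 mac,
                          u64 *out_param)
{
        int err;

        err = mlx4_cmd_imm(dev, mac, out_param,
                           ((u32) port) << 8 | (u32) RES_MAC,
                           RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
                           MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
        if (err == -EINVAL && mlx4_is_slave(dev)) {
                /* old master: retry in the old format and remember it */
                set_param_l(out_param, port);
                err = mlx4_cmd_imm(dev, mac, out_param, RES_MAC,
                                   RES_OP_RESERVE_AND_MAP,
                                   MLX4_CMD_ALLOC_RES,
                                   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
                if (!err)
                        dev->flags |= MLX4_FLAG_OLD_REG_MAC;
        }
        return err;
}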
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index e891b058c1be..2715e61dbb74 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -480,8 +480,7 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
480 */ 480 */
481 481
482 err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps, 482 err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
483 (1 << 23) - 1, dev->phys_caps.base_sqpn + 8 + 483 (1 << 23) - 1, mlx4_num_reserved_sqps(dev),
484 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev),
485 reserved_from_top); 484 reserved_from_top);
486 if (err) 485 if (err)
487 return err; 486 return err;
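
The qp.c hunk folds the open-coded reserved-QP expression into mlx4_num_reserved_sqps(). Judging only from the removed lines, the helper presumably evaluates to the expression below; treat this as a reconstruction, not the header's literal definition:

static inline int mlx4_num_reserved_sqps(struct mlx4_dev *dev)
{
        return dev->phys_caps.base_sqpn + 8 +
               16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev);
}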
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index dd6876321116..2f3f2bc7f283 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -55,6 +55,14 @@ struct mac_res {
55 u8 port; 55 u8 port;
56}; 56};
57 57
58struct vlan_res {
59 struct list_head list;
60 u16 vlan;
61 int ref_count;
62 int vlan_index;
63 u8 port;
64};
65
58struct res_common { 66struct res_common {
59 struct list_head list; 67 struct list_head list;
60 struct rb_node node; 68 struct rb_node node;
@@ -102,7 +110,14 @@ struct res_qp {
102 int local_qpn; 110 int local_qpn;
103 atomic_t ref_count; 111 atomic_t ref_count;
104 u32 qpc_flags; 112 u32 qpc_flags;
113 /* saved qp params before VST enforcement in order to restore on VGT */
105 u8 sched_queue; 114 u8 sched_queue;
115 __be32 param3;
116 u8 vlan_control;
117 u8 fvl_rx;
118 u8 pri_path_fl;
119 u8 vlan_index;
120 u8 feup;
106}; 121};
107 122
108enum res_mtt_states { 123enum res_mtt_states {
@@ -266,6 +281,7 @@ static const char *ResourceType(enum mlx4_resource rt)
266 case RES_MPT: return "RES_MPT"; 281 case RES_MPT: return "RES_MPT";
267 case RES_MTT: return "RES_MTT"; 282 case RES_MTT: return "RES_MTT";
268 case RES_MAC: return "RES_MAC"; 283 case RES_MAC: return "RES_MAC";
284 case RES_VLAN: return "RES_VLAN";
269 case RES_EQ: return "RES_EQ"; 285 case RES_EQ: return "RES_EQ";
270 case RES_COUNTER: return "RES_COUNTER"; 286 case RES_COUNTER: return "RES_COUNTER";
271 case RES_FS_RULE: return "RES_FS_RULE"; 287 case RES_FS_RULE: return "RES_FS_RULE";
@@ -274,10 +290,139 @@ static const char *ResourceType(enum mlx4_resource rt)
274 }; 290 };
275} 291}
276 292
293static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
294static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
295 enum mlx4_resource res_type, int count,
296 int port)
297{
298 struct mlx4_priv *priv = mlx4_priv(dev);
299 struct resource_allocator *res_alloc =
300 &priv->mfunc.master.res_tracker.res_alloc[res_type];
301 int err = -EINVAL;
302 int allocated, free, reserved, guaranteed, from_free;
303
304 if (slave > dev->num_vfs)
305 return -EINVAL;
306
307 spin_lock(&res_alloc->alloc_lock);
308 allocated = (port > 0) ?
309 res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
310 res_alloc->allocated[slave];
311 free = (port > 0) ? res_alloc->res_port_free[port - 1] :
312 res_alloc->res_free;
313 reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
314 res_alloc->res_reserved;
315 guaranteed = res_alloc->guaranteed[slave];
316
317 if (allocated + count > res_alloc->quota[slave])
318 goto out;
319
320 if (allocated + count <= guaranteed) {
321 err = 0;
322 } else {
323 /* portion may need to be obtained from free area */
324 if (guaranteed - allocated > 0)
325 from_free = count - (guaranteed - allocated);
326 else
327 from_free = count;
328
329 if (free - from_free > reserved)
330 err = 0;
331 }
332
333 if (!err) {
334 /* grant the request */
335 if (port > 0) {
336 res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
337 res_alloc->res_port_free[port - 1] -= count;
338 } else {
339 res_alloc->allocated[slave] += count;
340 res_alloc->res_free -= count;
341 }
342 }
343
344out:
345 spin_unlock(&res_alloc->alloc_lock);
346 return err;
347}
348
349static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
350 enum mlx4_resource res_type, int count,
351 int port)
352{
353 struct mlx4_priv *priv = mlx4_priv(dev);
354 struct resource_allocator *res_alloc =
355 &priv->mfunc.master.res_tracker.res_alloc[res_type];
356
357 if (slave > dev->num_vfs)
358 return;
359
360 spin_lock(&res_alloc->alloc_lock);
361 if (port > 0) {
362 res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
363 res_alloc->res_port_free[port - 1] += count;
364 } else {
365 res_alloc->allocated[slave] -= count;
366 res_alloc->res_free += count;
367 }
368
369 spin_unlock(&res_alloc->alloc_lock);
370 return;
371}
372
373static inline void initialize_res_quotas(struct mlx4_dev *dev,
374 struct resource_allocator *res_alloc,
375 enum mlx4_resource res_type,
376 int vf, int num_instances)
377{
378 res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1));
379 res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
380 if (vf == mlx4_master_func_num(dev)) {
381 res_alloc->res_free = num_instances;
382 if (res_type == RES_MTT) {
383 /* reserved mtts will be taken out of the PF allocation */
384 res_alloc->res_free += dev->caps.reserved_mtts;
385 res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
386 res_alloc->quota[vf] += dev->caps.reserved_mtts;
387 }
388 }
389}
390
391void mlx4_init_quotas(struct mlx4_dev *dev)
392{
393 struct mlx4_priv *priv = mlx4_priv(dev);
394 int pf;
395
396 /* quotas for VFs are initialized in mlx4_slave_cap */
397 if (mlx4_is_slave(dev))
398 return;
399
400 if (!mlx4_is_mfunc(dev)) {
401 dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
402 mlx4_num_reserved_sqps(dev);
403 dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
404 dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
405 dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
406 dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
407 return;
408 }
409
410 pf = mlx4_master_func_num(dev);
411 dev->quotas.qp =
412 priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
413 dev->quotas.cq =
414 priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
415 dev->quotas.srq =
416 priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
417 dev->quotas.mtt =
418 priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
419 dev->quotas.mpt =
420 priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
421}
277int mlx4_init_resource_tracker(struct mlx4_dev *dev) 422int mlx4_init_resource_tracker(struct mlx4_dev *dev)
278{ 423{
279 struct mlx4_priv *priv = mlx4_priv(dev); 424 struct mlx4_priv *priv = mlx4_priv(dev);
280 int i; 425 int i, j;
281 int t; 426 int t;
282 427
283 priv->mfunc.master.res_tracker.slave_list = 428 priv->mfunc.master.res_tracker.slave_list =
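
mlx4_grant_resource() grants a request when it fits the slave's quota and either stays inside the slave's guaranteed share or can be drawn from the free pool without dipping into the reserve that backs other slaves' unclaimed guarantees. The decision logic extracted for a dry run (standalone sketch; the real function also updates the counters under alloc_lock):

static int would_grant(int quota, int guaranteed, int allocated,
                       int free, int reserved, int count)
{
        int from_free;

        if (allocated + count > quota)
                return 0;               /* over the per-slave quota */
        if (allocated + count <= guaranteed)
                return 1;               /* fits the guaranteed share */
        from_free = (guaranteed > allocated) ?
                    count - (guaranteed - allocated) : count;
        return free - from_free > reserved;
}

/* Example: quota=100, guaranteed=20, allocated=15, free=50, reserved=30,
 * count=10: 25 <= 100, 25 > 20, from_free = 10 - (20 - 15) = 5, and
 * 50 - 5 = 45 > 30, so the request is granted; with reserved=46 the same
 * request would be refused to protect other slaves' guarantees.
 */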
@@ -298,8 +443,105 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
298 for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) 443 for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
299 priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT; 444 priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
300 445
446 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
447 struct resource_allocator *res_alloc =
448 &priv->mfunc.master.res_tracker.res_alloc[i];
449 res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
450 res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
451 if (i == RES_MAC || i == RES_VLAN)
452 res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
453 (dev->num_vfs + 1) * sizeof(int),
454 GFP_KERNEL);
455 else
456 res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
457
458 if (!res_alloc->quota || !res_alloc->guaranteed ||
459 !res_alloc->allocated)
460 goto no_mem_err;
461
462 spin_lock_init(&res_alloc->alloc_lock);
463 for (t = 0; t < dev->num_vfs + 1; t++) {
464 switch (i) {
465 case RES_QP:
466 initialize_res_quotas(dev, res_alloc, RES_QP,
467 t, dev->caps.num_qps -
468 dev->caps.reserved_qps -
469 mlx4_num_reserved_sqps(dev));
470 break;
471 case RES_CQ:
472 initialize_res_quotas(dev, res_alloc, RES_CQ,
473 t, dev->caps.num_cqs -
474 dev->caps.reserved_cqs);
475 break;
476 case RES_SRQ:
477 initialize_res_quotas(dev, res_alloc, RES_SRQ,
478 t, dev->caps.num_srqs -
479 dev->caps.reserved_srqs);
480 break;
481 case RES_MPT:
482 initialize_res_quotas(dev, res_alloc, RES_MPT,
483 t, dev->caps.num_mpts -
484 dev->caps.reserved_mrws);
485 break;
486 case RES_MTT:
487 initialize_res_quotas(dev, res_alloc, RES_MTT,
488 t, dev->caps.num_mtts -
489 dev->caps.reserved_mtts);
490 break;
491 case RES_MAC:
492 if (t == mlx4_master_func_num(dev)) {
493 res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
494 res_alloc->guaranteed[t] = 2;
495 for (j = 0; j < MLX4_MAX_PORTS; j++)
496 res_alloc->res_port_free[j] = MLX4_MAX_MAC_NUM;
497 } else {
498 res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
499 res_alloc->guaranteed[t] = 2;
500 }
501 break;
502 case RES_VLAN:
503 if (t == mlx4_master_func_num(dev)) {
504 res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
505 res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
506 for (j = 0; j < MLX4_MAX_PORTS; j++)
507 res_alloc->res_port_free[j] =
508 res_alloc->quota[t];
509 } else {
510 res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
511 res_alloc->guaranteed[t] = 0;
512 }
513 break;
514 case RES_COUNTER:
515 res_alloc->quota[t] = dev->caps.max_counters;
516 res_alloc->guaranteed[t] = 0;
517 if (t == mlx4_master_func_num(dev))
518 res_alloc->res_free = res_alloc->quota[t];
519 break;
520 default:
521 break;
522 }
523 if (i == RES_MAC || i == RES_VLAN) {
524 for (j = 0; j < MLX4_MAX_PORTS; j++)
525 res_alloc->res_port_rsvd[j] +=
526 res_alloc->guaranteed[t];
527 } else {
528 res_alloc->res_reserved += res_alloc->guaranteed[t];
529 }
530 }
531 }
301 spin_lock_init(&priv->mfunc.master.res_tracker.lock); 532 spin_lock_init(&priv->mfunc.master.res_tracker.lock);
302 return 0 ; 533 return 0;
534
535no_mem_err:
536 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
537 kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
538 priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
539 kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
540 priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
541 kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
542 priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
543 }
544 return -ENOMEM;
303} 545}
304 546
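[Editor's note] The no_mem_err path above frees all three arrays for every resource type regardless of how far allocation got. That is safe because kfree(NULL) is a no-op and the tracker structure starts zeroed (priv is allocated zeroed in the driver). A userspace analogue of the same pattern, using free(), which has the identical NULL guarantee; hypothetical sketch, not driver code:

#include <stdlib.h>

/* Caller must pass a zeroed slots[] array, mirroring the kzalloc'd
 * tracker: entries never reached by the loop must already be NULL. */
static int alloc_all(int **slots, int n)
{
	for (int i = 0; i < n; i++) {
		slots[i] = malloc(64 * sizeof(int));
		if (!slots[i])
			goto no_mem_err;
	}
	return 0;

no_mem_err:
	for (int i = 0; i < n; i++) {
		free(slots[i]);         /* safe even when slots[i] == NULL */
		slots[i] = NULL;
	}
	return -1;
}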
305void mlx4_free_resource_tracker(struct mlx4_dev *dev, 547void mlx4_free_resource_tracker(struct mlx4_dev *dev,
@@ -309,13 +551,28 @@ void mlx4_free_resource_tracker(struct mlx4_dev *dev,
309 int i; 551 int i;
310 552
311 if (priv->mfunc.master.res_tracker.slave_list) { 553 if (priv->mfunc.master.res_tracker.slave_list) {
312 if (type != RES_TR_FREE_STRUCTS_ONLY) 554 if (type != RES_TR_FREE_STRUCTS_ONLY) {
313 for (i = 0 ; i < dev->num_slaves; i++) 555 for (i = 0; i < dev->num_slaves; i++) {
314 if (type == RES_TR_FREE_ALL || 556 if (type == RES_TR_FREE_ALL ||
315 dev->caps.function != i) 557 dev->caps.function != i)
316 mlx4_delete_all_resources_for_slave(dev, i); 558 mlx4_delete_all_resources_for_slave(dev, i);
559 }
560 /* free master's vlans */
561 i = dev->caps.function;
562 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
563 rem_slave_vlans(dev, i);
564 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
565 }
317 566
318 if (type != RES_TR_FREE_SLAVES_ONLY) { 567 if (type != RES_TR_FREE_SLAVES_ONLY) {
568 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
569 kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
570 priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
571 kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
572 priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
573 kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
574 priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
575 }
319 kfree(priv->mfunc.master.res_tracker.slave_list); 576 kfree(priv->mfunc.master.res_tracker.slave_list);
320 priv->mfunc.master.res_tracker.slave_list = NULL; 577 priv->mfunc.master.res_tracker.slave_list = NULL;
321 } 578 }
@@ -1229,12 +1486,19 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1229 case RES_OP_RESERVE: 1486 case RES_OP_RESERVE:
1230 count = get_param_l(&in_param); 1487 count = get_param_l(&in_param);
1231 align = get_param_h(&in_param); 1488 align = get_param_h(&in_param);
1232 err = __mlx4_qp_reserve_range(dev, count, align, &base); 1489 err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
1233 if (err) 1490 if (err)
1234 return err; 1491 return err;
1235 1492
1493 err = __mlx4_qp_reserve_range(dev, count, align, &base);
1494 if (err) {
1495 mlx4_release_resource(dev, slave, RES_QP, count, 0);
1496 return err;
1497 }
1498
1236 err = add_res_range(dev, slave, base, count, RES_QP, 0); 1499 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1237 if (err) { 1500 if (err) {
1501 mlx4_release_resource(dev, slave, RES_QP, count, 0);
1238 __mlx4_qp_release_range(dev, base, count); 1502 __mlx4_qp_release_range(dev, base, count);
1239 return err; 1503 return err;
1240 } 1504 }
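[Editor's note] The reordering above establishes the pattern this patch repeats for MTTs, MPTs, CQs, SRQs, counters and the MAC/VLAN paths in the hunks below: charge the quota first, then take the hardware resource, then record it in the tracker, unwinding the earlier steps in reverse on any failure. A generic sketch of that shape; the helper and its types are hypothetical, not driver code:

/* steps[i] acquires stage i; undos[i] reverses it. */
typedef int  (*step_fn)(void *ctx);
typedef void (*undo_fn)(void *ctx);

static int acquire_all(void *ctx, const step_fn *steps,
		       const undo_fn *undos, int n)
{
	for (int i = 0; i < n; i++) {
		int err = steps[i](ctx);
		if (err) {
			while (i-- > 0)         /* unwind in reverse order */
				undos[i](ctx);
			return err;
		}
	}
	return 0;
}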
@@ -1282,15 +1546,24 @@ static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1282 return err; 1546 return err;
1283 1547
1284 order = get_param_l(&in_param); 1548 order = get_param_l(&in_param);
1549
1550 err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1551 if (err)
1552 return err;
1553
1285 base = __mlx4_alloc_mtt_range(dev, order); 1554 base = __mlx4_alloc_mtt_range(dev, order);
1286 if (base == -1) 1555 if (base == -1) {
1556 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1287 return -ENOMEM; 1557 return -ENOMEM;
1558 }
1288 1559
1289 err = add_res_range(dev, slave, base, 1, RES_MTT, order); 1560 err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1290 if (err) 1561 if (err) {
1562 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1291 __mlx4_free_mtt_range(dev, base, order); 1563 __mlx4_free_mtt_range(dev, base, order);
1292 else 1564 } else {
1293 set_param_l(out_param, base); 1565 set_param_l(out_param, base);
1566 }
1294 1567
1295 return err; 1568 return err;
1296} 1569}
@@ -1305,13 +1578,20 @@ static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1305 1578
1306 switch (op) { 1579 switch (op) {
1307 case RES_OP_RESERVE: 1580 case RES_OP_RESERVE:
1581 err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1582 if (err)
1583 break;
1584
1308 index = __mlx4_mpt_reserve(dev); 1585 index = __mlx4_mpt_reserve(dev);
1309 if (index == -1) 1586 if (index == -1) {
1587 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1310 break; 1588 break;
1589 }
1311 id = index & mpt_mask(dev); 1590 id = index & mpt_mask(dev);
1312 1591
1313 err = add_res_range(dev, slave, id, 1, RES_MPT, index); 1592 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1314 if (err) { 1593 if (err) {
1594 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1315 __mlx4_mpt_release(dev, index); 1595 __mlx4_mpt_release(dev, index);
1316 break; 1596 break;
1317 } 1597 }
@@ -1345,12 +1625,19 @@ static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1345 1625
1346 switch (op) { 1626 switch (op) {
1347 case RES_OP_RESERVE_AND_MAP: 1627 case RES_OP_RESERVE_AND_MAP:
1348 err = __mlx4_cq_alloc_icm(dev, &cqn); 1628 err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
1349 if (err) 1629 if (err)
1350 break; 1630 break;
1351 1631
1632 err = __mlx4_cq_alloc_icm(dev, &cqn);
1633 if (err) {
1634 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1635 break;
1636 }
1637
1352 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0); 1638 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1353 if (err) { 1639 if (err) {
1640 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1354 __mlx4_cq_free_icm(dev, cqn); 1641 __mlx4_cq_free_icm(dev, cqn);
1355 break; 1642 break;
1356 } 1643 }
@@ -1373,12 +1660,19 @@ static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1373 1660
1374 switch (op) { 1661 switch (op) {
1375 case RES_OP_RESERVE_AND_MAP: 1662 case RES_OP_RESERVE_AND_MAP:
1376 err = __mlx4_srq_alloc_icm(dev, &srqn); 1663 err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
1377 if (err) 1664 if (err)
1378 break; 1665 break;
1379 1666
1667 err = __mlx4_srq_alloc_icm(dev, &srqn);
1668 if (err) {
1669 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1670 break;
1671 }
1672
1380 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0); 1673 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1381 if (err) { 1674 if (err) {
1675 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1382 __mlx4_srq_free_icm(dev, srqn); 1676 __mlx4_srq_free_icm(dev, srqn);
1383 break; 1677 break;
1384 } 1678 }
@@ -1399,9 +1693,13 @@ static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
1399 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1693 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1400 struct mac_res *res; 1694 struct mac_res *res;
1401 1695
1696 if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
1697 return -EINVAL;
1402 res = kzalloc(sizeof *res, GFP_KERNEL); 1698 res = kzalloc(sizeof *res, GFP_KERNEL);
1403 if (!res) 1699 if (!res) {
1700 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1404 return -ENOMEM; 1701 return -ENOMEM;
1702 }
1405 res->mac = mac; 1703 res->mac = mac;
1406 res->port = (u8) port; 1704 res->port = (u8) port;
1407 list_add_tail(&res->list, 1705 list_add_tail(&res->list,
@@ -1421,6 +1719,7 @@ static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1421 list_for_each_entry_safe(res, tmp, mac_list, list) { 1719 list_for_each_entry_safe(res, tmp, mac_list, list) {
1422 if (res->mac == mac && res->port == (u8) port) { 1720 if (res->mac == mac && res->port == (u8) port) {
1423 list_del(&res->list); 1721 list_del(&res->list);
1722 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1424 kfree(res); 1723 kfree(res);
1425 break; 1724 break;
1426 } 1725 }
@@ -1438,12 +1737,13 @@ static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1438 list_for_each_entry_safe(res, tmp, mac_list, list) { 1737 list_for_each_entry_safe(res, tmp, mac_list, list) {
1439 list_del(&res->list); 1738 list_del(&res->list);
1440 __mlx4_unregister_mac(dev, res->port, res->mac); 1739 __mlx4_unregister_mac(dev, res->port, res->mac);
1740 mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
1441 kfree(res); 1741 kfree(res);
1442 } 1742 }
1443} 1743}
1444 1744
1445static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, 1745static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1446 u64 in_param, u64 *out_param) 1746 u64 in_param, u64 *out_param, int in_port)
1447{ 1747{
1448 int err = -EINVAL; 1748 int err = -EINVAL;
1449 int port; 1749 int port;
@@ -1452,7 +1752,7 @@ static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1452 if (op != RES_OP_RESERVE_AND_MAP) 1752 if (op != RES_OP_RESERVE_AND_MAP)
1453 return err; 1753 return err;
1454 1754
1455 port = get_param_l(out_param); 1755 port = !in_port ? get_param_l(out_param) : in_port;
1456 mac = in_param; 1756 mac = in_param;
1457 1757
1458 err = __mlx4_register_mac(dev, port, mac); 1758 err = __mlx4_register_mac(dev, port, mac);
@@ -1469,12 +1769,114 @@ static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1469 return err; 1769 return err;
1470} 1770}
1471 1771
1472static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, 1772static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1473 u64 in_param, u64 *out_param) 1773 int port, int vlan_index)
1474{ 1774{
1775 struct mlx4_priv *priv = mlx4_priv(dev);
1776 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1777 struct list_head *vlan_list =
1778 &tracker->slave_list[slave].res_list[RES_VLAN];
1779 struct vlan_res *res, *tmp;
1780
1781 list_for_each_entry_safe(res, tmp, vlan_list, list) {
1782 if (res->vlan == vlan && res->port == (u8) port) {
 1783			/* vlan found; update its ref count */
1784 ++res->ref_count;
1785 return 0;
1786 }
1787 }
1788
1789 if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
1790 return -EINVAL;
1791 res = kzalloc(sizeof(*res), GFP_KERNEL);
1792 if (!res) {
1793 mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
1794 return -ENOMEM;
1795 }
1796 res->vlan = vlan;
1797 res->port = (u8) port;
1798 res->vlan_index = vlan_index;
1799 res->ref_count = 1;
1800 list_add_tail(&res->list,
1801 &tracker->slave_list[slave].res_list[RES_VLAN]);
1475 return 0; 1802 return 0;
1476} 1803}
1477 1804
1805
1806static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1807 int port)
1808{
1809 struct mlx4_priv *priv = mlx4_priv(dev);
1810 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1811 struct list_head *vlan_list =
1812 &tracker->slave_list[slave].res_list[RES_VLAN];
1813 struct vlan_res *res, *tmp;
1814
1815 list_for_each_entry_safe(res, tmp, vlan_list, list) {
1816 if (res->vlan == vlan && res->port == (u8) port) {
1817 if (!--res->ref_count) {
1818 list_del(&res->list);
1819 mlx4_release_resource(dev, slave, RES_VLAN,
1820 1, port);
1821 kfree(res);
1822 }
1823 break;
1824 }
1825 }
1826}
1827
1828static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
1829{
1830 struct mlx4_priv *priv = mlx4_priv(dev);
1831 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1832 struct list_head *vlan_list =
1833 &tracker->slave_list[slave].res_list[RES_VLAN];
1834 struct vlan_res *res, *tmp;
1835 int i;
1836
1837 list_for_each_entry_safe(res, tmp, vlan_list, list) {
1838 list_del(&res->list);
 1839		/* drop the vlan reference as many times as the slave took it */
1840 for (i = 0; i < res->ref_count; i++)
1841 __mlx4_unregister_vlan(dev, res->port, res->vlan);
1842 mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
1843 kfree(res);
1844 }
1845}
1846
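[Editor's note] Unlike MACs, VLAN entries are refcounted per (vlan, port): a repeat registration by the same slave only bumps ref_count, and the quota is charged once per distinct entry; teardown then drops the hardware registration ref_count times, as rem_slave_vlans() above shows. A minimal userspace analogue of the lookup-or-insert step, with hypothetical names:

#include <stdlib.h>

struct vlan_ref {
	struct vlan_ref *next;
	unsigned short vlan;
	unsigned char port;
	int ref_count;
};

/* Return an existing entry (bumping ref_count) or add a new one that
 * the caller charges against the quota once; mirrors the shape of
 * vlan_add_to_slave() above. */
static int vlan_ref_get(struct vlan_ref **head, unsigned short vlan,
			unsigned char port)
{
	for (struct vlan_ref *r = *head; r; r = r->next) {
		if (r->vlan == vlan && r->port == port) {
			r->ref_count++;
			return 0;
		}
	}
	struct vlan_ref *n = calloc(1, sizeof(*n));
	if (!n)
		return -1;              /* caller releases the quota */
	n->vlan = vlan;
	n->port = port;
	n->ref_count = 1;
	n->next = *head;
	*head = n;
	return 0;
}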
1847static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1848 u64 in_param, u64 *out_param, int in_port)
1849{
1850 struct mlx4_priv *priv = mlx4_priv(dev);
1851 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
1852 int err;
1853 u16 vlan;
1854 int vlan_index;
1855 int port;
1856
1857 port = !in_port ? get_param_l(out_param) : in_port;
1858
1859 if (!port || op != RES_OP_RESERVE_AND_MAP)
1860 return -EINVAL;
1861
 1862	/* upstream kernels had a NOP for vlan reg/unreg; keep that behavior */
1863 if (!in_port && port > 0 && port <= dev->caps.num_ports) {
1864 slave_state[slave].old_vlan_api = true;
1865 return 0;
1866 }
1867
1868 vlan = (u16) in_param;
1869
1870 err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
1871 if (!err) {
1872 set_param_l(out_param, (u32) vlan_index);
1873 err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
1874 if (err)
1875 __mlx4_unregister_vlan(dev, port, vlan);
1876 }
1877 return err;
1878}
1879
1478static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, 1880static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1479 u64 in_param, u64 *out_param) 1881 u64 in_param, u64 *out_param)
1480{ 1882{
@@ -1484,15 +1886,23 @@ static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1484 if (op != RES_OP_RESERVE) 1886 if (op != RES_OP_RESERVE)
1485 return -EINVAL; 1887 return -EINVAL;
1486 1888
1487 err = __mlx4_counter_alloc(dev, &index); 1889 err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
1488 if (err) 1890 if (err)
1489 return err; 1891 return err;
1490 1892
1893 err = __mlx4_counter_alloc(dev, &index);
1894 if (err) {
1895 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
1896 return err;
1897 }
1898
1491 err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0); 1899 err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
1492 if (err) 1900 if (err) {
1493 __mlx4_counter_free(dev, index); 1901 __mlx4_counter_free(dev, index);
1494 else 1902 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
1903 } else {
1495 set_param_l(out_param, index); 1904 set_param_l(out_param, index);
1905 }
1496 1906
1497 return err; 1907 return err;
1498} 1908}
@@ -1528,7 +1938,7 @@ int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
1528 int err; 1938 int err;
1529 int alop = vhcr->op_modifier; 1939 int alop = vhcr->op_modifier;
1530 1940
1531 switch (vhcr->in_modifier) { 1941 switch (vhcr->in_modifier & 0xFF) {
1532 case RES_QP: 1942 case RES_QP:
1533 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop, 1943 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
1534 vhcr->in_param, &vhcr->out_param); 1944 vhcr->in_param, &vhcr->out_param);
@@ -1556,12 +1966,14 @@ int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
1556 1966
1557 case RES_MAC: 1967 case RES_MAC:
1558 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop, 1968 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
1559 vhcr->in_param, &vhcr->out_param); 1969 vhcr->in_param, &vhcr->out_param,
1970 (vhcr->in_modifier >> 8) & 0xFF);
1560 break; 1971 break;
1561 1972
1562 case RES_VLAN: 1973 case RES_VLAN:
1563 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop, 1974 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
1564 vhcr->in_param, &vhcr->out_param); 1975 vhcr->in_param, &vhcr->out_param,
1976 (vhcr->in_modifier >> 8) & 0xFF);
1565 break; 1977 break;
1566 1978
1567 case RES_COUNTER: 1979 case RES_COUNTER:
@@ -1597,6 +2009,7 @@ static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1597 err = rem_res_range(dev, slave, base, count, RES_QP, 0); 2009 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
1598 if (err) 2010 if (err)
1599 break; 2011 break;
2012 mlx4_release_resource(dev, slave, RES_QP, count, 0);
1600 __mlx4_qp_release_range(dev, base, count); 2013 __mlx4_qp_release_range(dev, base, count);
1601 break; 2014 break;
1602 case RES_OP_MAP_ICM: 2015 case RES_OP_MAP_ICM:
@@ -1634,8 +2047,10 @@ static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1634 base = get_param_l(&in_param); 2047 base = get_param_l(&in_param);
1635 order = get_param_h(&in_param); 2048 order = get_param_h(&in_param);
1636 err = rem_res_range(dev, slave, base, 1, RES_MTT, order); 2049 err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
1637 if (!err) 2050 if (!err) {
2051 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1638 __mlx4_free_mtt_range(dev, base, order); 2052 __mlx4_free_mtt_range(dev, base, order);
2053 }
1639 return err; 2054 return err;
1640} 2055}
1641 2056
@@ -1660,6 +2075,7 @@ static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1660 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0); 2075 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
1661 if (err) 2076 if (err)
1662 break; 2077 break;
2078 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1663 __mlx4_mpt_release(dev, index); 2079 __mlx4_mpt_release(dev, index);
1664 break; 2080 break;
1665 case RES_OP_MAP_ICM: 2081 case RES_OP_MAP_ICM:
@@ -1694,6 +2110,7 @@ static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1694 if (err) 2110 if (err)
1695 break; 2111 break;
1696 2112
2113 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1697 __mlx4_cq_free_icm(dev, cqn); 2114 __mlx4_cq_free_icm(dev, cqn);
1698 break; 2115 break;
1699 2116
@@ -1718,6 +2135,7 @@ static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1718 if (err) 2135 if (err)
1719 break; 2136 break;
1720 2137
2138 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1721 __mlx4_srq_free_icm(dev, srqn); 2139 __mlx4_srq_free_icm(dev, srqn);
1722 break; 2140 break;
1723 2141
@@ -1730,14 +2148,14 @@ static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1730} 2148}
1731 2149
1732static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, 2150static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1733 u64 in_param, u64 *out_param) 2151 u64 in_param, u64 *out_param, int in_port)
1734{ 2152{
1735 int port; 2153 int port;
1736 int err = 0; 2154 int err = 0;
1737 2155
1738 switch (op) { 2156 switch (op) {
1739 case RES_OP_RESERVE_AND_MAP: 2157 case RES_OP_RESERVE_AND_MAP:
1740 port = get_param_l(out_param); 2158 port = !in_port ? get_param_l(out_param) : in_port;
1741 mac_del_from_slave(dev, slave, in_param, port); 2159 mac_del_from_slave(dev, slave, in_param, port);
1742 __mlx4_unregister_mac(dev, port, in_param); 2160 __mlx4_unregister_mac(dev, port, in_param);
1743 break; 2161 break;
@@ -1751,9 +2169,27 @@ static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1751} 2169}
1752 2170
1753static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, 2171static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1754 u64 in_param, u64 *out_param) 2172 u64 in_param, u64 *out_param, int port)
1755{ 2173{
1756 return 0; 2174 struct mlx4_priv *priv = mlx4_priv(dev);
2175 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2176 int err = 0;
2177
2178 switch (op) {
2179 case RES_OP_RESERVE_AND_MAP:
2180 if (slave_state[slave].old_vlan_api)
2181 return 0;
2182 if (!port)
2183 return -EINVAL;
2184 vlan_del_from_slave(dev, slave, in_param, port);
2185 __mlx4_unregister_vlan(dev, port, in_param);
2186 break;
2187 default:
2188 err = -EINVAL;
2189 break;
2190 }
2191
2192 return err;
1757} 2193}
1758 2194
1759static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, 2195static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
@@ -1771,6 +2207,7 @@ static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1771 return err; 2207 return err;
1772 2208
1773 __mlx4_counter_free(dev, index); 2209 __mlx4_counter_free(dev, index);
2210 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
1774 2211
1775 return err; 2212 return err;
1776} 2213}
@@ -1803,7 +2240,7 @@ int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
1803 int err = -EINVAL; 2240 int err = -EINVAL;
1804 int alop = vhcr->op_modifier; 2241 int alop = vhcr->op_modifier;
1805 2242
1806 switch (vhcr->in_modifier) { 2243 switch (vhcr->in_modifier & 0xFF) {
1807 case RES_QP: 2244 case RES_QP:
1808 err = qp_free_res(dev, slave, vhcr->op_modifier, alop, 2245 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
1809 vhcr->in_param); 2246 vhcr->in_param);
@@ -1831,12 +2268,14 @@ int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
1831 2268
1832 case RES_MAC: 2269 case RES_MAC:
1833 err = mac_free_res(dev, slave, vhcr->op_modifier, alop, 2270 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
1834 vhcr->in_param, &vhcr->out_param); 2271 vhcr->in_param, &vhcr->out_param,
2272 (vhcr->in_modifier >> 8) & 0xFF);
1835 break; 2273 break;
1836 2274
1837 case RES_VLAN: 2275 case RES_VLAN:
1838 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop, 2276 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
1839 vhcr->in_param, &vhcr->out_param); 2277 vhcr->in_param, &vhcr->out_param,
2278 (vhcr->in_modifier >> 8) & 0xFF);
1840 break; 2279 break;
1841 2280
1842 case RES_COUNTER: 2281 case RES_COUNTER:
@@ -2136,6 +2575,12 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2136 return err; 2575 return err;
2137 qp->local_qpn = local_qpn; 2576 qp->local_qpn = local_qpn;
2138 qp->sched_queue = 0; 2577 qp->sched_queue = 0;
2578 qp->param3 = 0;
2579 qp->vlan_control = 0;
2580 qp->fvl_rx = 0;
2581 qp->pri_path_fl = 0;
2582 qp->vlan_index = 0;
2583 qp->feup = 0;
2139 qp->qpc_flags = be32_to_cpu(qpc->flags); 2584 qp->qpc_flags = be32_to_cpu(qpc->flags);
2140 2585
2141 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); 2586 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
@@ -2862,6 +3307,12 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
2862 int qpn = vhcr->in_modifier & 0x7fffff; 3307 int qpn = vhcr->in_modifier & 0x7fffff;
2863 struct res_qp *qp; 3308 struct res_qp *qp;
2864 u8 orig_sched_queue; 3309 u8 orig_sched_queue;
3310 __be32 orig_param3 = qpc->param3;
3311 u8 orig_vlan_control = qpc->pri_path.vlan_control;
3312 u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3313 u8 orig_pri_path_fl = qpc->pri_path.fl;
3314 u8 orig_vlan_index = qpc->pri_path.vlan_index;
3315 u8 orig_feup = qpc->pri_path.feup;
2865 3316
2866 err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave); 3317 err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
2867 if (err) 3318 if (err)
@@ -2889,9 +3340,15 @@ out:
2889 * essentially the QOS value provided by the VF. This will be useful 3340 * essentially the QOS value provided by the VF. This will be useful
2890 * if we allow dynamic changes from VST back to VGT 3341 * if we allow dynamic changes from VST back to VGT
2891 */ 3342 */
2892 if (!err) 3343 if (!err) {
2893 qp->sched_queue = orig_sched_queue; 3344 qp->sched_queue = orig_sched_queue;
2894 3345 qp->param3 = orig_param3;
3346 qp->vlan_control = orig_vlan_control;
3347 qp->fvl_rx = orig_fvl_rx;
3348 qp->pri_path_fl = orig_pri_path_fl;
3349 qp->vlan_index = orig_vlan_index;
3350 qp->feup = orig_feup;
3351 }
2895 put_res(dev, slave, qpn, RES_QP); 3352 put_res(dev, slave, qpn, RES_QP);
2896 return err; 3353 return err;
2897} 3354}
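[Editor's note] Taken together with the RST2INIT hunk above, which zeroes these fields, the INIT2RTR wrapper snapshots the vlan-related values the guest supplied and stores them in the tracked QP on success. That is what lets the immediate-vlan work handler below restore the guest's own configuration when the admin moves the VF from VST back to VGT. The shape of the saved state, with field names following the res_qp additions in this patch (param3 is __be32 in the driver, shown here as a raw 32-bit value):

#include <stdint.h>

struct saved_vst_state {
	uint32_t param3;        /* big-endian in the driver */
	uint8_t  vlan_control;
	uint8_t  fvl_rx;
	uint8_t  pri_path_fl;
	uint8_t  vlan_index;
	uint8_t  feup;
};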
@@ -3498,6 +3955,11 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave)
3498 &tracker->res_tree[RES_QP]); 3955 &tracker->res_tree[RES_QP]);
3499 list_del(&qp->com.list); 3956 list_del(&qp->com.list);
3500 spin_unlock_irq(mlx4_tlock(dev)); 3957 spin_unlock_irq(mlx4_tlock(dev));
3958 if (!valid_reserved(dev, slave, qpn)) {
3959 __mlx4_qp_release_range(dev, qpn, 1);
3960 mlx4_release_resource(dev, slave,
3961 RES_QP, 1, 0);
3962 }
3501 kfree(qp); 3963 kfree(qp);
3502 state = 0; 3964 state = 0;
3503 break; 3965 break;
@@ -3569,6 +4031,8 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
3569 &tracker->res_tree[RES_SRQ]); 4031 &tracker->res_tree[RES_SRQ]);
3570 list_del(&srq->com.list); 4032 list_del(&srq->com.list);
3571 spin_unlock_irq(mlx4_tlock(dev)); 4033 spin_unlock_irq(mlx4_tlock(dev));
4034 mlx4_release_resource(dev, slave,
4035 RES_SRQ, 1, 0);
3572 kfree(srq); 4036 kfree(srq);
3573 state = 0; 4037 state = 0;
3574 break; 4038 break;
@@ -3635,6 +4099,8 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
3635 &tracker->res_tree[RES_CQ]); 4099 &tracker->res_tree[RES_CQ]);
3636 list_del(&cq->com.list); 4100 list_del(&cq->com.list);
3637 spin_unlock_irq(mlx4_tlock(dev)); 4101 spin_unlock_irq(mlx4_tlock(dev));
4102 mlx4_release_resource(dev, slave,
4103 RES_CQ, 1, 0);
3638 kfree(cq); 4104 kfree(cq);
3639 state = 0; 4105 state = 0;
3640 break; 4106 break;
@@ -3698,6 +4164,8 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
3698 &tracker->res_tree[RES_MPT]); 4164 &tracker->res_tree[RES_MPT]);
3699 list_del(&mpt->com.list); 4165 list_del(&mpt->com.list);
3700 spin_unlock_irq(mlx4_tlock(dev)); 4166 spin_unlock_irq(mlx4_tlock(dev));
4167 mlx4_release_resource(dev, slave,
4168 RES_MPT, 1, 0);
3701 kfree(mpt); 4169 kfree(mpt);
3702 state = 0; 4170 state = 0;
3703 break; 4171 break;
@@ -3767,6 +4235,8 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
3767 &tracker->res_tree[RES_MTT]); 4235 &tracker->res_tree[RES_MTT]);
3768 list_del(&mtt->com.list); 4236 list_del(&mtt->com.list);
3769 spin_unlock_irq(mlx4_tlock(dev)); 4237 spin_unlock_irq(mlx4_tlock(dev));
4238 mlx4_release_resource(dev, slave, RES_MTT,
4239 1 << mtt->order, 0);
3770 kfree(mtt); 4240 kfree(mtt);
3771 state = 0; 4241 state = 0;
3772 break; 4242 break;
@@ -3925,6 +4395,7 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave)
3925 list_del(&counter->com.list); 4395 list_del(&counter->com.list);
3926 kfree(counter); 4396 kfree(counter);
3927 __mlx4_counter_free(dev, index); 4397 __mlx4_counter_free(dev, index);
4398 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
3928 } 4399 }
3929 } 4400 }
3930 spin_unlock_irq(mlx4_tlock(dev)); 4401 spin_unlock_irq(mlx4_tlock(dev));
@@ -3964,7 +4435,7 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
3964 struct mlx4_priv *priv = mlx4_priv(dev); 4435 struct mlx4_priv *priv = mlx4_priv(dev);
3965 4436
3966 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); 4437 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3967 /*VLAN*/ 4438 rem_slave_vlans(dev, slave);
3968 rem_slave_macs(dev, slave); 4439 rem_slave_macs(dev, slave);
3969 rem_slave_fs_rule(dev, slave); 4440 rem_slave_fs_rule(dev, slave);
3970 rem_slave_qps(dev, slave); 4441 rem_slave_qps(dev, slave);
@@ -3991,13 +4462,20 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
3991 &tracker->slave_list[work->slave].res_list[RES_QP]; 4462 &tracker->slave_list[work->slave].res_list[RES_QP];
3992 struct res_qp *qp; 4463 struct res_qp *qp;
3993 struct res_qp *tmp; 4464 struct res_qp *tmp;
3994 u64 qp_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) | 4465 u64 qp_path_mask_vlan_ctrl =
4466 ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
3995 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) | 4467 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
3996 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) | 4468 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
3997 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) | 4469 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
3998 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) | 4470 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
3999 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED) | 4471 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
4000 (1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) | 4472
4473 u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
4474 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
4475 (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
4476 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
4477 (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
4478 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
4001 (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE)); 4479 (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
4002 4480
4003 int err; 4481 int err;
@@ -4029,9 +4507,7 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
4029 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED; 4507 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
4030 4508
4031 upd_context = mailbox->buf; 4509 upd_context = mailbox->buf;
4032 upd_context->primary_addr_path_mask = cpu_to_be64(qp_mask); 4510 upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD);
4033 upd_context->qp_context.pri_path.vlan_control = vlan_control;
4034 upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
4035 4511
4036 spin_lock_irq(mlx4_tlock(dev)); 4512 spin_lock_irq(mlx4_tlock(dev));
4037 list_for_each_entry_safe(qp, tmp, qp_list, com.list) { 4513 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
@@ -4049,10 +4525,35 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
4049 spin_lock_irq(mlx4_tlock(dev)); 4525 spin_lock_irq(mlx4_tlock(dev));
4050 continue; 4526 continue;
4051 } 4527 }
4052 upd_context->qp_context.pri_path.sched_queue = 4528 if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
4053 qp->sched_queue & 0xC7; 4529 upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
4054 upd_context->qp_context.pri_path.sched_queue |= 4530 else
4055 ((work->qos & 0x7) << 3); 4531 upd_context->primary_addr_path_mask =
4532 cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
4533 if (work->vlan_id == MLX4_VGT) {
4534 upd_context->qp_context.param3 = qp->param3;
4535 upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
4536 upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
4537 upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
4538 upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
4539 upd_context->qp_context.pri_path.feup = qp->feup;
4540 upd_context->qp_context.pri_path.sched_queue =
4541 qp->sched_queue;
4542 } else {
4543 upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
4544 upd_context->qp_context.pri_path.vlan_control = vlan_control;
4545 upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
4546 upd_context->qp_context.pri_path.fvl_rx =
4547 qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
4548 upd_context->qp_context.pri_path.fl =
4549 qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
4550 upd_context->qp_context.pri_path.feup =
4551 qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
4552 upd_context->qp_context.pri_path.sched_queue =
4553 qp->sched_queue & 0xC7;
4554 upd_context->qp_context.pri_path.sched_queue |=
4555 ((work->qos & 0x7) << 3);
4556 }
4056 4557
4057 err = mlx4_cmd(dev, mailbox->dma, 4558 err = mlx4_cmd(dev, mailbox->dma,
4058 qp->local_qpn & 0xffffff, 4559 qp->local_qpn & 0xffffff,
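[Editor's note] The branch above builds two different UPDATE_QP requests from the saved state: for VGT it writes the guest's own values back verbatim, while for VST it forces the admin's vlan index, the force-vlan/feup bits and the QoS on top of them. The sched_queue packing in the VST arm keeps the guest's queue selection and overwrites only the priority bits; a one-line sketch of that packing:

#include <stdint.h>

/* bits 5:3 of sched_queue carry the user priority; the 0xC7 mask
 * preserves everything else the guest chose. */
static inline uint8_t apply_qos(uint8_t sched_queue, uint8_t qos)
{
	return (uint8_t)((sched_queue & 0xC7) | ((qos & 0x7) << 3));
}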
@@ -4081,7 +4582,7 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
4081 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors && 4582 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
4082 NO_INDX != work->orig_vlan_ix) 4583 NO_INDX != work->orig_vlan_ix)
4083 __mlx4_unregister_vlan(&work->priv->dev, work->port, 4584 __mlx4_unregister_vlan(&work->priv->dev, work->port,
4084 work->orig_vlan_ix); 4585 work->orig_vlan_id);
4085out: 4586out:
4086 kfree(work); 4587 kfree(work);
4087 return; 4588 return;
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index 79fd269e2c54..8fdf23753779 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -34,6 +34,7 @@
34#include <linux/init.h> 34#include <linux/init.h>
35 35
36#include <linux/mlx4/cmd.h> 36#include <linux/mlx4/cmd.h>
37#include <linux/mlx4/srq.h>
37#include <linux/export.h> 38#include <linux/export.h>
38#include <linux/gfp.h> 39#include <linux/gfp.h>
39 40
@@ -188,8 +189,6 @@ int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
188 } 189 }
189 190
190 srq_context = mailbox->buf; 191 srq_context = mailbox->buf;
191 memset(srq_context, 0, sizeof *srq_context);
192
193 srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) | 192 srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) |
194 srq->srqn); 193 srq->srqn);
195 srq_context->logstride = srq->wqe_shift - 4; 194 srq_context->logstride = srq->wqe_shift - 4;
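[Editor's note] The memset() removed here is presumably redundant because command mailbox buffers are zeroed at allocation by a companion change in this series; that zeroing is not visible in this hunk, so treat it as an assumption from context. The userspace analogue of the reasoning:

#include <stdlib.h>

struct srq_ctx {
	unsigned int  state_logsize_srqn;
	unsigned char logstride;
};

static struct srq_ctx *alloc_srq_ctx(void)
{
	/* calloc() returns zeroed storage, so a follow-up
	 * memset(ctx, 0, sizeof(*ctx)) would be redundant; the driver
	 * relies on the same property of its mailbox buffers (assumed). */
	return calloc(1, sizeof(struct srq_ctx));
}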