aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorEugenia Emantayev <eugenia@mellanox.co.il>2014-12-11 03:57:54 -0500
committerDavid S. Miller <davem@davemloft.net>2014-12-11 14:47:35 -0500
commitddae0349fdb78bcc5e7219061847012aa1a29069 (patch)
tree1159aa411fdc49c7ec3aeb93aa999c90cb2ac224
parent3dca0f42c7baaa4e01699629da13d6556f001ebe (diff)
net/mlx4: Change QP allocation scheme
When using BF (Blue-Flame), the QPN overrides the VLAN, CV, and SV fields in the WQE. Thus, BF may only be used for QPNs with bits 6,7 unset. The current Ethernet driver code reserves a Tx QP range with 256b alignment. This is wrong because if there are more than 64 Tx QPs in use, QPNs >= base + 64 will have bits 6/7 set. This problem is not specific to the Ethernet driver; any entity that tries to reserve more than 64 BF-enabled QPs should fail. Also, using ranges is not necessary here and is wasteful. The new mechanism introduced here will support reservation for "Eth QPs eligible for BF" for all drivers: bare-metal, multi-PF, and VFs (when hypervisors support WC in VMs). The flow we use is: 1. In mlx4_en, allocate Tx QPs one by one instead of a range allocation, and request "BF enabled QPs" if BF is supported for the function 2. In the ALLOC_RES FW command, change param1 to: a. param1[23:0] - number of QPs b. param1[31:24] - flags controlling QP reservation Bit 31 refers to Eth blueflame supported QPs. Those QPs must have bits 6 and 7 unset in order to be used in Ethernet. Bits 24-30 of the flags are currently reserved. When a function tries to allocate a QP, it states the required attributes for this QP. Those attributes are considered "best-effort". If an attribute, such as Ethernet BF enabled QP, is a must-have attribute, the function has to check that attribute is supported before trying to do the allocation. In a lower layer of the code, mlx4_qp_reserve_range masks out the bits which are unsupported. If SRIOV is used, the PF validates those attributes and masks out unsupported attributes as well. In order to notify VFs which attributes are supported, the VF uses QUERY_FUNC_CAP command. This command's mailbox is filled by the PF, which notifies which QP allocation attributes it supports. 
Signed-off-by: Eugenia Emantayev <eugenia@mellanox.co.il> Signed-off-by: Matan Barak <matanb@mellanox.com> Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/infiniband/hw/mlx4/main.c2
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/alloc.c43
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c7
-rw-r--r--include/linux/mlx4/device.h21
14 files changed, 137 insertions, 38 deletions
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 0c3375524a64..57ecc5b204f3 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -2227,7 +2227,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
2227 ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS; 2227 ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
2228 err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count, 2228 err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
2229 MLX4_IB_UC_STEER_QPN_ALIGN, 2229 MLX4_IB_UC_STEER_QPN_ALIGN,
2230 &ibdev->steer_qpn_base); 2230 &ibdev->steer_qpn_base, 0);
2231 if (err) 2231 if (err)
2232 goto err_counter; 2232 goto err_counter;
2233 2233
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 9c5150c3cb31..506d1bdad227 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -802,16 +802,19 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
802 } 802 }
803 } 803 }
804 } else { 804 } else {
805 /* Raw packet QPNs must be aligned to 8 bits. If not, the WQE 805 /* Raw packet QPNs may not have bits 6,7 set in their qp_num;
806 * BlueFlame setup flow wrongly causes VLAN insertion. */ 806 * otherwise, the WQE BlueFlame setup flow wrongly causes
807 * VLAN insertion. */
807 if (init_attr->qp_type == IB_QPT_RAW_PACKET) 808 if (init_attr->qp_type == IB_QPT_RAW_PACKET)
808 err = mlx4_qp_reserve_range(dev->dev, 1, 1 << 8, &qpn); 809 err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn,
810 init_attr->cap.max_send_wr ?
811 MLX4_RESERVE_ETH_BF_QP : 0);
809 else 812 else
810 if (qp->flags & MLX4_IB_QP_NETIF) 813 if (qp->flags & MLX4_IB_QP_NETIF)
811 err = mlx4_ib_steer_qp_alloc(dev, 1, &qpn); 814 err = mlx4_ib_steer_qp_alloc(dev, 1, &qpn);
812 else 815 else
813 err = mlx4_qp_reserve_range(dev->dev, 1, 1, 816 err = mlx4_qp_reserve_range(dev->dev, 1, 1,
814 &qpn); 817 &qpn, 0);
815 if (err) 818 if (err)
816 goto err_proxy; 819 goto err_proxy;
817 } 820 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index b0297da50304..91a8acc191bb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -76,22 +76,53 @@ void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr)
76 mlx4_bitmap_free_range(bitmap, obj, 1, use_rr); 76 mlx4_bitmap_free_range(bitmap, obj, 1, use_rr);
77} 77}
78 78
79u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align) 79static unsigned long find_aligned_range(unsigned long *bitmap,
80 u32 start, u32 nbits,
81 int len, int align, u32 skip_mask)
82{
83 unsigned long end, i;
84
85again:
86 start = ALIGN(start, align);
87
88 while ((start < nbits) && (test_bit(start, bitmap) ||
89 (start & skip_mask)))
90 start += align;
91
92 if (start >= nbits)
93 return -1;
94
95 end = start+len;
96 if (end > nbits)
97 return -1;
98
99 for (i = start + 1; i < end; i++) {
100 if (test_bit(i, bitmap) || ((u32)i & skip_mask)) {
101 start = i + 1;
102 goto again;
103 }
104 }
105
106 return start;
107}
108
109u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt,
110 int align, u32 skip_mask)
80{ 111{
81 u32 obj; 112 u32 obj;
82 113
83 if (likely(cnt == 1 && align == 1)) 114 if (likely(cnt == 1 && align == 1 && !skip_mask))
84 return mlx4_bitmap_alloc(bitmap); 115 return mlx4_bitmap_alloc(bitmap);
85 116
86 spin_lock(&bitmap->lock); 117 spin_lock(&bitmap->lock);
87 118
88 obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max, 119 obj = find_aligned_range(bitmap->table, bitmap->last,
89 bitmap->last, cnt, align - 1); 120 bitmap->max, cnt, align, skip_mask);
90 if (obj >= bitmap->max) { 121 if (obj >= bitmap->max) {
91 bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) 122 bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
92 & bitmap->mask; 123 & bitmap->mask;
93 obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max, 124 obj = find_aligned_range(bitmap->table, 0, bitmap->max,
94 0, cnt, align - 1); 125 cnt, align, skip_mask);
95 } 126 }
96 127
97 if (obj < bitmap->max) { 128 if (obj < bitmap->max) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index dccf0e1f86be..c67effb05b2f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -595,7 +595,7 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
595 return 0; 595 return 0;
596 } 596 }
597 597
598 err = mlx4_qp_reserve_range(dev, 1, 1, qpn); 598 err = mlx4_qp_reserve_range(dev, 1, 1, qpn, 0);
599 en_dbg(DRV, priv, "Reserved qp %d\n", *qpn); 599 en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
600 if (err) { 600 if (err) {
601 en_err(priv, "Failed to reserve qp for mac registration\n"); 601 en_err(priv, "Failed to reserve qp for mac registration\n");
@@ -1974,15 +1974,8 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
1974{ 1974{
1975 struct mlx4_en_port_profile *prof = priv->prof; 1975 struct mlx4_en_port_profile *prof = priv->prof;
1976 int i; 1976 int i;
1977 int err;
1978 int node; 1977 int node;
1979 1978
1980 err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn);
1981 if (err) {
1982 en_err(priv, "failed reserving range for TX rings\n");
1983 return err;
1984 }
1985
1986 /* Create tx Rings */ 1979 /* Create tx Rings */
1987 for (i = 0; i < priv->tx_ring_num; i++) { 1980 for (i = 0; i < priv->tx_ring_num; i++) {
1988 node = cpu_to_node(i % num_online_cpus()); 1981 node = cpu_to_node(i % num_online_cpus());
@@ -1991,7 +1984,6 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
1991 goto err; 1984 goto err;
1992 1985
1993 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], 1986 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
1994 priv->base_tx_qpn + i,
1995 prof->tx_ring_size, TXBB_SIZE, 1987 prof->tx_ring_size, TXBB_SIZE,
1996 node, i)) 1988 node, i))
1997 goto err; 1989 goto err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 69a2e1b88ea8..a850f24fabdf 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -1131,7 +1131,7 @@ int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
1131 int err; 1131 int err;
1132 u32 qpn; 1132 u32 qpn;
1133 1133
1134 err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn); 1134 err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn, 0);
1135 if (err) { 1135 if (err) {
1136 en_err(priv, "Failed reserving drop qpn\n"); 1136 en_err(priv, "Failed reserving drop qpn\n");
1137 return err; 1137 return err;
@@ -1174,7 +1174,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
1174 en_dbg(DRV, priv, "Configuring rss steering\n"); 1174 en_dbg(DRV, priv, "Configuring rss steering\n");
1175 err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num, 1175 err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
1176 priv->rx_ring_num, 1176 priv->rx_ring_num,
1177 &rss_map->base_qpn); 1177 &rss_map->base_qpn, 0);
1178 if (err) { 1178 if (err) {
1179 en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num); 1179 en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
1180 return err; 1180 return err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index d0cecbdd9ba8..a308d41e4de0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -46,7 +46,7 @@
46#include "mlx4_en.h" 46#include "mlx4_en.h"
47 47
48int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, 48int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
49 struct mlx4_en_tx_ring **pring, int qpn, u32 size, 49 struct mlx4_en_tx_ring **pring, u32 size,
50 u16 stride, int node, int queue_index) 50 u16 stride, int node, int queue_index)
51{ 51{
52 struct mlx4_en_dev *mdev = priv->mdev; 52 struct mlx4_en_dev *mdev = priv->mdev;
@@ -112,11 +112,17 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
112 ring, ring->buf, ring->size, ring->buf_size, 112 ring, ring->buf, ring->size, ring->buf_size,
113 (unsigned long long) ring->wqres.buf.direct.map); 113 (unsigned long long) ring->wqres.buf.direct.map);
114 114
115 ring->qpn = qpn; 115 err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn,
116 MLX4_RESERVE_ETH_BF_QP);
117 if (err) {
118 en_err(priv, "failed reserving qp for TX ring\n");
119 goto err_map;
120 }
121
116 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL); 122 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL);
117 if (err) { 123 if (err) {
118 en_err(priv, "Failed allocating qp %d\n", ring->qpn); 124 en_err(priv, "Failed allocating qp %d\n", ring->qpn);
119 goto err_map; 125 goto err_reserve;
120 } 126 }
121 ring->qp.event = mlx4_en_sqp_event; 127 ring->qp.event = mlx4_en_sqp_event;
122 128
@@ -143,6 +149,8 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
143 *pring = ring; 149 *pring = ring;
144 return 0; 150 return 0;
145 151
152err_reserve:
153 mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
146err_map: 154err_map:
147 mlx4_en_unmap_buffer(&ring->wqres.buf); 155 mlx4_en_unmap_buffer(&ring->wqres.buf);
148err_hwq_res: 156err_hwq_res:
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 5089f76f060b..1469b5b5be64 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -266,10 +266,15 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
266#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x64 266#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x64
267#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x68 267#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x68
268 268
269#define QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET 0x6c
270
269#define QUERY_FUNC_CAP_FMR_FLAG 0x80 271#define QUERY_FUNC_CAP_FMR_FLAG 0x80
270#define QUERY_FUNC_CAP_FLAG_RDMA 0x40 272#define QUERY_FUNC_CAP_FLAG_RDMA 0x40
271#define QUERY_FUNC_CAP_FLAG_ETH 0x80 273#define QUERY_FUNC_CAP_FLAG_ETH 0x80
272#define QUERY_FUNC_CAP_FLAG_QUOTAS 0x10 274#define QUERY_FUNC_CAP_FLAG_QUOTAS 0x10
275#define QUERY_FUNC_CAP_FLAG_VALID_MAILBOX 0x04
276
277#define QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG (1UL << 31)
273 278
274/* when opcode modifier = 1 */ 279/* when opcode modifier = 1 */
275#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3 280#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3
@@ -339,7 +344,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
339 mlx4_get_active_ports(dev, slave); 344 mlx4_get_active_ports(dev, slave);
340 /* enable rdma and ethernet interfaces, and new quota locations */ 345 /* enable rdma and ethernet interfaces, and new quota locations */
341 field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA | 346 field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
342 QUERY_FUNC_CAP_FLAG_QUOTAS); 347 QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX);
343 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET); 348 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
344 349
345 field = min( 350 field = min(
@@ -401,6 +406,8 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
401 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET); 406 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
402 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP); 407 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
403 408
409 size = QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG;
410 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);
404 } else 411 } else
405 err = -EINVAL; 412 err = -EINVAL;
406 413
@@ -493,6 +500,17 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
493 MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); 500 MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
494 func_cap->reserved_eq = size & 0xFFFFFF; 501 func_cap->reserved_eq = size & 0xFFFFFF;
495 502
503 func_cap->extra_flags = 0;
504
505 /* Mailbox data from 0x6c and onward should only be treated if
506 * QUERY_FUNC_CAP_FLAG_VALID_MAILBOX is set in func_cap->flags
507 */
508 if (func_cap->flags & QUERY_FUNC_CAP_FLAG_VALID_MAILBOX) {
509 MLX4_GET(size, outbox, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);
510 if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG)
511 func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_BF_RES_QP;
512 }
513
496 goto out; 514 goto out;
497 } 515 }
498 516
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 475215ee370f..0e910a452b02 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -144,6 +144,7 @@ struct mlx4_func_cap {
144 u8 port_flags; 144 u8 port_flags;
145 u8 flags1; 145 u8 flags1;
146 u64 phys_port_id; 146 u64 phys_port_id;
147 u32 extra_flags;
147}; 148};
148 149
149struct mlx4_func { 150struct mlx4_func {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 3044f9e623cb..6a9a941ddf58 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -466,8 +466,13 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
466 mlx4_is_master(dev)) 466 mlx4_is_master(dev))
467 dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE; 467 dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;
468 468
469 if (!mlx4_is_slave(dev)) 469 if (!mlx4_is_slave(dev)) {
470 mlx4_enable_cqe_eqe_stride(dev); 470 mlx4_enable_cqe_eqe_stride(dev);
471 dev->caps.alloc_res_qp_mask =
472 (dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0);
473 } else {
474 dev->caps.alloc_res_qp_mask = 0;
475 }
471 476
472 return 0; 477 return 0;
473} 478}
@@ -817,6 +822,10 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
817 822
818 slave_adjust_steering_mode(dev, &dev_cap, &hca_param); 823 slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
819 824
825 if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP &&
826 dev->caps.bf_reg_size)
827 dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP;
828
820 return 0; 829 return 0;
821 830
822err_mem: 831err_mem:
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index b67ef488c30c..6834da6c35ed 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -884,7 +884,8 @@ extern struct workqueue_struct *mlx4_wq;
884 884
885u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap); 885u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
886void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr); 886void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr);
887u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align); 887u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt,
888 int align, u32 skip_mask);
888void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt, 889void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt,
889 int use_rr); 890 int use_rr);
890u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap); 891u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap);
@@ -970,7 +971,7 @@ int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
970 struct mlx4_cmd_mailbox *outbox, 971 struct mlx4_cmd_mailbox *outbox,
971 struct mlx4_cmd_info *cmd); 972 struct mlx4_cmd_info *cmd);
972int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, 973int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
973 int *base); 974 int *base, u8 flags);
974void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt); 975void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
975int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac); 976int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
976void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac); 977void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index ac48a8d91501..944a112dff37 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -778,7 +778,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
778 778
779int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, 779int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
780 struct mlx4_en_tx_ring **pring, 780 struct mlx4_en_tx_ring **pring,
781 int qpn, u32 size, u16 stride, 781 u32 size, u16 stride,
782 int node, int queue_index); 782 int node, int queue_index);
783void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, 783void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
784 struct mlx4_en_tx_ring **pring); 784 struct mlx4_en_tx_ring **pring);
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 2301365c79c7..40e82edac99d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -42,6 +42,10 @@
42#include "mlx4.h" 42#include "mlx4.h"
43#include "icm.h" 43#include "icm.h"
44 44
45/* QP to support BF should have bits 6,7 cleared */
46#define MLX4_BF_QP_SKIP_MASK 0xc0
47#define MLX4_MAX_BF_QP_RANGE 0x40
48
45void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type) 49void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
46{ 50{
47 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; 51 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
@@ -207,26 +211,36 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
207EXPORT_SYMBOL_GPL(mlx4_qp_modify); 211EXPORT_SYMBOL_GPL(mlx4_qp_modify);
208 212
209int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, 213int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
210 int *base) 214 int *base, u8 flags)
211{ 215{
216 int bf_qp = !!(flags & (u8)MLX4_RESERVE_ETH_BF_QP);
217
212 struct mlx4_priv *priv = mlx4_priv(dev); 218 struct mlx4_priv *priv = mlx4_priv(dev);
213 struct mlx4_qp_table *qp_table = &priv->qp_table; 219 struct mlx4_qp_table *qp_table = &priv->qp_table;
214 220
215 *base = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align); 221 if (cnt > MLX4_MAX_BF_QP_RANGE && bf_qp)
222 return -ENOMEM;
223
224 *base = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align,
225 bf_qp ? MLX4_BF_QP_SKIP_MASK : 0);
216 if (*base == -1) 226 if (*base == -1)
217 return -ENOMEM; 227 return -ENOMEM;
218 228
219 return 0; 229 return 0;
220} 230}
221 231
222int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base) 232int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
233 int *base, u8 flags)
223{ 234{
224 u64 in_param = 0; 235 u64 in_param = 0;
225 u64 out_param; 236 u64 out_param;
226 int err; 237 int err;
227 238
239 /* Turn off all unsupported QP allocation flags */
240 flags &= dev->caps.alloc_res_qp_mask;
241
228 if (mlx4_is_mfunc(dev)) { 242 if (mlx4_is_mfunc(dev)) {
229 set_param_l(&in_param, cnt); 243 set_param_l(&in_param, (((u32)flags) << 24) | (u32)cnt);
230 set_param_h(&in_param, align); 244 set_param_h(&in_param, align);
231 err = mlx4_cmd_imm(dev, in_param, &out_param, 245 err = mlx4_cmd_imm(dev, in_param, &out_param,
232 RES_QP, RES_OP_RESERVE, 246 RES_QP, RES_OP_RESERVE,
@@ -238,7 +252,7 @@ int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
238 *base = get_param_l(&out_param); 252 *base = get_param_l(&out_param);
239 return 0; 253 return 0;
240 } 254 }
241 return __mlx4_qp_reserve_range(dev, cnt, align, base); 255 return __mlx4_qp_reserve_range(dev, cnt, align, base, flags);
242} 256}
243EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range); 257EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);
244 258
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 16f617b5749e..4efbd1eca611 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1543,16 +1543,21 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1543 int align; 1543 int align;
1544 int base; 1544 int base;
1545 int qpn; 1545 int qpn;
1546 u8 flags;
1546 1547
1547 switch (op) { 1548 switch (op) {
1548 case RES_OP_RESERVE: 1549 case RES_OP_RESERVE:
1549 count = get_param_l(&in_param) & 0xffffff; 1550 count = get_param_l(&in_param) & 0xffffff;
1551 /* Turn off all unsupported QP allocation flags that the
1552 * slave tries to set.
1553 */
1554 flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
1550 align = get_param_h(&in_param); 1555 align = get_param_h(&in_param);
1551 err = mlx4_grant_resource(dev, slave, RES_QP, count, 0); 1556 err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
1552 if (err) 1557 if (err)
1553 return err; 1558 return err;
1554 1559
1555 err = __mlx4_qp_reserve_range(dev, count, align, &base); 1560 err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
1556 if (err) { 1561 if (err) {
1557 mlx4_release_resource(dev, slave, RES_QP, count, 0); 1562 mlx4_release_resource(dev, slave, RES_QP, count, 0);
1558 return err; 1563 return err;
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 3951b5368d7e..272aa258c036 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -195,6 +195,22 @@ enum {
195}; 195};
196 196
197enum { 197enum {
198 MLX4_QUERY_FUNC_FLAGS_BF_RES_QP = 1LL << 0
199};
200
201/* bit enums for an 8-bit flags field indicating special use
202 * QPs which require special handling in qp_reserve_range.
203 * Currently, this only includes QPs used by the ETH interface,
204 * where we expect to use blueflame. These QPs must not have
205 * bits 6 and 7 set in their qp number.
206 *
207 * This enum may use only bits 0..7.
208 */
209enum {
210 MLX4_RESERVE_ETH_BF_QP = 1 << 7,
211};
212
213enum {
198 MLX4_DEV_CAP_64B_EQE_ENABLED = 1LL << 0, 214 MLX4_DEV_CAP_64B_EQE_ENABLED = 1LL << 0,
199 MLX4_DEV_CAP_64B_CQE_ENABLED = 1LL << 1, 215 MLX4_DEV_CAP_64B_CQE_ENABLED = 1LL << 1,
200 MLX4_DEV_CAP_CQE_STRIDE_ENABLED = 1LL << 2, 216 MLX4_DEV_CAP_CQE_STRIDE_ENABLED = 1LL << 2,
@@ -501,6 +517,7 @@ struct mlx4_caps {
501 u64 phys_port_id[MLX4_MAX_PORTS + 1]; 517 u64 phys_port_id[MLX4_MAX_PORTS + 1];
502 int tunnel_offload_mode; 518 int tunnel_offload_mode;
503 u8 rx_checksum_flags_port[MLX4_MAX_PORTS + 1]; 519 u8 rx_checksum_flags_port[MLX4_MAX_PORTS + 1];
520 u8 alloc_res_qp_mask;
504}; 521};
505 522
506struct mlx4_buf_list { 523struct mlx4_buf_list {
@@ -950,8 +967,8 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
950 struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq, 967 struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
951 unsigned vector, int collapsed, int timestamp_en); 968 unsigned vector, int collapsed, int timestamp_en);
952void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); 969void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
953 970int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
954int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base); 971 int *base, u8 flags);
955void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt); 972void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
956 973
957int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, 974int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp,