author    David S. Miller <davem@davemloft.net>  2014-11-13 15:16:28 -0500
committer David S. Miller <davem@davemloft.net>  2014-11-13 15:16:28 -0500
commit    64bb7e9949c03bff9463c40bfa740f611fb5500d (patch)
tree      6a4651e46bf83c8b6c2404ca1e0890c23511b81b
parent    9cf5476bfda3c2f1d5712d7bf09b3dad91fc2f2d (diff)
parent    de966c5928026b100a989c8cef761d306310a184 (diff)
Merge branch 'mlx4-next'
Or Gerlitz says:

====================
mlx4: Flexible (asymmetric) allocation of EQs and MSI-X vectors

This series from Matan Barak is built as follows:

The first two patches fix small bugs w.r.t the firmware spec. Next are
two patches which do more refactoring of the init/fini flow, and a patch
that adds support for the QUERY_FUNC firmware command; these are all
pre-steps for the major patch of the series.

In that patch (#6) we change the order of talking to / querying the
firmware and enabling SRIOV. This allows us to remove the worst-case
assumptions w.r.t the number of available MSI-X vectors and EQs per
function. The last patch builds on this ordering change to enable
support for more than 64 VFs over firmware that allows for that.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
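The init/fini refactoring rests on one idiom, visible in the
mlx4_cmd_init()/mlx4_cmd_cleanup() hunks below: each setup step is
skipped if it already ran, each step that succeeds sets a bit in a
cleanup mask, and a single teardown routine frees exactly the resources
named in the mask. That is what later lets mlx4_load_one() tear down
only the VHCR (MLX4_CMD_CLEANUP_VHCR) and call mlx4_cmd_init() again
after flipping master/slave mode. A minimal stand-alone sketch of the
pattern follows; the names are hypothetical, and plain malloc/free
stands in for the driver's ioremap/dma_alloc_coherent:

#include <stdlib.h>

enum {
	CLEANUP_STRUCT = 1u << 0,
	CLEANUP_BUF    = 1u << 1,
	CLEANUP_POOL   = 1u << 2,
	CLEANUP_ALL    = (CLEANUP_POOL << 1) - 1,	/* all bits up to the highest */
};

struct ctx {
	int   initialized;
	void *buf;
	void *pool;
};

/* Free only the resources named in the mask, and only if they exist. */
static void ctx_cleanup(struct ctx *c, unsigned int mask)
{
	if (c->pool && (mask & CLEANUP_POOL)) {
		free(c->pool);
		c->pool = NULL;
	}
	if (c->buf && (mask & CLEANUP_BUF)) {
		free(c->buf);
		c->buf = NULL;
	}
	if (c->initialized && (mask & CLEANUP_STRUCT))
		c->initialized = 0;
}

/* Re-entrant init: skip steps that already ran, record the ones we do. */
static int ctx_init(struct ctx *c)
{
	unsigned int flags = 0;

	if (!c->initialized) {
		c->initialized = 1;
		flags |= CLEANUP_STRUCT;
	}
	if (!c->buf) {
		c->buf = malloc(64);
		if (!c->buf)
			goto err;
		flags |= CLEANUP_BUF;
	}
	if (!c->pool) {
		c->pool = malloc(256);
		if (!c->pool)
			goto err;
		flags |= CLEANUP_POOL;
	}
	return 0;

err:
	/* Single error path: undo exactly the steps this call performed. */
	ctx_cleanup(c, flags);
	return -1;
}

With this shape, a partial reset such as ctx_cleanup(&c, CLEANUP_BUF)
followed by ctx_init(&c) rebuilds only the buffer while everything else
stays live, which is the same sequence mlx4_load_one() performs with
MLX4_CMD_CLEANUP_VHCR and a second mlx4_cmd_init().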
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c            |   3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c     |  76
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c      |   8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c      | 113
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.h      |  15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c    | 307
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h    |  11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/profile.c |  19
-rw-r--r--  include/linux/mlx4/device.h                  |   7
9 files changed, 440 insertions(+), 119 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 8b72cf392b34..0c3375524a64 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1975,8 +1975,7 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
 	    dev->caps.num_ports > dev->caps.comp_pool)
 		return;
 
-	eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/
-					dev->caps.num_ports);
+	eq_per_port = dev->caps.comp_pool / dev->caps.num_ports;
 
 	/* Init eq table */
 	added_eqs = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 3c05e5878b49..5c93d1451c44 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2117,50 +2117,52 @@ err_vhcr:
 int mlx4_cmd_init(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
+	int flags = 0;
+
+	if (!priv->cmd.initialized) {
+		mutex_init(&priv->cmd.hcr_mutex);
+		mutex_init(&priv->cmd.slave_cmd_mutex);
+		sema_init(&priv->cmd.poll_sem, 1);
+		priv->cmd.use_events = 0;
+		priv->cmd.toggle = 1;
+		priv->cmd.initialized = 1;
+		flags |= MLX4_CMD_CLEANUP_STRUCT;
+	}
 
-	mutex_init(&priv->cmd.hcr_mutex);
-	mutex_init(&priv->cmd.slave_cmd_mutex);
-	sema_init(&priv->cmd.poll_sem, 1);
-	priv->cmd.use_events = 0;
-	priv->cmd.toggle = 1;
-
-	priv->cmd.hcr = NULL;
-	priv->mfunc.vhcr = NULL;
-
-	if (!mlx4_is_slave(dev)) {
+	if (!mlx4_is_slave(dev) && !priv->cmd.hcr) {
 		priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
 					MLX4_HCR_BASE, MLX4_HCR_SIZE);
 		if (!priv->cmd.hcr) {
 			mlx4_err(dev, "Couldn't map command register\n");
-			return -ENOMEM;
+			goto err;
 		}
+		flags |= MLX4_CMD_CLEANUP_HCR;
 	}
 
-	if (mlx4_is_mfunc(dev)) {
+	if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) {
 		priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
 						      &priv->mfunc.vhcr_dma,
 						      GFP_KERNEL);
 		if (!priv->mfunc.vhcr)
-			goto err_hcr;
+			goto err;
+
+		flags |= MLX4_CMD_CLEANUP_VHCR;
 	}
 
-	priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
-					 MLX4_MAILBOX_SIZE,
-					 MLX4_MAILBOX_SIZE, 0);
-	if (!priv->cmd.pool)
-		goto err_vhcr;
+	if (!priv->cmd.pool) {
+		priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
+						 MLX4_MAILBOX_SIZE,
+						 MLX4_MAILBOX_SIZE, 0);
+		if (!priv->cmd.pool)
+			goto err;
 
-	return 0;
+		flags |= MLX4_CMD_CLEANUP_POOL;
+	}
 
-err_vhcr:
-	if (mlx4_is_mfunc(dev))
-		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
-				  priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
-	priv->mfunc.vhcr = NULL;
+	return 0;
 
-err_hcr:
-	if (!mlx4_is_slave(dev))
-		iounmap(priv->cmd.hcr);
+err:
+	mlx4_cmd_cleanup(dev, flags);
 	return -ENOMEM;
 }
 
@@ -2184,18 +2186,28 @@ void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
 	iounmap(priv->mfunc.comm);
 }
 
-void mlx4_cmd_cleanup(struct mlx4_dev *dev)
+void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 
-	pci_pool_destroy(priv->cmd.pool);
+	if (priv->cmd.pool && (cleanup_mask & MLX4_CMD_CLEANUP_POOL)) {
+		pci_pool_destroy(priv->cmd.pool);
+		priv->cmd.pool = NULL;
+	}
 
-	if (!mlx4_is_slave(dev))
+	if (!mlx4_is_slave(dev) && priv->cmd.hcr &&
+	    (cleanup_mask & MLX4_CMD_CLEANUP_HCR)) {
 		iounmap(priv->cmd.hcr);
-	if (mlx4_is_mfunc(dev))
+		priv->cmd.hcr = NULL;
+	}
+	if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr &&
+	    (cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) {
 		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
 				  priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
-	priv->mfunc.vhcr = NULL;
+		priv->mfunc.vhcr = NULL;
+	}
+	if (priv->cmd.initialized && (cleanup_mask & MLX4_CMD_CLEANUP_STRUCT))
+		priv->cmd.initialized = 0;
 }
 
 /*
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 49290a405903..d68b264cee4d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -1123,8 +1123,12 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
 		goto err_out_free;
 	}
 
-	err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
-			       dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
+	err = mlx4_bitmap_init(&priv->eq_table.bitmap,
+			       roundup_pow_of_two(dev->caps.num_eqs),
+			       dev->caps.num_eqs - 1,
+			       dev->caps.reserved_eqs,
+			       roundup_pow_of_two(dev->caps.num_eqs) -
+			       dev->caps.num_eqs);
 	if (err)
 		goto err_out_free;
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index d6dba77ae4ba..4251f81a0275 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -142,7 +142,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
142 [13] = "Large cache line (>64B) EQE stride support", 142 [13] = "Large cache line (>64B) EQE stride support",
143 [14] = "Ethernet protocol control support", 143 [14] = "Ethernet protocol control support",
144 [15] = "Ethernet Backplane autoneg support", 144 [15] = "Ethernet Backplane autoneg support",
145 [16] = "CONFIG DEV support" 145 [16] = "CONFIG DEV support",
146 [17] = "Asymmetric EQs support",
147 [18] = "More than 80 VFs support"
146 }; 148 };
147 int i; 149 int i;
148 150
@@ -177,6 +179,61 @@ int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
 	return err;
 }
 
+int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	u32 *outbox;
+	u8 in_modifier;
+	u8 field;
+	u16 field16;
+	int err;
+
+#define QUERY_FUNC_BUS_OFFSET			0x00
+#define QUERY_FUNC_DEVICE_OFFSET		0x01
+#define QUERY_FUNC_FUNCTION_OFFSET		0x01
+#define QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET	0x03
+#define QUERY_FUNC_RSVD_EQS_OFFSET		0x04
+#define QUERY_FUNC_MAX_EQ_OFFSET		0x06
+#define QUERY_FUNC_RSVD_UARS_OFFSET		0x0b
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+	outbox = mailbox->buf;
+
+	in_modifier = slave;
+
+	err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, 0,
+			   MLX4_CMD_QUERY_FUNC,
+			   MLX4_CMD_TIME_CLASS_A,
+			   MLX4_CMD_NATIVE);
+	if (err)
+		goto out;
+
+	MLX4_GET(field, outbox, QUERY_FUNC_BUS_OFFSET);
+	func->bus = field & 0xf;
+	MLX4_GET(field, outbox, QUERY_FUNC_DEVICE_OFFSET);
+	func->device = field & 0xf1;
+	MLX4_GET(field, outbox, QUERY_FUNC_FUNCTION_OFFSET);
+	func->function = field & 0x7;
+	MLX4_GET(field, outbox, QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET);
+	func->physical_function = field & 0xf;
+	MLX4_GET(field16, outbox, QUERY_FUNC_RSVD_EQS_OFFSET);
+	func->rsvd_eqs = field16 & 0xffff;
+	MLX4_GET(field16, outbox, QUERY_FUNC_MAX_EQ_OFFSET);
+	func->max_eq = field16 & 0xffff;
+	MLX4_GET(field, outbox, QUERY_FUNC_RSVD_UARS_OFFSET);
+	func->rsvd_uars = field & 0x0f;
+
+	mlx4_dbg(dev, "Bus: %d, Device: %d, Function: %d, Physical function: %d, Max EQs: %d, Reserved EQs: %d, Reserved UARs: %d\n",
+		 func->bus, func->device, func->function, func->physical_function,
+		 func->max_eq, func->rsvd_eqs, func->rsvd_uars);
+
+out:
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+
 int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
 				struct mlx4_vhcr *vhcr,
 				struct mlx4_cmd_mailbox *inbox,
@@ -187,6 +244,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
 	u8	field, port;
 	u32	size, proxy_qp, qkey;
 	int	err = 0;
+	struct mlx4_func func;
 
 #define QUERY_FUNC_CAP_FLAGS_OFFSET		0x0
 #define QUERY_FUNC_CAP_NUM_PORTS_OFFSET		0x1
@@ -231,6 +289,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
 #define QUERY_FUNC_CAP_VF_ENABLE_QP0		0x08
 
 #define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
+#define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS	(1 << 31)
 
 	if (vhcr->op_modifier == 1) {
 		struct mlx4_active_ports actv_ports =
@@ -309,11 +368,24 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
 	size = dev->caps.num_cqs;
 	MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
 
-	size = dev->caps.num_eqs;
-	MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
-
-	size = dev->caps.reserved_eqs;
-	MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
+	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) ||
+	    mlx4_QUERY_FUNC(dev, &func, slave)) {
+		size = vhcr->in_modifier &
+		       QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ?
+			dev->caps.num_eqs :
+			rounddown_pow_of_two(dev->caps.num_eqs);
+		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
+		size = dev->caps.reserved_eqs;
+		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
+	} else {
+		size = vhcr->in_modifier &
+		       QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ?
+			func.max_eq :
+			rounddown_pow_of_two(func.max_eq);
+		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
+		size = func.rsvd_eqs;
+		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
+	}
 
 	size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave];
 	MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
@@ -335,7 +407,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
 	return err;
 }
 
-int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
+int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
 			struct mlx4_func_cap *func_cap)
 {
 	struct mlx4_cmd_mailbox *mailbox;
@@ -343,14 +415,17 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
 	u8	field, op_modifier;
 	u32	size, qkey;
 	int	err = 0, quotas = 0;
+	u32	in_modifier;
 
 	op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */
+	in_modifier = op_modifier ? gen_or_port :
+		QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS;
 
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
 
-	err = mlx4_cmd_box(dev, 0, mailbox->dma, gen_or_port, op_modifier,
+	err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, op_modifier,
 			   MLX4_CMD_QUERY_FUNC_CAP,
 			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 	if (err)
@@ -522,6 +597,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET		0x21
 #define QUERY_DEV_CAP_RSVD_MRW_OFFSET		0x22
 #define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET	0x23
+#define QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET		0x26
 #define QUERY_DEV_CAP_MAX_AV_OFFSET		0x27
 #define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET		0x29
 #define QUERY_DEV_CAP_MAX_RES_QP_OFFSET		0x2b
@@ -611,7 +687,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
 	dev_cap->max_mpts = 1 << (field & 0x3f);
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
-	dev_cap->reserved_eqs = field & 0xf;
+	dev_cap->reserved_eqs = 1 << (field & 0xf);
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
 	dev_cap->max_eqs = 1 << (field & 0xf);
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
@@ -622,6 +698,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	dev_cap->reserved_mrws = 1 << (field & 0xf);
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET);
 	dev_cap->max_mtt_seg = 1 << (field & 0x3f);
+	MLX4_GET(size, outbox, QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET);
+	dev_cap->num_sys_eqs = size & 0xfff;
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
 	dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
@@ -783,6 +861,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL;
 	if (field32 & (1 << 20))
 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM;
+	if (field32 & (1 << 21))
+		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_80_VFS;
 
 	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
 		for (i = 1; i <= dev_cap->num_ports; ++i) {
@@ -849,8 +929,11 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	 * we can't use any EQs whose doorbell falls on that page,
 	 * even if the EQ itself isn't reserved.
 	 */
-	dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
-				    dev_cap->reserved_eqs);
+	if (dev_cap->num_sys_eqs == 0)
+		dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
+					    dev_cap->reserved_eqs);
+	else
+		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SYS_EQS;
 
 	mlx4_dbg(dev, "Max ICM size %lld MB\n",
 		 (unsigned long long) dev_cap->max_icm_sz >> 20);
@@ -860,8 +943,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 		 dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
 	mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
 		 dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
-	mlx4_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n",
-		 dev_cap->max_eqs, dev_cap->reserved_eqs, dev_cap->eqc_entry_sz);
+	mlx4_dbg(dev, "Num sys EQs: %d, max EQs: %d, reserved EQs: %d, entry size: %d\n",
+		 dev_cap->num_sys_eqs, dev_cap->max_eqs, dev_cap->reserved_eqs,
+		 dev_cap->eqc_entry_sz);
 	mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
 		 dev_cap->reserved_mrws, dev_cap->reserved_mtts);
 	mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
@@ -1407,6 +1491,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 #define INIT_HCA_AUXC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x50)
 #define INIT_HCA_EQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x60)
 #define INIT_HCA_LOG_EQ_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x67)
+#define INIT_HCA_NUM_SYS_EQS_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x6a)
 #define INIT_HCA_RDMARC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x70)
 #define INIT_HCA_LOG_RD_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x77)
 #define INIT_HCA_MCAST_OFFSET		 0x0c0
@@ -1510,6 +1595,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 	MLX4_PUT(inbox, param->auxc_base,     INIT_HCA_AUXC_BASE_OFFSET);
 	MLX4_PUT(inbox, param->eqc_base,      INIT_HCA_EQC_BASE_OFFSET);
 	MLX4_PUT(inbox, param->log_num_eqs,   INIT_HCA_LOG_EQ_OFFSET);
+	MLX4_PUT(inbox, param->num_sys_eqs,   INIT_HCA_NUM_SYS_EQS_OFFSET);
 	MLX4_PUT(inbox, param->rdmarc_base,   INIT_HCA_RDMARC_BASE_OFFSET);
 	MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);
 
@@ -1620,6 +1706,7 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
 	MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET);
 	MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET);
 	MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET);
+	MLX4_GET(param->num_sys_eqs, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
 	MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
 	MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 9b835aecac96..475215ee370f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -56,6 +56,7 @@ struct mlx4_dev_cap {
 	int max_mpts;
 	int reserved_eqs;
 	int max_eqs;
+	int num_sys_eqs;
 	int reserved_mtts;
 	int max_mrw_sz;
 	int reserved_mrws;
@@ -145,6 +146,16 @@ struct mlx4_func_cap {
 	u64	phys_port_id;
 };
 
+struct mlx4_func {
+	int	bus;
+	int	device;
+	int	function;
+	int	physical_function;
+	int	rsvd_eqs;
+	int	max_eq;
+	int	rsvd_uars;
+};
+
 struct mlx4_adapter {
 	char board_id[MLX4_BOARD_ID_LEN];
 	u8   inta_pin;
@@ -170,6 +181,7 @@ struct mlx4_init_hca_param {
 	u8  log_num_srqs;
 	u8  log_num_cqs;
 	u8  log_num_eqs;
+	u16 num_sys_eqs;
 	u8  log_rd_per_qp;
 	u8  log_mc_table_sz;
 	u8  log_mpt_sz;
@@ -204,13 +216,14 @@ struct mlx4_set_ib_param {
 };
 
 int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap);
-int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
+int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
 			struct mlx4_func_cap *func_cap);
 int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
 				struct mlx4_vhcr *vhcr,
 				struct mlx4_cmd_mailbox *inbox,
 				struct mlx4_cmd_mailbox *outbox,
 				struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave);
 int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm);
 int mlx4_UNMAP_FA(struct mlx4_dev *dev);
 int mlx4_RUN_FW(struct mlx4_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 2f6ba420ac03..3044f9e623cb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -197,6 +197,29 @@ static void mlx4_set_port_mask(struct mlx4_dev *dev)
 		dev->caps.port_mask[i] = dev->caps.port_type[i];
 }
 
+enum {
+	MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0,
+};
+
+static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
+{
+	int err = 0;
+	struct mlx4_func func;
+
+	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
+		err = mlx4_QUERY_FUNC(dev, &func, 0);
+		if (err) {
+			mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
+			return err;
+		}
+		dev_cap->max_eqs = func.max_eq;
+		dev_cap->reserved_eqs = func.rsvd_eqs;
+		dev_cap->reserved_uars = func.rsvd_uars;
+		err |= MLX4_QUERY_FUNC_NUM_SYS_EQS;
+	}
+	return err;
+}
+
 static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev)
 {
 	struct mlx4_caps *dev_cap = &dev->caps;
@@ -261,7 +284,10 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	}
 
 	dev->caps.num_ports	     = dev_cap->num_ports;
-	dev->phys_caps.num_phys_eqs  = MLX4_MAX_EQ_NUM;
+	dev->caps.num_sys_eqs	     = dev_cap->num_sys_eqs;
+	dev->phys_caps.num_phys_eqs  = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ?
+				       dev->caps.num_sys_eqs :
+				       MLX4_MAX_EQ_NUM;
 	for (i = 1; i <= dev->caps.num_ports; ++i) {
 		dev->caps.vl_cap[i]	    = dev_cap->max_vl[i];
 		dev->caps.ib_mtu_cap[i]	    = dev_cap->ib_mtu[i];
@@ -631,7 +657,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
 	struct mlx4_dev_cap	   dev_cap;
 	struct mlx4_func_cap	   func_cap;
 	struct mlx4_init_hca_param hca_param;
-	int			   i;
+	u8			   i;
 
 	memset(&hca_param, 0, sizeof(hca_param));
 	err = mlx4_QUERY_HCA(dev, &hca_param);
@@ -732,7 +758,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
 	}
 
 	for (i = 1; i <= dev->caps.num_ports; ++i) {
-		err = mlx4_QUERY_FUNC_CAP(dev, (u32) i, &func_cap);
+		err = mlx4_QUERY_FUNC_CAP(dev, i, &func_cap);
 		if (err) {
 			mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
 				 i, err);
@@ -1130,8 +1156,7 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
 	if (err)
 		goto err_srq;
 
-	num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
-		  dev->caps.num_eqs;
+	num_eqs = dev->phys_caps.num_phys_eqs;
 	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
 				  cmpt_base +
 				  ((u64) (MLX4_CMPT_TYPE_EQ *
@@ -1193,8 +1218,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
 	}
 
 
-	num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
-		  dev->caps.num_eqs;
+	num_eqs = dev->phys_caps.num_phys_eqs;
 	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
 				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
 				  num_eqs, num_eqs, 0, 0);
@@ -1473,6 +1497,12 @@ static void mlx4_close_hca(struct mlx4_dev *dev)
 	else {
 		mlx4_CLOSE_HCA(dev, 0);
 		mlx4_free_icms(dev);
+	}
+}
+
+static void mlx4_close_fw(struct mlx4_dev *dev)
+{
+	if (!mlx4_is_slave(dev)) {
 		mlx4_UNMAP_FA(dev);
 		mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
 	}
@@ -1619,17 +1649,10 @@ static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
 		 == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none");
 }
 
-static int mlx4_init_hca(struct mlx4_dev *dev)
+static int mlx4_init_fw(struct mlx4_dev *dev)
 {
-	struct mlx4_priv	  *priv = mlx4_priv(dev);
-	struct mlx4_adapter	   adapter;
-	struct mlx4_dev_cap	   dev_cap;
 	struct mlx4_mod_stat_cfg   mlx4_cfg;
-	struct mlx4_profile	   profile;
-	struct mlx4_init_hca_param init_hca;
-	u64 icm_size;
-	int err;
-	struct mlx4_config_dev_params params;
+	int err = 0;
 
 	if (!mlx4_is_slave(dev)) {
 		err = mlx4_QUERY_FW(dev);
@@ -1652,7 +1675,23 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
 		err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
 		if (err)
 			mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
+	}
 
+	return err;
+}
+
+static int mlx4_init_hca(struct mlx4_dev *dev)
+{
+	struct mlx4_priv	  *priv = mlx4_priv(dev);
+	struct mlx4_adapter	   adapter;
+	struct mlx4_dev_cap	   dev_cap;
+	struct mlx4_profile	   profile;
+	struct mlx4_init_hca_param init_hca;
+	u64 icm_size;
+	struct mlx4_config_dev_params params;
+	int err;
+
+	if (!mlx4_is_slave(dev)) {
 		err = mlx4_dev_cap(dev, &dev_cap);
 		if (err) {
 			mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
@@ -1704,6 +1743,19 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
 			mlx4_err(dev, "INIT_HCA command failed, aborting\n");
 			goto err_free_icm;
 		}
+
+		if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
+			err = mlx4_query_func(dev, &dev_cap);
+			if (err < 0) {
+				mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
+				goto err_stop_fw;
+			} else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) {
+				dev->caps.num_eqs = dev_cap.max_eqs;
+				dev->caps.reserved_eqs = dev_cap.reserved_eqs;
+				dev->caps.reserved_uars = dev_cap.reserved_uars;
+			}
+		}
+
 		/*
 		 * If TS is supported by FW
 		 * read HCA frequency by QUERY_HCA command
@@ -2070,12 +2122,11 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct msix_entry *entries;
-	int nreq = min_t(int, dev->caps.num_ports *
-			 min_t(int, num_online_cpus() + 1,
-			       MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ, MAX_MSIX);
 	int i;
 
 	if (msi_x) {
+		int nreq = dev->caps.num_ports * num_online_cpus() + MSIX_LEGACY_SZ;
+
 		nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
 			     nreq);
 
@@ -2275,6 +2326,71 @@ static void mlx4_free_ownership(struct mlx4_dev *dev)
 	iounmap(owner);
 }
 
+#define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV)	==\
+				  !!((flags) & MLX4_FLAG_MASTER))
+
+static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
+			     u8 total_vfs, int existing_vfs)
+{
+	u64 dev_flags = dev->flags;
+
+	dev->dev_vfs = kzalloc(
+			total_vfs * sizeof(*dev->dev_vfs),
+			GFP_KERNEL);
+	if (NULL == dev->dev_vfs) {
+		mlx4_err(dev, "Failed to allocate memory for VFs\n");
+		goto disable_sriov;
+	} else if (!(dev->flags & MLX4_FLAG_SRIOV)) {
+		int err = 0;
+
+		atomic_inc(&pf_loading);
+		if (existing_vfs) {
+			if (existing_vfs != total_vfs)
+				mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n",
+					 existing_vfs, total_vfs);
+		} else {
+			mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
+			err = pci_enable_sriov(pdev, total_vfs);
+		}
+		if (err) {
+			mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
+				 err);
+			atomic_dec(&pf_loading);
+			goto disable_sriov;
+		} else {
+			mlx4_warn(dev, "Running in master mode\n");
+			dev_flags |= MLX4_FLAG_SRIOV |
+				     MLX4_FLAG_MASTER;
+			dev_flags &= ~MLX4_FLAG_SLAVE;
+			dev->num_vfs = total_vfs;
+		}
+	}
+	return dev_flags;
+
+disable_sriov:
+	dev->num_vfs = 0;
+	kfree(dev->dev_vfs);
+	return dev_flags & ~MLX4_FLAG_MASTER;
+}
+
+enum {
+	MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1,
+};
+
+static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
+			      int *nvfs)
+{
+	int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2];
+	/* Checking for 64 VFs as a limitation of CX2 */
+	if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) &&
+	    requested_vfs >= 64) {
+		mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n",
+			 requested_vfs);
+		return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64;
+	}
+	return 0;
+}
+
 static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
 			 int total_vfs, int *nvfs, struct mlx4_priv *priv)
 {
@@ -2283,6 +2399,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
 	int err;
 	int port;
 	int i;
+	struct mlx4_dev_cap *dev_cap = NULL;
 	int existing_vfs = 0;
 
 	dev = &priv->dev;
@@ -2319,40 +2436,6 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
 		}
 	}
 
-	if (total_vfs) {
-		mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n",
-			  total_vfs);
-		dev->dev_vfs = kzalloc(
-				total_vfs * sizeof(*dev->dev_vfs),
-				GFP_KERNEL);
-		if (NULL == dev->dev_vfs) {
-			mlx4_err(dev, "Failed to allocate memory for VFs\n");
-			err = -ENOMEM;
-			goto err_free_own;
-		} else {
-			atomic_inc(&pf_loading);
-			existing_vfs = pci_num_vf(pdev);
-			if (existing_vfs) {
-				err = 0;
-				if (existing_vfs != total_vfs)
-					mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n",
-						 existing_vfs, total_vfs);
-			} else {
-				err = pci_enable_sriov(pdev, total_vfs);
-			}
-			if (err) {
-				mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
-					 err);
-				atomic_dec(&pf_loading);
-			} else {
-				mlx4_warn(dev, "Running in master mode\n");
-				dev->flags |= MLX4_FLAG_SRIOV |
-					      MLX4_FLAG_MASTER;
-				dev->num_vfs = total_vfs;
-			}
-		}
-	}
-
 	atomic_set(&priv->opreq_count, 0);
 	INIT_WORK(&priv->opreq_task, mlx4_opreq_action);
 
@@ -2366,6 +2449,12 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
 			mlx4_err(dev, "Failed to reset HCA, aborting\n");
 			goto err_sriov;
 		}
+
+		if (total_vfs) {
+			existing_vfs = pci_num_vf(pdev);
+			dev->flags = MLX4_FLAG_MASTER;
+			dev->num_vfs = total_vfs;
+		}
 	}
 
 slave_start:
@@ -2379,9 +2468,10 @@ slave_start:
 	 * before posting commands. Also, init num_slaves before calling
 	 * mlx4_init_hca */
 	if (mlx4_is_mfunc(dev)) {
-		if (mlx4_is_master(dev))
+		if (mlx4_is_master(dev)) {
 			dev->num_slaves = MLX4_MAX_NUM_SLAVES;
-		else {
+
+		} else {
 			dev->num_slaves = 0;
 			err = mlx4_multi_func_init(dev);
 			if (err) {
@@ -2391,17 +2481,109 @@ slave_start:
 		}
 	}
 
+	err = mlx4_init_fw(dev);
+	if (err) {
+		mlx4_err(dev, "Failed to init fw, aborting.\n");
+		goto err_mfunc;
+	}
+
+	if (mlx4_is_master(dev)) {
+		if (!dev_cap) {
+			dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);
+
+			if (!dev_cap) {
+				err = -ENOMEM;
+				goto err_fw;
+			}
+
+			err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
+			if (err) {
+				mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+				goto err_fw;
+			}
+
+			if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
+				goto err_fw;
+
+			if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
+				u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs,
+								  existing_vfs);
+
+				mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
+				dev->flags = dev_flags;
+				if (!SRIOV_VALID_STATE(dev->flags)) {
+					mlx4_err(dev, "Invalid SRIOV state\n");
+					goto err_sriov;
+				}
+				err = mlx4_reset(dev);
+				if (err) {
+					mlx4_err(dev, "Failed to reset HCA, aborting.\n");
+					goto err_sriov;
+				}
+				goto slave_start;
+			}
+		} else {
+			/* Legacy mode FW requires SRIOV to be enabled before
+			 * doing QUERY_DEV_CAP, since max_eq's value is different if
+			 * SRIOV is enabled.
+			 */
+			memset(dev_cap, 0, sizeof(*dev_cap));
+			err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
+			if (err) {
+				mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+				goto err_fw;
+			}
+
+			if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
+				goto err_fw;
+		}
+	}
+
 	err = mlx4_init_hca(dev);
 	if (err) {
 		if (err == -EACCES) {
 			/* Not primary Physical function
 			 * Running in slave mode */
-			mlx4_cmd_cleanup(dev);
+			mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
+			/* We're not a PF */
+			if (dev->flags & MLX4_FLAG_SRIOV) {
+				if (!existing_vfs)
+					pci_disable_sriov(pdev);
+				if (mlx4_is_master(dev))
+					atomic_dec(&pf_loading);
+				dev->flags &= ~MLX4_FLAG_SRIOV;
+			}
+			if (!mlx4_is_slave(dev))
+				mlx4_free_ownership(dev);
 			dev->flags |= MLX4_FLAG_SLAVE;
 			dev->flags &= ~MLX4_FLAG_MASTER;
 			goto slave_start;
 		} else
-			goto err_mfunc;
+			goto err_fw;
+	}
+
+	if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
+		u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, existing_vfs);
+
+		if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) {
+			mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR);
+			dev->flags = dev_flags;
+			err = mlx4_cmd_init(dev);
+			if (err) {
+				/* Only VHCR is cleaned up, so could still
+				 * send FW commands
+				 */
+				mlx4_err(dev, "Failed to init VHCR command interface, aborting\n");
+				goto err_close;
+			}
+		} else {
+			dev->flags = dev_flags;
+		}
+
+		if (!SRIOV_VALID_STATE(dev->flags)) {
+			mlx4_err(dev, "Invalid SRIOV state\n");
+			goto err_close;
+		}
 	}
 
 	/* check if the device is functioning at its maximum possible speed.
@@ -2556,12 +2738,15 @@ err_master_mfunc:
 err_close:
 	mlx4_close_hca(dev);
 
+err_fw:
+	mlx4_close_fw(dev);
+
 err_mfunc:
 	if (mlx4_is_slave(dev))
 		mlx4_multi_func_cleanup(dev);
 
 err_cmd:
-	mlx4_cmd_cleanup(dev);
+	mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
 
 err_sriov:
 	if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs)
@@ -2572,10 +2757,10 @@ err_sriov:
 
 	kfree(priv->dev.dev_vfs);
 
-err_free_own:
 	if (!mlx4_is_slave(dev))
 		mlx4_free_ownership(dev);
 
+	kfree(dev_cap);
 	return err;
 }
 
@@ -2803,15 +2988,17 @@ static void mlx4_unload_one(struct pci_dev *pdev)
 	if (mlx4_is_master(dev))
 		mlx4_multi_func_cleanup(dev);
 	mlx4_close_hca(dev);
+	mlx4_close_fw(dev);
 	if (mlx4_is_slave(dev))
 		mlx4_multi_func_cleanup(dev);
-	mlx4_cmd_cleanup(dev);
+	mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
 
 	if (dev->flags & MLX4_FLAG_MSI_X)
 		pci_disable_msix(pdev);
 	if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
 		mlx4_warn(dev, "Disabling SR-IOV\n");
 		pci_disable_sriov(pdev);
+		dev->flags &= ~MLX4_FLAG_SRIOV;
 		dev->num_vfs = 0;
 	}
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index f8fc7bd6f48b..f48e7c3eecf8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -606,6 +606,7 @@ struct mlx4_cmd {
 	u8			use_events;
 	u8			toggle;
 	u8			comm_toggle;
+	u8			initialized;
 };
 
 enum {
@@ -1126,8 +1127,16 @@ int mlx4_QUERY_QP_wrapper(struct mlx4_dev *dev, int slave,
 
 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe);
 
+enum {
+	MLX4_CMD_CLEANUP_STRUCT	= 1UL << 0,
+	MLX4_CMD_CLEANUP_POOL	= 1UL << 1,
+	MLX4_CMD_CLEANUP_HCR	= 1UL << 2,
+	MLX4_CMD_CLEANUP_VHCR	= 1UL << 3,
+	MLX4_CMD_CLEANUP_ALL	= (MLX4_CMD_CLEANUP_VHCR << 1) - 1
+};
+
 int mlx4_cmd_init(struct mlx4_dev *dev);
-void mlx4_cmd_cleanup(struct mlx4_dev *dev);
+void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask);
 int mlx4_multi_func_init(struct mlx4_dev *dev);
 void mlx4_multi_func_cleanup(struct mlx4_dev *dev);
 void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index 14089d9e1667..2bf437aafc53 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -126,8 +126,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
 	profile[MLX4_RES_AUXC].num    = request->num_qp;
 	profile[MLX4_RES_SRQ].num     = request->num_srq;
 	profile[MLX4_RES_CQ].num      = request->num_cq;
-	profile[MLX4_RES_EQ].num      = mlx4_is_mfunc(dev) ?
-					dev->phys_caps.num_phys_eqs :
+	profile[MLX4_RES_EQ].num      = mlx4_is_mfunc(dev) ? dev->phys_caps.num_phys_eqs :
 					min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
 	profile[MLX4_RES_DMPT].num    = request->num_mpt;
 	profile[MLX4_RES_CMPT].num    = MLX4_NUM_CMPTS;
@@ -216,10 +215,18 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
 			init_hca->log_num_cqs = profile[i].log_num;
 			break;
 		case MLX4_RES_EQ:
-			dev->caps.num_eqs     = roundup_pow_of_two(min_t(unsigned, dev_cap->max_eqs,
-									 MAX_MSIX));
-			init_hca->eqc_base    = profile[i].start;
-			init_hca->log_num_eqs = ilog2(dev->caps.num_eqs);
+			if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
+				init_hca->log_num_eqs = 0x1f;
+				init_hca->eqc_base    = profile[i].start;
+				init_hca->num_sys_eqs = dev_cap->num_sys_eqs;
+			} else {
+				dev->caps.num_eqs     = roundup_pow_of_two(
+								min_t(unsigned,
+								      dev_cap->max_eqs,
+								      MAX_MSIX));
+				init_hca->eqc_base    = profile[i].start;
+				init_hca->log_num_eqs = ilog2(dev->caps.num_eqs);
+			}
 			break;
 		case MLX4_RES_DMPT:
 			dev->caps.num_mpts	= profile[i].num;
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 3d9bff00f24a..cf09e65c2901 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -95,7 +95,7 @@ enum {
 
 enum {
 	MLX4_MAX_NUM_PF		= 16,
-	MLX4_MAX_NUM_VF		= 64,
+	MLX4_MAX_NUM_VF		= 126,
 	MLX4_MAX_NUM_VF_P_PORT	= 64,
 	MLX4_MFUNC_MAX		= 80,
 	MLX4_MAX_EQ_NUM		= 1024,
@@ -189,7 +189,9 @@ enum {
 	MLX4_DEV_CAP_FLAG2_EQE_STRIDE		= 1LL <<  13,
 	MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL	= 1LL <<  14,
 	MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP	= 1LL <<  15,
-	MLX4_DEV_CAP_FLAG2_CONFIG_DEV		= 1LL <<  16
+	MLX4_DEV_CAP_FLAG2_CONFIG_DEV		= 1LL <<  16,
+	MLX4_DEV_CAP_FLAG2_SYS_EQS		= 1LL <<  17,
+	MLX4_DEV_CAP_FLAG2_80_VFS		= 1LL <<  18
 };
 
 enum {
@@ -443,6 +445,7 @@ struct mlx4_caps {
 	int			num_cqs;
 	int			max_cqes;
 	int			reserved_cqs;
+	int			num_sys_eqs;
 	int			num_eqs;
 	int			reserved_eqs;
 	int			num_comp_vectors;