aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ethernet
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-08-14 13:09:05 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-08-14 13:09:05 -0400
commite3b1fd56f175526db42ae94c457f29c2fa810aca (patch)
tree3e2948ca44fb7fd5348244c2a83eef864b3110b4 /drivers/net/ethernet
parent0680eb1f485ba5aac2ee02c9f0622239c9a4b16c (diff)
parentd087f6ad724dfbcdc3df8e0191b80d9d8d839e71 (diff)
Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
Pull infiniband/rdma updates from Roland Dreier: "Main set of InfiniBand/RDMA updates for 3.17 merge window: - MR reregistration support - MAD support for RMPP in userspace - iSER and SRP initiator updates - ocrdma hardware driver updates - other fixes..." * tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (52 commits) IB/srp: Fix return value check in srp_init_module() RDMA/ocrdma: report asic-id in query device RDMA/ocrdma: Update sli data structure for endianness RDMA/ocrdma: Obtain SL from device structure RDMA/uapi: Include socket.h in rdma_user_cm.h IB/srpt: Handle GID change events IB/mlx5: Use ARRAY_SIZE instead of sizeof/sizeof[0] IB/mlx4: Use ARRAY_SIZE instead of sizeof/sizeof[0] RDMA/amso1100: Check for integer overflow in c2_alloc_cq_buf() IPoIB: Remove unnecessary test for NULL before debugfs_remove() IB/mad: Add user space RMPP support IB/mad: add new ioctl to ABI to support new registration options IB/mad: Add dev_notice messages for various umad/mad registration failures IB/mad: Update module to [pr|dev]_* style print messages IB/ipoib: Avoid multicast join attempts with invalid P_key IB/umad: Update module to [pr|dev]_* style print messages IB/ipoib: Avoid flushing the workqueue from worker context IB/ipoib: Use P_Key change event instead of P_Key polling mechanism IB/ipath: Add P_Key change event support mlx4_core: Add support for secure-host and SMP firewall ...
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h1
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c1
-rw-r--r--drivers/net/ethernet/emulex/benet/be_roce.c18
-rw-r--r--drivers/net/ethernet/emulex/benet/be_roce.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c91
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c160
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c26
10 files changed, 311 insertions, 6 deletions
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 811f1351db7a..43e08d0bc3d3 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -897,5 +897,6 @@ void be_roce_dev_remove(struct be_adapter *);
897 */ 897 */
898void be_roce_dev_open(struct be_adapter *); 898void be_roce_dev_open(struct be_adapter *);
899void be_roce_dev_close(struct be_adapter *); 899void be_roce_dev_close(struct be_adapter *);
900void be_roce_dev_shutdown(struct be_adapter *);
900 901
901#endif /* BE_H */ 902#endif /* BE_H */
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index db4ff14ff18f..9cdeda54674a 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -5014,6 +5014,7 @@ static void be_shutdown(struct pci_dev *pdev)
5014 if (!adapter) 5014 if (!adapter)
5015 return; 5015 return;
5016 5016
5017 be_roce_dev_shutdown(adapter);
5017 cancel_delayed_work_sync(&adapter->work); 5018 cancel_delayed_work_sync(&adapter->work);
5018 cancel_delayed_work_sync(&adapter->func_recovery_work); 5019 cancel_delayed_work_sync(&adapter->func_recovery_work);
5019 5020
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
index 5bf16603a3e9..ef4672dc7357 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.c
+++ b/drivers/net/ethernet/emulex/benet/be_roce.c
@@ -120,7 +120,8 @@ static void _be_roce_dev_open(struct be_adapter *adapter)
120{ 120{
121 if (ocrdma_drv && adapter->ocrdma_dev && 121 if (ocrdma_drv && adapter->ocrdma_dev &&
122 ocrdma_drv->state_change_handler) 122 ocrdma_drv->state_change_handler)
123 ocrdma_drv->state_change_handler(adapter->ocrdma_dev, 0); 123 ocrdma_drv->state_change_handler(adapter->ocrdma_dev,
124 BE_DEV_UP);
124} 125}
125 126
126void be_roce_dev_open(struct be_adapter *adapter) 127void be_roce_dev_open(struct be_adapter *adapter)
@@ -136,7 +137,8 @@ static void _be_roce_dev_close(struct be_adapter *adapter)
136{ 137{
137 if (ocrdma_drv && adapter->ocrdma_dev && 138 if (ocrdma_drv && adapter->ocrdma_dev &&
138 ocrdma_drv->state_change_handler) 139 ocrdma_drv->state_change_handler)
139 ocrdma_drv->state_change_handler(adapter->ocrdma_dev, 1); 140 ocrdma_drv->state_change_handler(adapter->ocrdma_dev,
141 BE_DEV_DOWN);
140} 142}
141 143
142void be_roce_dev_close(struct be_adapter *adapter) 144void be_roce_dev_close(struct be_adapter *adapter)
@@ -148,6 +150,18 @@ void be_roce_dev_close(struct be_adapter *adapter)
148 } 150 }
149} 151}
150 152
153void be_roce_dev_shutdown(struct be_adapter *adapter)
154{
155 if (be_roce_supported(adapter)) {
156 mutex_lock(&be_adapter_list_lock);
157 if (ocrdma_drv && adapter->ocrdma_dev &&
158 ocrdma_drv->state_change_handler)
159 ocrdma_drv->state_change_handler(adapter->ocrdma_dev,
160 BE_DEV_SHUTDOWN);
161 mutex_unlock(&be_adapter_list_lock);
162 }
163}
164
151int be_roce_register_driver(struct ocrdma_driver *drv) 165int be_roce_register_driver(struct ocrdma_driver *drv)
152{ 166{
153 struct be_adapter *dev; 167 struct be_adapter *dev;
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h
index a3d9e96c18eb..e6f7eb1a7d87 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.h
+++ b/drivers/net/ethernet/emulex/benet/be_roce.h
@@ -62,7 +62,8 @@ struct ocrdma_driver {
62 62
63enum { 63enum {
64 BE_DEV_UP = 0, 64 BE_DEV_UP = 0,
65 BE_DEV_DOWN = 1 65 BE_DEV_DOWN = 1,
66 BE_DEV_SHUTDOWN = 2
66}; 67};
67 68
68/* APIs for RoCE driver to register callback handlers, 69/* APIs for RoCE driver to register callback handlers,
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 5d940a26055c..65a4a0f88ea0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1311,6 +1311,15 @@ static struct mlx4_cmd_info cmd_info[] = {
1311 .wrapper = mlx4_MAD_IFC_wrapper 1311 .wrapper = mlx4_MAD_IFC_wrapper
1312 }, 1312 },
1313 { 1313 {
1314 .opcode = MLX4_CMD_MAD_DEMUX,
1315 .has_inbox = false,
1316 .has_outbox = false,
1317 .out_is_imm = false,
1318 .encode_slave_id = false,
1319 .verify = NULL,
1320 .wrapper = mlx4_CMD_EPERM_wrapper
1321 },
1322 {
1314 .opcode = MLX4_CMD_QUERY_IF_STAT, 1323 .opcode = MLX4_CMD_QUERY_IF_STAT,
1315 .has_inbox = false, 1324 .has_inbox = false,
1316 .has_outbox = true, 1325 .has_outbox = true,
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 688e1eabab29..494753e44ae3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -136,7 +136,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
136 [7] = "FSM (MAC anti-spoofing) support", 136 [7] = "FSM (MAC anti-spoofing) support",
137 [8] = "Dynamic QP updates support", 137 [8] = "Dynamic QP updates support",
138 [9] = "Device managed flow steering IPoIB support", 138 [9] = "Device managed flow steering IPoIB support",
139 [10] = "TCP/IP offloads/flow-steering for VXLAN support" 139 [10] = "TCP/IP offloads/flow-steering for VXLAN support",
140 [11] = "MAD DEMUX (Secure-Host) support"
140 }; 141 };
141 int i; 142 int i;
142 143
@@ -571,6 +572,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
571#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0 572#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
572#define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d 573#define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d
573#define QUERY_DEV_CAP_VXLAN 0x9e 574#define QUERY_DEV_CAP_VXLAN 0x9e
575#define QUERY_DEV_CAP_MAD_DEMUX_OFFSET 0xb0
574 576
575 dev_cap->flags2 = 0; 577 dev_cap->flags2 = 0;
576 mailbox = mlx4_alloc_cmd_mailbox(dev); 578 mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -748,6 +750,11 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
748 MLX4_GET(dev_cap->max_counters, outbox, 750 MLX4_GET(dev_cap->max_counters, outbox,
749 QUERY_DEV_CAP_MAX_COUNTERS_OFFSET); 751 QUERY_DEV_CAP_MAX_COUNTERS_OFFSET);
750 752
753 MLX4_GET(field32, outbox,
754 QUERY_DEV_CAP_MAD_DEMUX_OFFSET);
755 if (field32 & (1 << 0))
756 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_MAD_DEMUX;
757
751 MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); 758 MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
752 if (field32 & (1 << 16)) 759 if (field32 & (1 << 16))
753 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP; 760 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
@@ -2016,3 +2023,85 @@ void mlx4_opreq_action(struct work_struct *work)
2016out: 2023out:
2017 mlx4_free_cmd_mailbox(dev, mailbox); 2024 mlx4_free_cmd_mailbox(dev, mailbox);
2018} 2025}
2026
2027static int mlx4_check_smp_firewall_active(struct mlx4_dev *dev,
2028 struct mlx4_cmd_mailbox *mailbox)
2029{
2030#define MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET 0x10
2031#define MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET 0x20
2032#define MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET 0x40
2033#define MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET 0x70
2034
2035 u32 set_attr_mask, getresp_attr_mask;
2036 u32 trap_attr_mask, traprepress_attr_mask;
2037
2038 MLX4_GET(set_attr_mask, mailbox->buf,
2039 MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET);
2040 mlx4_dbg(dev, "SMP firewall set_attribute_mask = 0x%x\n",
2041 set_attr_mask);
2042
2043 MLX4_GET(getresp_attr_mask, mailbox->buf,
2044 MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET);
2045 mlx4_dbg(dev, "SMP firewall getresp_attribute_mask = 0x%x\n",
2046 getresp_attr_mask);
2047
2048 MLX4_GET(trap_attr_mask, mailbox->buf,
2049 MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET);
2050 mlx4_dbg(dev, "SMP firewall trap_attribute_mask = 0x%x\n",
2051 trap_attr_mask);
2052
2053 MLX4_GET(traprepress_attr_mask, mailbox->buf,
2054 MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET);
2055 mlx4_dbg(dev, "SMP firewall traprepress_attribute_mask = 0x%x\n",
2056 traprepress_attr_mask);
2057
2058 if (set_attr_mask && getresp_attr_mask && trap_attr_mask &&
2059 traprepress_attr_mask)
2060 return 1;
2061
2062 return 0;
2063}
2064
2065int mlx4_config_mad_demux(struct mlx4_dev *dev)
2066{
2067 struct mlx4_cmd_mailbox *mailbox;
2068 int secure_host_active;
2069 int err;
2070
2071 /* Check if mad_demux is supported */
2072 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_MAD_DEMUX))
2073 return 0;
2074
2075 mailbox = mlx4_alloc_cmd_mailbox(dev);
2076 if (IS_ERR(mailbox)) {
2077 mlx4_warn(dev, "Failed to allocate mailbox for cmd MAD_DEMUX");
2078 return -ENOMEM;
2079 }
2080
2081 /* Query mad_demux to find out which MADs are handled by internal sma */
2082 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0x01 /* subn mgmt class */,
2083 MLX4_CMD_MAD_DEMUX_QUERY_RESTR, MLX4_CMD_MAD_DEMUX,
2084 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
2085 if (err) {
2086 mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: query restrictions failed (%d)\n",
2087 err);
2088 goto out;
2089 }
2090
2091 secure_host_active = mlx4_check_smp_firewall_active(dev, mailbox);
2092
2093 /* Config mad_demux to handle all MADs returned by the query above */
2094 err = mlx4_cmd(dev, mailbox->dma, 0x01 /* subn mgmt class */,
2095 MLX4_CMD_MAD_DEMUX_CONFIG, MLX4_CMD_MAD_DEMUX,
2096 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
2097 if (err) {
2098 mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: configure failed (%d)\n", err);
2099 goto out;
2100 }
2101
2102 if (secure_host_active)
2103 mlx4_warn(dev, "HCA operating in secure-host mode. SMP firewall activated.\n");
2104out:
2105 mlx4_free_cmd_mailbox(dev, mailbox);
2106 return err;
2107}
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 80b8c5f30e4e..0158689906fd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1853,6 +1853,11 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
1853 mlx4_err(dev, "Failed to initialize multicast group table, aborting\n"); 1853 mlx4_err(dev, "Failed to initialize multicast group table, aborting\n");
1854 goto err_mr_table_free; 1854 goto err_mr_table_free;
1855 } 1855 }
1856 err = mlx4_config_mad_demux(dev);
1857 if (err) {
1858 mlx4_err(dev, "Failed in config_mad_demux, aborting\n");
1859 goto err_mcg_table_free;
1860 }
1856 } 1861 }
1857 1862
1858 err = mlx4_init_eq_table(dev); 1863 err = mlx4_init_eq_table(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 13fbcd03c3e4..b508c7887ef8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -274,6 +274,8 @@ struct mlx4_icm_table {
274#define MLX4_MPT_FLAG_PHYSICAL (1 << 9) 274#define MLX4_MPT_FLAG_PHYSICAL (1 << 9)
275#define MLX4_MPT_FLAG_REGION (1 << 8) 275#define MLX4_MPT_FLAG_REGION (1 << 8)
276 276
277#define MLX4_MPT_PD_MASK (0x1FFFFUL)
278#define MLX4_MPT_PD_VF_MASK (0xFE0000UL)
277#define MLX4_MPT_PD_FLAG_FAST_REG (1 << 27) 279#define MLX4_MPT_PD_FLAG_FAST_REG (1 << 27)
278#define MLX4_MPT_PD_FLAG_RAE (1 << 28) 280#define MLX4_MPT_PD_FLAG_RAE (1 << 28)
279#define MLX4_MPT_PD_FLAG_EN_INV (3 << 24) 281#define MLX4_MPT_PD_FLAG_EN_INV (3 << 24)
@@ -1306,5 +1308,6 @@ void mlx4_init_quotas(struct mlx4_dev *dev);
1306int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port); 1308int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port);
1307/* Returns the VF index of slave */ 1309/* Returns the VF index of slave */
1308int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave); 1310int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave);
1311int mlx4_config_mad_demux(struct mlx4_dev *dev);
1309 1312
1310#endif /* MLX4_H */ 1313#endif /* MLX4_H */
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 2839abb878a6..7d717eccb7b0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -298,6 +298,131 @@ static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
298 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); 298 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
299} 299}
300 300
301int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
302 struct mlx4_mpt_entry ***mpt_entry)
303{
304 int err;
305 int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1);
306 struct mlx4_cmd_mailbox *mailbox = NULL;
307
308 /* Make sure that at this point we have single-threaded access only */
309
310 if (mmr->enabled != MLX4_MPT_EN_HW)
311 return -EINVAL;
312
313 err = mlx4_HW2SW_MPT(dev, NULL, key);
314
315 if (err) {
316 mlx4_warn(dev, "HW2SW_MPT failed (%d).", err);
317 mlx4_warn(dev, "Most likely the MR has MWs bound to it.\n");
318 return err;
319 }
320
321 mmr->enabled = MLX4_MPT_EN_SW;
322
323 if (!mlx4_is_mfunc(dev)) {
324 **mpt_entry = mlx4_table_find(
325 &mlx4_priv(dev)->mr_table.dmpt_table,
326 key, NULL);
327 } else {
328 mailbox = mlx4_alloc_cmd_mailbox(dev);
329 if (IS_ERR_OR_NULL(mailbox))
330 return PTR_ERR(mailbox);
331
332 err = mlx4_cmd_box(dev, 0, mailbox->dma, key,
333 0, MLX4_CMD_QUERY_MPT,
334 MLX4_CMD_TIME_CLASS_B,
335 MLX4_CMD_WRAPPED);
336
337 if (err)
338 goto free_mailbox;
339
340 *mpt_entry = (struct mlx4_mpt_entry **)&mailbox->buf;
341 }
342
343 if (!(*mpt_entry) || !(**mpt_entry)) {
344 err = -ENOMEM;
345 goto free_mailbox;
346 }
347
348 return 0;
349
350free_mailbox:
351 mlx4_free_cmd_mailbox(dev, mailbox);
352 return err;
353}
354EXPORT_SYMBOL_GPL(mlx4_mr_hw_get_mpt);
355
356int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
357 struct mlx4_mpt_entry **mpt_entry)
358{
359 int err;
360
361 if (!mlx4_is_mfunc(dev)) {
362 /* Make sure any changes to this entry are flushed */
363 wmb();
364
365 *(u8 *)(*mpt_entry) = MLX4_MPT_STATUS_HW;
366
367 /* Make sure the new status is written */
368 wmb();
369
370 err = mlx4_SYNC_TPT(dev);
371 } else {
372 int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1);
373
374 struct mlx4_cmd_mailbox *mailbox =
375 container_of((void *)mpt_entry, struct mlx4_cmd_mailbox,
376 buf);
377
378 err = mlx4_SW2HW_MPT(dev, mailbox, key);
379 }
380
381 mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK;
382 if (!err)
383 mmr->enabled = MLX4_MPT_EN_HW;
384 return err;
385}
386EXPORT_SYMBOL_GPL(mlx4_mr_hw_write_mpt);
387
388void mlx4_mr_hw_put_mpt(struct mlx4_dev *dev,
389 struct mlx4_mpt_entry **mpt_entry)
390{
391 if (mlx4_is_mfunc(dev)) {
392 struct mlx4_cmd_mailbox *mailbox =
393 container_of((void *)mpt_entry, struct mlx4_cmd_mailbox,
394 buf);
395 mlx4_free_cmd_mailbox(dev, mailbox);
396 }
397}
398EXPORT_SYMBOL_GPL(mlx4_mr_hw_put_mpt);
399
400int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry,
401 u32 pdn)
402{
403 u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags);
404 /* The wrapper function will put the slave's id here */
405 if (mlx4_is_mfunc(dev))
406 pd_flags &= ~MLX4_MPT_PD_VF_MASK;
407 mpt_entry->pd_flags = cpu_to_be32((pd_flags & ~MLX4_MPT_PD_MASK) |
408 (pdn & MLX4_MPT_PD_MASK)
409 | MLX4_MPT_PD_FLAG_EN_INV);
410 return 0;
411}
412EXPORT_SYMBOL_GPL(mlx4_mr_hw_change_pd);
413
414int mlx4_mr_hw_change_access(struct mlx4_dev *dev,
415 struct mlx4_mpt_entry *mpt_entry,
416 u32 access)
417{
418 u32 flags = (be32_to_cpu(mpt_entry->flags) & ~MLX4_PERM_MASK) |
419 (access & MLX4_PERM_MASK);
420
421 mpt_entry->flags = cpu_to_be32(flags);
422 return 0;
423}
424EXPORT_SYMBOL_GPL(mlx4_mr_hw_change_access);
425
301static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd, 426static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
302 u64 iova, u64 size, u32 access, int npages, 427 u64 iova, u64 size, u32 access, int npages,
303 int page_shift, struct mlx4_mr *mr) 428 int page_shift, struct mlx4_mr *mr)
@@ -463,6 +588,41 @@ int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
463} 588}
464EXPORT_SYMBOL_GPL(mlx4_mr_free); 589EXPORT_SYMBOL_GPL(mlx4_mr_free);
465 590
591void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr)
592{
593 mlx4_mtt_cleanup(dev, &mr->mtt);
594}
595EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup);
596
597int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
598 u64 iova, u64 size, int npages,
599 int page_shift, struct mlx4_mpt_entry *mpt_entry)
600{
601 int err;
602
603 mpt_entry->start = cpu_to_be64(mr->iova);
604 mpt_entry->length = cpu_to_be64(mr->size);
605 mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
606
607 err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
608 if (err)
609 return err;
610
611 if (mr->mtt.order < 0) {
612 mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
613 mpt_entry->mtt_addr = 0;
614 } else {
615 mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
616 &mr->mtt));
617 if (mr->mtt.page_shift == 0)
618 mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order);
619 }
620 mr->enabled = MLX4_MPT_EN_SW;
621
622 return 0;
623}
624EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_write);
625
466int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr) 626int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
467{ 627{
468 struct mlx4_cmd_mailbox *mailbox; 628 struct mlx4_cmd_mailbox *mailbox;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 0efc1368e5a8..1089367fed22 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2613,12 +2613,34 @@ int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2613 if (err) 2613 if (err)
2614 return err; 2614 return err;
2615 2615
2616 if (mpt->com.from_state != RES_MPT_HW) { 2616 if (mpt->com.from_state == RES_MPT_MAPPED) {
2617 /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
2618 * that, the VF must read the MPT. But since the MPT entry memory is not
2619 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
2620 * entry contents. To guarantee that the MPT cannot be changed, the driver
2621 * must perform HW2SW_MPT before this query and return the MPT entry to HW
2622 * ownership following the change. The change here allows the VF to
2623 * perform QUERY_MPT also when the entry is in SW ownership.
2624 */
2625 struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
2626 &mlx4_priv(dev)->mr_table.dmpt_table,
2627 mpt->key, NULL);
2628
2629 if (NULL == mpt_entry || NULL == outbox->buf) {
2630 err = -EINVAL;
2631 goto out;
2632 }
2633
2634 memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
2635
2636 err = 0;
2637 } else if (mpt->com.from_state == RES_MPT_HW) {
2638 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2639 } else {
2617 err = -EBUSY; 2640 err = -EBUSY;
2618 goto out; 2641 goto out;
2619 } 2642 }
2620 2643
2621 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2622 2644
2623out: 2645out:
2624 put_res(dev, slave, id, RES_MPT); 2646 put_res(dev, slave, id, RES_MPT);