path: root/drivers/net/ethernet
author     Linus Torvalds <torvalds@linux-foundation.org>  2013-02-26 14:41:08 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-02-26 14:41:08 -0500
commit     70a3a06d01ed9ca887316a881813cdefb8a20170 (patch)
tree       fbdb7982040ba77818e4b738d76eef8bb06fb47f /drivers/net/ethernet
parent     f6c0ffa8f0b0781f4954cb06f0a81d6c10c1b434 (diff)
parent     ef4e359d9b9e2dc022f79840fd207796b524a893 (diff)
Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
Pull infiniband update from Roland Dreier:
 "Main batch of InfiniBand/RDMA changes for 3.9:

  - SRP error handling fixes from Bart Van Assche

  - Implementation of memory windows for mlx4 from Shani Michaeli

  - Lots of cxgb4 HW driver fixes from Vipul Pandya

  - Make iSER work for virtual functions, other fixes from Or Gerlitz

  - Fix for bug in qib HW driver from Mike Marciniszyn

  - IPoIB fixes from me, Itai Garbi, Shlomo Pongratz, Yan Burman

  - Various cleanups and warning fixes from Julia Lawall, Paul Bolle,
    Wei Yongjun"

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (41 commits)
  IB/mlx4: Advertise MW support
  IB/mlx4: Support memory window binding
  mlx4: Implement memory windows allocation and deallocation
  mlx4_core: Enable memory windows in {INIT, QUERY}_HCA
  mlx4_core: Disable memory windows for virtual functions
  IPoIB: Free ipoib neigh on path record failure so path rec queries are retried
  IB/srp: Fail I/O requests if the transport is offline
  IB/srp: Avoid endless SCSI error handling loop
  IB/srp: Avoid sending a task management function needlessly
  IB/srp: Track connection state properly
  IB/mlx4: Remove redundant NULL check before kfree
  IB/mlx4: Fix compiler warning about uninitialized 'vlan' variable
  IB/mlx4: Convert is_xxx variables in build_mlx_header() to bool
  IB/iser: Enable iser when FMRs are not supported
  IB/iser: Avoid error prints on EAGAIN registration failures
  IB/iser: Use proper define for the commands per LUN value advertised to SCSI ML
  IB/uverbs: Implement memory windows support in uverbs
  IB/core: Add "type 2" memory windows support
  mlx4_core: Propagate MR deregistration failures to caller
  mlx4_core: Rename MPT-related functions to have mpt_ prefix
  ...
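The mlx4_core entry points added in mr.c below (mlx4_mw_alloc(), mlx4_mw_enable(), mlx4_mw_free()) are what an upper-layer driver such as mlx4_ib would call to create and tear down a memory window. A minimal consumer sketch follows; only the mlx4_mw_* calls, the MLX4_MW_TYPE_2 type, and the mw->key field come from this series, while the wrapper function and its error handling are illustrative assumptions:

    /* Hypothetical consumer of the new mlx4_core MW API (sketch only). */
    static int example_alloc_type2_mw(struct mlx4_dev *dev, u32 pdn,
                                      struct mlx4_mw *mw)
    {
            int err;

            /* Reserve an MPT entry and fill in the software state. */
            err = mlx4_mw_alloc(dev, pdn, MLX4_MW_TYPE_2, mw);
            if (err)
                    return err;

            /* Write the MPT entry and hand it to the HCA (SW2HW_MPT). */
            err = mlx4_mw_enable(dev, mw);
            if (err) {
                    mlx4_mw_free(dev, mw);  /* releases the reserved MPT */
                    return err;
            }

            /* mw->key is now the rkey a QP can bind through a bind WR. */
            return 0;
    }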
Diffstat (limited to 'drivers/net/ethernet')
 -rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_main.c           |   4
 -rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c                |  14
 -rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.h                |   1
 -rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c              |   4
 -rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h              |  34
 -rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mr.c                | 186
 -rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c  |  63
 7 files changed, 243 insertions(+), 63 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index b2cca58de910..fc27800e9c38 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -198,7 +198,7 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
 
         flush_workqueue(mdev->workqueue);
         destroy_workqueue(mdev->workqueue);
-        mlx4_mr_free(dev, &mdev->mr);
+        (void) mlx4_mr_free(dev, &mdev->mr);
         iounmap(mdev->uar_map);
         mlx4_uar_free(dev, &mdev->priv_uar);
         mlx4_pd_free(dev, mdev->priv_pdn);
@@ -303,7 +303,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
         return mdev;
 
 err_mr:
-        mlx4_mr_free(dev, &mdev->mr);
+        (void) mlx4_mr_free(dev, &mdev->mr);
 err_map:
         if (!mdev->uar_map)
                 iounmap(mdev->uar_map);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 38b62c78d5da..50917eb3013e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -762,15 +762,19 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
         u64 flags;
         int err = 0;
         u8 field;
+        u32 bmme_flags;
 
         err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
                            MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
         if (err)
                 return err;
 
-        /* add port mng change event capability unconditionally to slaves */
+        /* add port mng change event capability and disable mw type 1
+         * unconditionally to slaves
+         */
         MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
         flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV;
+        flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW;
         MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
 
         /* For guests, report Blueflame disabled */
@@ -778,6 +782,11 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
         field &= 0x7f;
         MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);
 
+        /* For guests, disable mw type 2 */
+        MLX4_GET(bmme_flags, outbox, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
+        bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
+        MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
+
         return 0;
 }
 
@@ -1203,6 +1212,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 #define INIT_HCA_FS_IB_NUM_ADDRS_OFFSET  (INIT_HCA_FS_PARAM_OFFSET + 0x26)
 #define INIT_HCA_TPT_OFFSET              0x0f0
 #define INIT_HCA_DMPT_BASE_OFFSET        (INIT_HCA_TPT_OFFSET + 0x00)
+#define INIT_HCA_TPT_MW_OFFSET           (INIT_HCA_TPT_OFFSET + 0x08)
 #define INIT_HCA_LOG_MPT_SZ_OFFSET       (INIT_HCA_TPT_OFFSET + 0x0b)
 #define INIT_HCA_MTT_BASE_OFFSET         (INIT_HCA_TPT_OFFSET + 0x10)
 #define INIT_HCA_CMPT_BASE_OFFSET        (INIT_HCA_TPT_OFFSET + 0x18)
@@ -1319,6 +1329,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
         /* TPT attributes */
 
         MLX4_PUT(inbox, param->dmpt_base,  INIT_HCA_DMPT_BASE_OFFSET);
+        MLX4_PUT(inbox, param->mw_enabled, INIT_HCA_TPT_MW_OFFSET);
         MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
         MLX4_PUT(inbox, param->mtt_base,   INIT_HCA_MTT_BASE_OFFSET);
         MLX4_PUT(inbox, param->cmpt_base,  INIT_HCA_CMPT_BASE_OFFSET);
@@ -1415,6 +1426,7 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
         /* TPT attributes */
 
         MLX4_GET(param->dmpt_base,  outbox, INIT_HCA_DMPT_BASE_OFFSET);
+        MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET);
         MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
         MLX4_GET(param->mtt_base,   outbox, INIT_HCA_MTT_BASE_OFFSET);
         MLX4_GET(param->cmpt_base,  outbox, INIT_HCA_CMPT_BASE_OFFSET);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 3af33ff669cc..151c2bb380a6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -170,6 +170,7 @@ struct mlx4_init_hca_param {
         u8  log_mc_table_sz;
         u8  log_mpt_sz;
         u8  log_uar_sz;
+        u8  mw_enabled;  /* Enable memory windows */
         u8  uar_page_sz; /* log pg sz in 4k chunks */
         u8  steering_mode; /* for QUERY_HCA */
         u64 dev_cap_enabled;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index b9dde139dac5..d180bc46826a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1431,6 +1431,10 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
 
         init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
         init_hca.uar_page_sz = PAGE_SHIFT - 12;
+        init_hca.mw_enabled = 0;
+        if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
+            dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
+                init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE;
 
         err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
         if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index ed4a6959e828..cf883345af88 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -60,6 +60,8 @@
 #define MLX4_FS_MGM_LOG_ENTRY_SIZE     7
 #define MLX4_FS_NUM_MCG                (1 << 17)
 
+#define INIT_HCA_TPT_MW_ENABLE         (1 << 7)
+
 #define MLX4_NUM_UP            8
 #define MLX4_NUM_TC            8
 #define MLX4_RATELIMIT_UNITS   3 /* 100 Mbps */
@@ -113,10 +115,10 @@ enum {
         MLX4_NUM_CMPTS = MLX4_CMPT_NUM_TYPE << MLX4_CMPT_SHIFT
 };
 
-enum mlx4_mr_state {
-        MLX4_MR_DISABLED = 0,
-        MLX4_MR_EN_HW,
-        MLX4_MR_EN_SW
+enum mlx4_mpt_state {
+        MLX4_MPT_DISABLED = 0,
+        MLX4_MPT_EN_HW,
+        MLX4_MPT_EN_SW
 };
 
 #define MLX4_COMM_TIME         10000
@@ -263,6 +265,22 @@ struct mlx4_icm_table {
         struct mlx4_icm       **icm;
 };
 
+#define MLX4_MPT_FLAG_SW_OWNS          (0xfUL << 28)
+#define MLX4_MPT_FLAG_FREE             (0x3UL << 28)
+#define MLX4_MPT_FLAG_MIO              (1 << 17)
+#define MLX4_MPT_FLAG_BIND_ENABLE      (1 << 15)
+#define MLX4_MPT_FLAG_PHYSICAL         (1 << 9)
+#define MLX4_MPT_FLAG_REGION           (1 << 8)
+
+#define MLX4_MPT_PD_FLAG_FAST_REG      (1 << 27)
+#define MLX4_MPT_PD_FLAG_RAE           (1 << 28)
+#define MLX4_MPT_PD_FLAG_EN_INV        (3 << 24)
+
+#define MLX4_MPT_QP_FLAG_BOUND_QP      (1 << 7)
+
+#define MLX4_MPT_STATUS_SW             0xF0
+#define MLX4_MPT_STATUS_HW             0x00
+
 /*
  * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
  */
@@ -863,10 +881,10 @@ int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn);
 void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn);
 int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn);
 void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn);
-int __mlx4_mr_reserve(struct mlx4_dev *dev);
-void __mlx4_mr_release(struct mlx4_dev *dev, u32 index);
-int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index);
-void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index);
+int __mlx4_mpt_reserve(struct mlx4_dev *dev);
+void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index);
+int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index);
+void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index);
 u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order);
 void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order);
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index c202d3ad2a0e..602ca9bf78e4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -44,20 +44,6 @@
 #include "mlx4.h"
 #include "icm.h"
 
-#define MLX4_MPT_FLAG_SW_OWNS          (0xfUL << 28)
-#define MLX4_MPT_FLAG_FREE             (0x3UL << 28)
-#define MLX4_MPT_FLAG_MIO              (1 << 17)
-#define MLX4_MPT_FLAG_BIND_ENABLE      (1 << 15)
-#define MLX4_MPT_FLAG_PHYSICAL         (1 << 9)
-#define MLX4_MPT_FLAG_REGION           (1 << 8)
-
-#define MLX4_MPT_PD_FLAG_FAST_REG      (1 << 27)
-#define MLX4_MPT_PD_FLAG_RAE           (1 << 28)
-#define MLX4_MPT_PD_FLAG_EN_INV        (3 << 24)
-
-#define MLX4_MPT_STATUS_SW             0xF0
-#define MLX4_MPT_STATUS_HW             0x00
-
 static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
 {
         int o;
@@ -321,7 +307,7 @@ static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
         mr->size = size;
         mr->pd = pd;
         mr->access = access;
-        mr->enabled = MLX4_MR_DISABLED;
+        mr->enabled = MLX4_MPT_DISABLED;
         mr->key = hw_index_to_key(mridx);
 
         return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
@@ -335,14 +321,14 @@ static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
                         MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 }
 
-int __mlx4_mr_reserve(struct mlx4_dev *dev)
+int __mlx4_mpt_reserve(struct mlx4_dev *dev)
 {
         struct mlx4_priv *priv = mlx4_priv(dev);
 
         return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
 }
 
-static int mlx4_mr_reserve(struct mlx4_dev *dev)
+static int mlx4_mpt_reserve(struct mlx4_dev *dev)
 {
         u64 out_param;
 
@@ -353,17 +339,17 @@ static int mlx4_mr_reserve(struct mlx4_dev *dev)
                         return -1;
                 return get_param_l(&out_param);
         }
-        return __mlx4_mr_reserve(dev);
+        return __mlx4_mpt_reserve(dev);
 }
 
-void __mlx4_mr_release(struct mlx4_dev *dev, u32 index)
+void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
 {
         struct mlx4_priv *priv = mlx4_priv(dev);
 
         mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
 }
 
-static void mlx4_mr_release(struct mlx4_dev *dev, u32 index)
+static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
 {
         u64 in_param;
 
@@ -376,17 +362,17 @@ static void mlx4_mr_release(struct mlx4_dev *dev, u32 index)
                           index);
                 return;
         }
-        __mlx4_mr_release(dev, index);
+        __mlx4_mpt_release(dev, index);
 }
 
-int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
+int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
 {
         struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
 
         return mlx4_table_get(dev, &mr_table->dmpt_table, index);
 }
 
-static int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
+static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
 {
         u64 param;
 
@@ -397,17 +383,17 @@ static int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
                                    MLX4_CMD_TIME_CLASS_A,
                                    MLX4_CMD_WRAPPED);
         }
-        return __mlx4_mr_alloc_icm(dev, index);
+        return __mlx4_mpt_alloc_icm(dev, index);
 }
 
-void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
+void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
 {
         struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
 
         mlx4_table_put(dev, &mr_table->dmpt_table, index);
 }
 
-static void mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
+static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
 {
         u64 in_param;
 
@@ -420,7 +406,7 @@ static void mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
                           index);
                 return;
         }
-        return __mlx4_mr_free_icm(dev, index);
+        return __mlx4_mpt_free_icm(dev, index);
 }
 
 int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
@@ -429,41 +415,52 @@ int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
         u32 index;
         int err;
 
-        index = mlx4_mr_reserve(dev);
+        index = mlx4_mpt_reserve(dev);
         if (index == -1)
                 return -ENOMEM;
 
         err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
                                      access, npages, page_shift, mr);
         if (err)
-                mlx4_mr_release(dev, index);
+                mlx4_mpt_release(dev, index);
 
         return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_alloc);
 
-static void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
+static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
 {
         int err;
 
-        if (mr->enabled == MLX4_MR_EN_HW) {
+        if (mr->enabled == MLX4_MPT_EN_HW) {
                 err = mlx4_HW2SW_MPT(dev, NULL,
                                      key_to_hw_index(mr->key) &
                                      (dev->caps.num_mpts - 1));
-                if (err)
-                        mlx4_warn(dev, "xxx HW2SW_MPT failed (%d)\n", err);
+                if (err) {
+                        mlx4_warn(dev, "HW2SW_MPT failed (%d),", err);
+                        mlx4_warn(dev, "MR has MWs bound to it.\n");
+                        return err;
+                }
 
-                mr->enabled = MLX4_MR_EN_SW;
+                mr->enabled = MLX4_MPT_EN_SW;
         }
         mlx4_mtt_cleanup(dev, &mr->mtt);
+
+        return 0;
 }
 
-void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
+int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
 {
-        mlx4_mr_free_reserved(dev, mr);
+        int ret;
+
+        ret = mlx4_mr_free_reserved(dev, mr);
+        if (ret)
+                return ret;
         if (mr->enabled)
-                mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
-        mlx4_mr_release(dev, key_to_hw_index(mr->key));
+                mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
+        mlx4_mpt_release(dev, key_to_hw_index(mr->key));
+
+        return 0;
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_free);
 
@@ -473,7 +470,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
         struct mlx4_mpt_entry *mpt_entry;
         int err;
 
-        err = mlx4_mr_alloc_icm(dev, key_to_hw_index(mr->key));
+        err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key));
         if (err)
                 return err;
 
@@ -520,7 +517,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
                 mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
                 goto err_cmd;
         }
-        mr->enabled = MLX4_MR_EN_HW;
+        mr->enabled = MLX4_MPT_EN_HW;
 
         mlx4_free_cmd_mailbox(dev, mailbox);
 
@@ -530,7 +527,7 @@ err_cmd:
         mlx4_free_cmd_mailbox(dev, mailbox);
 
 err_table:
-        mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
+        mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
         return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_enable);
@@ -657,6 +654,101 @@ int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 }
 EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);
 
+int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type,
+                  struct mlx4_mw *mw)
+{
+        u32 index;
+
+        if ((type == MLX4_MW_TYPE_1 &&
+             !(dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)) ||
+             (type == MLX4_MW_TYPE_2 &&
+              !(dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)))
+                return -ENOTSUPP;
+
+        index = mlx4_mpt_reserve(dev);
+        if (index == -1)
+                return -ENOMEM;
+
+        mw->key = hw_index_to_key(index);
+        mw->pd = pd;
+        mw->type = type;
+        mw->enabled = MLX4_MPT_DISABLED;
+
+        return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_mw_alloc);
+
+int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw)
+{
+        struct mlx4_cmd_mailbox *mailbox;
+        struct mlx4_mpt_entry *mpt_entry;
+        int err;
+
+        err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key));
+        if (err)
+                return err;
+
+        mailbox = mlx4_alloc_cmd_mailbox(dev);
+        if (IS_ERR(mailbox)) {
+                err = PTR_ERR(mailbox);
+                goto err_table;
+        }
+        mpt_entry = mailbox->buf;
+
+        memset(mpt_entry, 0, sizeof(*mpt_entry));
+
+        /* Note that the MLX4_MPT_FLAG_REGION bit in mpt_entry->flags is turned
+         * off, thus creating a memory window and not a memory region.
+         */
+        mpt_entry->key = cpu_to_be32(key_to_hw_index(mw->key));
+        mpt_entry->pd_flags = cpu_to_be32(mw->pd);
+        if (mw->type == MLX4_MW_TYPE_2) {
+                mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
+                mpt_entry->qpn = cpu_to_be32(MLX4_MPT_QP_FLAG_BOUND_QP);
+                mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_EN_INV);
+        }
+
+        err = mlx4_SW2HW_MPT(dev, mailbox,
+                             key_to_hw_index(mw->key) &
+                             (dev->caps.num_mpts - 1));
+        if (err) {
+                mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
+                goto err_cmd;
+        }
+        mw->enabled = MLX4_MPT_EN_HW;
+
+        mlx4_free_cmd_mailbox(dev, mailbox);
+
+        return 0;
+
+err_cmd:
+        mlx4_free_cmd_mailbox(dev, mailbox);
+
+err_table:
+        mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key));
+        return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_mw_enable);
+
+void mlx4_mw_free(struct mlx4_dev *dev, struct mlx4_mw *mw)
+{
+        int err;
+
+        if (mw->enabled == MLX4_MPT_EN_HW) {
+                err = mlx4_HW2SW_MPT(dev, NULL,
+                                     key_to_hw_index(mw->key) &
+                                     (dev->caps.num_mpts - 1));
+                if (err)
+                        mlx4_warn(dev, "xxx HW2SW_MPT failed (%d)\n", err);
+
+                mw->enabled = MLX4_MPT_EN_SW;
+        }
+        if (mw->enabled)
+                mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key));
+        mlx4_mpt_release(dev, key_to_hw_index(mw->key));
+}
+EXPORT_SYMBOL_GPL(mlx4_mw_free);
+
 int mlx4_init_mr_table(struct mlx4_dev *dev)
 {
         struct mlx4_priv *priv = mlx4_priv(dev);
@@ -831,7 +923,7 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
         return 0;
 
 err_free:
-        mlx4_mr_free(dev, &fmr->mr);
+        (void) mlx4_mr_free(dev, &fmr->mr);
         return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);
@@ -882,17 +974,21 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
                           err);
                 return;
         }
-        fmr->mr.enabled = MLX4_MR_EN_SW;
+        fmr->mr.enabled = MLX4_MPT_EN_SW;
 }
 EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
 
 int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
 {
+        int ret;
+
         if (fmr->maps)
                 return -EBUSY;
 
-        mlx4_mr_free(dev, &fmr->mr);
-        fmr->mr.enabled = MLX4_MR_DISABLED;
+        ret = mlx4_mr_free(dev, &fmr->mr);
+        if (ret)
+                return ret;
+        fmr->mr.enabled = MLX4_MPT_DISABLED;
 
         return 0;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 5997adc943d0..083fb48dc3d7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1231,14 +1231,14 @@ static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
 
         switch (op) {
         case RES_OP_RESERVE:
-                index = __mlx4_mr_reserve(dev);
+                index = __mlx4_mpt_reserve(dev);
                 if (index == -1)
                         break;
                 id = index & mpt_mask(dev);
 
                 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
                 if (err) {
-                        __mlx4_mr_release(dev, index);
+                        __mlx4_mpt_release(dev, index);
                         break;
                 }
                 set_param_l(out_param, index);
@@ -1251,7 +1251,7 @@ static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                 if (err)
                         return err;
 
-                err = __mlx4_mr_alloc_icm(dev, mpt->key);
+                err = __mlx4_mpt_alloc_icm(dev, mpt->key);
                 if (err) {
                         res_abort_move(dev, slave, RES_MPT, id);
                         return err;
@@ -1586,7 +1586,7 @@ static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
                 if (err)
                         break;
-                __mlx4_mr_release(dev, index);
+                __mlx4_mpt_release(dev, index);
                 break;
         case RES_OP_MAP_ICM:
                 index = get_param_l(&in_param);
@@ -1596,7 +1596,7 @@ static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                 if (err)
                         return err;
 
-                __mlx4_mr_free_icm(dev, mpt->key);
+                __mlx4_mpt_free_icm(dev, mpt->key);
                 res_end_move(dev, slave, RES_MPT, id);
                 return err;
                 break;
@@ -1796,6 +1796,26 @@ static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
         return be32_to_cpu(mpt->mtt_sz);
 }
 
+static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
+{
+        return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
+}
+
+static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
+{
+        return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
+}
+
+static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
+{
+        return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
+}
+
+static int mr_is_region(struct mlx4_mpt_entry *mpt)
+{
+        return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
+}
+
 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
 {
         return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
@@ -1856,12 +1876,41 @@ int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
         int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
         int phys;
         int id;
+        u32 pd;
+        int pd_slave;
 
         id = index & mpt_mask(dev);
         err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
         if (err)
                 return err;
 
+        /* Disable memory windows for VFs. */
+        if (!mr_is_region(inbox->buf)) {
+                err = -EPERM;
+                goto ex_abort;
+        }
+
+        /* Make sure that the PD bits related to the slave id are zeros. */
+        pd = mr_get_pd(inbox->buf);
+        pd_slave = (pd >> 17) & 0x7f;
+        if (pd_slave != 0 && pd_slave != slave) {
+                err = -EPERM;
+                goto ex_abort;
+        }
+
+        if (mr_is_fmr(inbox->buf)) {
+                /* FMR and Bind Enable are forbidden in slave devices. */
+                if (mr_is_bind_enabled(inbox->buf)) {
+                        err = -EPERM;
+                        goto ex_abort;
+                }
+                /* FMR and Memory Windows are also forbidden. */
+                if (!mr_is_region(inbox->buf)) {
+                        err = -EPERM;
+                        goto ex_abort;
+                }
+        }
+
         phys = mr_phys_mpt(inbox->buf);
         if (!phys) {
                 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
@@ -3480,7 +3529,7 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
                         while (state != 0) {
                                 switch (state) {
                                 case RES_MPT_RESERVED:
-                                        __mlx4_mr_release(dev, mpt->key);
+                                        __mlx4_mpt_release(dev, mpt->key);
                                         spin_lock_irq(mlx4_tlock(dev));
                                         rb_erase(&mpt->com.node,
                                                  &tracker->res_tree[RES_MPT]);
@@ -3491,7 +3540,7 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
                                         break;
 
                                 case RES_MPT_MAPPED:
-                                        __mlx4_mr_free_icm(dev, mpt->key);
+                                        __mlx4_mpt_free_icm(dev, mpt->key);
                                         state = RES_MPT_RESERVED;
                                         break;
 