diff options
author | Leon Romanovsky <leonro@mellanox.com> | 2018-12-26 08:22:12 -0500 |
---|---|---|
committer | Jason Gunthorpe <jgg@mellanox.com> | 2019-01-02 11:40:34 -0500 |
commit | ccffa545485714dcb001c78ffaa575a8192ed5e4 (patch) | |
tree | 56ab241285187306f42f1608fddf1f71d307e4d2 | |
parent | 7422edce73559d2985f322145c865cea2da3c152 (diff) |
Revert "IB/mlx5: Fix long EEH recover time with NVMe offloads"
Longer-term testing shows this patch didn't play well with the MR cache and
caused call traces during remove_mkeys().
This reverts commit bb7e22a8ab00ff9ba911a45ba8784cef9e6d6f7a.
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
-rw-r--r-- | drivers/infiniband/hw/mlx5/mr.c | 19 |
1 file changed, 3 insertions, 16 deletions
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 1bd8c1b1dba1..fd6ea1f75085 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c | |||
@@ -73,8 +73,7 @@ static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) | |||
73 | 73 | ||
74 | #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING | 74 | #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING |
75 | /* Wait until all page fault handlers using the mr complete. */ | 75 | /* Wait until all page fault handlers using the mr complete. */ |
76 | if (mr->umem && mr->umem->is_odp) | 76 | synchronize_srcu(&dev->mr_srcu); |
77 | synchronize_srcu(&dev->mr_srcu); | ||
78 | #endif | 77 | #endif |
79 | 78 | ||
80 | return err; | 79 | return err; |
@@ -238,9 +237,6 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num) | |||
238 | { | 237 | { |
239 | struct mlx5_mr_cache *cache = &dev->cache; | 238 | struct mlx5_mr_cache *cache = &dev->cache; |
240 | struct mlx5_cache_ent *ent = &cache->ent[c]; | 239 | struct mlx5_cache_ent *ent = &cache->ent[c]; |
241 | #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING | ||
242 | bool odp_mkey_exist = false; | ||
243 | #endif | ||
244 | struct mlx5_ib_mr *tmp_mr; | 240 | struct mlx5_ib_mr *tmp_mr; |
245 | struct mlx5_ib_mr *mr; | 241 | struct mlx5_ib_mr *mr; |
246 | LIST_HEAD(del_list); | 242 | LIST_HEAD(del_list); |
@@ -253,10 +249,6 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num) | |||
253 | break; | 249 | break; |
254 | } | 250 | } |
255 | mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); | 251 | mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); |
256 | #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING | ||
257 | if (mr->umem && mr->umem->is_odp) | ||
258 | odp_mkey_exist = true; | ||
259 | #endif | ||
260 | list_move(&mr->list, &del_list); | 252 | list_move(&mr->list, &del_list); |
261 | ent->cur--; | 253 | ent->cur--; |
262 | ent->size--; | 254 | ent->size--; |
@@ -265,8 +257,7 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num) | |||
265 | } | 257 | } |
266 | 258 | ||
267 | #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING | 259 | #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING |
268 | if (odp_mkey_exist) | 260 | synchronize_srcu(&dev->mr_srcu); |
269 | synchronize_srcu(&dev->mr_srcu); | ||
270 | #endif | 261 | #endif |
271 | 262 | ||
272 | list_for_each_entry_safe(mr, tmp_mr, &del_list, list) { | 263 | list_for_each_entry_safe(mr, tmp_mr, &del_list, list) { |
@@ -581,7 +572,6 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c) | |||
581 | { | 572 | { |
582 | struct mlx5_mr_cache *cache = &dev->cache; | 573 | struct mlx5_mr_cache *cache = &dev->cache; |
583 | struct mlx5_cache_ent *ent = &cache->ent[c]; | 574 | struct mlx5_cache_ent *ent = &cache->ent[c]; |
584 | bool odp_mkey_exist = false; | ||
585 | struct mlx5_ib_mr *tmp_mr; | 575 | struct mlx5_ib_mr *tmp_mr; |
586 | struct mlx5_ib_mr *mr; | 576 | struct mlx5_ib_mr *mr; |
587 | LIST_HEAD(del_list); | 577 | LIST_HEAD(del_list); |
@@ -594,8 +584,6 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c) | |||
594 | break; | 584 | break; |
595 | } | 585 | } |
596 | mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); | 586 | mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); |
597 | if (mr->umem && mr->umem->is_odp) | ||
598 | odp_mkey_exist = true; | ||
599 | list_move(&mr->list, &del_list); | 587 | list_move(&mr->list, &del_list); |
600 | ent->cur--; | 588 | ent->cur--; |
601 | ent->size--; | 589 | ent->size--; |
@@ -604,8 +592,7 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c) | |||
604 | } | 592 | } |
605 | 593 | ||
606 | #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING | 594 | #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING |
607 | if (odp_mkey_exist) | 595 | synchronize_srcu(&dev->mr_srcu); |
608 | synchronize_srcu(&dev->mr_srcu); | ||
609 | #endif | 596 | #endif |
610 | 597 | ||
611 | list_for_each_entry_safe(mr, tmp_mr, &del_list, list) { | 598 | list_for_each_entry_safe(mr, tmp_mr, &del_list, list) { |