author	Eli Cohen <eli@dev.mellanox.co.il>	2013-10-23 02:53:14 -0400
committer	Roland Dreier <roland@purestorage.com>	2013-11-08 17:42:59 -0500
commit	746b5583c1a48a837f4891adaff5e09d61b204a6 (patch)
tree	c939fde4e644095bcf476787aebe031f20150d0c
parent	51ee86a4af639e4ee8953dd02ad8a766c40f46a1 (diff)
IB/mlx5: Multithreaded create MR
Use asynchronous commands to execute up to eight concurrent create MR
commands. This fills the memory caches faster, so MR allocations can keep
being served from there. Also, increase the timeout for shrinking the
caches to five minutes.

Signed-off-by: Eli Cohen <eli@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
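For orientation: the patch threads three new trailing arguments through mlx5_core_create_mkey(), namely a completion callback, a context pointer handed back to it, and a caller-owned output mailbox. Passing NULL for all three keeps the old blocking behavior. A minimal sketch of the two calling conventions, lifted from the hunks below:

	/* Synchronous, as in mlx5_ib_get_dma_mr(): unchanged semantics. */
	err = mlx5_core_create_mkey(mdev, &mr->mmr, in, sizeof(*in),
				    NULL, NULL, NULL);

	/* Asynchronous, as in add_keys(): returns once the command is
	 * queued; reg_mr_callback() later gets the status, and the
	 * mailbox result lands in mr->out. */
	err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in),
				    reg_mr_callback, mr, &mr->out);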
-rw-r--r--	drivers/infiniband/hw/mlx5/main.c	3
-rw-r--r--	drivers/infiniband/hw/mlx5/mlx5_ib.h	6
-rw-r--r--	drivers/infiniband/hw/mlx5/mr.c	163
-rw-r--r--	drivers/infiniband/hw/mlx5/qp.c	4
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/cmd.c	106
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/debugfs.c	8
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/mr.c	32
-rw-r--r--	include/linux/mlx5/driver.h	17
8 files changed, 255 insertions(+), 84 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index b1a6cb3a2809..306534109627 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -745,7 +745,8 @@ static int alloc_pa_mkey(struct mlx5_ib_dev *dev, u32 *key, u32 pdn)
 	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
 	seg->start_addr = 0;
 
-	err = mlx5_core_create_mkey(&dev->mdev, &mr, in, sizeof(*in));
+	err = mlx5_core_create_mkey(&dev->mdev, &mr, in, sizeof(*in),
+				    NULL, NULL, NULL);
 	if (err) {
 		mlx5_ib_warn(dev, "failed to create mkey, %d\n", err);
 		goto err_in;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 836be9157242..4c134d93d4fc 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -262,6 +262,9 @@ struct mlx5_ib_mr {
 	int			npages;
 	struct completion	done;
 	enum ib_wc_status	status;
+	struct mlx5_ib_dev     *dev;
+	struct mlx5_create_mkey_mbox_out out;
+	unsigned long		start;
 };
 
 struct mlx5_ib_fast_reg_page_list {
@@ -323,6 +326,7 @@ struct mlx5_cache_ent {
 	struct mlx5_ib_dev     *dev;
 	struct work_struct	work;
 	struct delayed_work	dwork;
+	int			pending;
 };
 
 struct mlx5_mr_cache {
@@ -358,6 +362,8 @@ struct mlx5_ib_dev {
 	spinlock_t		mr_lock;
 	struct mlx5_ib_resources	devr;
 	struct mlx5_mr_cache		cache;
+	struct timer_list	delay_timer;
+	int			fill_delay;
 };
 
 static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 3453580b1eb2..d7f202290747 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -35,11 +35,13 @@
 #include <linux/random.h>
 #include <linux/debugfs.h>
 #include <linux/export.h>
+#include <linux/delay.h>
 #include <rdma/ib_umem.h>
 #include "mlx5_ib.h"
 
 enum {
 	DEF_CACHE_SIZE	= 10,
+	MAX_PENDING_REG_MR = 8,
 };
 
 enum {
@@ -63,6 +65,57 @@ static int order2idx(struct mlx5_ib_dev *dev, int order)
 	return order - cache->ent[0].order;
 }
 
+static void reg_mr_callback(int status, void *context)
+{
+	struct mlx5_ib_mr *mr = context;
+	struct mlx5_ib_dev *dev = mr->dev;
+	struct mlx5_mr_cache *cache = &dev->cache;
+	int c = order2idx(dev, mr->order);
+	struct mlx5_cache_ent *ent = &cache->ent[c];
+	u8 key;
+	unsigned long delta = jiffies - mr->start;
+	unsigned long index;
+	unsigned long flags;
+
+	index = find_last_bit(&delta, 8 * sizeof(delta));
+	if (index == 64)
+		index = 0;
+
+	spin_lock_irqsave(&ent->lock, flags);
+	ent->pending--;
+	spin_unlock_irqrestore(&ent->lock, flags);
+	if (status) {
+		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
+		kfree(mr);
+		dev->fill_delay = 1;
+		mod_timer(&dev->delay_timer, jiffies + HZ);
+		return;
+	}
+
+	if (mr->out.hdr.status) {
+		mlx5_ib_warn(dev, "failed - status %d, syndrome 0x%x\n",
+			     mr->out.hdr.status,
+			     be32_to_cpu(mr->out.hdr.syndrome));
+		kfree(mr);
+		dev->fill_delay = 1;
+		mod_timer(&dev->delay_timer, jiffies + HZ);
+		return;
+	}
+
+	spin_lock_irqsave(&dev->mdev.priv.mkey_lock, flags);
+	key = dev->mdev.priv.mkey_key++;
+	spin_unlock_irqrestore(&dev->mdev.priv.mkey_lock, flags);
+	mr->mmr.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;
+
+	cache->last_add = jiffies;
+
+	spin_lock_irqsave(&ent->lock, flags);
+	list_add_tail(&mr->list, &ent->head);
+	ent->cur++;
+	ent->size++;
+	spin_unlock_irqrestore(&ent->lock, flags);
+}
+
 static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 {
 	struct mlx5_mr_cache *cache = &dev->cache;
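For a nonzero delta, find_last_bit(&delta, 8 * sizeof(delta)) above yields the index of the most-significant set bit, i.e. floor(log2()) of the time the command spent in flight in jiffies; for zero it returns the bit width (64), which the callback folds back to 0. Note the bucket index is computed but not consumed anywhere else in this hunk. A standalone userspace sketch of the same computation, assuming a 64-bit unsigned long and GCC/Clang builtins:

	#include <stdio.h>

	/* Userspace stand-in for find_last_bit(&delta, 64): index of the
	 * most-significant set bit, i.e. floor(log2(delta)); 64 for 0. */
	static unsigned long msb_index(unsigned long delta)
	{
		return delta ? 63 - (unsigned long)__builtin_clzl(delta) : 64;
	}

	int main(void)
	{
		unsigned long deltas[] = { 0, 1, 7, 8, 250, 1000 };
		unsigned int i;

		for (i = 0; i < sizeof(deltas) / sizeof(deltas[0]); i++)
			printf("delta=%4lu jiffies -> bucket %lu\n",
			       deltas[i], msb_index(deltas[i]));
		return 0;
	}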
@@ -78,36 +131,39 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 		return -ENOMEM;
 
 	for (i = 0; i < num; i++) {
+		if (ent->pending >= MAX_PENDING_REG_MR) {
+			err = -EAGAIN;
+			break;
+		}
+
 		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
 		if (!mr) {
 			err = -ENOMEM;
-			goto out;
+			break;
 		}
 		mr->order = ent->order;
 		mr->umred = 1;
+		mr->dev = dev;
 		in->seg.status = 1 << 6;
 		in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
 		in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
 		in->seg.flags = MLX5_ACCESS_MODE_MTT | MLX5_PERM_UMR_EN;
 		in->seg.log2_page_size = 12;
 
+		spin_lock_irq(&ent->lock);
+		ent->pending++;
+		spin_unlock_irq(&ent->lock);
+		mr->start = jiffies;
 		err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in,
-					    sizeof(*in));
+					    sizeof(*in), reg_mr_callback,
+					    mr, &mr->out);
 		if (err) {
 			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
 			kfree(mr);
-			goto out;
+			break;
 		}
-		cache->last_add = jiffies;
-
-		spin_lock(&ent->lock);
-		list_add_tail(&mr->list, &ent->head);
-		ent->cur++;
-		ent->size++;
-		spin_unlock(&ent->lock);
 	}
 
-out:
 	kfree(in);
 	return err;
 }
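The new pending counter caps each cache entry at MAX_PENDING_REG_MR (eight) create commands in flight: add_keys() bails out with -EAGAIN once the budget is spent, and reg_mr_callback() returns the credit. A deliberately simplified, single-threaded toy model of that credit scheme (in the driver the counter is ent->pending, protected by ent->lock):

	#include <errno.h>
	#include <stdio.h>

	#define MAX_PENDING_REG_MR 8

	static int pending;

	static int submit_one(void)
	{
		if (pending >= MAX_PENDING_REG_MR)
			return -EAGAIN;	/* caller backs off and retries */
		pending++;		/* command now in flight */
		return 0;
	}

	static void complete_one(void)
	{
		pending--;		/* reg_mr_callback() side */
	}

	int main(void)
	{
		int i;

		for (i = 0; i < 10; i++)
			printf("submit %2d -> %s\n", i,
			       submit_one() ? "EAGAIN" : "ok");
		complete_one();
		printf("after one completion -> %s\n",
		       submit_one() ? "EAGAIN" : "ok");
		return 0;
	}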
@@ -121,16 +177,16 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
 	int i;
 
 	for (i = 0; i < num; i++) {
-		spin_lock(&ent->lock);
+		spin_lock_irq(&ent->lock);
 		if (list_empty(&ent->head)) {
-			spin_unlock(&ent->lock);
+			spin_unlock_irq(&ent->lock);
 			return;
 		}
 		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
 		list_del(&mr->list);
 		ent->cur--;
 		ent->size--;
-		spin_unlock(&ent->lock);
+		spin_unlock_irq(&ent->lock);
 		err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
 		if (err)
 			mlx5_ib_warn(dev, "failed destroy mkey\n");
@@ -162,9 +218,13 @@ static ssize_t size_write(struct file *filp, const char __user *buf,
 		return -EINVAL;
 
 	if (var > ent->size) {
-		err = add_keys(dev, c, var - ent->size);
-		if (err)
-			return err;
+		do {
+			err = add_keys(dev, c, var - ent->size);
+			if (err && err != -EAGAIN)
+				return err;
+
+			usleep_range(3000, 5000);
+		} while (err);
 	} else if (var < ent->size) {
 		remove_keys(dev, c, ent->size - var);
 	}
@@ -280,23 +340,37 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
 	struct mlx5_ib_dev *dev = ent->dev;
 	struct mlx5_mr_cache *cache = &dev->cache;
 	int i = order2idx(dev, ent->order);
+	int err;
 
 	if (cache->stopped)
 		return;
 
 	ent = &dev->cache.ent[i];
-	if (ent->cur < 2 * ent->limit) {
-		add_keys(dev, i, 1);
-		if (ent->cur < 2 * ent->limit)
-			queue_work(cache->wq, &ent->work);
+	if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
+		err = add_keys(dev, i, 1);
+		if (ent->cur < 2 * ent->limit) {
+			if (err == -EAGAIN) {
+				mlx5_ib_dbg(dev, "returned eagain, order %d\n",
+					    i + 2);
+				queue_delayed_work(cache->wq, &ent->dwork,
+						   msecs_to_jiffies(3));
+			} else if (err) {
+				mlx5_ib_warn(dev, "command failed order %d, err %d\n",
+					     i + 2, err);
+				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
+			} else {
+				queue_work(cache->wq, &ent->work);
+			}
+		}
 	} else if (ent->cur > 2 * ent->limit) {
 		if (!someone_adding(cache) &&
-		    time_after(jiffies, cache->last_add + 60 * HZ)) {
+		    time_after(jiffies, cache->last_add + 300 * HZ)) {
 			remove_keys(dev, i, 1);
 			if (ent->cur > ent->limit)
 				queue_work(cache->wq, &ent->work);
 		} else {
-			queue_delayed_work(cache->wq, &ent->dwork, 60 * HZ);
+			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
 		}
 	}
 }
@@ -336,18 +410,18 @@ static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
 
 		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);
 
-		spin_lock(&ent->lock);
+		spin_lock_irq(&ent->lock);
 		if (!list_empty(&ent->head)) {
 			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
 					      list);
 			list_del(&mr->list);
 			ent->cur--;
-			spin_unlock(&ent->lock);
+			spin_unlock_irq(&ent->lock);
 			if (ent->cur < ent->limit)
 				queue_work(cache->wq, &ent->work);
 			break;
 		}
-		spin_unlock(&ent->lock);
+		spin_unlock_irq(&ent->lock);
 
 		queue_work(cache->wq, &ent->work);
 
@@ -374,12 +448,12 @@ static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 		return;
 	}
 	ent = &cache->ent[c];
-	spin_lock(&ent->lock);
+	spin_lock_irq(&ent->lock);
 	list_add_tail(&mr->list, &ent->head);
 	ent->cur++;
 	if (ent->cur > 2 * ent->limit)
 		shrink = 1;
-	spin_unlock(&ent->lock);
+	spin_unlock_irq(&ent->lock);
 
 	if (shrink)
 		queue_work(cache->wq, &ent->work);
@@ -394,16 +468,16 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
 
 	cancel_delayed_work(&ent->dwork);
 	while (1) {
-		spin_lock(&ent->lock);
+		spin_lock_irq(&ent->lock);
 		if (list_empty(&ent->head)) {
-			spin_unlock(&ent->lock);
+			spin_unlock_irq(&ent->lock);
 			return;
 		}
 		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
 		list_del(&mr->list);
 		ent->cur--;
 		ent->size--;
-		spin_unlock(&ent->lock);
+		spin_unlock_irq(&ent->lock);
 		err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
 		if (err)
 			mlx5_ib_warn(dev, "failed destroy mkey\n");
@@ -464,6 +538,13 @@ static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
 	debugfs_remove_recursive(dev->cache.root);
 }
 
+static void delay_time_func(unsigned long ctx)
+{
+	struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx;
+
+	dev->fill_delay = 0;
+}
+
 int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 {
 	struct mlx5_mr_cache *cache = &dev->cache;
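Taken together with reg_mr_callback() above, delay_time_func() closes a simple back-off loop: a failed asynchronous create sets fill_delay and arms a one-second timer, the timer handler clears the flag, and __cache_work_func() refrains from calling add_keys() while the flag is set. Collected from this patch, the cooperating fragments are:

	/* reg_mr_callback(), on failure: */
	dev->fill_delay = 1;
	mod_timer(&dev->delay_timer, jiffies + HZ);

	/* delay_time_func(), one second later: */
	dev->fill_delay = 0;

	/* __cache_work_func() refills only while not delayed: */
	if (ent->cur < 2 * ent->limit && !dev->fill_delay)
		err = add_keys(dev, i, 1);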
@@ -479,6 +560,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 		return -ENOMEM;
 	}
 
+	setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev);
 	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
 		INIT_LIST_HEAD(&cache->ent[i].head);
 		spin_lock_init(&cache->ent[i].lock);
@@ -522,6 +604,7 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
 		clean_keys(dev, i);
 
 	destroy_workqueue(dev->cache.wq);
+	del_timer_sync(&dev->delay_timer);
 
 	return 0;
 }
@@ -551,7 +634,8 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
 	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
 	seg->start_addr = 0;
 
-	err = mlx5_core_create_mkey(mdev, &mr->mmr, in, sizeof(*in));
+	err = mlx5_core_create_mkey(mdev, &mr->mmr, in, sizeof(*in), NULL, NULL,
+				    NULL);
 	if (err)
 		goto err_in;
 
@@ -660,14 +744,14 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 	int err;
 	int i;
 
-	for (i = 0; i < 10; i++) {
+	for (i = 0; i < 1; i++) {
 		mr = alloc_cached_mr(dev, order);
 		if (mr)
 			break;
 
 		err = add_keys(dev, order2idx(dev, order), 1);
-		if (err) {
-			mlx5_ib_warn(dev, "add_keys failed\n");
+		if (err && err != -EAGAIN) {
+			mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
 			break;
 		}
 	}
@@ -759,8 +843,10 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
 	in->seg.xlt_oct_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift));
 	in->seg.log2_page_size = page_shift;
 	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
-	in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift));
-	err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, inlen);
+	in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length,
+							 1 << page_shift));
+	err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, inlen, NULL,
+				    NULL, NULL);
 	if (err) {
 		mlx5_ib_warn(dev, "create mkey failed\n");
 		goto err_2;
@@ -944,7 +1030,8 @@ struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
 	 * TBD not needed - issue 197292 */
 	in->seg.log2_page_size = PAGE_SHIFT;
 
-	err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in));
+	err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in), NULL,
+				    NULL, NULL);
 	kfree(in);
 	if (err)
 		goto err_free;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 5659ea880741..e3881433f5d7 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1744,6 +1744,7 @@ static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
 	       MLX5_MKEY_MASK_PD |
 	       MLX5_MKEY_MASK_LR |
 	       MLX5_MKEY_MASK_LW |
+	       MLX5_MKEY_MASK_KEY |
 	       MLX5_MKEY_MASK_RR |
 	       MLX5_MKEY_MASK_RW |
 	       MLX5_MKEY_MASK_A |
@@ -1800,7 +1801,8 @@ static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *w
 	seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
 	seg->len = cpu_to_be64(wr->wr.fast_reg.length);
 	seg->log2_page_size = wr->wr.fast_reg.page_shift;
-	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
+				       mlx5_mkey_variant(wr->wr.fast_reg.rkey));
 }
 
 static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg,
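set_reg_mkey_segment() now seeds the low byte of qpn_mkey7_0 with the variant of the requested rkey, and the newly added MLX5_MKEY_MASK_KEY bit lets the UMR actually update that key byte. The layout this relies on is a 24-bit mkey index in the upper bits plus an 8-bit variant in the lowest byte. A runnable sketch mirroring the two inline helpers from include/linux/mlx5/driver.h (the sample index and key values are made up):

	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors of the inline helpers in include/linux/mlx5/driver.h. */
	static uint32_t mlx5_idx_to_mkey(uint32_t mkey_idx)
	{
		return mkey_idx << 8;
	}

	static uint8_t mlx5_mkey_variant(uint32_t mkey)
	{
		return mkey & 0xff;
	}

	int main(void)
	{
		uint32_t fw_idx = 0x012345;	/* hypothetical index from FW */
		uint8_t key = 0xab;		/* hypothetical variant counter */
		uint32_t mkey = mlx5_idx_to_mkey(fw_idx & 0xffffff) | key;

		printf("mkey = 0x%08x, variant = 0x%02x\n",
		       mkey, mlx5_mkey_variant(mkey));
		return 0;
	}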
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 6ca30739625f..8675d26a678b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -98,6 +98,7 @@ enum {
 static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
 					   struct mlx5_cmd_msg *in,
 					   struct mlx5_cmd_msg *out,
+					   void *uout, int uout_size,
 					   mlx5_cmd_cbk_t cbk,
 					   void *context, int page_queue)
 {
@@ -110,6 +111,8 @@ static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
 
 	ent->in		= in;
 	ent->out	= out;
+	ent->uout	= uout;
+	ent->uout_size	= uout_size;
 	ent->callback	= cbk;
 	ent->context	= context;
 	ent->cmd	= cmd;
@@ -534,6 +537,7 @@ static void cmd_work_handler(struct work_struct *work)
 	ent->lay = lay;
 	memset(lay, 0, sizeof(*lay));
 	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
+	ent->op = be32_to_cpu(lay->in[0]) >> 16;
 	if (ent->in->next)
 		lay->in_ptr = cpu_to_be64(ent->in->next->dma);
 	lay->inlen = cpu_to_be32(ent->in->len);
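ent->op caches the command opcode, which occupies the upper 16 bits of the first big-endian word of the command layout, so the completion handler can index the per-opcode stats table without re-reading the input message. A small endianness demo of that extraction (the opcode value is arbitrary):

	#include <arpa/inet.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* First 32-bit word of the layout, big-endian on the wire;
		 * htonl()/ntohl() stand in for cpu_to_be32()/be32_to_cpu(). */
		uint32_t lay_in0 = htonl(0x01230000);
		uint16_t op = (uint16_t)(ntohl(lay_in0) >> 16);

		printf("op = 0x%04x\n", op);	/* prints 0x0123 */
		return 0;
	}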
@@ -628,7 +632,8 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
  * 2. page queue commands do not support asynchronous completion
  */
 static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
-			   struct mlx5_cmd_msg *out, mlx5_cmd_cbk_t callback,
+			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
+			   mlx5_cmd_cbk_t callback,
 			   void *context, int page_queue, u8 *status)
 {
 	struct mlx5_cmd *cmd = &dev->cmd;
@@ -642,7 +647,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
 	if (callback && page_queue)
 		return -EINVAL;
 
-	ent = alloc_cmd(cmd, in, out, callback, context, page_queue);
+	ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context,
+			page_queue);
 	if (IS_ERR(ent))
 		return PTR_ERR(ent);
 
@@ -670,10 +676,10 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
 	op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
 	if (op < ARRAY_SIZE(cmd->stats)) {
 		stats = &cmd->stats[op];
-		spin_lock(&stats->lock);
+		spin_lock_irq(&stats->lock);
 		stats->sum += ds;
 		++stats->n;
-		spin_unlock(&stats->lock);
+		spin_unlock_irq(&stats->lock);
 	}
 	mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
 			   "fw exec time for %s is %lld nsec\n",
@@ -826,7 +832,7 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
 	int n;
 	int i;
 
-	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+	msg = kzalloc(sizeof(*msg), flags);
 	if (!msg)
 		return ERR_PTR(-ENOMEM);
 
@@ -1109,6 +1115,19 @@ void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
 	up(&cmd->sem);
 }
 
+static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
+{
+	unsigned long flags;
+
+	if (msg->cache) {
+		spin_lock_irqsave(&msg->cache->lock, flags);
+		list_add_tail(&msg->list, &msg->cache->head);
+		spin_unlock_irqrestore(&msg->cache->lock, flags);
+	} else {
+		mlx5_free_cmd_msg(dev, msg);
+	}
+}
+
 void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
 {
 	struct mlx5_cmd *cmd = &dev->cmd;
@@ -1117,6 +1136,10 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
 	void *context;
 	int err;
 	int i;
+	ktime_t t1, t2, delta;
+	s64 ds;
+	struct mlx5_cmd_stats *stats;
+	unsigned long flags;
 
 	for (i = 0; i < (1 << cmd->log_sz); i++) {
 		if (test_bit(i, &vector)) {
@@ -1141,9 +1164,29 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
 			}
 			free_ent(cmd, ent->idx);
 			if (ent->callback) {
+				t1 = timespec_to_ktime(ent->ts1);
+				t2 = timespec_to_ktime(ent->ts2);
+				delta = ktime_sub(t2, t1);
+				ds = ktime_to_ns(delta);
+				if (ent->op < ARRAY_SIZE(cmd->stats)) {
+					stats = &cmd->stats[ent->op];
+					spin_lock_irqsave(&stats->lock, flags);
+					stats->sum += ds;
+					++stats->n;
+					spin_unlock_irqrestore(&stats->lock, flags);
+				}
+
 				callback = ent->callback;
 				context = ent->context;
 				err = ent->ret;
+				if (!err)
+					err = mlx5_copy_from_msg(ent->uout,
+								 ent->out,
+								 ent->uout_size);
+
+				mlx5_free_cmd_msg(dev, ent->out);
+				free_msg(dev, ent->in);
+
 				free_cmd(ent);
 				callback(err, context);
 			} else {
@@ -1160,7 +1203,8 @@ static int status_to_err(u8 status)
 	return status ? -1 : 0; /* TBD more meaningful codes */
 }
 
-static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size)
+static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
+				      gfp_t gfp)
 {
 	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
 	struct mlx5_cmd *cmd = &dev->cmd;
@@ -1172,7 +1216,7 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size)
 		ent = &cmd->cache.med;
 
 	if (ent) {
-		spin_lock(&ent->lock);
+		spin_lock_irq(&ent->lock);
 		if (!list_empty(&ent->head)) {
 			msg = list_entry(ent->head.next, typeof(*msg), list);
 			/* For cached lists, we must explicitly state what is
@@ -1181,43 +1225,34 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size)
 			msg->len = in_size;
 			list_del(&msg->list);
 		}
-		spin_unlock(&ent->lock);
+		spin_unlock_irq(&ent->lock);
 	}
 
 	if (IS_ERR(msg))
-		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, in_size);
+		msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);
 
 	return msg;
 }
 
-static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
-{
-	if (msg->cache) {
-		spin_lock(&msg->cache->lock);
-		list_add_tail(&msg->list, &msg->cache->head);
-		spin_unlock(&msg->cache->lock);
-	} else {
-		mlx5_free_cmd_msg(dev, msg);
-	}
-}
-
 static int is_manage_pages(struct mlx5_inbox_hdr *in)
 {
 	return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
 }
 
-int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
-		  int out_size)
+static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
+		    int out_size, mlx5_cmd_cbk_t callback, void *context)
 {
 	struct mlx5_cmd_msg *inb;
 	struct mlx5_cmd_msg *outb;
 	int pages_queue;
+	gfp_t gfp;
 	int err;
 	u8 status = 0;
 
 	pages_queue = is_manage_pages(in);
+	gfp = callback ? GFP_ATOMIC : GFP_KERNEL;
 
-	inb = alloc_msg(dev, in_size);
+	inb = alloc_msg(dev, in_size, gfp);
 	if (IS_ERR(inb)) {
 		err = PTR_ERR(inb);
 		return err;
@@ -1229,13 +1264,14 @@ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
 		goto out_in;
 	}
 
-	outb = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, out_size);
+	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
 	if (IS_ERR(outb)) {
 		err = PTR_ERR(outb);
 		goto out_in;
 	}
 
-	err = mlx5_cmd_invoke(dev, inb, outb, NULL, NULL, pages_queue, &status);
+	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
+			      pages_queue, &status);
 	if (err)
 		goto out_out;
 
@@ -1248,14 +1284,30 @@ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
 	err = mlx5_copy_from_msg(out, outb, out_size);
 
 out_out:
-	mlx5_free_cmd_msg(dev, outb);
+	if (!callback)
+		mlx5_free_cmd_msg(dev, outb);
 
 out_in:
-	free_msg(dev, inb);
+	if (!callback)
+		free_msg(dev, inb);
 	return err;
 }
+
+int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
+		  int out_size)
+{
+	return cmd_exec(dev, in, in_size, out, out_size, NULL, NULL);
+}
 EXPORT_SYMBOL(mlx5_cmd_exec);
 
+int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
+		     void *out, int out_size, mlx5_cmd_cbk_t callback,
+		     void *context)
+{
+	return cmd_exec(dev, in, in_size, out, out_size, callback, context);
+}
+EXPORT_SYMBOL(mlx5_cmd_exec_cb);
+
 static void destroy_msg_cache(struct mlx5_core_dev *dev)
 {
 	struct mlx5_cmd *cmd = &dev->cmd;
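On the callback path the command layer now owns message cleanup: mlx5_cmd_comp_handler() copies the output mailbox into the caller's buffer and frees both messages before invoking the callback, cmd_exec() skips its own frees, and the mailboxes are allocated with GFP_ATOMIC. A hypothetical caller of the new entry point that bridges back to blocking behavior with a completion (my_ctx and my_done_cbk are invented names, not part of this patch):

	struct my_ctx {
		struct completion done;
		int status;
	};

	static void my_done_cbk(int status, void *context)
	{
		struct my_ctx *ctx = context;

		ctx->status = status;	/* "out" is already filled here */
		complete(&ctx->done);
	}

	static int my_exec_async(struct mlx5_core_dev *dev, void *in,
				 int in_size, void *out, int out_size)
	{
		struct my_ctx ctx;
		int err;

		init_completion(&ctx.done);
		err = mlx5_cmd_exec_cb(dev, in, in_size, out, out_size,
				       my_done_cbk, &ctx);
		if (err)
			return err;

		wait_for_completion(&ctx.done);
		return ctx.status;
	}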
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 9c7194b26ee2..80f6d127257a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -154,10 +154,10 @@ static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
 		return 0;
 
 	stats = filp->private_data;
-	spin_lock(&stats->lock);
+	spin_lock_irq(&stats->lock);
 	if (stats->n)
 		field = div64_u64(stats->sum, stats->n);
-	spin_unlock(&stats->lock);
+	spin_unlock_irq(&stats->lock);
 	ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field);
 	if (ret > 0) {
 		if (copy_to_user(buf, tbuf, ret))
@@ -175,10 +175,10 @@ static ssize_t average_write(struct file *filp, const char __user *buf,
 	struct mlx5_cmd_stats *stats;
 
 	stats = filp->private_data;
-	spin_lock(&stats->lock);
+	spin_lock_irq(&stats->lock);
 	stats->sum = 0;
 	stats->n = 0;
-	spin_unlock(&stats->lock);
+	spin_unlock_irq(&stats->lock);
 
 	*pos += count;
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index 5b44e2e46daf..35e514dc7b7d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -37,31 +37,41 @@
 #include "mlx5_core.h"
 
 int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
-			  struct mlx5_create_mkey_mbox_in *in, int inlen)
+			  struct mlx5_create_mkey_mbox_in *in, int inlen,
+			  mlx5_cmd_cbk_t callback, void *context,
+			  struct mlx5_create_mkey_mbox_out *out)
 {
-	struct mlx5_create_mkey_mbox_out out;
+	struct mlx5_create_mkey_mbox_out lout;
 	int err;
 	u8 key;
 
-	memset(&out, 0, sizeof(out));
-	spin_lock(&dev->priv.mkey_lock);
+	memset(&lout, 0, sizeof(lout));
+	spin_lock_irq(&dev->priv.mkey_lock);
 	key = dev->priv.mkey_key++;
-	spin_unlock(&dev->priv.mkey_lock);
+	spin_unlock_irq(&dev->priv.mkey_lock);
 	in->seg.qpn_mkey7_0 |= cpu_to_be32(key);
 	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_MKEY);
-	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
+	if (callback) {
+		err = mlx5_cmd_exec_cb(dev, in, inlen, out, sizeof(*out),
+				       callback, context);
+		return err;
+	} else {
+		err = mlx5_cmd_exec(dev, in, inlen, &lout, sizeof(lout));
+	}
+
 	if (err) {
 		mlx5_core_dbg(dev, "cmd exec failed %d\n", err);
 		return err;
 	}
 
-	if (out.hdr.status) {
-		mlx5_core_dbg(dev, "status %d\n", out.hdr.status);
-		return mlx5_cmd_status_to_err(&out.hdr);
+	if (lout.hdr.status) {
+		mlx5_core_dbg(dev, "status %d\n", lout.hdr.status);
+		return mlx5_cmd_status_to_err(&lout.hdr);
 	}
 
-	mr->key = mlx5_idx_to_mkey(be32_to_cpu(out.mkey) & 0xffffff) | key;
-	mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n", be32_to_cpu(out.mkey), key, mr->key);
+	mr->key = mlx5_idx_to_mkey(be32_to_cpu(lout.mkey) & 0xffffff) | key;
+	mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n",
+		      be32_to_cpu(lout.mkey), key, mr->key);
 
 	return err;
 }
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 6b8c496572c8..513619a75695 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -557,9 +557,11 @@ typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
 struct mlx5_cmd_work_ent {
 	struct mlx5_cmd_msg    *in;
 	struct mlx5_cmd_msg    *out;
+	void		       *uout;
+	int			uout_size;
 	mlx5_cmd_cbk_t		callback;
 	void		       *context;
 	int			idx;
 	struct completion	done;
 	struct mlx5_cmd        *cmd;
 	struct work_struct	work;
@@ -570,6 +572,7 @@ struct mlx5_cmd_work_ent {
 	u8			token;
 	struct timespec		ts1;
 	struct timespec		ts2;
+	u16			op;
 };
 
 struct mlx5_pas {
@@ -653,6 +656,9 @@ void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
 int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
 int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
 		  int out_size);
+int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
+		     void *out, int out_size, mlx5_cmd_cbk_t callback,
+		     void *context);
 int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
 int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
 int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
@@ -676,7 +682,9 @@ int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
 int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
 		      u16 lwm, int is_srq);
 int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
-			  struct mlx5_create_mkey_mbox_in *in, int inlen);
+			  struct mlx5_create_mkey_mbox_in *in, int inlen,
+			  mlx5_cmd_cbk_t callback, void *context,
+			  struct mlx5_create_mkey_mbox_out *out);
 int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr);
 int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
 			 struct mlx5_query_mkey_mbox_out *out, int outlen);
@@ -745,6 +753,11 @@ static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
 	return mkey_idx << 8;
 }
 
+static inline u8 mlx5_mkey_variant(u32 mkey)
+{
+	return mkey & 0xff;
+}
+
 enum {
 	MLX5_PROF_MASK_QP_SIZE		= (u64)1 << 0,
 	MLX5_PROF_MASK_MR_CACHE		= (u64)1 << 1,