author    Roland Dreier <roland@topspin.com>	2005-06-27 17:36:43 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>	2005-06-27 18:11:46 -0400
commit    d56d6f9502a15ef64395cb3a6fc7bfdc365b1e3d (patch)
tree      4b02fba5320ebef9c339452bc3f9ce8a69a0af4e /drivers/infiniband
parent    a03a5a67b243e9a24805ee18272ad25e5b2ca92c (diff)
[PATCH] IB/mthca: Split off MTT allocation
Split allocation of MTT range from creation of MR.  This will be
useful for implementing shared memory regions and userspace verbs.

Signed-off-by: Roland Dreier <roland@topspin.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
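[Editorial note] The effect of the split is easiest to see as the sequence of new entry points a caller goes through: reserve an MTT range, fill it with page addresses, then create the MR that points at it. The sketch below is illustrative only and not part of the patch; it mirrors what mthca_mr_alloc_phys() does after this change, but the wrapper name example_reg_phys_mr() and its error-handling layout are hypothetical, and it relies on the mthca driver's internal types.

/*
 * Illustrative sketch (not part of the patch): how a registration path
 * such as mthca_mr_alloc_phys() is expected to use the split API.
 */
static int example_reg_phys_mr(struct mthca_dev *dev, u32 pd,
			       u64 *buffer_list, int list_len,
			       int buffer_size_shift, u64 iova,
			       u64 total_size, u32 access,
			       struct mthca_mr *mr)
{
	int err;

	/* 1. Reserve an MTT range large enough for list_len entries. */
	mr->mtt = mthca_alloc_mtt(dev, list_len);
	if (IS_ERR(mr->mtt))
		return PTR_ERR(mr->mtt);

	/* 2. Write the page addresses into the reserved range. */
	err = mthca_write_mtt(dev, mr->mtt, 0, buffer_list, list_len);
	if (err)
		goto err_free_mtt;

	/* 3. Create the MR (MPT entry) that points at the MTT range. */
	err = mthca_mr_alloc(dev, pd, buffer_size_shift, iova,
			     total_size, access, mr);
	if (err)
		goto err_free_mtt;

	return 0;

err_free_mtt:
	mthca_free_mtt(dev, mr->mtt);
	return err;
}

Keeping the MTT handle (struct mthca_mtt *) separate from struct mthca_mr is what makes the translation table reusable for the shared memory regions and userspace verbs mentioned above.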
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_dev.h       |   6
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mr.c        | 325
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.h  |  14
3 files changed, 177 insertions(+), 168 deletions(-)
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index df26d818c1ce..e8cf4d68d11c 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -380,6 +380,12 @@ void mthca_uar_free(struct mthca_dev *dev, struct mthca_uar *uar);
 int mthca_pd_alloc(struct mthca_dev *dev, struct mthca_pd *pd);
 void mthca_pd_free(struct mthca_dev *dev, struct mthca_pd *pd);
 
+struct mthca_mtt *mthca_alloc_mtt(struct mthca_dev *dev, int size);
+void mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt);
+int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
+		    int start_index, u64 *buffer_list, int list_len);
+int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
+		   u64 iova, u64 total_size, u32 access, struct mthca_mr *mr);
 int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
 			   u32 access, struct mthca_mr *mr);
 int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 29e5fe708b83..877654ae42da 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -40,6 +40,12 @@
40#include "mthca_cmd.h" 40#include "mthca_cmd.h"
41#include "mthca_memfree.h" 41#include "mthca_memfree.h"
42 42
43struct mthca_mtt {
44 struct mthca_buddy *buddy;
45 int order;
46 u32 first_seg;
47};
48
43/* 49/*
44 * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits. 50 * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
45 */ 51 */
@@ -173,8 +179,8 @@ static void __devexit mthca_buddy_cleanup(struct mthca_buddy *buddy)
 	kfree(buddy->bits);
 }
 
-static u32 mthca_alloc_mtt(struct mthca_dev *dev, int order,
-			   struct mthca_buddy *buddy)
+static u32 mthca_alloc_mtt_range(struct mthca_dev *dev, int order,
+				 struct mthca_buddy *buddy)
 {
 	u32 seg = mthca_buddy_alloc(buddy, order);
 
@@ -191,12 +197,100 @@ static u32 mthca_alloc_mtt(struct mthca_dev *dev, int order,
 	return seg;
 }
 
-static void mthca_free_mtt(struct mthca_dev *dev, u32 seg, int order,
-			   struct mthca_buddy* buddy)
+static struct mthca_mtt *__mthca_alloc_mtt(struct mthca_dev *dev, int size,
+					   struct mthca_buddy *buddy)
+{
+	struct mthca_mtt *mtt;
+	int i;
+
+	if (size <= 0)
+		return ERR_PTR(-EINVAL);
+
+	mtt = kmalloc(sizeof *mtt, GFP_KERNEL);
+	if (!mtt)
+		return ERR_PTR(-ENOMEM);
+
+	mtt->buddy = buddy;
+	mtt->order = 0;
+	for (i = MTHCA_MTT_SEG_SIZE / 8; i < size; i <<= 1)
+		++mtt->order;
+
+	mtt->first_seg = mthca_alloc_mtt_range(dev, mtt->order, buddy);
+	if (mtt->first_seg == -1) {
+		kfree(mtt);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	return mtt;
+}
+
+struct mthca_mtt *mthca_alloc_mtt(struct mthca_dev *dev, int size)
+{
+	return __mthca_alloc_mtt(dev, size, &dev->mr_table.mtt_buddy);
+}
+
+void mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt)
+{
+	if (!mtt)
+		return;
+
+	mthca_buddy_free(mtt->buddy, mtt->first_seg, mtt->order);
+
+	mthca_table_put_range(dev, dev->mr_table.mtt_table,
+			      mtt->first_seg,
+			      mtt->first_seg + (1 << mtt->order) - 1);
+
+	kfree(mtt);
+}
+
+int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
+		    int start_index, u64 *buffer_list, int list_len)
 {
-	mthca_buddy_free(buddy, seg, order);
-	mthca_table_put_range(dev, dev->mr_table.mtt_table, seg,
-			      seg + (1 << order) - 1);
+	u64 *mtt_entry;
+	int err = 0;
+	u8 status;
+	int i;
+
+	mtt_entry = (u64 *) __get_free_page(GFP_KERNEL);
+	if (!mtt_entry)
+		return -ENOMEM;
+
+	while (list_len > 0) {
+		mtt_entry[0] = cpu_to_be64(dev->mr_table.mtt_base +
+					   mtt->first_seg * MTHCA_MTT_SEG_SIZE +
+					   start_index * 8);
+		mtt_entry[1] = 0;
+		for (i = 0; i < list_len && i < PAGE_SIZE / 8 - 2; ++i)
+			mtt_entry[i + 2] = cpu_to_be64(buffer_list[i] |
+						       MTHCA_MTT_FLAG_PRESENT);
+
+		/*
+		 * If we have an odd number of entries to write, add
+		 * one more dummy entry for firmware efficiency.
+		 */
+		if (i & 1)
+			mtt_entry[i + 2] = 0;
+
+		err = mthca_WRITE_MTT(dev, mtt_entry, (i + 1) & ~1, &status);
+		if (err) {
+			mthca_warn(dev, "WRITE_MTT failed (%d)\n", err);
+			goto out;
+		}
+		if (status) {
+			mthca_warn(dev, "WRITE_MTT returned status 0x%02x\n",
+				   status);
+			err = -EINVAL;
+			goto out;
+		}
+
+		list_len    -= i;
+		start_index += i;
+		buffer_list += i;
+	}
+
+out:
+	free_page((unsigned long) mtt_entry);
+	return err;
 }
 
 static inline u32 tavor_hw_index_to_key(u32 ind)
@@ -235,18 +329,20 @@ static inline u32 key_to_hw_index(struct mthca_dev *dev, u32 key)
 	return tavor_key_to_hw_index(key);
 }
 
-int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
-			   u32 access, struct mthca_mr *mr)
+int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
+		   u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
 {
-	void *mailbox = NULL;
+	void *mailbox;
 	struct mthca_mpt_entry *mpt_entry;
 	u32 key;
+	int i;
 	int err;
 	u8 status;
 
 	might_sleep();
 
-	mr->order = -1;
+	WARN_ON(buffer_size_shift >= 32);
+
 	key = mthca_alloc(&dev->mr_table.mpt_alloc);
 	if (key == -1)
 		return -ENOMEM;
@@ -268,186 +364,98 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
 
 	mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS |
 				       MTHCA_MPT_FLAG_MIO |
-				       MTHCA_MPT_FLAG_PHYSICAL |
 				       MTHCA_MPT_FLAG_REGION |
 				       access);
-	mpt_entry->page_size = 0;
+	if (!mr->mtt)
+		mpt_entry->flags |= cpu_to_be32(MTHCA_MPT_FLAG_PHYSICAL);
+
+	mpt_entry->page_size = cpu_to_be32(buffer_size_shift - 12);
 	mpt_entry->key = cpu_to_be32(key);
 	mpt_entry->pd = cpu_to_be32(pd);
-	mpt_entry->start = 0;
-	mpt_entry->length = ~0ULL;
+	mpt_entry->start = cpu_to_be64(iova);
+	mpt_entry->length = cpu_to_be64(total_size);
 
 	memset(&mpt_entry->lkey, 0,
 	       sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, lkey));
 
+	if (mr->mtt)
+		mpt_entry->mtt_seg =
+			cpu_to_be64(dev->mr_table.mtt_base +
+				    mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE);
+
+	if (0) {
+		mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
+		for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) {
+			if (i % 4 == 0)
+				printk("[%02x] ", i * 4);
+			printk(" %08x", be32_to_cpu(((u32 *) mpt_entry)[i]));
+			if ((i + 1) % 4 == 0)
+				printk("\n");
+		}
+	}
+
 	err = mthca_SW2HW_MPT(dev, mpt_entry,
 			      key & (dev->limits.num_mpts - 1),
 			      &status);
 	if (err) {
 		mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
-		goto err_out_table;
+		goto err_out_mailbox;
 	} else if (status) {
 		mthca_warn(dev, "SW2HW_MPT returned status 0x%02x\n",
 			   status);
 		err = -EINVAL;
-		goto err_out_table;
+		goto err_out_mailbox;
 	}
 
 	kfree(mailbox);
 	return err;
 
+err_out_mailbox:
+	kfree(mailbox);
+
 err_out_table:
 	mthca_table_put(dev, dev->mr_table.mpt_table, key);
 
 err_out_mpt_free:
 	mthca_free(&dev->mr_table.mpt_alloc, key);
-	kfree(mailbox);
 	return err;
 }
 
+int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
+			   u32 access, struct mthca_mr *mr)
+{
+	mr->mtt = NULL;
+	return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
+}
+
 int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
 			u64 *buffer_list, int buffer_size_shift,
 			int list_len, u64 iova, u64 total_size,
 			u32 access, struct mthca_mr *mr)
 {
-	void *mailbox;
-	u64 *mtt_entry;
-	struct mthca_mpt_entry *mpt_entry;
-	u32 key;
-	int err = -ENOMEM;
-	u8 status;
-	int i;
-
-	might_sleep();
-	WARN_ON(buffer_size_shift >= 32);
-
-	key = mthca_alloc(&dev->mr_table.mpt_alloc);
-	if (key == -1)
-		return -ENOMEM;
-	mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);
-
-	if (mthca_is_memfree(dev)) {
-		err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
-		if (err)
-			goto err_out_mpt_free;
-	}
-
-	for (i = MTHCA_MTT_SEG_SIZE / 8, mr->order = 0;
-	     i < list_len;
-	     i <<= 1, ++mr->order)
-		; /* nothing */
-
-	mr->first_seg = mthca_alloc_mtt(dev, mr->order,
-					&dev->mr_table.mtt_buddy);
-	if (mr->first_seg == -1)
-		goto err_out_table;
-
-	/*
-	 * If list_len is odd, we add one more dummy entry for
-	 * firmware efficiency.
-	 */
-	mailbox = kmalloc(max(sizeof *mpt_entry,
-			      (size_t) 8 * (list_len + (list_len & 1) + 2)) +
-			  MTHCA_CMD_MAILBOX_EXTRA,
-			  GFP_KERNEL);
-	if (!mailbox)
-		goto err_out_free_mtt;
-
-	mtt_entry = MAILBOX_ALIGN(mailbox);
-
-	mtt_entry[0] = cpu_to_be64(dev->mr_table.mtt_base +
-				   mr->first_seg * MTHCA_MTT_SEG_SIZE);
-	mtt_entry[1] = 0;
-	for (i = 0; i < list_len; ++i)
-		mtt_entry[i + 2] = cpu_to_be64(buffer_list[i] |
-					       MTHCA_MTT_FLAG_PRESENT);
-	if (list_len & 1) {
-		mtt_entry[i + 2] = 0;
-		++list_len;
-	}
+	int err;
 
-	if (0) {
-		mthca_dbg(dev, "Dumping MPT entry\n");
-		for (i = 0; i < list_len + 2; ++i)
-			printk(KERN_ERR "[%2d] %016llx\n",
-			       i, (unsigned long long) be64_to_cpu(mtt_entry[i]));
-	}
+	mr->mtt = mthca_alloc_mtt(dev, list_len);
+	if (IS_ERR(mr->mtt))
+		return PTR_ERR(mr->mtt);
 
-	err = mthca_WRITE_MTT(dev, mtt_entry, list_len, &status);
+	err = mthca_write_mtt(dev, mr->mtt, 0, buffer_list, list_len);
 	if (err) {
-		mthca_warn(dev, "WRITE_MTT failed (%d)\n", err);
-		goto err_out_mailbox_free;
-	}
-	if (status) {
-		mthca_warn(dev, "WRITE_MTT returned status 0x%02x\n",
-			   status);
-		err = -EINVAL;
-		goto err_out_mailbox_free;
-	}
-
-	mpt_entry = MAILBOX_ALIGN(mailbox);
-
-	mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS |
-				       MTHCA_MPT_FLAG_MIO |
-				       MTHCA_MPT_FLAG_REGION |
-				       access);
-
-	mpt_entry->page_size = cpu_to_be32(buffer_size_shift - 12);
-	mpt_entry->key = cpu_to_be32(key);
-	mpt_entry->pd = cpu_to_be32(pd);
-	mpt_entry->start = cpu_to_be64(iova);
-	mpt_entry->length = cpu_to_be64(total_size);
-	memset(&mpt_entry->lkey, 0,
-	       sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, lkey));
-	mpt_entry->mtt_seg = cpu_to_be64(dev->mr_table.mtt_base +
-					 mr->first_seg * MTHCA_MTT_SEG_SIZE);
-
-	if (0) {
-		mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
-		for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) {
-			if (i % 4 == 0)
-				printk("[%02x] ", i * 4);
-			printk(" %08x", be32_to_cpu(((u32 *) mpt_entry)[i]));
-			if ((i + 1) % 4 == 0)
-				printk("\n");
-		}
+		mthca_free_mtt(dev, mr->mtt);
+		return err;
 	}
 
-	err = mthca_SW2HW_MPT(dev, mpt_entry,
-			      key & (dev->limits.num_mpts - 1),
-			      &status);
+	err = mthca_mr_alloc(dev, pd, buffer_size_shift, iova,
+			     total_size, access, mr);
 	if (err)
-		mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
-	else if (status) {
-		mthca_warn(dev, "SW2HW_MPT returned status 0x%02x\n",
-			   status);
-		err = -EINVAL;
-	}
+		mthca_free_mtt(dev, mr->mtt);
 
-	kfree(mailbox);
-	return err;
-
-err_out_mailbox_free:
-	kfree(mailbox);
-
-err_out_free_mtt:
-	mthca_free_mtt(dev, mr->first_seg, mr->order, &dev->mr_table.mtt_buddy);
-
-err_out_table:
-	mthca_table_put(dev, dev->mr_table.mpt_table, key);
-
-err_out_mpt_free:
-	mthca_free(&dev->mr_table.mpt_alloc, key);
 	return err;
 }
 
 /* Free mr or fmr */
-static void mthca_free_region(struct mthca_dev *dev, u32 lkey, int order,
-			      u32 first_seg, struct mthca_buddy *buddy)
+static void mthca_free_region(struct mthca_dev *dev, u32 lkey)
 {
-	if (order >= 0)
-		mthca_free_mtt(dev, first_seg, order, buddy);
-
 	mthca_table_put(dev, dev->mr_table.mpt_table,
 			arbel_key_to_hw_index(lkey));
 
@@ -471,8 +479,8 @@ void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr)
 		mthca_warn(dev, "HW2SW_MPT returned status 0x%02x\n",
 			   status);
 
-	mthca_free_region(dev, mr->ibmr.lkey, mr->order, mr->first_seg,
-			  &dev->mr_table.mtt_buddy);
+	mthca_free_region(dev, mr->ibmr.lkey);
+	mthca_free_mtt(dev, mr->mtt);
 }
 
 int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
@@ -517,21 +525,15 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
 	mr->mem.tavor.mpt = dev->mr_table.tavor_fmr.mpt_base +
 		sizeof *(mr->mem.tavor.mpt) * idx;
 
-	for (i = MTHCA_MTT_SEG_SIZE / 8, mr->order = 0;
-	     i < list_len;
-	     i <<= 1, ++mr->order)
-		; /* nothing */
-
-	mr->first_seg = mthca_alloc_mtt(dev, mr->order,
-					dev->mr_table.fmr_mtt_buddy);
-	if (mr->first_seg == -1)
+	mr->mtt = __mthca_alloc_mtt(dev, list_len, dev->mr_table.fmr_mtt_buddy);
+	if (IS_ERR(mr->mtt))
 		goto err_out_table;
 
-	mtt_seg = mr->first_seg * MTHCA_MTT_SEG_SIZE;
+	mtt_seg = mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE;
 
 	if (mthca_is_memfree(dev)) {
 		mr->mem.arbel.mtts = mthca_table_find(dev->mr_table.mtt_table,
-						      mr->first_seg);
+						      mr->mtt->first_seg);
 		BUG_ON(!mr->mem.arbel.mtts);
 	} else
 		mr->mem.tavor.mtts = dev->mr_table.tavor_fmr.mtt_base + mtt_seg;
@@ -587,8 +589,7 @@ err_out_mailbox_free:
 	kfree(mailbox);
 
 err_out_free_mtt:
-	mthca_free_mtt(dev, mr->first_seg, mr->order,
-		       dev->mr_table.fmr_mtt_buddy);
+	mthca_free_mtt(dev, mr->mtt);
 
 err_out_table:
 	mthca_table_put(dev, dev->mr_table.mpt_table, key);
@@ -603,8 +604,9 @@ int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr)
 	if (fmr->maps)
 		return -EBUSY;
 
-	mthca_free_region(dev, fmr->ibmr.lkey, fmr->order, fmr->first_seg,
-			  dev->mr_table.fmr_mtt_buddy);
+	mthca_free_region(dev, fmr->ibmr.lkey);
+	mthca_free_mtt(dev, fmr->mtt);
+
 	return 0;
 }
 
@@ -820,7 +822,8 @@ int __devinit mthca_init_mr_table(struct mthca_dev *dev)
 	if (dev->limits.reserved_mtts) {
 		i = fls(dev->limits.reserved_mtts - 1);
 
-		if (mthca_alloc_mtt(dev, i, dev->mr_table.fmr_mtt_buddy) == -1) {
+		if (mthca_alloc_mtt_range(dev, i,
+					  dev->mr_table.fmr_mtt_buddy) == -1) {
 			mthca_warn(dev, "MTT table of order %d is too small.\n",
 				   dev->mr_table.fmr_mtt_buddy->max_order);
 			err = -ENOMEM;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 619710f95a87..4d976cccb1a8 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -54,18 +54,18 @@ struct mthca_uar {
 	int index;
 };
 
+struct mthca_mtt;
+
 struct mthca_mr {
 	struct ib_mr ibmr;
-	int order;
-	u32 first_seg;
+	struct mthca_mtt *mtt;
 };
 
 struct mthca_fmr {
 	struct ib_fmr ibmr;
 	struct ib_fmr_attr attr;
-	int order;
-	u32 first_seg;
-	int maps;
+	struct mthca_mtt *mtt;
+	int maps;
 	union {
 		struct {
 			struct mthca_mpt_entry __iomem *mpt;