author     Michael S. Tsirkin <mst@mellanox.co.il>    2007-02-10 16:14:25 -0500
committer  Roland Dreier <rolandd@cisco.com>          2007-02-12 19:16:29 -0500
commit     b2875d4c39759a732203db32f245cc6d8bbdd7cf (patch)
tree       eba8311c0242da3298796d33e86a990aa2a7637f
parent     c20e20ab0f3af9a44842ea11287c9ecd034a5d33 (diff)
IB/mthca: Always fill MTTs from CPU
Speed up memory registration by filling in MTTs directly when the CPU
can write directly to the whole table (all mem-free cards, and Tavor
mode on 64-bit systems with the patch I posted earlier).  This reduces
the number of FW commands needed to register an MR by at least a
factor of 2 and speeds up memory registration significantly.
Signed-off-by: Michael S. Tsirkin <mst@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_dev.h       |  2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mr.c        | 82
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c  | 14
3 files changed, 89 insertions, 9 deletions
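The heart of the change is the chunked write loop added to mthca_write_mtt() in the diff below: the new mthca_write_mtt_size() helper reports how many MTT entries one write can take (a WRITE_MTT mailbox minus its index and reserved slots, or one page of directly written entries), and callers hand it chunks of that size.  As a rough standalone illustration of the effect, not part of the patch, here is a sketch where the 4 KB PAGE_SIZE and the 4096-entry example registration are assumptions:

/*
 * Illustrative sketch only: compare how many chunks (and hence FW
 * commands on the old path) a registration needs under the mailbox
 * limit versus the direct-write per-page limit.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE        4096UL                                /* assumed */
#define MAILBOX_ENTRIES  (PAGE_SIZE / sizeof(uint64_t) - 2)    /* WRITE_MTT mailbox: 510 */
#define DIRECT_ENTRIES   (PAGE_SIZE / sizeof(uint64_t))        /* direct CPU write: 512 */

static int chunks_needed(int list_len, int chunk_size)
{
        int chunks = 0;

        while (list_len > 0) {
                /* one WRITE_MTT command (old path) or one CPU page write (new path) */
                int chunk = list_len < chunk_size ? list_len : chunk_size;
                ++chunks;
                list_len -= chunk;
        }
        return chunks;
}

int main(void)
{
        /* e.g. a 16 MB registration with 4 KB pages -> 4096 MTT entries */
        printf("mailbox path: %d WRITE_MTT FW commands\n",
               chunks_needed(4096, (int) MAILBOX_ENTRIES));
        printf("direct path:  %d CPU page writes, no FW commands\n",
               chunks_needed(4096, (int) DIRECT_ENTRIES));
        return 0;
}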
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index fe5cecf70fed..b7e42efaf43d 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -464,6 +464,8 @@ void mthca_uar_free(struct mthca_dev *dev, struct mthca_uar *uar);
 int mthca_pd_alloc(struct mthca_dev *dev, int privileged, struct mthca_pd *pd);
 void mthca_pd_free(struct mthca_dev *dev, struct mthca_pd *pd);
 
+int mthca_write_mtt_size(struct mthca_dev *dev);
+
 struct mthca_mtt *mthca_alloc_mtt(struct mthca_dev *dev, int size);
 void mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt);
 int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 958c6d5b6bc0..6037dd3f87df 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -243,8 +243,8 @@ void mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt)
         kfree(mtt);
 }
 
-int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
-                    int start_index, u64 *buffer_list, int list_len)
+static int __mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
+                             int start_index, u64 *buffer_list, int list_len)
 {
         struct mthca_mailbox *mailbox;
         __be64 *mtt_entry;
@@ -295,6 +295,84 @@ out:
         return err;
 }
 
+int mthca_write_mtt_size(struct mthca_dev *dev)
+{
+        if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy)
+                /*
+                 * Be friendly to WRITE_MTT command
+                 * and leave two empty slots for the
+                 * index and reserved fields of the
+                 * mailbox.
+                 */
+                return PAGE_SIZE / sizeof (u64) - 2;
+
+        /* For Arbel, all MTTs must fit in the same page. */
+        return mthca_is_memfree(dev) ? (PAGE_SIZE / sizeof (u64)) : 0x7ffffff;
+}
+
+void mthca_tavor_write_mtt_seg(struct mthca_dev *dev, struct mthca_mtt *mtt,
+                               int start_index, u64 *buffer_list, int list_len)
+{
+        u64 __iomem *mtts;
+        int i;
+
+        mtts = dev->mr_table.tavor_fmr.mtt_base + mtt->first_seg * MTHCA_MTT_SEG_SIZE +
+                start_index * sizeof (u64);
+        for (i = 0; i < list_len; ++i)
+                mthca_write64_raw(cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT),
+                                  mtts + i);
+}
+
+void mthca_arbel_write_mtt_seg(struct mthca_dev *dev, struct mthca_mtt *mtt,
+                               int start_index, u64 *buffer_list, int list_len)
+{
+        __be64 *mtts;
+        dma_addr_t dma_handle;
+        int i;
+        int s = start_index * sizeof (u64);
+
+        /* For Arbel, all MTTs must fit in the same page. */
+        BUG_ON(s / PAGE_SIZE != (s + list_len * sizeof(u64) - 1) / PAGE_SIZE);
+        /* Require full segments */
+        BUG_ON(s % MTHCA_MTT_SEG_SIZE);
+
+        mtts = mthca_table_find(dev->mr_table.mtt_table, mtt->first_seg +
+                                s / MTHCA_MTT_SEG_SIZE, &dma_handle);
+
+        BUG_ON(!mtts);
+
+        for (i = 0; i < list_len; ++i)
+                mtts[i] = cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT);
+
+        dma_sync_single(&dev->pdev->dev, dma_handle, list_len * sizeof (u64), DMA_TO_DEVICE);
+}
+
+int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
+                    int start_index, u64 *buffer_list, int list_len)
+{
+        int size = mthca_write_mtt_size(dev);
+        int chunk;
+
+        if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy)
+                return __mthca_write_mtt(dev, mtt, start_index, buffer_list, list_len);
+
+        while (list_len > 0) {
+                chunk = min(size, list_len);
+                if (mthca_is_memfree(dev))
+                        mthca_arbel_write_mtt_seg(dev, mtt, start_index,
+                                                  buffer_list, chunk);
+                else
+                        mthca_tavor_write_mtt_seg(dev, mtt, start_index,
+                                                  buffer_list, chunk);
+
+                list_len -= chunk;
+                start_index += chunk;
+                buffer_list += chunk;
+        }
+
+        return 0;
+}
+
 static inline u32 tavor_hw_index_to_key(u32 ind)
 {
         return ind;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 7b96751695ea..0725ad7ad9bf 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -1015,6 +1015,7 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
         int shift, n, len;
         int i, j, k;
         int err = 0;
+        int write_mtt_size;
 
         shift = ffs(region->page_size) - 1;
 
@@ -1040,6 +1041,8 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
 
         i = n = 0;
 
+        write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages));
+
         list_for_each_entry(chunk, &region->chunk_list, list)
                 for (j = 0; j < chunk->nmap; ++j) {
                         len = sg_dma_len(&chunk->page_list[j]) >> shift;
@@ -1047,14 +1050,11 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
                         pages[i++] = sg_dma_address(&chunk->page_list[j]) +
                                 region->page_size * k;
                         /*
-                         * Be friendly to WRITE_MTT command
-                         * and leave two empty slots for the
-                         * index and reserved fields of the
-                         * mailbox.
+                         * Be friendly to write_mtt and pass it chunks
+                         * of appropriate size.
                          */
-                        if (i == PAGE_SIZE / sizeof (u64) - 2) {
-                                err = mthca_write_mtt(dev, mr->mtt,
-                                                      n, pages, i);
+                        if (i == write_mtt_size) {
+                                err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
                                 if (err)
                                         goto mtt_done;
                                 n += i;