diff options
author | Michael S. Tsirkin <mst@mellanox.co.il> | 2007-02-10 16:14:25 -0500 |
---|---|---|
committer | Roland Dreier <rolandd@cisco.com> | 2007-02-12 19:16:29 -0500 |
commit | b2875d4c39759a732203db32f245cc6d8bbdd7cf (patch) | |
tree | eba8311c0242da3298796d33e86a990aa2a7637f /drivers/infiniband/hw/mthca/mthca_provider.c | |
parent | c20e20ab0f3af9a44842ea11287c9ecd034a5d33 (diff) |
IB/mthca: Always fill MTTs from CPU
Speed up memory registration by filling in MTTs directly when the CPU
can write directly to the whole table (all mem-free cards, and to
Tavor mode on 64-bit systems with the patch I posted earlier). This
reduces the number of FW commands needed to register an MR by at least
a factor of 2 and speeds up memory registration significantly.
Signed-off-by: Michael S. Tsirkin <mst@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/hw/mthca/mthca_provider.c')
-rw-r--r-- | drivers/infiniband/hw/mthca/mthca_provider.c | 14 |
1 file changed, 7 insertions, 7 deletions
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 7b96751695ea..0725ad7ad9bf 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c | |||
@@ -1015,6 +1015,7 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, struct ib_umem *region, | |||
1015 | int shift, n, len; | 1015 | int shift, n, len; |
1016 | int i, j, k; | 1016 | int i, j, k; |
1017 | int err = 0; | 1017 | int err = 0; |
1018 | int write_mtt_size; | ||
1018 | 1019 | ||
1019 | shift = ffs(region->page_size) - 1; | 1020 | shift = ffs(region->page_size) - 1; |
1020 | 1021 | ||
@@ -1040,6 +1041,8 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, struct ib_umem *region, | |||
1040 | 1041 | ||
1041 | i = n = 0; | 1042 | i = n = 0; |
1042 | 1043 | ||
1044 | write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages)); | ||
1045 | |||
1043 | list_for_each_entry(chunk, ®ion->chunk_list, list) | 1046 | list_for_each_entry(chunk, ®ion->chunk_list, list) |
1044 | for (j = 0; j < chunk->nmap; ++j) { | 1047 | for (j = 0; j < chunk->nmap; ++j) { |
1045 | len = sg_dma_len(&chunk->page_list[j]) >> shift; | 1048 | len = sg_dma_len(&chunk->page_list[j]) >> shift; |
@@ -1047,14 +1050,11 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, struct ib_umem *region, | |||
1047 | pages[i++] = sg_dma_address(&chunk->page_list[j]) + | 1050 | pages[i++] = sg_dma_address(&chunk->page_list[j]) + |
1048 | region->page_size * k; | 1051 | region->page_size * k; |
1049 | /* | 1052 | /* |
1050 | * Be friendly to WRITE_MTT command | 1053 | * Be friendly to write_mtt and pass it chunks |
1051 | * and leave two empty slots for the | 1054 | * of appropriate size. |
1052 | * index and reserved fields of the | ||
1053 | * mailbox. | ||
1054 | */ | 1055 | */ |
1055 | if (i == PAGE_SIZE / sizeof (u64) - 2) { | 1056 | if (i == write_mtt_size) { |
1056 | err = mthca_write_mtt(dev, mr->mtt, | 1057 | err = mthca_write_mtt(dev, mr->mtt, n, pages, i); |
1057 | n, pages, i); | ||
1058 | if (err) | 1058 | if (err) |
1059 | goto mtt_done; | 1059 | goto mtt_done; |
1060 | n += i; | 1060 | n += i; |