about summary refs log tree commit diff stats
path: root/drivers/infiniband/hw
diff options
context:
space:
mode:
author Roland Dreier <rolandd@cisco.com> 2008-02-04 23:20:42 -0500
committer Roland Dreier <rolandd@cisco.com> 2008-02-04 23:20:42 -0500
commit 0d89fe2c0ca12ad2ee4e35a0661319746af6e94a (patch)
tree fcc5d6598121a4863740437baf10217b701ca153 /drivers/infiniband/hw
parent 2b5e6b120e58d44cace68e6c7204b541a8b0b43f (diff)
IB/mthca: Fix and simplify page size calculation in mthca_reg_phys_mr()
In mthca_reg_phys_mr(), we calculate the page size for the HCA hardware to use to map the buffer list passed in by the consumer. For example, if the consumer passes in [0] addr 0x1000, size 0x1000 [1] addr 0x2000, size 0x1000 then the algorithm would come up with a page size of 0x2000 and a list of two pages, at 0x0000 and 0x2000. Usually, this would work fine since the memory region would start at an offset of 0x1000 and have a length of 0x2000. However, the old code did not take into account the alignment of the IO virtual address passed in. For example, if the consumer passed in a virtual address of 0x6000 for the above, then the offset of 0x1000 would not be used correctly because the page mask of 0x1fff would result in an offset of 0. We can fix this quite neatly by making sure that the page shift we use is no bigger than the first bit where the start of the first buffer and the IO virtual address differ. Also, we can further simplify the code by removing the special case for a single buffer by noticing that it doesn't matter if we use a page size that is too big. This allows the loop to compute the page shift to be replaced with __ffs(). Thanks to Bryan S Rosenburg <rosnbrg@us.ibm.com> for pointing out the original bug and suggesting several ways to improve this patch. Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_provider.c | 20
1 file changed, 3 insertions(+), 17 deletions(-)
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 6bcde1cb9688..19b7f61cf04c 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -923,17 +923,13 @@ static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd,
923 struct mthca_mr *mr; 923 struct mthca_mr *mr;
924 u64 *page_list; 924 u64 *page_list;
925 u64 total_size; 925 u64 total_size;
926 u64 mask; 926 unsigned long mask;
927 int shift; 927 int shift;
928 int npages; 928 int npages;
929 int err; 929 int err;
930 int i, j, n; 930 int i, j, n;
931 931
932 /* First check that we have enough alignment */ 932 mask = buffer_list[0].addr ^ *iova_start;
933 if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK))
934 return ERR_PTR(-EINVAL);
935
936 mask = 0;
937 total_size = 0; 933 total_size = 0;
938 for (i = 0; i < num_phys_buf; ++i) { 934 for (i = 0; i < num_phys_buf; ++i) {
939 if (i != 0) 935 if (i != 0)
@@ -947,17 +943,7 @@ static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd,
947 if (mask & ~PAGE_MASK) 943 if (mask & ~PAGE_MASK)
948 return ERR_PTR(-EINVAL); 944 return ERR_PTR(-EINVAL);
949 945
950 /* Find largest page shift we can use to cover buffers */ 946 shift = __ffs(mask | 1 << 31);
951 for (shift = PAGE_SHIFT; shift < 31; ++shift)
952 if (num_phys_buf > 1) {
953 if ((1ULL << shift) & mask)
954 break;
955 } else {
956 if (1ULL << shift >=
957 buffer_list[0].size +
958 (buffer_list[0].addr & ((1ULL << shift) - 1)))
959 break;
960 }
961 947
962 buffer_list[0].size += buffer_list[0].addr & ((1ULL << shift) - 1); 948 buffer_list[0].size += buffer_list[0].addr & ((1ULL << shift) - 1);
963 buffer_list[0].addr &= ~0ull << shift; 949 buffer_list[0].addr &= ~0ull << shift;