Diffstat (limited to 'drivers/infiniband/hw/mlx5/mem.c')
 drivers/infiniband/hw/mlx5/mem.c | 58 +++++++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 55 insertions(+), 3 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index dae07eae9507..5f7b30147180 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -32,6 +32,7 @@
 
 #include <linux/module.h>
 #include <rdma/ib_umem.h>
+#include <rdma/ib_umem_odp.h>
 #include "mlx5_ib.h"
 
 /* @umem: umem object to scan
@@ -57,6 +58,17 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
 	int entry;
 	unsigned long page_shift = ilog2(umem->page_size);
 
+	/* With ODP we must always match OS page size. */
+	if (umem->odp_data) {
+		*count = ib_umem_page_count(umem);
+		*shift = PAGE_SHIFT;
+		*ncont = *count;
+		if (order)
+			*order = ilog2(roundup_pow_of_two(*count));
+
+		return;
+	}
+
 	addr = addr >> page_shift;
 	tmp = (unsigned long)addr;
 	m = find_first_bit(&tmp, sizeof(tmp));
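
The ODP branch added above pins the reported page size to PAGE_SHIFT and derives a power-of-two allocation order from the page count. A minimal standalone sketch of that arithmetic, in userspace C: ilog2_approx() and roundup_pow_of_two_approx() are hand-rolled stand-ins for the kernel's ilog2() and roundup_pow_of_two(), and the page count is invented.

/*
 * Standalone userspace sketch, not kernel code: models how the ODP
 * branch in mlx5_ib_cont_pages() derives its outputs from a page count.
 */
#include <stdio.h>

static unsigned int ilog2_approx(unsigned long long v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;			/* floor(log2(v)) */
}

static unsigned long long roundup_pow_of_two_approx(unsigned long long v)
{
	unsigned long long p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	int count = 1000;		/* hypothetical ib_umem_page_count() result */
	int order = ilog2_approx(roundup_pow_of_two_approx(count));

	/* 1000 pages round up to 1024 = 2^10, so order is 10 */
	printf("count=%d ncont=%d order=%d\n", count, count, order);
	return 0;
}

The round-up matters because the driver's MR cache hands out mkeys in power-of-two sizes, so a 1000-page region would be backed by a 1024-entry translation table.
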
@@ -108,8 +120,32 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
 	*count = i;
 }
 
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+static u64 umem_dma_to_mtt(dma_addr_t umem_dma)
+{
+	u64 mtt_entry = umem_dma & ODP_DMA_ADDR_MASK;
+
+	if (umem_dma & ODP_READ_ALLOWED_BIT)
+		mtt_entry |= MLX5_IB_MTT_READ;
+	if (umem_dma & ODP_WRITE_ALLOWED_BIT)
+		mtt_entry |= MLX5_IB_MTT_WRITE;
+
+	return mtt_entry;
+}
+#endif
+
+/*
+ * Populate the given array with bus addresses from the umem.
+ *
+ * dev - mlx5_ib device
+ * umem - umem to use to fill the pages
+ * page_shift - determines the page size used in the resulting array
+ * pas - bus addresses array to fill
+ * access_flags - access flags to set on all present pages.
+		  use enum mlx5_ib_mtt_access_flags for this.
+ */
 void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
-			  int page_shift, __be64 *pas, int umr)
+			  int page_shift, __be64 *pas, int access_flags)
 {
 	unsigned long umem_page_shift = ilog2(umem->page_size);
 	int shift = page_shift - umem_page_shift;
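
umem_dma_to_mtt(), added above, converts one dma_list entry into an MTT entry: it masks off the ODP bookkeeping bits and re-expresses the permissions as MTT access flags. Below is a standalone userspace sketch of the same bit manipulation; all four bit definitions are assumptions made for illustration, as the real values live in ib_umem_odp.h and the driver's enum mlx5_ib_mtt_access_flags.

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: low two bits of a dma_list entry carry permissions. */
#define ODP_READ_ALLOWED_BIT	(1ULL << 0)
#define ODP_WRITE_ALLOWED_BIT	(1ULL << 1)
#define ODP_DMA_ADDR_MASK	(~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))

/* Assumed MTT flag values; see enum mlx5_ib_mtt_access_flags. */
#define MLX5_IB_MTT_READ	(1ULL << 0)
#define MLX5_IB_MTT_WRITE	(1ULL << 1)

static uint64_t umem_dma_to_mtt(uint64_t umem_dma)
{
	uint64_t mtt_entry = umem_dma & ODP_DMA_ADDR_MASK;

	if (umem_dma & ODP_READ_ALLOWED_BIT)
		mtt_entry |= MLX5_IB_MTT_READ;
	if (umem_dma & ODP_WRITE_ALLOWED_BIT)
		mtt_entry |= MLX5_IB_MTT_WRITE;
	return mtt_entry;
}

int main(void)
{
	/* page-aligned DMA address, read allowed, write not (yet) allowed */
	uint64_t dma = 0x12345000ULL | ODP_READ_ALLOWED_BIT;

	printf("mtt entry: 0x%llx\n",
	       (unsigned long long)umem_dma_to_mtt(dma));	/* 0x12345001 */
	return 0;
}

Keeping the translation explicit leaves the umem-side ODP encoding and the device-side MTT encoding free to differ, even if the chosen bit positions happen to coincide.
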
@@ -120,6 +156,23 @@ void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
 	int len;
 	struct scatterlist *sg;
 	int entry;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+	const bool odp = umem->odp_data != NULL;
+
+	if (odp) {
+		int num_pages = ib_umem_num_pages(umem);
+
+		WARN_ON(shift != 0);
+		WARN_ON(access_flags != (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE));
+
+		for (i = 0; i < num_pages; ++i) {
+			dma_addr_t pa = umem->odp_data->dma_list[i];
+
+			pas[i] = cpu_to_be64(umem_dma_to_mtt(pa));
+		}
+		return;
+	}
+#endif
 
 	i = 0;
 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
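
In the ODP case the function skips the scatterlist walk entirely: per-page translations come straight from dma_list, and because the cont_pages branch earlier forced *shift = PAGE_SHIFT, shift is zero here, so entries land at pas[i] rather than pas[i >> shift]. A compact userspace sketch of the fill loop; htobe64() stands in for cpu_to_be64(), the three-entry dma_list is invented, and the low-two-bits permission layout is the same assumption as in the previous sketch (under which the ODP-to-MTT translation reduces to the identity).

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* invented dma_list: page-aligned addresses, permissions in low bits */
	uint64_t dma_list[] = { 0x1000 | 3, 0x8000 | 3, 0x3000 | 1 };
	uint64_t pas[3];		/* device-visible translation table */
	int i;

	for (i = 0; i < 3; ++i)
		pas[i] = htobe64(dma_list[i]);	/* device reads MTTs big-endian */

	for (i = 0; i < 3; ++i)
		printf("pas[%d] = 0x%016llx\n", i,
		       (unsigned long long)be64toh(pas[i]));
	return 0;
}

The two WARN_ONs in the hunk document exactly these invariants: a zero shift and full read/write MTT flags.
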
@@ -128,8 +181,7 @@ void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
 		for (k = 0; k < len; k++) {
 			if (!(i & mask)) {
 				cur = base + (k << umem_page_shift);
-				if (umr)
-					cur |= 3;
+				cur |= access_flags;
 
 				pas[i >> shift] = cpu_to_be64(cur);
 				mlx5_ib_dbg(dev, "pas[%d] 0x%llx\n",
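
The last hunk shows the signature change in action: the old boolean umr parameter hardcoded `cur |= 3`, while the new access_flags parameter makes callers state the permissions explicitly. Assuming MLX5_IB_MTT_READ and MLX5_IB_MTT_WRITE occupy the low two bits, an inference from the old literal 3 and the WARN_ON above rather than something this patch shows, the two forms are equivalent for a former umr=1 caller:

#include <stdint.h>
#include <stdio.h>

/* assumed values of enum mlx5_ib_mtt_access_flags */
#define MLX5_IB_MTT_READ	(1 << 0)
#define MLX5_IB_MTT_WRITE	(1 << 1)

int main(void)
{
	uint64_t cur = 0x7f3a2000;	/* invented page address */

	/* old path, umr != 0:       cur |= 3;             */
	/* new path, explicit flags: cur |= access_flags;  */
	uint64_t old_entry = cur | 3;
	uint64_t new_entry = cur | (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE);

	printf("%s\n", old_entry == new_entry ? "equivalent" : "different");
	return 0;
}

A caller that previously passed umr=1 would now pass MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE; that call shape is illustrative, since no call sites appear in this hunk.
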