about summary refs log tree commit diff stats
path: root/drivers/infiniband/hw/mlx5/mem.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/infiniband/hw/mlx5/mem.c')
-rw-r--r--drivers/infiniband/hw/mlx5/mem.c69
1 files changed, 65 insertions, 4 deletions
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index dae07eae9507..b56e4c5593ee 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -32,6 +32,7 @@
32 32
33#include <linux/module.h> 33#include <linux/module.h>
34#include <rdma/ib_umem.h> 34#include <rdma/ib_umem.h>
35#include <rdma/ib_umem_odp.h>
35#include "mlx5_ib.h" 36#include "mlx5_ib.h"
36 37
37/* @umem: umem object to scan 38/* @umem: umem object to scan
@@ -57,6 +58,17 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
57 int entry; 58 int entry;
58 unsigned long page_shift = ilog2(umem->page_size); 59 unsigned long page_shift = ilog2(umem->page_size);
59 60
61 /* With ODP we must always match OS page size. */
62 if (umem->odp_data) {
63 *count = ib_umem_page_count(umem);
64 *shift = PAGE_SHIFT;
65 *ncont = *count;
66 if (order)
67 *order = ilog2(roundup_pow_of_two(*count));
68
69 return;
70 }
71
60 addr = addr >> page_shift; 72 addr = addr >> page_shift;
61 tmp = (unsigned long)addr; 73 tmp = (unsigned long)addr;
62 m = find_first_bit(&tmp, sizeof(tmp)); 74 m = find_first_bit(&tmp, sizeof(tmp));
@@ -108,8 +120,36 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
108 *count = i; 120 *count = i;
109} 121}
110 122
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
/*
 * Translate one ODP umem DMA-list entry into an MTT entry: keep the bus
 * address bits and map the ODP access bits onto the MTT access flags.
 */
static u64 umem_dma_to_mtt(dma_addr_t umem_dma)
{
	u64 mtt = umem_dma & ODP_DMA_ADDR_MASK;

	mtt |= (umem_dma & ODP_READ_ALLOWED_BIT) ? MLX5_IB_MTT_READ : 0;
	mtt |= (umem_dma & ODP_WRITE_ALLOWED_BIT) ? MLX5_IB_MTT_WRITE : 0;

	return mtt;
}
#endif
136
137/*
138 * Populate the given array with bus addresses from the umem.
139 *
140 * dev - mlx5_ib device
141 * umem - umem to use to fill the pages
142 * page_shift - determines the page size used in the resulting array
143 * offset - offset into the umem to start from,
144 * only implemented for ODP umems
145 * num_pages - total number of pages to fill
146 * pas - bus addresses array to fill
147 * access_flags - access flags to set on all present pages.
148 use enum mlx5_ib_mtt_access_flags for this.
149 */
150void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
151 int page_shift, size_t offset, size_t num_pages,
152 __be64 *pas, int access_flags)
113{ 153{
114 unsigned long umem_page_shift = ilog2(umem->page_size); 154 unsigned long umem_page_shift = ilog2(umem->page_size);
115 int shift = page_shift - umem_page_shift; 155 int shift = page_shift - umem_page_shift;
@@ -120,6 +160,21 @@ void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
120 int len; 160 int len;
121 struct scatterlist *sg; 161 struct scatterlist *sg;
122 int entry; 162 int entry;
163#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
164 const bool odp = umem->odp_data != NULL;
165
166 if (odp) {
167 WARN_ON(shift != 0);
168 WARN_ON(access_flags != (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE));
169
170 for (i = 0; i < num_pages; ++i) {
171 dma_addr_t pa = umem->odp_data->dma_list[offset + i];
172
173 pas[i] = cpu_to_be64(umem_dma_to_mtt(pa));
174 }
175 return;
176 }
177#endif
123 178
124 i = 0; 179 i = 0;
125 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { 180 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
@@ -128,8 +183,7 @@ void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
128 for (k = 0; k < len; k++) { 183 for (k = 0; k < len; k++) {
129 if (!(i & mask)) { 184 if (!(i & mask)) {
130 cur = base + (k << umem_page_shift); 185 cur = base + (k << umem_page_shift);
131 if (umr) 186 cur |= access_flags;
132 cur |= 3;
133 187
134 pas[i >> shift] = cpu_to_be64(cur); 188 pas[i >> shift] = cpu_to_be64(cur);
135 mlx5_ib_dbg(dev, "pas[%d] 0x%llx\n", 189 mlx5_ib_dbg(dev, "pas[%d] 0x%llx\n",
@@ -142,6 +196,13 @@ void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
142 } 196 }
143} 197}
144 198
199void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
200 int page_shift, __be64 *pas, int access_flags)
201{
202 return __mlx5_ib_populate_pas(dev, umem, page_shift, 0,
203 ib_umem_num_pages(umem), pas,
204 access_flags);
205}
145int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset) 206int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset)
146{ 207{
147 u64 page_size; 208 u64 page_size;