aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJack Morgenstein <jackm@dev.mellanox.co.il>2008-01-28 03:40:51 -0500
committerRoland Dreier <rolandd@cisco.com>2008-02-07 00:17:45 -0500
commit313abe55a87bc10e55d00f337d609e17ad5f8c9a (patch)
tree833ff3c4b33f83d4ca64ed322c2d8efa21529d71
parent1c69fc2a9012e160c8d459f63df74a6b01db8322 (diff)
mlx4_core: For 64-bit systems, vmap() kernel queue buffers
Since kernel virtual memory is not a problem on 64-bit systems, there is no reason to use our own 2-layer page mapping scheme for large kernel queue buffers on such systems. Instead, map the page list to a single virtually contiguous buffer with vmap(), so that we can access buffer memory via direct indexing. Signed-off-by: Michael S. Tsirkin <mst@dev.mellanox.co.il> Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il> Signed-off-by: Roland Dreier <rolandd@cisco.com>
-rw-r--r--drivers/net/mlx4/alloc.c16
-rw-r--r--include/linux/mlx4/device.h4
2 files changed, 18 insertions, 2 deletions
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index b226e019bc8b..2da2c2ec1f22 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -151,6 +151,19 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
151 151
152 memset(buf->u.page_list[i].buf, 0, PAGE_SIZE); 152 memset(buf->u.page_list[i].buf, 0, PAGE_SIZE);
153 } 153 }
154
155 if (BITS_PER_LONG == 64) {
156 struct page **pages;
157 pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
158 if (!pages)
159 goto err_free;
160 for (i = 0; i < buf->nbufs; ++i)
161 pages[i] = virt_to_page(buf->u.page_list[i].buf);
162 buf->u.direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
163 kfree(pages);
164 if (!buf->u.direct.buf)
165 goto err_free;
166 }
154 } 167 }
155 168
156 return 0; 169 return 0;
@@ -170,6 +183,9 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
170 dma_free_coherent(&dev->pdev->dev, size, buf->u.direct.buf, 183 dma_free_coherent(&dev->pdev->dev, size, buf->u.direct.buf,
171 buf->u.direct.map); 184 buf->u.direct.map);
172 else { 185 else {
186 if (BITS_PER_LONG == 64)
187 vunmap(buf->u.direct.buf);
188
173 for (i = 0; i < buf->nbufs; ++i) 189 for (i = 0; i < buf->nbufs; ++i)
174 if (buf->u.page_list[i].buf) 190 if (buf->u.page_list[i].buf)
175 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, 191 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index a0afa7511a30..631607788f83 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -189,7 +189,7 @@ struct mlx4_buf_list {
189}; 189};
190 190
191struct mlx4_buf { 191struct mlx4_buf {
192 union { 192 struct {
193 struct mlx4_buf_list direct; 193 struct mlx4_buf_list direct;
194 struct mlx4_buf_list *page_list; 194 struct mlx4_buf_list *page_list;
195 } u; 195 } u;
@@ -310,7 +310,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
310void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf); 310void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
311static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset) 311static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
312{ 312{
313 if (buf->nbufs == 1) 313 if (BITS_PER_LONG == 64 || buf->nbufs == 1)
314 return buf->u.direct.buf + offset; 314 return buf->u.direct.buf + offset;
315 else 315 else
316 return buf->u.page_list[offset >> PAGE_SHIFT].buf + 316 return buf->u.page_list[offset >> PAGE_SHIFT].buf +