author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2008-02-08 18:34:26 -0500
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2008-02-08 18:34:26 -0500
commit    f0e2dcffae8701f00b34bce90e762eb798dea5b1 (patch)
tree      f648533a633a2d065b3c9c569c4e9e3c6b2c2ea8 /drivers/net
parent    04a94babd68952a4e3cdd54ebf8ce8891f9b0f2e (diff)
parent    5128bdc97a1018aacac2550cf73bda61041cc3b8 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB/core: Remove unused struct ib_device.flags member
  IB/core: Add IP checksum offload support
  IPoIB: Add send gather support
  IPoIB: Add high DMA feature flag
  IB/mlx4: Use multiple WQ blocks to post smaller send WQEs
  mlx4_core: Clean up struct mlx4_buf
  mlx4_core: For 64-bit systems, vmap() kernel queue buffers
  IB/mlx4: Consolidate code to get an entry from a struct mlx4_buf
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/mlx4/alloc.c | 48
-rw-r--r--  drivers/net/mlx4/mr.c    |  4
2 files changed, 34 insertions(+), 18 deletions(-)
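
Background for the hunks below: the "mlx4_core: Clean up struct mlx4_buf" patch in this merge drops the u. union wrapper, which is why every buf->u.direct / buf->u.page_list reference becomes buf->direct / buf->page_list. The structure itself lives in include/linux/mlx4/device.h, outside this diffstat, so the following is only a rough sketch of the post-cleanup layout (field order and spacing are approximate):

struct mlx4_buf_list {
	void		*buf;
	dma_addr_t	map;
};

struct mlx4_buf {
	struct mlx4_buf_list	direct;		/* single-chunk buffer, or the vmap()ed view on 64-bit */
	struct mlx4_buf_list	*page_list;	/* per-page DMA-coherent chunks otherwise */
	int			nbufs;
	int			npages;
	int			page_shift;
};

On 64-bit kernels the same direct.buf slot is reused to hold the vmap()ed view of a multi-page buffer, which is what the alloc.c hunk below sets up.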
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index b226e019bc8b..521dc0322ee4 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -116,40 +116,53 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
 		buf->nbufs = 1;
 		buf->npages = 1;
 		buf->page_shift = get_order(size) + PAGE_SHIFT;
-		buf->u.direct.buf = dma_alloc_coherent(&dev->pdev->dev,
+		buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev,
 						       size, &t, GFP_KERNEL);
-		if (!buf->u.direct.buf)
+		if (!buf->direct.buf)
 			return -ENOMEM;
 
-		buf->u.direct.map = t;
+		buf->direct.map = t;
 
 		while (t & ((1 << buf->page_shift) - 1)) {
 			--buf->page_shift;
 			buf->npages *= 2;
 		}
 
-		memset(buf->u.direct.buf, 0, size);
+		memset(buf->direct.buf, 0, size);
 	} else {
 		int i;
 
 		buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
 		buf->npages = buf->nbufs;
 		buf->page_shift = PAGE_SHIFT;
-		buf->u.page_list = kzalloc(buf->nbufs * sizeof *buf->u.page_list,
+		buf->page_list = kzalloc(buf->nbufs * sizeof *buf->page_list,
 					   GFP_KERNEL);
-		if (!buf->u.page_list)
+		if (!buf->page_list)
 			return -ENOMEM;
 
 		for (i = 0; i < buf->nbufs; ++i) {
-			buf->u.page_list[i].buf =
+			buf->page_list[i].buf =
 				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
 						   &t, GFP_KERNEL);
-			if (!buf->u.page_list[i].buf)
+			if (!buf->page_list[i].buf)
 				goto err_free;
 
-			buf->u.page_list[i].map = t;
+			buf->page_list[i].map = t;
 
-			memset(buf->u.page_list[i].buf, 0, PAGE_SIZE);
+			memset(buf->page_list[i].buf, 0, PAGE_SIZE);
+		}
+
+		if (BITS_PER_LONG == 64) {
+			struct page **pages;
+			pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
+			if (!pages)
+				goto err_free;
+			for (i = 0; i < buf->nbufs; ++i)
+				pages[i] = virt_to_page(buf->page_list[i].buf);
+			buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
+			kfree(pages);
+			if (!buf->direct.buf)
+				goto err_free;
 		}
 	}
 
@@ -167,15 +180,18 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
 	int i;
 
 	if (buf->nbufs == 1)
-		dma_free_coherent(&dev->pdev->dev, size, buf->u.direct.buf,
-				  buf->u.direct.map);
+		dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
+				  buf->direct.map);
 	else {
+		if (BITS_PER_LONG == 64)
+			vunmap(buf->direct.buf);
+
 		for (i = 0; i < buf->nbufs; ++i)
-			if (buf->u.page_list[i].buf)
+			if (buf->page_list[i].buf)
 				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
-						  buf->u.page_list[i].buf,
-						  buf->u.page_list[i].map);
-		kfree(buf->u.page_list);
+						  buf->page_list[i].buf,
+						  buf->page_list[i].map);
+		kfree(buf->page_list);
 	}
 }
 EXPORT_SYMBOL_GPL(mlx4_buf_free);
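
The payoff of the vmap() path above is that a 64-bit kernel can always treat the queue buffer as one contiguous virtual range. The "IB/mlx4: Consolidate code to get an entry from a struct mlx4_buf" patch in this merge adds an accessor in that spirit; the sketch below follows that idea but is reconstructed from the merge summary rather than copied from the patch (the real helper sits outside drivers/net):

static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
{
	if (BITS_PER_LONG == 64 || buf->nbufs == 1)
		/* virtually contiguous: direct DMA allocation or vmap()ed pages */
		return buf->direct.buf + offset;
	else
		/* 32-bit multi-page buffers still need a per-page lookup */
		return buf->page_list[offset >> PAGE_SHIFT].buf +
			(offset & (PAGE_SIZE - 1));
}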
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index 9c9e308d0917..679dfdb6807f 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -419,9 +419,9 @@ int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 
 	for (i = 0; i < buf->npages; ++i)
 		if (buf->nbufs == 1)
-			page_list[i] = buf->u.direct.map + (i << buf->page_shift);
+			page_list[i] = buf->direct.map + (i << buf->page_shift);
 		else
-			page_list[i] = buf->u.page_list[i].map;
+			page_list[i] = buf->page_list[i].map;
 
 	err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);
 
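
Tying the two files together: vmap() only changes the CPU's view of the buffer; mlx4_buf_write_mtt() above still hands the device the per-page DMA addresses. A hypothetical caller might look roughly like this (example_alloc_queue and qp_buf_size are invented names for illustration, and the mtt is assumed to be initialized elsewhere):

static int example_alloc_queue(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
			       int qp_buf_size, struct mlx4_buf *buf)
{
	int err;

	/* Small buffers come from a single dma_alloc_coherent() chunk;
	 * larger ones from per-page chunks, vmap()ed together on 64-bit. */
	err = mlx4_buf_alloc(dev, qp_buf_size, 2 * PAGE_SIZE, buf);
	if (err)
		return err;

	/* The device still sees per-page DMA addresses through the MTT,
	 * however the kernel-side mapping was built. */
	err = mlx4_buf_write_mtt(dev, mtt, buf);
	if (err)
		mlx4_buf_free(dev, qp_buf_size, buf);

	return err;
}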