 drivers/net/mlx4/alloc.c    | 40 ++++++++++++++++++++--------------------
 drivers/net/mlx4/mr.c       |  4 ++--
 include/linux/mlx4/device.h | 10 ++++------
 3 files changed, 26 insertions(+), 28 deletions(-)
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index 2da2c2ec1f22..521dc0322ee4 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -116,40 +116,40 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
                 buf->nbufs = 1;
                 buf->npages = 1;
                 buf->page_shift = get_order(size) + PAGE_SHIFT;
-                buf->u.direct.buf = dma_alloc_coherent(&dev->pdev->dev,
+                buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev,
                                                        size, &t, GFP_KERNEL);
-                if (!buf->u.direct.buf)
+                if (!buf->direct.buf)
                         return -ENOMEM;
 
-                buf->u.direct.map = t;
+                buf->direct.map = t;
 
                 while (t & ((1 << buf->page_shift) - 1)) {
                         --buf->page_shift;
                         buf->npages *= 2;
                 }
 
-                memset(buf->u.direct.buf, 0, size);
+                memset(buf->direct.buf, 0, size);
         } else {
                 int i;
 
                 buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
                 buf->npages = buf->nbufs;
                 buf->page_shift = PAGE_SHIFT;
-                buf->u.page_list = kzalloc(buf->nbufs * sizeof *buf->u.page_list,
+                buf->page_list = kzalloc(buf->nbufs * sizeof *buf->page_list,
                                            GFP_KERNEL);
-                if (!buf->u.page_list)
+                if (!buf->page_list)
                         return -ENOMEM;
 
                 for (i = 0; i < buf->nbufs; ++i) {
-                        buf->u.page_list[i].buf =
+                        buf->page_list[i].buf =
                                 dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
                                                    &t, GFP_KERNEL);
-                        if (!buf->u.page_list[i].buf)
+                        if (!buf->page_list[i].buf)
                                 goto err_free;
 
-                        buf->u.page_list[i].map = t;
+                        buf->page_list[i].map = t;
 
-                        memset(buf->u.page_list[i].buf, 0, PAGE_SIZE);
+                        memset(buf->page_list[i].buf, 0, PAGE_SIZE);
                 }
 
                 if (BITS_PER_LONG == 64) {
@@ -158,10 +158,10 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
                         if (!pages)
                                 goto err_free;
                         for (i = 0; i < buf->nbufs; ++i)
-                                pages[i] = virt_to_page(buf->u.page_list[i].buf);
-                        buf->u.direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
+                                pages[i] = virt_to_page(buf->page_list[i].buf);
+                        buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
                         kfree(pages);
-                        if (!buf->u.direct.buf)
+                        if (!buf->direct.buf)
                                 goto err_free;
                 }
         }
@@ -180,18 +180,18 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
         int i;
 
         if (buf->nbufs == 1)
-                dma_free_coherent(&dev->pdev->dev, size, buf->u.direct.buf,
-                                  buf->u.direct.map);
+                dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
+                                  buf->direct.map);
         else {
                 if (BITS_PER_LONG == 64)
-                        vunmap(buf->u.direct.buf);
+                        vunmap(buf->direct.buf);
 
                 for (i = 0; i < buf->nbufs; ++i)
-                        if (buf->u.page_list[i].buf)
+                        if (buf->page_list[i].buf)
                                 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
-                                                  buf->u.page_list[i].buf,
-                                                  buf->u.page_list[i].map);
-                kfree(buf->u.page_list);
+                                                  buf->page_list[i].buf,
+                                                  buf->page_list[i].map);
+                kfree(buf->page_list);
         }
 }
 EXPORT_SYMBOL_GPL(mlx4_buf_free);
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index 9c9e308d0917..679dfdb6807f 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -419,9 +419,9 @@ int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 
         for (i = 0; i < buf->npages; ++i)
                 if (buf->nbufs == 1)
-                        page_list[i] = buf->u.direct.map + (i << buf->page_shift);
+                        page_list[i] = buf->direct.map + (i << buf->page_shift);
                 else
-                        page_list[i] = buf->u.page_list[i].map;
+                        page_list[i] = buf->page_list[i].map;
 
         err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);
 
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 631607788f83..4210ac4a8bcd 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -189,10 +189,8 @@ struct mlx4_buf_list {
 };
 
 struct mlx4_buf {
-        struct {
-                struct mlx4_buf_list direct;
-                struct mlx4_buf_list *page_list;
-        } u;
+        struct mlx4_buf_list direct;
+        struct mlx4_buf_list *page_list;
         int nbufs;
         int npages;
         int page_shift;
@@ -311,9 +309,9 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
 static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
 {
         if (BITS_PER_LONG == 64 || buf->nbufs == 1)
-                return buf->u.direct.buf + offset;
+                return buf->direct.buf + offset;
         else
-                return buf->u.page_list[offset >> PAGE_SHIFT].buf +
+                return buf->page_list[offset >> PAGE_SHIFT].buf +
                         (offset & (PAGE_SIZE - 1));
 }
 
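Illustration of the API after this change (not part of the patch above): in-driver callers now spell the members buf->direct and buf->page_list, with no intermediate ".u." level, and can use mlx4_buf_offset() to reach into the buffer whether it was allocated as one direct chunk or as a page list. The sketch below is hypothetical; example_touch_buf() and its size handling are invented for the example, and it relies only on the mlx4_buf_alloc(), mlx4_buf_offset() and mlx4_buf_free() signatures visible in the hunks above.

/* Hypothetical caller, for illustration only; must run in sleepable context
 * because mlx4_buf_alloc() allocates with GFP_KERNEL.
 */
#include <linux/mm.h>           /* PAGE_SIZE */
#include <linux/string.h>       /* memset */
#include <linux/mlx4/device.h>  /* struct mlx4_buf, mlx4_buf_* */

static int example_touch_buf(struct mlx4_dev *dev, int size)
{
        struct mlx4_buf buf;
        int err;

        /* Allow at most one page as a single direct chunk; larger sizes
         * fall back to the page-list layout inside mlx4_buf_alloc().
         */
        err = mlx4_buf_alloc(dev, size, PAGE_SIZE, &buf);
        if (err)
                return err;

        /* New member paths after this patch: no ".u." in the access. */
        if (buf.nbufs == 1)
                memset(buf.direct.buf, 0, size);                /* one coherent chunk */
        else
                memset(mlx4_buf_offset(&buf, 0), 0, PAGE_SIZE); /* first page only */

        mlx4_buf_free(dev, size, &buf);
        return 0;
}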