diff options
author | Roland Dreier <rolandd@cisco.com> | 2008-02-07 00:17:59 -0500 |
---|---|---|
committer | Roland Dreier <rolandd@cisco.com> | 2008-02-07 00:17:59 -0500 |
commit | b57aacfa7a95328f469d0360e49289b023c47e9e (patch) | |
tree | 5642416e92540b93ffb1221ae75b3f1f43ffcd7b /drivers/net/mlx4/alloc.c | |
parent | 313abe55a87bc10e55d00f337d609e17ad5f8c9a (diff) |
mlx4_core: Clean up struct mlx4_buf
Now that struct mlx4_buf.u is a struct instead of a union because of
the vmap() changes, there's no point in having a struct at all. So
move .direct and .page_list directly into struct mlx4_buf and get rid
of a bunch of unnecessary ".u"s.
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/net/mlx4/alloc.c')
-rw-r--r-- | drivers/net/mlx4/alloc.c | 40 |
1 file changed, 20 insertions, 20 deletions
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c index 2da2c2ec1f22..521dc0322ee4 100644 --- a/drivers/net/mlx4/alloc.c +++ b/drivers/net/mlx4/alloc.c | |||
@@ -116,40 +116,40 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, | |||
116 | buf->nbufs = 1; | 116 | buf->nbufs = 1; |
117 | buf->npages = 1; | 117 | buf->npages = 1; |
118 | buf->page_shift = get_order(size) + PAGE_SHIFT; | 118 | buf->page_shift = get_order(size) + PAGE_SHIFT; |
119 | buf->u.direct.buf = dma_alloc_coherent(&dev->pdev->dev, | 119 | buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev, |
120 | size, &t, GFP_KERNEL); | 120 | size, &t, GFP_KERNEL); |
121 | if (!buf->u.direct.buf) | 121 | if (!buf->direct.buf) |
122 | return -ENOMEM; | 122 | return -ENOMEM; |
123 | 123 | ||
124 | buf->u.direct.map = t; | 124 | buf->direct.map = t; |
125 | 125 | ||
126 | while (t & ((1 << buf->page_shift) - 1)) { | 126 | while (t & ((1 << buf->page_shift) - 1)) { |
127 | --buf->page_shift; | 127 | --buf->page_shift; |
128 | buf->npages *= 2; | 128 | buf->npages *= 2; |
129 | } | 129 | } |
130 | 130 | ||
131 | memset(buf->u.direct.buf, 0, size); | 131 | memset(buf->direct.buf, 0, size); |
132 | } else { | 132 | } else { |
133 | int i; | 133 | int i; |
134 | 134 | ||
135 | buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; | 135 | buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; |
136 | buf->npages = buf->nbufs; | 136 | buf->npages = buf->nbufs; |
137 | buf->page_shift = PAGE_SHIFT; | 137 | buf->page_shift = PAGE_SHIFT; |
138 | buf->u.page_list = kzalloc(buf->nbufs * sizeof *buf->u.page_list, | 138 | buf->page_list = kzalloc(buf->nbufs * sizeof *buf->page_list, |
139 | GFP_KERNEL); | 139 | GFP_KERNEL); |
140 | if (!buf->u.page_list) | 140 | if (!buf->page_list) |
141 | return -ENOMEM; | 141 | return -ENOMEM; |
142 | 142 | ||
143 | for (i = 0; i < buf->nbufs; ++i) { | 143 | for (i = 0; i < buf->nbufs; ++i) { |
144 | buf->u.page_list[i].buf = | 144 | buf->page_list[i].buf = |
145 | dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE, | 145 | dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE, |
146 | &t, GFP_KERNEL); | 146 | &t, GFP_KERNEL); |
147 | if (!buf->u.page_list[i].buf) | 147 | if (!buf->page_list[i].buf) |
148 | goto err_free; | 148 | goto err_free; |
149 | 149 | ||
150 | buf->u.page_list[i].map = t; | 150 | buf->page_list[i].map = t; |
151 | 151 | ||
152 | memset(buf->u.page_list[i].buf, 0, PAGE_SIZE); | 152 | memset(buf->page_list[i].buf, 0, PAGE_SIZE); |
153 | } | 153 | } |
154 | 154 | ||
155 | if (BITS_PER_LONG == 64) { | 155 | if (BITS_PER_LONG == 64) { |
@@ -158,10 +158,10 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, | |||
158 | if (!pages) | 158 | if (!pages) |
159 | goto err_free; | 159 | goto err_free; |
160 | for (i = 0; i < buf->nbufs; ++i) | 160 | for (i = 0; i < buf->nbufs; ++i) |
161 | pages[i] = virt_to_page(buf->u.page_list[i].buf); | 161 | pages[i] = virt_to_page(buf->page_list[i].buf); |
162 | buf->u.direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); | 162 | buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); |
163 | kfree(pages); | 163 | kfree(pages); |
164 | if (!buf->u.direct.buf) | 164 | if (!buf->direct.buf) |
165 | goto err_free; | 165 | goto err_free; |
166 | } | 166 | } |
167 | } | 167 | } |
@@ -180,18 +180,18 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf) | |||
180 | int i; | 180 | int i; |
181 | 181 | ||
182 | if (buf->nbufs == 1) | 182 | if (buf->nbufs == 1) |
183 | dma_free_coherent(&dev->pdev->dev, size, buf->u.direct.buf, | 183 | dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf, |
184 | buf->u.direct.map); | 184 | buf->direct.map); |
185 | else { | 185 | else { |
186 | if (BITS_PER_LONG == 64) | 186 | if (BITS_PER_LONG == 64) |
187 | vunmap(buf->u.direct.buf); | 187 | vunmap(buf->direct.buf); |
188 | 188 | ||
189 | for (i = 0; i < buf->nbufs; ++i) | 189 | for (i = 0; i < buf->nbufs; ++i) |
190 | if (buf->u.page_list[i].buf) | 190 | if (buf->page_list[i].buf) |
191 | dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, | 191 | dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, |
192 | buf->u.page_list[i].buf, | 192 | buf->page_list[i].buf, |
193 | buf->u.page_list[i].map); | 193 | buf->page_list[i].map); |
194 | kfree(buf->u.page_list); | 194 | kfree(buf->page_list); |
195 | } | 195 | } |
196 | } | 196 | } |
197 | EXPORT_SYMBOL_GPL(mlx4_buf_free); | 197 | EXPORT_SYMBOL_GPL(mlx4_buf_free); |