Diffstat (limited to 'drivers')
-rw-r--r--   drivers/net/mlx4/icm.c  | 113
-rw-r--r--   drivers/net/mlx4/icm.h  |   7
-rw-r--r--   drivers/net/mlx4/main.c |  40
-rw-r--r--   drivers/net/mlx4/mlx4.h |   1
4 files changed, 112 insertions(+), 49 deletions(-)
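In short: mlx4_alloc_icm() and mlx4_free_icm() grow a coherent flag. When it is set, each ICM chunk comes from dma_alloc_coherent() and is DMA-visible from the start; when it is clear, chunks are built from alloc_pages() and mapped in bulk with pci_map_sg(), as before. A minimal sketch of the two strategies, mirroring the helpers the patch adds below; the sketch_* names are invented for illustration, and the field assignments assume the pre-2.6.24 struct scatterlist layout this driver uses:

	#include <linux/dma-mapping.h>
	#include <linux/gfp.h>
	#include <linux/scatterlist.h>

	/* Streaming DMA: grab ordinary pages now; the caller still owes
	 * a pci_map_sg() before the device may touch them. */
	static int sketch_chunk_pages(struct scatterlist *mem, int order,
				      gfp_t gfp_mask)
	{
		mem->page = alloc_pages(gfp_mask, order);
		if (!mem->page)
			return -ENOMEM;
		mem->length = PAGE_SIZE << order;
		mem->offset = 0;
		return 0;
	}

	/* Coherent DMA: one call yields both a CPU pointer and a bus
	 * address; no later mapping or sync step is needed. */
	static int sketch_chunk_coherent(struct device *dev,
					 struct scatterlist *mem,
					 int order, gfp_t gfp_mask)
	{
		void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
					       &sg_dma_address(mem), gfp_mask);
		if (!buf)
			return -ENOMEM;
		sg_set_buf(mem, buf, PAGE_SIZE << order);
		sg_dma_len(mem) = PAGE_SIZE << order;
		return 0;
	}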
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
index b7a4aa8476fb..250e24887578 100644
--- a/drivers/net/mlx4/icm.c
+++ b/drivers/net/mlx4/icm.c
@@ -34,6 +34,7 @@
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/mm.h>
+#include <linux/scatterlist.h>
 
 #include <linux/mlx4/cmd.h>
 
@@ -50,19 +51,41 @@ enum {
 	MLX4_TABLE_CHUNK_SIZE	= 1 << 18
 };
 
-void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm)
+static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
 {
-	struct mlx4_icm_chunk *chunk, *tmp;
 	int i;
 
-	list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
-		if (chunk->nsg > 0)
-			pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
-				     PCI_DMA_BIDIRECTIONAL);
+	if (chunk->nsg > 0)
+		pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
+			     PCI_DMA_BIDIRECTIONAL);
 
-		for (i = 0; i < chunk->npages; ++i)
-			__free_pages(chunk->mem[i].page,
-				     get_order(chunk->mem[i].length));
-	}
+	for (i = 0; i < chunk->npages; ++i)
+		__free_pages(chunk->mem[i].page,
+			     get_order(chunk->mem[i].length));
+}
+
+static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
+{
+	int i;
+
+	for (i = 0; i < chunk->npages; ++i)
+		dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
+				  lowmem_page_address(chunk->mem[i].page),
+				  sg_dma_address(&chunk->mem[i]));
+}
+
+void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
+{
+	struct mlx4_icm_chunk *chunk, *tmp;
+
+	if (!icm)
+		return;
+
+	list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
+		if (coherent)
+			mlx4_free_icm_coherent(dev, chunk);
+		else
+			mlx4_free_icm_pages(dev, chunk);
 
 		kfree(chunk);
 	}
@@ -70,16 +93,45 @@ void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm)
 	kfree(icm);
 }
 
+static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
+{
+	mem->page = alloc_pages(gfp_mask, order);
+	if (!mem->page)
+		return -ENOMEM;
+
+	mem->length = PAGE_SIZE << order;
+	mem->offset = 0;
+	return 0;
+}
+
+static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
+				   int order, gfp_t gfp_mask)
+{
+	void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
+				       &sg_dma_address(mem), gfp_mask);
+	if (!buf)
+		return -ENOMEM;
+
+	sg_set_buf(mem, buf, PAGE_SIZE << order);
+	BUG_ON(mem->offset);
+	sg_dma_len(mem) = PAGE_SIZE << order;
+	return 0;
+}
+
 struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
-				gfp_t gfp_mask)
+				gfp_t gfp_mask, int coherent)
 {
 	struct mlx4_icm *icm;
 	struct mlx4_icm_chunk *chunk = NULL;
 	int cur_order;
+	int ret;
+
+	/* We use sg_set_buf for coherent allocs, which assumes low memory */
+	BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));
 
 	icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
 	if (!icm)
-		return icm;
+		return NULL;
 
 	icm->refcount = 0;
 	INIT_LIST_HEAD(&icm->chunk_list);
@@ -101,12 +153,20 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
 		while (1 << cur_order > npages)
 			--cur_order;
 
-		chunk->mem[chunk->npages].page = alloc_pages(gfp_mask, cur_order);
-		if (chunk->mem[chunk->npages].page) {
-			chunk->mem[chunk->npages].length = PAGE_SIZE << cur_order;
-			chunk->mem[chunk->npages].offset = 0;
+		if (coherent)
+			ret = mlx4_alloc_icm_coherent(&dev->pdev->dev,
+						      &chunk->mem[chunk->npages],
+						      cur_order, gfp_mask);
+		else
+			ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
+						   cur_order, gfp_mask);
+
+		if (!ret) {
+			++chunk->npages;
 
-			if (++chunk->npages == MLX4_ICM_CHUNK_LEN) {
+			if (coherent)
+				++chunk->nsg;
+			else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
 				chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
 							chunk->npages,
 							PCI_DMA_BIDIRECTIONAL);
@@ -125,7 +185,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
 		}
 	}
 
-	if (chunk) {
+	if (!coherent && chunk) {
 		chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
 					chunk->npages,
 					PCI_DMA_BIDIRECTIONAL);
@@ -137,7 +197,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
 	return icm;
 
 fail:
-	mlx4_free_icm(dev, icm);
+	mlx4_free_icm(dev, icm, coherent);
 	return NULL;
 }
 
@@ -202,7 +262,7 @@ int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
 
 	table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
 				       (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
-				       __GFP_NOWARN);
+				       __GFP_NOWARN, table->coherent);
 	if (!table->icm[i]) {
 		ret = -ENOMEM;
 		goto out;
@@ -210,7 +270,7 @@ int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
 
 	if (mlx4_MAP_ICM(dev, table->icm[i], table->virt +
 			 (u64) i * MLX4_TABLE_CHUNK_SIZE)) {
-		mlx4_free_icm(dev, table->icm[i]);
+		mlx4_free_icm(dev, table->icm[i], table->coherent);
 		table->icm[i] = NULL;
 		ret = -ENOMEM;
 		goto out;
@@ -234,7 +294,7 @@ void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
 	if (--table->icm[i]->refcount == 0) {
 		mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
 			       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
-		mlx4_free_icm(dev, table->icm[i]);
+		mlx4_free_icm(dev, table->icm[i], table->coherent);
 		table->icm[i] = NULL;
 	}
 
@@ -309,7 +369,7 @@ void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
 
 int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
 			u64 virt, int obj_size, int nobj, int reserved,
-			int use_lowmem)
+			int use_lowmem, int use_coherent)
 {
 	int obj_per_chunk;
 	int num_icm;
@@ -327,6 +387,7 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
 	table->num_obj  = nobj;
 	table->obj_size = obj_size;
 	table->lowmem   = use_lowmem;
+	table->coherent = use_coherent;
 	mutex_init(&table->mutex);
 
 	for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
@@ -336,11 +397,11 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
 
 		table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
 					       (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
-					       __GFP_NOWARN);
+					       __GFP_NOWARN, use_coherent);
 		if (!table->icm[i])
 			goto err;
 		if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) {
-			mlx4_free_icm(dev, table->icm[i]);
+			mlx4_free_icm(dev, table->icm[i], use_coherent);
 			table->icm[i] = NULL;
 			goto err;
 		}
@@ -359,7 +420,7 @@ err:
 		if (table->icm[i]) {
 			mlx4_UNMAP_ICM(dev, virt + i * MLX4_TABLE_CHUNK_SIZE,
 				       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
-			mlx4_free_icm(dev, table->icm[i]);
+			mlx4_free_icm(dev, table->icm[i], use_coherent);
 		}
 
 	return -ENOMEM;
@@ -373,7 +434,7 @@ void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
 		if (table->icm[i]) {
 			mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
 				       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
-			mlx4_free_icm(dev, table->icm[i]);
+			mlx4_free_icm(dev, table->icm[i], table->coherent);
 		}
 
 	kfree(table->icm);
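Usage-wise, the allocation and free paths must now agree on the flag. A hedged sketch of the new entry points with a hypothetical caller (not from the patch); note GFP_KERNEL rather than GFP_HIGHUSER, since the coherent path rejects __GFP_HIGHMEM with a BUG_ON():

	struct mlx4_icm *icm;

	/* Allocate a coherent ICM area... */
	icm = mlx4_alloc_icm(dev, npages, GFP_KERNEL | __GFP_NOWARN, 1);
	if (!icm)
		return -ENOMEM;

	/* ... map it into the HCA and use it ... */

	/* ... and free it with the same coherent flag as the alloc. */
	mlx4_free_icm(dev, icm, 1);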
diff --git a/drivers/net/mlx4/icm.h b/drivers/net/mlx4/icm.h
index bea223d879a5..a77db6de8597 100644
--- a/drivers/net/mlx4/icm.h
+++ b/drivers/net/mlx4/icm.h
@@ -67,8 +67,9 @@ struct mlx4_icm_iter {
 
 struct mlx4_dev;
 
-struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, gfp_t gfp_mask);
-void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm);
+struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
+				gfp_t gfp_mask, int coherent);
+void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent);
 
 int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
 void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
@@ -78,7 +79,7 @@ void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
 			  int start, int end);
 int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
 			u64 virt, int obj_size, int nobj, int reserved,
-			int use_lowmem);
+			int use_lowmem, int use_coherent);
 void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table);
 int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
 void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
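At the table level the flag is supplied once, to mlx4_init_icm_table(), and remembered by the table (see the mlx4.h hunk below), so cleanup needs no extra argument. A sketch of that pairing, assuming the dMPT call site that main.c switches to coherent; the table-pointer argument is a placeholder because the hunks below do not show it:

	/* Init once with use_coherent = 1; the table stores the flag,
	 * so mlx4_cleanup_icm_table() frees with the same policy. */
	err = mlx4_init_icm_table(dev, dmpt_table /* placeholder */,
				  init_hca->dmpt_base,
				  dev_cap->dmpt_entry_sz,
				  dev->caps.num_mpts,
				  dev->caps.reserved_mrws,
				  1 /* use_lowmem */, 1 /* use_coherent */);
	if (err)
		goto err_unmap_mtt;

	/* ... and on teardown: */
	mlx4_cleanup_icm_table(dev, dmpt_table);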
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 9e590e11c1cf..07c2847a7cc8 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -168,7 +168,7 @@ static int __devinit mlx4_load_fw(struct mlx4_dev *dev)
 	int err;
 
 	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
-					 GFP_HIGHUSER | __GFP_NOWARN);
+					 GFP_HIGHUSER | __GFP_NOWARN, 0);
 	if (!priv->fw.fw_icm) {
 		mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
 		return -ENOMEM;
@@ -192,7 +192,7 @@ err_unmap_fa:
 	mlx4_UNMAP_FA(dev);
 
 err_free:
-	mlx4_free_icm(dev, priv->fw.fw_icm);
+	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
 	return err;
 }
 
@@ -207,7 +207,7 @@ static int __devinit mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
 				  ((u64) (MLX4_CMPT_TYPE_QP *
 					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
 				  cmpt_entry_sz, dev->caps.num_qps,
-				  dev->caps.reserved_qps, 0);
+				  dev->caps.reserved_qps, 0, 0);
 	if (err)
 		goto err;
 
@@ -216,7 +216,7 @@ static int __devinit mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
 				  ((u64) (MLX4_CMPT_TYPE_SRQ *
 					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
 				  cmpt_entry_sz, dev->caps.num_srqs,
-				  dev->caps.reserved_srqs, 0);
+				  dev->caps.reserved_srqs, 0, 0);
 	if (err)
 		goto err_qp;
 
@@ -225,7 +225,7 @@ static int __devinit mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
 				  ((u64) (MLX4_CMPT_TYPE_CQ *
 					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
 				  cmpt_entry_sz, dev->caps.num_cqs,
-				  dev->caps.reserved_cqs, 0);
+				  dev->caps.reserved_cqs, 0, 0);
 	if (err)
 		goto err_srq;
 
@@ -236,7 +236,7 @@ static int __devinit mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
 				  cmpt_entry_sz,
 				  roundup_pow_of_two(MLX4_NUM_EQ +
 						     dev->caps.reserved_eqs),
-				  MLX4_NUM_EQ + dev->caps.reserved_eqs, 0);
+				  MLX4_NUM_EQ + dev->caps.reserved_eqs, 0, 0);
 	if (err)
 		goto err_cq;
 
@@ -275,7 +275,7 @@ static int __devinit mlx4_init_icm(struct mlx4_dev *dev,
 		 (unsigned long long) aux_pages << 2);
 
 	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
-					  GFP_HIGHUSER | __GFP_NOWARN);
+					  GFP_HIGHUSER | __GFP_NOWARN, 0);
 	if (!priv->fw.aux_icm) {
 		mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
 		return -ENOMEM;
@@ -303,7 +303,7 @@ static int __devinit mlx4_init_icm(struct mlx4_dev *dev,
 				  init_hca->mtt_base,
 				  dev->caps.mtt_entry_sz,
 				  dev->caps.num_mtt_segs,
-				  dev->caps.reserved_mtts, 1);
+				  dev->caps.reserved_mtts, 1, 0);
 	if (err) {
 		mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
 		goto err_unmap_eq;
@@ -313,7 +313,7 @@ static int __devinit mlx4_init_icm(struct mlx4_dev *dev,
 				  init_hca->dmpt_base,
 				  dev_cap->dmpt_entry_sz,
 				  dev->caps.num_mpts,
-				  dev->caps.reserved_mrws, 1);
+				  dev->caps.reserved_mrws, 1, 1);
 	if (err) {
 		mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
 		goto err_unmap_mtt;
@@ -323,7 +323,7 @@ static int __devinit mlx4_init_icm(struct mlx4_dev *dev,
 				  init_hca->qpc_base,
 				  dev_cap->qpc_entry_sz,
 				  dev->caps.num_qps,
-				  dev->caps.reserved_qps, 0);
+				  dev->caps.reserved_qps, 0, 0);
 	if (err) {
 		mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
 		goto err_unmap_dmpt;
@@ -333,7 +333,7 @@ static int __devinit mlx4_init_icm(struct mlx4_dev *dev,
 				  init_hca->auxc_base,
 				  dev_cap->aux_entry_sz,
 				  dev->caps.num_qps,
-				  dev->caps.reserved_qps, 0);
+				  dev->caps.reserved_qps, 0, 0);
 	if (err) {
 		mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
 		goto err_unmap_qp;
@@ -343,7 +343,7 @@ static int __devinit mlx4_init_icm(struct mlx4_dev *dev,
 				  init_hca->altc_base,
 				  dev_cap->altc_entry_sz,
 				  dev->caps.num_qps,
-				  dev->caps.reserved_qps, 0);
+				  dev->caps.reserved_qps, 0, 0);
 	if (err) {
 		mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
 		goto err_unmap_auxc;
@@ -353,7 +353,7 @@ static int __devinit mlx4_init_icm(struct mlx4_dev *dev,
 				  init_hca->rdmarc_base,
 				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
 				  dev->caps.num_qps,
-				  dev->caps.reserved_qps, 0);
+				  dev->caps.reserved_qps, 0, 0);
 	if (err) {
 		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
 		goto err_unmap_altc;
@@ -363,7 +363,7 @@ static int __devinit mlx4_init_icm(struct mlx4_dev *dev,
 				  init_hca->cqc_base,
 				  dev_cap->cqc_entry_sz,
 				  dev->caps.num_cqs,
-				  dev->caps.reserved_cqs, 0);
+				  dev->caps.reserved_cqs, 0, 0);
 	if (err) {
 		mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
 		goto err_unmap_rdmarc;
@@ -373,7 +373,7 @@ static int __devinit mlx4_init_icm(struct mlx4_dev *dev,
 				  init_hca->srqc_base,
 				  dev_cap->srq_entry_sz,
 				  dev->caps.num_srqs,
-				  dev->caps.reserved_srqs, 0);
+				  dev->caps.reserved_srqs, 0, 0);
 	if (err) {
 		mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
 		goto err_unmap_cq;
@@ -388,7 +388,7 @@ static int __devinit mlx4_init_icm(struct mlx4_dev *dev,
 				  init_hca->mc_base, MLX4_MGM_ENTRY_SIZE,
 				  dev->caps.num_mgms + dev->caps.num_amgms,
 				  dev->caps.num_mgms + dev->caps.num_amgms,
-				  0);
+				  0, 0);
 	if (err) {
 		mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
 		goto err_unmap_srq;
@@ -433,7 +433,7 @@ err_unmap_aux:
 	mlx4_UNMAP_ICM_AUX(dev);
 
 err_free_aux:
-	mlx4_free_icm(dev, priv->fw.aux_icm);
+	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
 
 	return err;
 }
@@ -458,7 +458,7 @@ static void mlx4_free_icms(struct mlx4_dev *dev)
 	mlx4_unmap_eq_icm(dev);
 
 	mlx4_UNMAP_ICM_AUX(dev);
-	mlx4_free_icm(dev, priv->fw.aux_icm);
+	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
 }
 
 static void mlx4_close_hca(struct mlx4_dev *dev)
@@ -466,7 +466,7 @@ static void mlx4_close_hca(struct mlx4_dev *dev)
 	mlx4_CLOSE_HCA(dev, 0);
 	mlx4_free_icms(dev);
 	mlx4_UNMAP_FA(dev);
-	mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm);
+	mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
 }
 
 static int __devinit mlx4_init_hca(struct mlx4_dev *dev)
@@ -537,7 +537,7 @@ err_free_icm:
 
 err_stop_fw:
 	mlx4_UNMAP_FA(dev);
-	mlx4_free_icm(dev, priv->fw.fw_icm);
+	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
 
 	return err;
 }
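A note on the call sites above: every pre-existing user passes 0 for the new argument and keeps streaming DMA, and the firmware and aux areas stay GFP_HIGHUSER allocations. The one table switched to coherent memory is the dMPT table (the "1, 1" in the @@ -313,7 hunk), presumably so its entries can be written by the CPU without explicit DMA syncs; the commit message itself is not part of this page.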
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index b9f839761919..2bad045b25cf 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -129,6 +129,7 @@ struct mlx4_icm_table {
 	int			num_obj;
 	int			obj_size;
 	int			lowmem;
+	int			coherent;
 	struct mutex		mutex;
 	struct mlx4_icm	      **icm;
 };