Diffstat (limited to 'drivers')

-rw-r--r--  drivers/infiniband/hw/mlx4/main.c    |   5
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h |  16
-rw-r--r--  drivers/infiniband/hw/mlx4/mr.c      |  93
-rw-r--r--  drivers/net/mlx4/mr.c                | 165

4 files changed, 279 insertions(+), 0 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index d9fc822a1468..d8287d9db41e 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -607,6 +607,11 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach;
 	ibdev->ib_dev.process_mad  = mlx4_ib_process_mad;
 
+	ibdev->ib_dev.alloc_fmr    = mlx4_ib_fmr_alloc;
+	ibdev->ib_dev.map_phys_fmr = mlx4_ib_map_phys_fmr;
+	ibdev->ib_dev.unmap_fmr    = mlx4_ib_unmap_fmr;
+	ibdev->ib_dev.dealloc_fmr  = mlx4_ib_fmr_dealloc;
+
 	if (init_node_data(ibdev))
 		goto err_map;
 
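The four assignments above plug mlx4's FMR routines into the ib_device method table, which is how the core verbs layer reaches them. As a rough, paraphrased sketch (not part of this patch, and not copied verbatim from drivers/infiniband/core), ib_alloc_fmr() forwards to the provider hook registered here and fills in the common ib_fmr fields:

/* Simplified sketch of the core-side dispatch that lands in mlx4_ib_fmr_alloc() */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *fmr;

	if (!pd->device->alloc_fmr)
		return ERR_PTR(-ENOSYS);

	/* provider hook registered in mlx4_ib_add() above */
	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
	if (!IS_ERR(fmr)) {
		fmr->device = pd->device;
		fmr->pd     = pd;
		atomic_inc(&pd->usecnt);
	}

	return fmr;
}

The other three hooks (map_phys_fmr, unmap_fmr, dealloc_fmr) are dispatched the same way from their ib_* counterparts.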
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 705ff2fa237e..28697653a370 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -93,6 +93,11 @@ struct mlx4_ib_mr {
 	struct ib_umem *umem;
 };
 
+struct mlx4_ib_fmr {
+	struct ib_fmr   ibfmr;
+	struct mlx4_fmr mfmr;
+};
+
 struct mlx4_ib_wq {
 	u64        *wrid;
 	spinlock_t  lock;
@@ -199,6 +204,10 @@ static inline struct mlx4_ib_mr *to_mmr(struct ib_mr *ibmr)
 	return container_of(ibmr, struct mlx4_ib_mr, ibmr);
 }
 
+static inline struct mlx4_ib_fmr *to_mfmr(struct ib_fmr *ibfmr)
+{
+	return container_of(ibfmr, struct mlx4_ib_fmr, ibfmr);
+}
 static inline struct mlx4_ib_qp *to_mqp(struct ib_qp *ibqp)
 {
 	return container_of(ibqp, struct mlx4_ib_qp, ibqp);
@@ -284,6 +293,13 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 int mlx4_ib_mad_init(struct mlx4_ib_dev *dev);
 void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev);
 
+struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int mr_access_flags,
+				 struct ib_fmr_attr *fmr_attr);
+int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, int npages,
+			 u64 iova);
+int mlx4_ib_unmap_fmr(struct list_head *fmr_list);
+int mlx4_ib_fmr_dealloc(struct ib_fmr *fmr);
+
 static inline int mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
 {
 	return !!(ah->av.g_slid & 0x80);
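For context, the fmr_attr argument taken by mlx4_ib_fmr_alloc() is the generic attribute block from <rdma/ib_verbs.h>; it is not modified by this patch and is reproduced below from memory, for reference only:

/* struct ib_fmr_attr as defined by the FMR-era verbs API (reference, not part of the diff) */
struct ib_fmr_attr {
	int max_pages;  /* upper bound on pages per map */
	int max_maps;   /* how many times the FMR may be remapped before unmap */
	u8  page_shift; /* log2 of the page size used for the mapping */
};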
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 734ec2bd15cd..7dc91a3e712d 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -181,3 +181,96 @@ int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
 
 	return 0;
 }
+
+struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc,
+				 struct ib_fmr_attr *fmr_attr)
+{
+	struct mlx4_ib_dev *dev = to_mdev(pd->device);
+	struct mlx4_ib_fmr *fmr;
+	int err = -ENOMEM;
+
+	fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
+	if (!fmr)
+		return ERR_PTR(-ENOMEM);
+
+	err = mlx4_fmr_alloc(dev->dev, to_mpd(pd)->pdn, convert_access(acc),
+			     fmr_attr->max_pages, fmr_attr->max_maps,
+			     fmr_attr->page_shift, &fmr->mfmr);
+	if (err)
+		goto err_free;
+
+	err = mlx4_mr_enable(to_mdev(pd->device)->dev, &fmr->mfmr.mr);
+	if (err)
+		goto err_mr;
+
+	fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mfmr.mr.key;
+
+	return &fmr->ibfmr;
+
+err_mr:
+	mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr);
+
+err_free:
+	kfree(fmr);
+
+	return ERR_PTR(err);
+}
+
+int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
+			 int npages, u64 iova)
+{
+	struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
+	struct mlx4_ib_dev *dev = to_mdev(ifmr->ibfmr.device);
+
+	return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova,
+				 &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
+}
+
+int mlx4_ib_unmap_fmr(struct list_head *fmr_list)
+{
+	struct ib_fmr *ibfmr;
+	int err;
+	struct mlx4_dev *mdev = NULL;
+
+	list_for_each_entry(ibfmr, fmr_list, list) {
+		if (mdev && to_mdev(ibfmr->device)->dev != mdev)
+			return -EINVAL;
+		mdev = to_mdev(ibfmr->device)->dev;
+	}
+
+	if (!mdev)
+		return 0;
+
+	list_for_each_entry(ibfmr, fmr_list, list) {
+		struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
+
+		mlx4_fmr_unmap(mdev, &ifmr->mfmr, &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
+	}
+
+	/*
+	 * Make sure all MPT status updates are visible before issuing
+	 * SYNC_TPT firmware command.
+	 */
+	wmb();
+
+	err = mlx4_SYNC_TPT(mdev);
+	if (err)
+		printk(KERN_WARNING "mlx4_ib: SYNC_TPT error %d when "
+		       "unmapping FMRs\n", err);
+
+	return 0;
+}
+
+int mlx4_ib_fmr_dealloc(struct ib_fmr *ibfmr)
+{
+	struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
+	struct mlx4_ib_dev *dev = to_mdev(ibfmr->device);
+	int err;
+
+	err = mlx4_fmr_free(dev->dev, &ifmr->mfmr);
+
+	if (!err)
+		kfree(ifmr);
+
+	return err;
+}
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index 3cc98c699aaf..4bc39e65015a 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -68,6 +68,9 @@ struct mlx4_mpt_entry {
 
 #define MLX4_MTT_FLAG_PRESENT		1
 
+#define MLX4_MPT_STATUS_SW		0xF0
+#define MLX4_MPT_STATUS_HW		0x00
+
 static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
 {
 	int o;
@@ -469,3 +472,165 @@ void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
 	mlx4_buddy_cleanup(&mr_table->mtt_buddy);
 	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
 }
+
+static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list,
+				 int npages, u64 iova)
+{
+	int i, page_mask;
+
+	if (npages > fmr->max_pages)
+		return -EINVAL;
+
+	page_mask = (1 << fmr->page_shift) - 1;
+
+	/* We are getting page lists, so va must be page aligned. */
+	if (iova & page_mask)
+		return -EINVAL;
+
+	/* Trust the user not to pass misaligned data in page_list */
+	if (0)
+		for (i = 0; i < npages; ++i) {
+			if (page_list[i] & ~page_mask)
+				return -EINVAL;
+		}
+
+	if (fmr->maps >= fmr->max_maps)
+		return -EINVAL;
+
+	return 0;
+}
+
+int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
+		      int npages, u64 iova, u32 *lkey, u32 *rkey)
+{
+	u32 key;
+	int i, err;
+
+	err = mlx4_check_fmr(fmr, page_list, npages, iova);
+	if (err)
+		return err;
+
+	++fmr->maps;
+
+	key = key_to_hw_index(fmr->mr.key);
+	key += dev->caps.num_mpts;
+	*lkey = *rkey = fmr->mr.key = hw_index_to_key(key);
+
+	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;
+
+	/* Make sure MPT status is visible before writing MTT entries */
+	wmb();
+
+	for (i = 0; i < npages; ++i)
+		fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
+
+	dma_sync_single(&dev->pdev->dev, fmr->dma_handle,
+			npages * sizeof(u64), DMA_TO_DEVICE);
+
+	fmr->mpt->key    = cpu_to_be32(key);
+	fmr->mpt->lkey   = cpu_to_be32(key);
+	fmr->mpt->length = cpu_to_be64(npages * (1ull << fmr->page_shift));
+	fmr->mpt->start  = cpu_to_be64(iova);
+
+	/* Make sure MTT entries are visible before setting MPT status */
+	wmb();
+
+	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_HW;
+
+	/* Make sure MPT status is visible before consumer can use FMR */
+	wmb();
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr);
+
+int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
+		   int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	u64 mtt_seg;
+	int err = -ENOMEM;
+
+	if (page_shift < 12 || page_shift >= 32)
+		return -EINVAL;
+
+	/* All MTTs must fit in the same page */
+	if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
+		return -EINVAL;
+
+	fmr->page_shift = page_shift;
+	fmr->max_pages  = max_pages;
+	fmr->max_maps   = max_maps;
+	fmr->maps = 0;
+
+	err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages,
+			    page_shift, &fmr->mr);
+	if (err)
+		return err;
+
+	mtt_seg = fmr->mr.mtt.first_seg * dev->caps.mtt_entry_sz;
+
+	fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
+				    fmr->mr.mtt.first_seg,
+				    &fmr->dma_handle);
+	if (!fmr->mtts) {
+		err = -ENOMEM;
+		goto err_free;
+	}
+
+	fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table,
+				   key_to_hw_index(fmr->mr.key), NULL);
+	if (!fmr->mpt) {
+		err = -ENOMEM;
+		goto err_free;
+	}
+
+	return 0;
+
+err_free:
+	mlx4_mr_free(dev, &fmr->mr);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);
+
+int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
+{
+	return mlx4_mr_enable(dev, &fmr->mr);
+}
+EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
+
+void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
+		    u32 *lkey, u32 *rkey)
+{
+	u32 key;
+
+	if (!fmr->maps)
+		return;
+
+	key = key_to_hw_index(fmr->mr.key);
+	key &= dev->caps.num_mpts - 1;
+	*lkey = *rkey = fmr->mr.key = hw_index_to_key(key);
+
+	fmr->maps = 0;
+
+	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;
+}
+EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
+
+int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
+{
+	if (fmr->maps)
+		return -EBUSY;
+
+	fmr->mr.enabled = 0;
+	mlx4_mr_free(dev, &fmr->mr);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_fmr_free);
+
+int mlx4_SYNC_TPT(struct mlx4_dev *dev)
+{
+	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000);
+}
+EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);
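Taken together, the provider hooks above implement the standard kernel FMR verb flow: allocate, map a page list, use the resulting lkey/rkey in work requests, batch-unmap, and deallocate. Below is a minimal consumer-side sketch of that flow (illustration only, not part of the patch); the PD, the DMA-addressable page array, and the example_fmr_cycle() name are assumptions made for the example.

#include <linux/err.h>
#include <linux/list.h>
#include <rdma/ib_verbs.h>

/* Illustrative sketch of the FMR verb flow served by the mlx4 hooks above. */
static int example_fmr_cycle(struct ib_pd *pd, u64 *pages, int npages, u64 iova)
{
	struct ib_fmr_attr attr = {
		.max_pages  = npages,
		.max_maps   = 32,		/* arbitrary remap budget for the example */
		.page_shift = PAGE_SHIFT,	/* must be >= 12 for mlx4_fmr_alloc() */
	};
	LIST_HEAD(fmr_list);
	struct ib_fmr *fmr;
	int err;

	/* Dispatches to mlx4_ib_fmr_alloc() -> mlx4_fmr_alloc()/mlx4_mr_enable() */
	fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE, &attr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	/* Writes the MTT entries and flips the MPT to hardware ownership */
	err = ib_map_phys_fmr(fmr, pages, npages, iova);
	if (err)
		goto out_dealloc;

	/* ... post work requests using fmr->lkey / fmr->rkey ... */

	/* Unmapping is batched: every FMR on the list is invalidated, then one SYNC_TPT runs */
	list_add_tail(&fmr->list, &fmr_list);
	err = ib_unmap_fmr(&fmr_list);

out_dealloc:
	ib_dealloc_fmr(fmr);
	return err;
}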