diff options
| author | Amir Vadai <amirv@mellanox.com> | 2015-05-28 15:28:38 -0400 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2015-05-30 21:22:37 -0400 |
| commit | 64ffaa2159b752e6c263dc57eaaaed7367d37493 (patch) | |
| tree | ad02abcf7cc735ad2e9037cab1870ba9a2281c5a /include/linux | |
| parent | 8ed9b5e1c8f3cfc0d8c94f1a19d1167422eea7a8 (diff) | |
net/mlx5_core,mlx5_ib: Do not use vmap() on coherent memory
As David Daney pointed out in the mlx4_core driver [1], mlx5_core is also
misusing the DMA-API.
This patch removes the code that vmap()s memory allocated by
dma_alloc_coherent().
After this patch, users of this driver might fail to allocate resources
on memory-fragmented systems. This will be fixed later on.
[1] - https://patchwork.ozlabs.org/patch/458531/
CC: David Daney <david.daney@cavium.com>
Signed-off-by: Amir Vadai <amirv@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/linux')
| -rw-r--r-- | include/linux/mlx5/driver.h | 9 |
1 files changed, 1 insertions, 8 deletions
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 9a90e7523dc2..c4cf25ffcc16 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h | |||
| @@ -334,8 +334,6 @@ struct mlx5_buf_list { | |||
| 334 | 334 | ||
| 335 | struct mlx5_buf { | 335 | struct mlx5_buf { |
| 336 | struct mlx5_buf_list direct; | 336 | struct mlx5_buf_list direct; |
| 337 | struct mlx5_buf_list *page_list; | ||
| 338 | int nbufs; | ||
| 339 | int npages; | 337 | int npages; |
| 340 | int size; | 338 | int size; |
| 341 | u8 page_shift; | 339 | u8 page_shift; |
| @@ -586,11 +584,7 @@ struct mlx5_pas { | |||
| 586 | 584 | ||
| 587 | static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset) | 585 | static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset) |
| 588 | { | 586 | { |
| 589 | if (likely(BITS_PER_LONG == 64 || buf->nbufs == 1)) | ||
| 590 | return buf->direct.buf + offset; | 587 | return buf->direct.buf + offset; |
| 591 | else | ||
| 592 | return buf->page_list[offset >> PAGE_SHIFT].buf + | ||
| 593 | (offset & (PAGE_SIZE - 1)); | ||
| 594 | } | 588 | } |
| 595 | 589 | ||
| 596 | extern struct workqueue_struct *mlx5_core_wq; | 590 | extern struct workqueue_struct *mlx5_core_wq; |
| @@ -669,8 +663,7 @@ void mlx5_health_cleanup(void); | |||
| 669 | void __init mlx5_health_init(void); | 663 | void __init mlx5_health_init(void); |
| 670 | void mlx5_start_health_poll(struct mlx5_core_dev *dev); | 664 | void mlx5_start_health_poll(struct mlx5_core_dev *dev); |
| 671 | void mlx5_stop_health_poll(struct mlx5_core_dev *dev); | 665 | void mlx5_stop_health_poll(struct mlx5_core_dev *dev); |
| 672 | int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct, | 666 | int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf); |
| 673 | struct mlx5_buf *buf); | ||
| 674 | void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf); | 667 | void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf); |
| 675 | struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev, | 668 | struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev, |
| 676 | gfp_t flags, int npages); | 669 | gfp_t flags, int npages); |
