author     Yevgeny Petrilin <yevgenyp@mellanox.co.il>   2008-04-23 14:55:45 -0400
committer  Roland Dreier <rolandd@cisco.com>            2008-04-23 14:55:45 -0400
commit     6296883ca4cd52dafb45f191d24102e28ded38f2 (patch)
tree       341e90a9560d8cf6b498d249a6ac81aeea97dd7b
parent     14fb05b3497351fbeb514381bcd227d84e115bd9 (diff)
mlx4_core: Move kernel doorbell management into core
In addition to mlx4_ib, there will be ethernet and FC consumers of
mlx4_core, so move the code for managing kernel doorbells into the
core module to avoid having to duplicate this multiple times.
Signed-off-by: Yevgeny Petrilin <yevgenyp@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
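[Editor's note] For orientation, here is a minimal sketch (not part of the patch) of how a mlx4_core consumer allocates and frees a kernel doorbell record through the relocated API. Only mlx4_db_alloc(), mlx4_db_free() and struct mlx4_db come from this patch; the my_* names, the *db.db initialization and the mlx4_cq_alloc() reference are illustrative assumptions modeled on the existing mlx4_ib call sites:

/*
 * Illustrative sketch only; not part of this patch.  The my_* names are
 * hypothetical.  mlx4_db_alloc(), mlx4_db_free() and struct mlx4_db are
 * the interfaces this patch adds to include/linux/mlx4/device.h.
 */
#include <linux/mlx4/device.h>

struct my_consumer {
        struct mlx4_db db;
};

static int my_consumer_init_db(struct mlx4_dev *dev, struct my_consumer *c)
{
        int err;

        /* order 0: one 32-bit doorbell record from a shared DMA page */
        err = mlx4_db_alloc(dev, &c->db, 0);
        if (err)
                return err;

        *c->db.db = 0;  /* CPU-visible doorbell record (assumed init pattern) */
        /*
         * c->db.dma is the bus address of the record, suitable for the
         * db_rec argument of mlx4_cq_alloc() and friends.
         */
        return 0;
}

static void my_consumer_cleanup_db(struct mlx4_dev *dev, struct my_consumer *c)
{
        mlx4_db_free(dev, &c->db);
}

The per-device page-directory bookkeeping behind these calls now lives in drivers/net/mlx4/alloc.c, as shown in the diff below.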
-rw-r--r--   drivers/infiniband/hw/mlx4/cq.c        |   6
-rw-r--r--   drivers/infiniband/hw/mlx4/doorbell.c  | 122
-rw-r--r--   drivers/infiniband/hw/mlx4/main.c      |   3
-rw-r--r--   drivers/infiniband/hw/mlx4/mlx4_ib.h   |  33
-rw-r--r--   drivers/infiniband/hw/mlx4/qp.c        |   6
-rw-r--r--   drivers/infiniband/hw/mlx4/srq.c       |   6
-rw-r--r--   drivers/net/mlx4/alloc.c               | 111
-rw-r--r--   drivers/net/mlx4/main.c                |   3
-rw-r--r--   drivers/net/mlx4/mlx4.h                |   3
-rw-r--r--   include/linux/mlx4/device.h            |  29

10 files changed, 162 insertions, 160 deletions
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 3557e7edc9b6..5e570bb0bb6f 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -204,7 +204,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
 
                 uar = &to_mucontext(context)->uar;
         } else {
-                err = mlx4_ib_db_alloc(dev, &cq->db, 1);
+                err = mlx4_db_alloc(dev->dev, &cq->db, 1);
                 if (err)
                         goto err_cq;
 
@@ -250,7 +250,7 @@ err_mtt:
 
 err_db:
         if (!context)
-                mlx4_ib_db_free(dev, &cq->db);
+                mlx4_db_free(dev->dev, &cq->db);
 
 err_cq:
         kfree(cq);
@@ -435,7 +435,7 @@ int mlx4_ib_destroy_cq(struct ib_cq *cq)
                 ib_umem_release(mcq->umem);
         } else {
                 mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe + 1);
-                mlx4_ib_db_free(dev, &mcq->db);
+                mlx4_db_free(dev->dev, &mcq->db);
         }
 
         kfree(mcq);
diff --git a/drivers/infiniband/hw/mlx4/doorbell.c b/drivers/infiniband/hw/mlx4/doorbell.c
index 1c36087aef14..8e342cc9baec 100644
--- a/drivers/infiniband/hw/mlx4/doorbell.c
+++ b/drivers/infiniband/hw/mlx4/doorbell.c
@@ -34,124 +34,6 @@
 
 #include "mlx4_ib.h"
 
-struct mlx4_ib_db_pgdir {
-        struct list_head list;
-        DECLARE_BITMAP(order0, MLX4_IB_DB_PER_PAGE);
-        DECLARE_BITMAP(order1, MLX4_IB_DB_PER_PAGE / 2);
-        unsigned long *bits[2];
-        __be32 *db_page;
-        dma_addr_t db_dma;
-};
-
-static struct mlx4_ib_db_pgdir *mlx4_ib_alloc_db_pgdir(struct mlx4_ib_dev *dev)
-{
-        struct mlx4_ib_db_pgdir *pgdir;
-
-        pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
-        if (!pgdir)
-                return NULL;
-
-        bitmap_fill(pgdir->order1, MLX4_IB_DB_PER_PAGE / 2);
-        pgdir->bits[0] = pgdir->order0;
-        pgdir->bits[1] = pgdir->order1;
-        pgdir->db_page = dma_alloc_coherent(dev->ib_dev.dma_device,
-                                            PAGE_SIZE, &pgdir->db_dma,
-                                            GFP_KERNEL);
-        if (!pgdir->db_page) {
-                kfree(pgdir);
-                return NULL;
-        }
-
-        return pgdir;
-}
-
-static int mlx4_ib_alloc_db_from_pgdir(struct mlx4_ib_db_pgdir *pgdir,
-                                       struct mlx4_ib_db *db, int order)
-{
-        int o;
-        int i;
-
-        for (o = order; o <= 1; ++o) {
-                i = find_first_bit(pgdir->bits[o], MLX4_IB_DB_PER_PAGE >> o);
-                if (i < MLX4_IB_DB_PER_PAGE >> o)
-                        goto found;
-        }
-
-        return -ENOMEM;
-
-found:
-        clear_bit(i, pgdir->bits[o]);
-
-        i <<= o;
-
-        if (o > order)
-                set_bit(i ^ 1, pgdir->bits[order]);
-
-        db->u.pgdir = pgdir;
-        db->index = i;
-        db->db = pgdir->db_page + db->index;
-        db->dma = pgdir->db_dma + db->index * 4;
-        db->order = order;
-
-        return 0;
-}
-
-int mlx4_ib_db_alloc(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db, int order)
-{
-        struct mlx4_ib_db_pgdir *pgdir;
-        int ret = 0;
-
-        mutex_lock(&dev->pgdir_mutex);
-
-        list_for_each_entry(pgdir, &dev->pgdir_list, list)
-                if (!mlx4_ib_alloc_db_from_pgdir(pgdir, db, order))
-                        goto out;
-
-        pgdir = mlx4_ib_alloc_db_pgdir(dev);
-        if (!pgdir) {
-                ret = -ENOMEM;
-                goto out;
-        }
-
-        list_add(&pgdir->list, &dev->pgdir_list);
-
-        /* This should never fail -- we just allocated an empty page: */
-        WARN_ON(mlx4_ib_alloc_db_from_pgdir(pgdir, db, order));
-
-out:
-        mutex_unlock(&dev->pgdir_mutex);
-
-        return ret;
-}
-
-void mlx4_ib_db_free(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db)
-{
-        int o;
-        int i;
-
-        mutex_lock(&dev->pgdir_mutex);
-
-        o = db->order;
-        i = db->index;
-
-        if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
-                clear_bit(i ^ 1, db->u.pgdir->order0);
-                ++o;
-        }
-
-        i >>= o;
-        set_bit(i, db->u.pgdir->bits[o]);
-
-        if (bitmap_full(db->u.pgdir->order1, MLX4_IB_DB_PER_PAGE / 2)) {
-                dma_free_coherent(dev->ib_dev.dma_device, PAGE_SIZE,
-                                  db->u.pgdir->db_page, db->u.pgdir->db_dma);
-                list_del(&db->u.pgdir->list);
-                kfree(db->u.pgdir);
-        }
-
-        mutex_unlock(&dev->pgdir_mutex);
-}
-
 struct mlx4_ib_user_db_page {
         struct list_head list;
         struct ib_umem *umem;
@@ -160,7 +42,7 @@ struct mlx4_ib_user_db_page {
 };
 
 int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
-                        struct mlx4_ib_db *db)
+                        struct mlx4_db *db)
 {
         struct mlx4_ib_user_db_page *page;
         struct ib_umem_chunk *chunk;
@@ -202,7 +84,7 @@ out:
         return err;
 }
 
-void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_ib_db *db)
+void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db)
 {
         mutex_lock(&context->db_page_mutex);
 
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 4d9b5ac42202..4d61e32866c6 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -557,9 +557,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
                 goto err_uar;
         MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
 
-        INIT_LIST_HEAD(&ibdev->pgdir_list);
-        mutex_init(&ibdev->pgdir_mutex);
-
         ibdev->dev = dev;
 
         strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 9e637323c155..5cf994794d25 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -43,24 +43,6 @@
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/doorbell.h>
 
-enum {
-        MLX4_IB_DB_PER_PAGE = PAGE_SIZE / 4
-};
-
-struct mlx4_ib_db_pgdir;
-struct mlx4_ib_user_db_page;
-
-struct mlx4_ib_db {
-        __be32 *db;
-        union {
-                struct mlx4_ib_db_pgdir *pgdir;
-                struct mlx4_ib_user_db_page *user_page;
-        } u;
-        dma_addr_t dma;
-        int index;
-        int order;
-};
-
 struct mlx4_ib_ucontext {
         struct ib_ucontext ibucontext;
         struct mlx4_uar uar;
@@ -88,7 +70,7 @@ struct mlx4_ib_cq {
         struct mlx4_cq mcq;
         struct mlx4_ib_cq_buf buf;
         struct mlx4_ib_cq_resize *resize_buf;
-        struct mlx4_ib_db db;
+        struct mlx4_db db;
         spinlock_t lock;
         struct mutex resize_mutex;
         struct ib_umem *umem;
@@ -127,7 +109,7 @@ struct mlx4_ib_qp {
         struct mlx4_qp mqp;
         struct mlx4_buf buf;
 
-        struct mlx4_ib_db db;
+        struct mlx4_db db;
         struct mlx4_ib_wq rq;
 
         u32 doorbell_qpn;
@@ -154,7 +136,7 @@ struct mlx4_ib_srq {
         struct ib_srq ibsrq;
         struct mlx4_srq msrq;
         struct mlx4_buf buf;
-        struct mlx4_ib_db db;
+        struct mlx4_db db;
         u64 *wrid;
         spinlock_t lock;
         int head;
@@ -175,9 +157,6 @@ struct mlx4_ib_dev {
         struct mlx4_dev *dev;
         void __iomem *uar_map;
 
-        struct list_head pgdir_list;
-        struct mutex pgdir_mutex;
-
         struct mlx4_uar priv_uar;
         u32 priv_pdn;
         MLX4_DECLARE_DOORBELL_LOCK(uar_lock);
@@ -248,11 +227,9 @@ static inline struct mlx4_ib_ah *to_mah(struct ib_ah *ibah)
         return container_of(ibah, struct mlx4_ib_ah, ibah);
 }
 
-int mlx4_ib_db_alloc(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db, int order);
-void mlx4_ib_db_free(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db);
 int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
-                        struct mlx4_ib_db *db);
-void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_ib_db *db);
+                        struct mlx4_db *db);
+void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db);
 
 struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc);
 int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index b75efae7e449..80ea8b9e7761 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -514,7 +514,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
                         goto err;
 
                 if (!init_attr->srq) {
-                        err = mlx4_ib_db_alloc(dev, &qp->db, 0);
+                        err = mlx4_db_alloc(dev->dev, &qp->db, 0);
                         if (err)
                                 goto err;
 
@@ -580,7 +580,7 @@ err_buf:
 
 err_db:
         if (!pd->uobject && !init_attr->srq)
-                mlx4_ib_db_free(dev, &qp->db);
+                mlx4_db_free(dev->dev, &qp->db);
 
 err:
         return err;
@@ -666,7 +666,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
                 kfree(qp->rq.wrid);
                 mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
                 if (!qp->ibqp.srq)
-                        mlx4_ib_db_free(dev, &qp->db);
+                        mlx4_db_free(dev->dev, &qp->db);
         }
 }
 
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index beaa3b06cf58..204619702f9d 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -129,7 +129,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
                 if (err)
                         goto err_mtt;
         } else {
-                err = mlx4_ib_db_alloc(dev, &srq->db, 0);
+                err = mlx4_db_alloc(dev->dev, &srq->db, 0);
                 if (err)
                         goto err_srq;
 
@@ -200,7 +200,7 @@ err_buf:
 
 err_db:
         if (!pd->uobject)
-                mlx4_ib_db_free(dev, &srq->db);
+                mlx4_db_free(dev->dev, &srq->db);
 
 err_srq:
         kfree(srq);
@@ -267,7 +267,7 @@ int mlx4_ib_destroy_srq(struct ib_srq *srq)
                 kfree(msrq->wrid);
                 mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift,
                               &msrq->buf);
-                mlx4_ib_db_free(dev, &msrq->db);
+                mlx4_db_free(dev->dev, &msrq->db);
         }
 
         kfree(msrq);
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index 75ef9d0d974d..43c6d04bb880 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -196,3 +196,114 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
         }
 }
 EXPORT_SYMBOL_GPL(mlx4_buf_free);
+
+static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
+{
+        struct mlx4_db_pgdir *pgdir;
+
+        pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
+        if (!pgdir)
+                return NULL;
+
+        bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2);
+        pgdir->bits[0] = pgdir->order0;
+        pgdir->bits[1] = pgdir->order1;
+        pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
+                                            &pgdir->db_dma, GFP_KERNEL);
+        if (!pgdir->db_page) {
+                kfree(pgdir);
+                return NULL;
+        }
+
+        return pgdir;
+}
+
+static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
+                                    struct mlx4_db *db, int order)
+{
+        int o;
+        int i;
+
+        for (o = order; o <= 1; ++o) {
+                i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o);
+                if (i < MLX4_DB_PER_PAGE >> o)
+                        goto found;
+        }
+
+        return -ENOMEM;
+
+found:
+        clear_bit(i, pgdir->bits[o]);
+
+        i <<= o;
+
+        if (o > order)
+                set_bit(i ^ 1, pgdir->bits[order]);
+
+        db->u.pgdir = pgdir;
+        db->index = i;
+        db->db = pgdir->db_page + db->index;
+        db->dma = pgdir->db_dma + db->index * 4;
+        db->order = order;
+
+        return 0;
+}
+
+int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
+{
+        struct mlx4_priv *priv = mlx4_priv(dev);
+        struct mlx4_db_pgdir *pgdir;
+        int ret = 0;
+
+        mutex_lock(&priv->pgdir_mutex);
+
+        list_for_each_entry(pgdir, &priv->pgdir_list, list)
+                if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
+                        goto out;
+
+        pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev));
+        if (!pgdir) {
+                ret = -ENOMEM;
+                goto out;
+        }
+
+        list_add(&pgdir->list, &priv->pgdir_list);
+
+        /* This should never fail -- we just allocated an empty page: */
+        WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order));
+
+out:
+        mutex_unlock(&priv->pgdir_mutex);
+
+        return ret;
+}
+EXPORT_SYMBOL_GPL(mlx4_db_alloc);
+
+void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
+{
+        struct mlx4_priv *priv = mlx4_priv(dev);
+        int o;
+        int i;
+
+        mutex_lock(&priv->pgdir_mutex);
+
+        o = db->order;
+        i = db->index;
+
+        if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
+                clear_bit(i ^ 1, db->u.pgdir->order0);
+                ++o;
+        }
+        i >>= o;
+        set_bit(i, db->u.pgdir->bits[o]);
+
+        if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) {
+                dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+                                  db->u.pgdir->db_page, db->u.pgdir->db_dma);
+                list_del(&db->u.pgdir->list);
+                kfree(db->u.pgdir);
+        }
+
+        mutex_unlock(&priv->pgdir_mutex);
+}
+EXPORT_SYMBOL_GPL(mlx4_db_free);
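[Editor's note] The allocator above keeps, per DMA page, a bitmap of free single records (order0) and a bitmap of free aligned pairs (order1): an order-0 request that finds no free single record splits a pair and returns the buddy (index i ^ 1) to order0, and mlx4_db_free() re-merges buddies and releases the page once order1 is full again. The following standalone sketch (not kernel code; plain byte arrays stand in for the kernel bitmap helpers, and all names are local to the sketch) walks through that index arithmetic:

/* Standalone illustration of the order-0/order-1 buddy bookkeeping used by
 * mlx4_db_alloc()/mlx4_db_free(); constants and helpers are sketch-local. */
#include <stdio.h>
#include <string.h>

#define DB_PER_PAGE 1024                       /* PAGE_SIZE / 4 with 4 KB pages */

static unsigned char order0[DB_PER_PAGE];      /* 1 = free single record */
static unsigned char order1[DB_PER_PAGE / 2];  /* 1 = free aligned pair  */

static int db_alloc(int order)
{
        int o, i, n;

        for (o = order; o <= 1; ++o) {
                n = DB_PER_PAGE >> o;
                for (i = 0; i < n; ++i)
                        if ((o ? order1 : order0)[i])
                                goto found;
        }
        return -1;

found:
        (o ? order1 : order0)[i] = 0;
        i <<= o;                        /* record index within the page */
        if (o > order)
                order0[i ^ 1] = 1;      /* hand the unused buddy back   */
        return i;
}

static void db_free(int i, int order)
{
        int o = order;

        if (order == 0 && order0[i ^ 1]) {      /* buddy free: merge to a pair */
                order0[i ^ 1] = 0;
                ++o;
        }
        (o ? order1 : order0)[i >> o] = 1;
}

int main(void)
{
        int a, b;

        memset(order1, 1, sizeof(order1));      /* fresh page: all pairs free */

        a = db_alloc(0);
        b = db_alloc(0);
        printf("two order-0 records: %d and %d (buddies of one pair)\n", a, b);

        db_free(a, 0);
        db_free(b, 0);                          /* page is fully free again */
        return 0;
}

Apart from operating on struct mlx4_priv and using the device's PCI struct device for the DMA mapping, the logic is carried over unchanged from the mlx4_ib copy removed above.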
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 49a4acab5e82..a6aa49fc1d68 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -798,6 +798,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
         INIT_LIST_HEAD(&priv->ctx_list);
         spin_lock_init(&priv->ctx_lock);
 
+        INIT_LIST_HEAD(&priv->pgdir_list);
+        mutex_init(&priv->pgdir_mutex);
+
         /*
          * Now reset the HCA before we touch the PCI capabilities or
          * attempt a firmware command, since a boot ROM may have left
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 73336810e652..a4023c2dd050 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -257,6 +257,9 @@ struct mlx4_priv {
         struct list_head ctx_list;
         spinlock_t ctx_lock;
 
+        struct list_head pgdir_list;
+        struct mutex pgdir_mutex;
+
         struct mlx4_fw fw;
         struct mlx4_cmd cmd;
 
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index ff7df1a2222f..0a47457931ac 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -208,6 +208,32 @@ struct mlx4_mtt {
         int page_shift;
 };
 
+enum {
+        MLX4_DB_PER_PAGE = PAGE_SIZE / 4
+};
+
+struct mlx4_db_pgdir {
+        struct list_head list;
+        DECLARE_BITMAP(order0, MLX4_DB_PER_PAGE);
+        DECLARE_BITMAP(order1, MLX4_DB_PER_PAGE / 2);
+        unsigned long *bits[2];
+        __be32 *db_page;
+        dma_addr_t db_dma;
+};
+
+struct mlx4_ib_user_db_page;
+
+struct mlx4_db {
+        __be32 *db;
+        union {
+                struct mlx4_db_pgdir *pgdir;
+                struct mlx4_ib_user_db_page *user_page;
+        } u;
+        dma_addr_t dma;
+        int index;
+        int order;
+};
+
 struct mlx4_mr {
         struct mlx4_mtt mtt;
         u64 iova;
@@ -341,6 +367,9 @@ int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                        struct mlx4_buf *buf);
 
+int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order);
+void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);
+
 int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
                   struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq);
 void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);