author | Yevgeny Petrilin <yevgenyp@mellanox.co.il> | 2008-04-23 14:55:45 -0400
---|---|---
committer | Roland Dreier <rolandd@cisco.com> | 2008-04-23 14:55:45 -0400
commit | 6296883ca4cd52dafb45f191d24102e28ded38f2 (patch) |
tree | 341e90a9560d8cf6b498d249a6ac81aeea97dd7b /drivers/infiniband/hw/mlx4/doorbell.c |
parent | 14fb05b3497351fbeb514381bcd227d84e115bd9 (diff) |
mlx4_core: Move kernel doorbell management into core
In addition to mlx4_ib, there will be ethernet and FC consumers of
mlx4_core, so move the code for managing kernel doorbells into the
core module to avoid having to duplicate this multiple times.
Signed-off-by: Yevgeny Petrilin <yevgenyp@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
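As a brief illustration of what the move means for in-kernel consumers, here is a minimal, hedged sketch of a consumer's doorbell lifecycle against the relocated core API. The mlx4_db_alloc()/mlx4_db_free() names are inferred from the struct mlx4_db rename visible in the diff below, and struct my_cq is a hypothetical consumer structure, not part of this patch.

```c
/*
 * Sketch only: a kernel-side mlx4_core consumer (IB, Ethernet, or FC)
 * allocating a doorbell record from the core module.  mlx4_db_alloc()/
 * mlx4_db_free() are assumed from the struct mlx4_db rename in this
 * patch; struct my_cq is hypothetical.
 */
#include <linux/mlx4/device.h>

struct my_cq {
        struct mlx4_db db;      /* doorbell record owned by mlx4_core */
};

static int my_cq_init(struct mlx4_dev *mdev, struct my_cq *cq)
{
        int err;

        /* order 1: an aligned, consecutive pair of 32-bit doorbells */
        err = mlx4_db_alloc(mdev, &cq->db, 1);
        if (err)
                return err;

        *cq->db.db = 0;         /* start with a zeroed doorbell record */
        return 0;
}

static void my_cq_cleanup(struct mlx4_dev *mdev, struct my_cq *cq)
{
        mlx4_db_free(mdev, &cq->db);
}
```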
Diffstat (limited to 'drivers/infiniband/hw/mlx4/doorbell.c')
-rw-r--r-- | drivers/infiniband/hw/mlx4/doorbell.c | 122
1 file changed, 2 insertions(+), 120 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/doorbell.c b/drivers/infiniband/hw/mlx4/doorbell.c
index 1c36087aef14..8e342cc9baec 100644
--- a/drivers/infiniband/hw/mlx4/doorbell.c
+++ b/drivers/infiniband/hw/mlx4/doorbell.c
@@ -34,124 +34,6 @@
 
 #include "mlx4_ib.h"
 
-struct mlx4_ib_db_pgdir {
-        struct list_head list;
-        DECLARE_BITMAP(order0, MLX4_IB_DB_PER_PAGE);
-        DECLARE_BITMAP(order1, MLX4_IB_DB_PER_PAGE / 2);
-        unsigned long *bits[2];
-        __be32 *db_page;
-        dma_addr_t db_dma;
-};
-
-static struct mlx4_ib_db_pgdir *mlx4_ib_alloc_db_pgdir(struct mlx4_ib_dev *dev)
-{
-        struct mlx4_ib_db_pgdir *pgdir;
-
-        pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
-        if (!pgdir)
-                return NULL;
-
-        bitmap_fill(pgdir->order1, MLX4_IB_DB_PER_PAGE / 2);
-        pgdir->bits[0] = pgdir->order0;
-        pgdir->bits[1] = pgdir->order1;
-        pgdir->db_page = dma_alloc_coherent(dev->ib_dev.dma_device,
-                                            PAGE_SIZE, &pgdir->db_dma,
-                                            GFP_KERNEL);
-        if (!pgdir->db_page) {
-                kfree(pgdir);
-                return NULL;
-        }
-
-        return pgdir;
-}
-
-static int mlx4_ib_alloc_db_from_pgdir(struct mlx4_ib_db_pgdir *pgdir,
-                                       struct mlx4_ib_db *db, int order)
-{
-        int o;
-        int i;
-
-        for (o = order; o <= 1; ++o) {
-                i = find_first_bit(pgdir->bits[o], MLX4_IB_DB_PER_PAGE >> o);
-                if (i < MLX4_IB_DB_PER_PAGE >> o)
-                        goto found;
-        }
-
-        return -ENOMEM;
-
-found:
-        clear_bit(i, pgdir->bits[o]);
-
-        i <<= o;
-
-        if (o > order)
-                set_bit(i ^ 1, pgdir->bits[order]);
-
-        db->u.pgdir = pgdir;
-        db->index = i;
-        db->db = pgdir->db_page + db->index;
-        db->dma = pgdir->db_dma + db->index * 4;
-        db->order = order;
-
-        return 0;
-}
-
-int mlx4_ib_db_alloc(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db, int order)
-{
-        struct mlx4_ib_db_pgdir *pgdir;
-        int ret = 0;
-
-        mutex_lock(&dev->pgdir_mutex);
-
-        list_for_each_entry(pgdir, &dev->pgdir_list, list)
-                if (!mlx4_ib_alloc_db_from_pgdir(pgdir, db, order))
-                        goto out;
-
-        pgdir = mlx4_ib_alloc_db_pgdir(dev);
-        if (!pgdir) {
-                ret = -ENOMEM;
-                goto out;
-        }
-
-        list_add(&pgdir->list, &dev->pgdir_list);
-
-        /* This should never fail -- we just allocated an empty page: */
-        WARN_ON(mlx4_ib_alloc_db_from_pgdir(pgdir, db, order));
-
-out:
-        mutex_unlock(&dev->pgdir_mutex);
-
-        return ret;
-}
-
-void mlx4_ib_db_free(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db)
-{
-        int o;
-        int i;
-
-        mutex_lock(&dev->pgdir_mutex);
-
-        o = db->order;
-        i = db->index;
-
-        if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
-                clear_bit(i ^ 1, db->u.pgdir->order0);
-                ++o;
-        }
-
-        i >>= o;
-        set_bit(i, db->u.pgdir->bits[o]);
-
-        if (bitmap_full(db->u.pgdir->order1, MLX4_IB_DB_PER_PAGE / 2)) {
-                dma_free_coherent(dev->ib_dev.dma_device, PAGE_SIZE,
-                                  db->u.pgdir->db_page, db->u.pgdir->db_dma);
-                list_del(&db->u.pgdir->list);
-                kfree(db->u.pgdir);
-        }
-
-        mutex_unlock(&dev->pgdir_mutex);
-}
-
 struct mlx4_ib_user_db_page {
         struct list_head list;
         struct ib_umem *umem;
@@ -160,7 +42,7 @@ struct mlx4_ib_user_db_page {
 };
 
 int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
-                        struct mlx4_ib_db *db)
+                        struct mlx4_db *db)
 {
         struct mlx4_ib_user_db_page *page;
         struct ib_umem_chunk *chunk;
@@ -202,7 +84,7 @@ out:
         return err;
 }
 
-void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_ib_db *db)
+void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db)
 {
         mutex_lock(&context->db_page_mutex);
 
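The block removed above (now owned by mlx4_core) is a small two-order buddy allocator: each DMA-coherent page is carved into 32-bit doorbell records, handed out either singly (order 0) or as aligned consecutive pairs (order 1), with the `i <<= o` widening and the `i ^ 1` buddy bit doing the split/merge bookkeeping. The following self-contained userspace sketch replays that index arithmetic; the 1024 doorbells-per-page figure assumes 4 KiB pages (the value of MLX4_IB_DB_PER_PAGE is not shown in this diff), and plain loops stand in for the kernel bitmap helpers.

```c
/*
 * Userspace sketch of the buddy-style doorbell allocator removed above.
 * DB_PER_PAGE = 4096-byte page / 4-byte doorbell = 1024 is an assumption
 * for 4 KiB pages; naive loops replace bitmap_fill()/find_first_bit().
 */
#include <stdio.h>
#include <string.h>

#define DB_PER_PAGE 1024

static unsigned char order0[DB_PER_PAGE];       /* free single doorbells */
static unsigned char order1[DB_PER_PAGE / 2];   /* free aligned pairs    */
static unsigned char *bits[2] = { order0, order1 };

static int find_first(unsigned char *map, int len)
{
        int i;

        for (i = 0; i < len; ++i)
                if (map[i])
                        return i;
        return len;                     /* nothing free at this order */
}

/* Mirrors mlx4_ib_alloc_db_from_pgdir(): returns a doorbell index or -1. */
static int db_alloc(int order)
{
        int o, i;

        for (o = order; o <= 1; ++o) {
                i = find_first(bits[o], DB_PER_PAGE >> o);
                if (i < DB_PER_PAGE >> o)
                        goto found;
        }
        return -1;

found:
        bits[o][i] = 0;                 /* claim the block at order o   */
        i <<= o;                        /* convert to doorbell index    */
        if (o > order)
                bits[order][i ^ 1] = 1; /* split a pair: free the buddy */
        return i;
}

/* Mirrors mlx4_ib_db_free(): coalesces order-0 buddies back into pairs. */
static void db_free(int i, int order)
{
        int o = order;

        if (order == 0 && bits[0][i ^ 1]) {
                bits[0][i ^ 1] = 0;     /* buddy also free: merge upward */
                ++o;
        }
        i >>= o;
        bits[o][i] = 1;
}

int main(void)
{
        memset(order1, 1, sizeof(order1));  /* initially: all pairs free */

        int a = db_alloc(1);    /* consecutive pair at index 0          */
        int b = db_alloc(0);    /* splits the pair at 2: takes 2,       */
                                /* releases buddy 3 into the order-0 map */
        int c = db_alloc(0);    /* reuses the freed buddy 3             */

        printf("a=%d b=%d c=%d\n", a, b, c);    /* prints a=0 b=2 c=3 */

        db_free(b, 0);          /* marks doorbell 2 free at order 0     */
        db_free(c, 0);          /* buddy 2 is free: merge back to a pair */
        return 0;
}
```

Note how the sketch reproduces the invariant the kernel code relies on: order-1 blocks are always even-aligned, so a block's buddy is always reachable by flipping the low bit of its index.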