author		Roland Dreier <rolandd@cisco.com>	2007-05-08 21:00:38 -0400
committer	Roland Dreier <rolandd@cisco.com>	2007-05-08 21:00:38 -0400
commit		225c7b1feef1b41170f7037a5b10a65cd8a42c54 (patch)
tree		702a0a2cbba7f1c5b2949d236b4463d486204fdc /drivers/net/mlx4/qp.c
parent		1bf66a30421ca772820f489d88c16d0c430d6a67 (diff)
IB/mlx4: Add a driver for Mellanox ConnectX InfiniBand adapters
Add an InfiniBand driver for Mellanox ConnectX adapters.  Because
these adapters can also be used as ethernet NICs and Fibre Channel
HBAs, the driver is split into two modules:

  mlx4_core: Handles low-level things like device initialization and
    processing firmware commands.  Also controls resource allocation
    so that the InfiniBand, ethernet and FC functions can share a
    device without stepping on each other.

  mlx4_ib: Handles InfiniBand-specific things; plugs into the
    InfiniBand midlayer.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
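The file below exports mlx4_qp_alloc(), mlx4_qp_modify(), mlx4_qp_remove() and
mlx4_qp_free() for use by the mlx4_ib module.  As a rough illustration only,
not part of the patch, the sketch below shows how a consumer might take a QP
from RESET to INIT with this interface; the device, MTT and QP context are
assumed to be set up elsewhere, and the helper name is hypothetical.

/* Illustrative sketch (not from the patch): bring a QP to INIT using the
 * interface exported by drivers/net/mlx4/qp.c.  dev, mtt and context are
 * assumed to have been prepared by the caller. */
static int example_qp_to_init(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
			      struct mlx4_qp_context *context,
			      struct mlx4_qp *qp)
{
	int err;

	err = mlx4_qp_alloc(dev, 0, qp);	/* sqpn == 0: take any QPN from the bitmap */
	if (err)
		return err;

	/* RST -> INIT; later INIT -> RTR -> RTS transitions use the same call */
	err = mlx4_qp_modify(dev, mtt, MLX4_QP_STATE_RST, MLX4_QP_STATE_INIT,
			     context, 0, 0, qp);
	if (err) {
		mlx4_qp_remove(dev, qp);
		mlx4_qp_free(dev, qp);
	}
	return err;
}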
Diffstat (limited to 'drivers/net/mlx4/qp.c')
-rw-r--r--	drivers/net/mlx4/qp.c	280
1 files changed, 280 insertions, 0 deletions
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c
new file mode 100644
index 000000000000..7f8b7d55b6e1
--- /dev/null
+++ b/drivers/net/mlx4/qp.c
@@ -0,0 +1,280 @@
/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>

#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4.h"
#include "icm.h"

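/*
 * Dispatch an asynchronous QP event to its owner's handler.  The QP is
 * looked up under qp_table->lock and its refcount is taken so that it
 * cannot be freed while qp->event() runs; mlx4_qp_free() waits on
 * qp->free for the count to drop.
 */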
void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	struct mlx4_qp *qp;

	spin_lock(&qp_table->lock);

	qp = __mlx4_qp_lookup(dev, qpn);
	if (qp)
		atomic_inc(&qp->refcount);

	spin_unlock(&qp_table->lock);

	if (!qp) {
		mlx4_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	qp->event(qp, event_type);

	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
}

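/*
 * Post a QP state transition to the firmware.  The op[][] table below
 * maps (current state, new state) to the firmware command for that
 * transition; a zero entry means the transition is not allowed.  Moving
 * to RESET needs no mailbox; every other transition sends the QP
 * context, prefixed by the optional-parameter mask, in a command
 * mailbox.
 */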
int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
		   struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
		   int sqd_event, struct mlx4_qp *qp)
{
	static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = {
		[MLX4_QP_STATE_RST] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_INIT]	= MLX4_CMD_RST2INIT_QP,
		},
		[MLX4_QP_STATE_INIT] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_INIT]	= MLX4_CMD_INIT2INIT_QP,
			[MLX4_QP_STATE_RTR]	= MLX4_CMD_INIT2RTR_QP,
		},
		[MLX4_QP_STATE_RTR] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_RTR2RTS_QP,
		},
		[MLX4_QP_STATE_RTS] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_RTS2RTS_QP,
			[MLX4_QP_STATE_SQD]	= MLX4_CMD_RTS2SQD_QP,
		},
		[MLX4_QP_STATE_SQD] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_SQD2RTS_QP,
			[MLX4_QP_STATE_SQD]	= MLX4_CMD_SQD2SQD_QP,
		},
		[MLX4_QP_STATE_SQER] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_SQERR2RTS_QP,
		},
		[MLX4_QP_STATE_ERR] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
		}
	};

	struct mlx4_cmd_mailbox *mailbox;
	int ret = 0;

	if (cur_state < 0 || cur_state >= MLX4_QP_NUM_STATE ||
	    new_state < 0 || new_state >= MLX4_QP_NUM_STATE ||
	    !op[cur_state][new_state])
		return -EINVAL;

	if (op[cur_state][new_state] == MLX4_CMD_2RST_QP)
		return mlx4_cmd(dev, 0, qp->qpn, 2,
				MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A);

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (cur_state == MLX4_QP_STATE_RST && new_state == MLX4_QP_STATE_INIT) {
		u64 mtt_addr = mlx4_mtt_addr(dev, mtt);
		context->mtt_base_addr_h = mtt_addr >> 32;
		context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
		context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
	}

	*(__be32 *) mailbox->buf = cpu_to_be32(optpar);
	memcpy(mailbox->buf + 8, context, sizeof *context);

	((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
		cpu_to_be32(qp->qpn);

	ret = mlx4_cmd(dev, mailbox->dma, qp->qpn | (!!sqd_event << 31),
		       new_state == MLX4_QP_STATE_RST ? 2 : 0,
		       op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_qp_modify);

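/*
 * Allocate a QP: take a QP number from the bitmap (unless a special QP
 * number is passed in via sqpn), make sure the ICM entries backing this
 * QPN in the qp, auxc, altc, rdmarc and cmpt tables are present, and
 * insert the QP into the radix tree used for async event lookup.
 */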
int mlx4_qp_alloc(struct mlx4_dev *dev, int sqpn, struct mlx4_qp *qp)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;
	int err;

	if (sqpn)
		qp->qpn = sqpn;
	else {
		qp->qpn = mlx4_bitmap_alloc(&qp_table->bitmap);
		if (qp->qpn == -1)
			return -ENOMEM;
	}

	err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn);
	if (err)
		goto err_out;

	err = mlx4_table_get(dev, &qp_table->auxc_table, qp->qpn);
	if (err)
		goto err_put_qp;

	err = mlx4_table_get(dev, &qp_table->altc_table, qp->qpn);
	if (err)
		goto err_put_auxc;

	err = mlx4_table_get(dev, &qp_table->rdmarc_table, qp->qpn);
	if (err)
		goto err_put_altc;

	err = mlx4_table_get(dev, &qp_table->cmpt_table, qp->qpn);
	if (err)
		goto err_put_rdmarc;

	spin_lock_irq(&qp_table->lock);
	err = radix_tree_insert(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1), qp);
	spin_unlock_irq(&qp_table->lock);
	if (err)
		goto err_put_cmpt;

	atomic_set(&qp->refcount, 1);
	init_completion(&qp->free);

	return 0;

err_put_cmpt:
	mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);

err_put_rdmarc:
	mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);

err_put_altc:
	mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);

err_put_auxc:
	mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);

err_put_qp:
	mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);

err_out:
	if (!sqpn)
		mlx4_bitmap_free(&qp_table->bitmap, qp->qpn);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);

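/*
 * Remove the QP from the radix tree so asynchronous events can no
 * longer find it; the QP's resources are released later by
 * mlx4_qp_free().
 */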
void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	unsigned long flags;

	spin_lock_irqsave(&qp_table->lock, flags);
	radix_tree_delete(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1));
	spin_unlock_irqrestore(&qp_table->lock, flags);
}
EXPORT_SYMBOL_GPL(mlx4_qp_remove);

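/*
 * Free a QP: drop the initial reference and wait for any event handler
 * still holding the QP to finish, then release the ICM table references
 * taken in mlx4_qp_alloc() and return the QP number to the bitmap.
 */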
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;

	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
	wait_for_completion(&qp->free);

	mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
	mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
	mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
	mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
	mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);

	mlx4_bitmap_free(&qp_table->bitmap, qp->qpn);
}
EXPORT_SYMBOL_GPL(mlx4_qp_free);

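/*
 * Firmware command that tells the HCA the base of the block of special
 * QPs; mlx4_cleanup_qp_table() calls it again with base_qpn == 0.
 */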
static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
{
	return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
			MLX4_CMD_TIME_CLASS_B);
}

int __devinit mlx4_init_qp_table(struct mlx4_dev *dev)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	int err;

	spin_lock_init(&qp_table->lock);
	INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);

	/*
	 * We reserve 2 extra QPs per port for the special QPs.  The
	 * block of special QPs must be aligned to a multiple of 8, so
	 * round up.
	 */
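	/*
	 * Worked example with a hypothetical value: if reserved_qps were
	 * 4, ALIGN(4, 8) puts sqp_start at 8, and sqp_start + 8 == 16 is
	 * passed to mlx4_bitmap_init() below so the low QPNs stay
	 * reserved.
	 */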
	dev->caps.sqp_start = ALIGN(dev->caps.reserved_qps, 8);
	err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
			       (1 << 24) - 1, dev->caps.sqp_start + 8);
	if (err)
		return err;

	return mlx4_CONF_SPECIAL_QP(dev, dev->caps.sqp_start);
}

void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
{
	mlx4_CONF_SPECIAL_QP(dev, 0);
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap);
}