-rw-r--r--  MAINTAINERS  22
-rw-r--r--  drivers/infiniband/Kconfig  1
-rw-r--r--  drivers/infiniband/Makefile  1
-rw-r--r--  drivers/infiniband/hw/mlx5/Kconfig  10
-rw-r--r--  drivers/infiniband/hw/mlx5/Makefile  3
-rw-r--r--  drivers/infiniband/hw/mlx5/ah.c  92
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c  843
-rw-r--r--  drivers/infiniband/hw/mlx5/doorbell.c  100
-rw-r--r--  drivers/infiniband/hw/mlx5/mad.c  139
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c  1504
-rw-r--r--  drivers/infiniband/hw/mlx5/mem.c  162
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h  545
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c  1007
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c  2524
-rw-r--r--  drivers/infiniband/hw/mlx5/srq.c  473
-rw-r--r--  drivers/infiniband/hw/mlx5/user.h  121
-rw-r--r--  drivers/net/ethernet/mellanox/Kconfig  1
-rw-r--r--  drivers/net/ethernet/mellanox/Makefile  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Kconfig  18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile  5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/alloc.c  238
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c  1515
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cq.c  224
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/debugfs.c  587
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c  521
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c  185
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c  217
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mad.c  78
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c  475
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mcg.c  106
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h  73
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mr.c  136
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c  435
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pd.c  101
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/port.c  104
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/qp.c  301
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/srq.c  223
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/uar.c  223
-rw-r--r--  include/linux/mlx5/cmd.h  51
-rw-r--r--  include/linux/mlx5/cq.h  165
-rw-r--r--  include/linux/mlx5/device.h  893
-rw-r--r--  include/linux/mlx5/doorbell.h  79
-rw-r--r--  include/linux/mlx5/driver.h  769
-rw-r--r--  include/linux/mlx5/qp.h  467
-rw-r--r--  include/linux/mlx5/srq.h  41
45 files changed, 15779 insertions, 0 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 60d6a3393500..b4265364ed27 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5365,6 +5365,28 @@ W: http://linuxtv.org
5365S: Odd Fixes
5366F: drivers/media/radio/radio-miropcm20*
5367
5368Mellanox MLX5 core VPI driver
5369M: Eli Cohen <eli@mellanox.com>
5370L: netdev@vger.kernel.org
5371L: linux-rdma@vger.kernel.org
5372W: http://www.mellanox.com
5373Q: http://patchwork.ozlabs.org/project/netdev/list/
5374Q: http://patchwork.kernel.org/project/linux-rdma/list/
5375T: git://openfabrics.org/~eli/connect-ib.git
5376S: Supported
5377F: drivers/net/ethernet/mellanox/mlx5/core/
5378F: include/linux/mlx5/
5379
5380Mellanox MLX5 IB driver
5381M: Eli Cohen <eli@mellanox.com>
5382L: linux-rdma@vger.kernel.org
5383W: http://www.mellanox.com
5384Q: http://patchwork.kernel.org/project/linux-rdma/list/
5385T: git://openfabrics.org/~eli/connect-ib.git
5386S: Supported
5387F: include/linux/mlx5/
5388F: drivers/infiniband/hw/mlx5/
5389
5390MODULE SUPPORT
5391M: Rusty Russell <rusty@rustcorp.com.au>
5392S: Maintained
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index c85b56c28099..5ceda710f516 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -50,6 +50,7 @@ source "drivers/infiniband/hw/amso1100/Kconfig"
50source "drivers/infiniband/hw/cxgb3/Kconfig"
51source "drivers/infiniband/hw/cxgb4/Kconfig"
52source "drivers/infiniband/hw/mlx4/Kconfig"
53source "drivers/infiniband/hw/mlx5/Kconfig"
54source "drivers/infiniband/hw/nes/Kconfig"
55source "drivers/infiniband/hw/ocrdma/Kconfig"
56
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index b126fefe0b1c..1fe69888515f 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_INFINIBAND_AMSO1100) += hw/amso1100/
7obj-$(CONFIG_INFINIBAND_CXGB3) += hw/cxgb3/
8obj-$(CONFIG_INFINIBAND_CXGB4) += hw/cxgb4/
9obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/
10obj-$(CONFIG_MLX5_INFINIBAND) += hw/mlx5/
11obj-$(CONFIG_INFINIBAND_NES) += hw/nes/
12obj-$(CONFIG_INFINIBAND_OCRDMA) += hw/ocrdma/
13obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/
diff --git a/drivers/infiniband/hw/mlx5/Kconfig b/drivers/infiniband/hw/mlx5/Kconfig
new file mode 100644
index 000000000000..8e6aebfaf8a4
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/Kconfig
@@ -0,0 +1,10 @@
1config MLX5_INFINIBAND
2 tristate "Mellanox Connect-IB HCA support"
3 depends on NETDEVICES && ETHERNET && PCI && X86
4 select NET_VENDOR_MELLANOX
5 select MLX5_CORE
6 ---help---
7 This driver provides low-level InfiniBand support for
8 Mellanox Connect-IB PCI Express host channel adapters (HCAs).
9 This is required to use InfiniBand protocols such as
10 IP-over-IB or SRP with these devices.
diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile
new file mode 100644
index 000000000000..4ea0135af484
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_MLX5_INFINIBAND) += mlx5_ib.o
2
3mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o
diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c
new file mode 100644
index 000000000000..39ab0caefdf9
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/ah.c
@@ -0,0 +1,92 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include "mlx5_ib.h"
34
35struct ib_ah *create_ib_ah(struct ib_ah_attr *ah_attr,
36 struct mlx5_ib_ah *ah)
37{
38 if (ah_attr->ah_flags & IB_AH_GRH) {
39 memcpy(ah->av.rgid, &ah_attr->grh.dgid, 16);
40 ah->av.grh_gid_fl = cpu_to_be32(ah_attr->grh.flow_label |
41 (1 << 30) |
42 ah_attr->grh.sgid_index << 20);
43 ah->av.hop_limit = ah_attr->grh.hop_limit;
44 ah->av.tclass = ah_attr->grh.traffic_class;
45 }
46
47 ah->av.rlid = cpu_to_be16(ah_attr->dlid);
48 ah->av.fl_mlid = ah_attr->src_path_bits & 0x7f;
49 ah->av.stat_rate_sl = (ah_attr->static_rate << 4) | (ah_attr->sl & 0xf);
50
51 return &ah->ibah;
52}
53
54struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
55{
56 struct mlx5_ib_ah *ah;
57
58 ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
59 if (!ah)
60 return ERR_PTR(-ENOMEM);
61
62 return create_ib_ah(ah_attr, ah); /* never fails */
63}
64
65int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
66{
67 struct mlx5_ib_ah *ah = to_mah(ibah);
68 u32 tmp;
69
70 memset(ah_attr, 0, sizeof(*ah_attr));
71
72 tmp = be32_to_cpu(ah->av.grh_gid_fl);
73 if (tmp & (1 << 30)) {
74 ah_attr->ah_flags = IB_AH_GRH;
75 ah_attr->grh.sgid_index = (tmp >> 20) & 0xff;
76 ah_attr->grh.flow_label = tmp & 0xfffff;
77 memcpy(&ah_attr->grh.dgid, ah->av.rgid, 16);
78 ah_attr->grh.hop_limit = ah->av.hop_limit;
79 ah_attr->grh.traffic_class = ah->av.tclass;
80 }
81 ah_attr->dlid = be16_to_cpu(ah->av.rlid);
82 ah_attr->static_rate = ah->av.stat_rate_sl >> 4;
83 ah_attr->sl = ah->av.stat_rate_sl & 0xf;
84
85 return 0;
86}
87
88int mlx5_ib_destroy_ah(struct ib_ah *ah)
89{
90 kfree(to_mah(ah));
91 return 0;
92}
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
new file mode 100644
index 000000000000..344ab03948a3
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -0,0 +1,843 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/kref.h>
34#include <rdma/ib_umem.h>
35#include "mlx5_ib.h"
36#include "user.h"
37
38static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq)
39{
40 struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
41
42 ibcq->comp_handler(ibcq, ibcq->cq_context);
43}
44
45static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type)
46{
47 struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);
48 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
49 struct ib_cq *ibcq = &cq->ibcq;
50 struct ib_event event;
51
52 if (type != MLX5_EVENT_TYPE_CQ_ERROR) {
53 mlx5_ib_warn(dev, "Unexpected event type %d on CQ %06x\n",
54 type, mcq->cqn);
55 return;
56 }
57
58 if (ibcq->event_handler) {
59 event.device = &dev->ib_dev;
60 event.event = IB_EVENT_CQ_ERR;
61 event.element.cq = ibcq;
62 ibcq->event_handler(&event, ibcq->cq_context);
63 }
64}
65
66static void *get_cqe_from_buf(struct mlx5_ib_cq_buf *buf, int n, int size)
67{
68 return mlx5_buf_offset(&buf->buf, n * size);
69}
70
71static void *get_cqe(struct mlx5_ib_cq *cq, int n)
72{
73 return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz);
74}
75
76static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
77{
78 void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
79 struct mlx5_cqe64 *cqe64;
80
81 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
82 return ((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^
83 !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
84}
85
86static void *next_cqe_sw(struct mlx5_ib_cq *cq)
87{
88 return get_sw_cqe(cq, cq->mcq.cons_index);
89}
90
91static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx)
92{
93 switch (wq->wr_data[idx]) {
94 case MLX5_IB_WR_UMR:
95 return 0;
96
97 case IB_WR_LOCAL_INV:
98 return IB_WC_LOCAL_INV;
99
100 case IB_WR_FAST_REG_MR:
101 return IB_WC_FAST_REG_MR;
102
103 default:
104 pr_warn("unknown completion status\n");
105 return 0;
106 }
107}
108
109static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
110 struct mlx5_ib_wq *wq, int idx)
111{
112 wc->wc_flags = 0;
113 switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
114 case MLX5_OPCODE_RDMA_WRITE_IMM:
115 wc->wc_flags |= IB_WC_WITH_IMM;
116 case MLX5_OPCODE_RDMA_WRITE:
117 wc->opcode = IB_WC_RDMA_WRITE;
118 break;
119 case MLX5_OPCODE_SEND_IMM:
120 wc->wc_flags |= IB_WC_WITH_IMM;
121 case MLX5_OPCODE_SEND:
122 case MLX5_OPCODE_SEND_INVAL:
123 wc->opcode = IB_WC_SEND;
124 break;
125 case MLX5_OPCODE_RDMA_READ:
126 wc->opcode = IB_WC_RDMA_READ;
127 wc->byte_len = be32_to_cpu(cqe->byte_cnt);
128 break;
129 case MLX5_OPCODE_ATOMIC_CS:
130 wc->opcode = IB_WC_COMP_SWAP;
131 wc->byte_len = 8;
132 break;
133 case MLX5_OPCODE_ATOMIC_FA:
134 wc->opcode = IB_WC_FETCH_ADD;
135 wc->byte_len = 8;
136 break;
137 case MLX5_OPCODE_ATOMIC_MASKED_CS:
138 wc->opcode = IB_WC_MASKED_COMP_SWAP;
139 wc->byte_len = 8;
140 break;
141 case MLX5_OPCODE_ATOMIC_MASKED_FA:
142 wc->opcode = IB_WC_MASKED_FETCH_ADD;
143 wc->byte_len = 8;
144 break;
145 case MLX5_OPCODE_BIND_MW:
146 wc->opcode = IB_WC_BIND_MW;
147 break;
148 case MLX5_OPCODE_UMR:
149 wc->opcode = get_umr_comp(wq, idx);
150 break;
151 }
152}
153
154enum {
155 MLX5_GRH_IN_BUFFER = 1,
156 MLX5_GRH_IN_CQE = 2,
157};
158
159static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
160 struct mlx5_ib_qp *qp)
161{
162 struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
163 struct mlx5_ib_srq *srq;
164 struct mlx5_ib_wq *wq;
165 u16 wqe_ctr;
166 u8 g;
167
168 if (qp->ibqp.srq || qp->ibqp.xrcd) {
169 struct mlx5_core_srq *msrq = NULL;
170
171 if (qp->ibqp.xrcd) {
172 msrq = mlx5_core_get_srq(&dev->mdev,
173 be32_to_cpu(cqe->srqn));
174 srq = to_mibsrq(msrq);
175 } else {
176 srq = to_msrq(qp->ibqp.srq);
177 }
178 if (srq) {
179 wqe_ctr = be16_to_cpu(cqe->wqe_counter);
180 wc->wr_id = srq->wrid[wqe_ctr];
181 mlx5_ib_free_srq_wqe(srq, wqe_ctr);
182 if (msrq && atomic_dec_and_test(&msrq->refcount))
183 complete(&msrq->free);
184 }
185 } else {
186 wq = &qp->rq;
187 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
188 ++wq->tail;
189 }
190 wc->byte_len = be32_to_cpu(cqe->byte_cnt);
191
192 switch (cqe->op_own >> 4) {
193 case MLX5_CQE_RESP_WR_IMM:
194 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
195 wc->wc_flags = IB_WC_WITH_IMM;
196 wc->ex.imm_data = cqe->imm_inval_pkey;
197 break;
198 case MLX5_CQE_RESP_SEND:
199 wc->opcode = IB_WC_RECV;
200 wc->wc_flags = 0;
201 break;
202 case MLX5_CQE_RESP_SEND_IMM:
203 wc->opcode = IB_WC_RECV;
204 wc->wc_flags = IB_WC_WITH_IMM;
205 wc->ex.imm_data = cqe->imm_inval_pkey;
206 break;
207 case MLX5_CQE_RESP_SEND_INV:
208 wc->opcode = IB_WC_RECV;
209 wc->wc_flags = IB_WC_WITH_INVALIDATE;
210 wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
211 break;
212 }
213 wc->slid = be16_to_cpu(cqe->slid);
214 wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
215 wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
216 wc->dlid_path_bits = cqe->ml_path;
217 g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
218 wc->wc_flags |= g ? IB_WC_GRH : 0;
219 wc->pkey_index = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff;
220}
221
222static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe)
223{
224 __be32 *p = (__be32 *)cqe;
225 int i;
226
227 mlx5_ib_warn(dev, "dump error cqe\n");
228 for (i = 0; i < sizeof(*cqe) / 16; i++, p += 4)
229 pr_info("%08x %08x %08x %08x\n", be32_to_cpu(p[0]),
230 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
231 be32_to_cpu(p[3]));
232}
233
234static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
235 struct mlx5_err_cqe *cqe,
236 struct ib_wc *wc)
237{
238 int dump = 1;
239
240 switch (cqe->syndrome) {
241 case MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR:
242 wc->status = IB_WC_LOC_LEN_ERR;
243 break;
244 case MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR:
245 wc->status = IB_WC_LOC_QP_OP_ERR;
246 break;
247 case MLX5_CQE_SYNDROME_LOCAL_PROT_ERR:
248 wc->status = IB_WC_LOC_PROT_ERR;
249 break;
250 case MLX5_CQE_SYNDROME_WR_FLUSH_ERR:
251 dump = 0;
252 wc->status = IB_WC_WR_FLUSH_ERR;
253 break;
254 case MLX5_CQE_SYNDROME_MW_BIND_ERR:
255 wc->status = IB_WC_MW_BIND_ERR;
256 break;
257 case MLX5_CQE_SYNDROME_BAD_RESP_ERR:
258 wc->status = IB_WC_BAD_RESP_ERR;
259 break;
260 case MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR:
261 wc->status = IB_WC_LOC_ACCESS_ERR;
262 break;
263 case MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
264 wc->status = IB_WC_REM_INV_REQ_ERR;
265 break;
266 case MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR:
267 wc->status = IB_WC_REM_ACCESS_ERR;
268 break;
269 case MLX5_CQE_SYNDROME_REMOTE_OP_ERR:
270 wc->status = IB_WC_REM_OP_ERR;
271 break;
272 case MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
273 wc->status = IB_WC_RETRY_EXC_ERR;
274 dump = 0;
275 break;
276 case MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
277 wc->status = IB_WC_RNR_RETRY_EXC_ERR;
278 dump = 0;
279 break;
280 case MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR:
281 wc->status = IB_WC_REM_ABORT_ERR;
282 break;
283 default:
284 wc->status = IB_WC_GENERAL_ERR;
285 break;
286 }
287
288 wc->vendor_err = cqe->vendor_err_synd;
289 if (dump)
290 dump_cqe(dev, cqe);
291}
292
293static int is_atomic_response(struct mlx5_ib_qp *qp, uint16_t idx)
294{
295 /* TBD: waiting decision
296 */
297 return 0;
298}
299
300static void *mlx5_get_atomic_laddr(struct mlx5_ib_qp *qp, uint16_t idx)
301{
302 struct mlx5_wqe_data_seg *dpseg;
303 void *addr;
304
305 dpseg = mlx5_get_send_wqe(qp, idx) + sizeof(struct mlx5_wqe_ctrl_seg) +
306 sizeof(struct mlx5_wqe_raddr_seg) +
307 sizeof(struct mlx5_wqe_atomic_seg);
308 addr = (void *)(unsigned long)be64_to_cpu(dpseg->addr);
309 return addr;
310}
311
312static void handle_atomic(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
313 uint16_t idx)
314{
315 void *addr;
316 int byte_count;
317 int i;
318
319 if (!is_atomic_response(qp, idx))
320 return;
321
322 byte_count = be32_to_cpu(cqe64->byte_cnt);
323 addr = mlx5_get_atomic_laddr(qp, idx);
324
325 if (byte_count == 4) {
326 *(uint32_t *)addr = be32_to_cpu(*((__be32 *)addr));
327 } else {
328 for (i = 0; i < byte_count; i += 8) {
329 *(uint64_t *)addr = be64_to_cpu(*((__be64 *)addr));
330 addr += 8;
331 }
332 }
333
334 return;
335}
336
337static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
338 u16 tail, u16 head)
339{
340 int idx;
341
342 do {
343 idx = tail & (qp->sq.wqe_cnt - 1);
344 handle_atomic(qp, cqe64, idx);
345 if (idx == head)
346 break;
347
348 tail = qp->sq.w_list[idx].next;
349 } while (1);
350 tail = qp->sq.w_list[idx].next;
351 qp->sq.last_poll = tail;
352}
353
354static int mlx5_poll_one(struct mlx5_ib_cq *cq,
355 struct mlx5_ib_qp **cur_qp,
356 struct ib_wc *wc)
357{
358 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
359 struct mlx5_err_cqe *err_cqe;
360 struct mlx5_cqe64 *cqe64;
361 struct mlx5_core_qp *mqp;
362 struct mlx5_ib_wq *wq;
363 uint8_t opcode;
364 uint32_t qpn;
365 u16 wqe_ctr;
366 void *cqe;
367 int idx;
368
369 cqe = next_cqe_sw(cq);
370 if (!cqe)
371 return -EAGAIN;
372
373 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
374
375 ++cq->mcq.cons_index;
376
377 /* Make sure we read CQ entry contents after we've checked the
378 * ownership bit.
379 */
380 rmb();
381
382 /* TBD: resize CQ */
383
384 qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
385 if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {
386 /* We do not have to take the QP table lock here,
387 * because CQs will be locked while QPs are removed
388 * from the table.
389 */
390 mqp = __mlx5_qp_lookup(&dev->mdev, qpn);
391 if (unlikely(!mqp)) {
392 mlx5_ib_warn(dev, "CQE@CQ %06x for unknown QPN %6x\n",
393 cq->mcq.cqn, qpn);
394 return -EINVAL;
395 }
396
397 *cur_qp = to_mibqp(mqp);
398 }
399
400 wc->qp = &(*cur_qp)->ibqp;
401 opcode = cqe64->op_own >> 4;
402 switch (opcode) {
403 case MLX5_CQE_REQ:
404 wq = &(*cur_qp)->sq;
405 wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
406 idx = wqe_ctr & (wq->wqe_cnt - 1);
407 handle_good_req(wc, cqe64, wq, idx);
408 handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
409 wc->wr_id = wq->wrid[idx];
410 wq->tail = wq->wqe_head[idx] + 1;
411 wc->status = IB_WC_SUCCESS;
412 break;
413 case MLX5_CQE_RESP_WR_IMM:
414 case MLX5_CQE_RESP_SEND:
415 case MLX5_CQE_RESP_SEND_IMM:
416 case MLX5_CQE_RESP_SEND_INV:
417 handle_responder(wc, cqe64, *cur_qp);
418 wc->status = IB_WC_SUCCESS;
419 break;
420 case MLX5_CQE_RESIZE_CQ:
421 break;
422 case MLX5_CQE_REQ_ERR:
423 case MLX5_CQE_RESP_ERR:
424 err_cqe = (struct mlx5_err_cqe *)cqe64;
425 mlx5_handle_error_cqe(dev, err_cqe, wc);
426 mlx5_ib_dbg(dev, "%s error cqe on cqn 0x%x:\n",
427 opcode == MLX5_CQE_REQ_ERR ?
428 "Requestor" : "Responder", cq->mcq.cqn);
429 mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n",
430 err_cqe->syndrome, err_cqe->vendor_err_synd);
431 if (opcode == MLX5_CQE_REQ_ERR) {
432 wq = &(*cur_qp)->sq;
433 wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
434 idx = wqe_ctr & (wq->wqe_cnt - 1);
435 wc->wr_id = wq->wrid[idx];
436 wq->tail = wq->wqe_head[idx] + 1;
437 } else {
438 struct mlx5_ib_srq *srq;
439
440 if ((*cur_qp)->ibqp.srq) {
441 srq = to_msrq((*cur_qp)->ibqp.srq);
442 wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
443 wc->wr_id = srq->wrid[wqe_ctr];
444 mlx5_ib_free_srq_wqe(srq, wqe_ctr);
445 } else {
446 wq = &(*cur_qp)->rq;
447 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
448 ++wq->tail;
449 }
450 }
451 break;
452 }
453
454 return 0;
455}
456
457int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
458{
459 struct mlx5_ib_cq *cq = to_mcq(ibcq);
460 struct mlx5_ib_qp *cur_qp = NULL;
461 unsigned long flags;
462 int npolled;
463 int err = 0;
464
465 spin_lock_irqsave(&cq->lock, flags);
466
467 for (npolled = 0; npolled < num_entries; npolled++) {
468 err = mlx5_poll_one(cq, &cur_qp, wc + npolled);
469 if (err)
470 break;
471 }
472
473 if (npolled)
474 mlx5_cq_set_ci(&cq->mcq);
475
476 spin_unlock_irqrestore(&cq->lock, flags);
477
478 if (err == 0 || err == -EAGAIN)
479 return npolled;
480 else
481 return err;
482}
483
484int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
485{
486 mlx5_cq_arm(&to_mcq(ibcq)->mcq,
487 (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
488 MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
489 to_mdev(ibcq->device)->mdev.priv.uuari.uars[0].map,
490 MLX5_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->mdev.priv.cq_uar_lock));
491
492 return 0;
493}
494
495static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
496 int nent, int cqe_size)
497{
498 int err;
499
500 err = mlx5_buf_alloc(&dev->mdev, nent * cqe_size,
501 PAGE_SIZE * 2, &buf->buf);
502 if (err)
503 return err;
504
505 buf->cqe_size = cqe_size;
506
507 return 0;
508}
509
510static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
511{
512 mlx5_buf_free(&dev->mdev, &buf->buf);
513}
514
515static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
516 struct ib_ucontext *context, struct mlx5_ib_cq *cq,
517 int entries, struct mlx5_create_cq_mbox_in **cqb,
518 int *cqe_size, int *index, int *inlen)
519{
520 struct mlx5_ib_create_cq ucmd;
521 int page_shift;
522 int npages;
523 int ncont;
524 int err;
525
526 if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
527 return -EFAULT;
528
529 if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128)
530 return -EINVAL;
531
532 *cqe_size = ucmd.cqe_size;
533
534 cq->buf.umem = ib_umem_get(context, ucmd.buf_addr,
535 entries * ucmd.cqe_size,
536 IB_ACCESS_LOCAL_WRITE, 1);
537 if (IS_ERR(cq->buf.umem)) {
538 err = PTR_ERR(cq->buf.umem);
539 return err;
540 }
541
542 err = mlx5_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
543 &cq->db);
544 if (err)
545 goto err_umem;
546
547 mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift,
548 &ncont, NULL);
549 mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
550 ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);
551
552 *inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * ncont;
553 *cqb = mlx5_vzalloc(*inlen);
554 if (!*cqb) {
555 err = -ENOMEM;
556 goto err_db;
557 }
558 mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, (*cqb)->pas, 0);
559 (*cqb)->ctx.log_pg_sz = page_shift - PAGE_SHIFT;
560
561 *index = to_mucontext(context)->uuari.uars[0].index;
562
563 return 0;
564
565err_db:
566 mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);
567
568err_umem:
569 ib_umem_release(cq->buf.umem);
570 return err;
571}
572
573static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context)
574{
575 mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);
576 ib_umem_release(cq->buf.umem);
577}
578
579static void init_cq_buf(struct mlx5_ib_cq *cq, int nent)
580{
581 int i;
582 void *cqe;
583 struct mlx5_cqe64 *cqe64;
584
585 for (i = 0; i < nent; i++) {
586 cqe = get_cqe(cq, i);
587 cqe64 = (cq->buf.cqe_size == 64) ? cqe : cqe + 64;
588 cqe64->op_own = 0xf1;
589 }
590}
591
592static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
593 int entries, int cqe_size,
594 struct mlx5_create_cq_mbox_in **cqb,
595 int *index, int *inlen)
596{
597 int err;
598
599 err = mlx5_db_alloc(&dev->mdev, &cq->db);
600 if (err)
601 return err;
602
603 cq->mcq.set_ci_db = cq->db.db;
604 cq->mcq.arm_db = cq->db.db + 1;
605 *cq->mcq.set_ci_db = 0;
606 *cq->mcq.arm_db = 0;
607 cq->mcq.cqe_sz = cqe_size;
608
609 err = alloc_cq_buf(dev, &cq->buf, entries, cqe_size);
610 if (err)
611 goto err_db;
612
613 init_cq_buf(cq, entries);
614
615 *inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * cq->buf.buf.npages;
616 *cqb = mlx5_vzalloc(*inlen);
617 if (!*cqb) {
618 err = -ENOMEM;
619 goto err_buf;
620 }
621 mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas);
622
623 (*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - PAGE_SHIFT;
624 *index = dev->mdev.priv.uuari.uars[0].index;
625
626 return 0;
627
628err_buf:
629 free_cq_buf(dev, &cq->buf);
630
631err_db:
632 mlx5_db_free(&dev->mdev, &cq->db);
633 return err;
634}
635
636static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
637{
638 free_cq_buf(dev, &cq->buf);
639 mlx5_db_free(&dev->mdev, &cq->db);
640}
641
642struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
643 int vector, struct ib_ucontext *context,
644 struct ib_udata *udata)
645{
646 struct mlx5_create_cq_mbox_in *cqb = NULL;
647 struct mlx5_ib_dev *dev = to_mdev(ibdev);
648 struct mlx5_ib_cq *cq;
649 int uninitialized_var(index);
650 int uninitialized_var(inlen);
651 int cqe_size;
652 int irqn;
653 int eqn;
654 int err;
655
656 entries = roundup_pow_of_two(entries + 1);
657 if (entries < 1 || entries > dev->mdev.caps.max_cqes)
658 return ERR_PTR(-EINVAL);
659
660 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
661 if (!cq)
662 return ERR_PTR(-ENOMEM);
663
664 cq->ibcq.cqe = entries - 1;
665 mutex_init(&cq->resize_mutex);
666 spin_lock_init(&cq->lock);
667 cq->resize_buf = NULL;
668 cq->resize_umem = NULL;
669
670 if (context) {
671 err = create_cq_user(dev, udata, context, cq, entries,
672 &cqb, &cqe_size, &index, &inlen);
673 if (err)
674 goto err_create;
675 } else {
676 /* for now choose 64 bytes till we have a proper interface */
677 cqe_size = 64;
678 err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
679 &index, &inlen);
680 if (err)
681 goto err_create;
682 }
683
684 cq->cqe_size = cqe_size;
685 cqb->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
686 cqb->ctx.log_sz_usr_page = cpu_to_be32((ilog2(entries) << 24) | index);
687 err = mlx5_vector2eqn(dev, vector, &eqn, &irqn);
688 if (err)
689 goto err_cqb;
690
691 cqb->ctx.c_eqn = cpu_to_be16(eqn);
692 cqb->ctx.db_record_addr = cpu_to_be64(cq->db.dma);
693
694 err = mlx5_core_create_cq(&dev->mdev, &cq->mcq, cqb, inlen);
695 if (err)
696 goto err_cqb;
697
698 mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
699 cq->mcq.irqn = irqn;
700 cq->mcq.comp = mlx5_ib_cq_comp;
701 cq->mcq.event = mlx5_ib_cq_event;
702
703 if (context)
704 if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
705 err = -EFAULT;
706 goto err_cmd;
707 }
708
709
710 mlx5_vfree(cqb);
711 return &cq->ibcq;
712
713err_cmd:
714 mlx5_core_destroy_cq(&dev->mdev, &cq->mcq);
715
716err_cqb:
717 mlx5_vfree(cqb);
718 if (context)
719 destroy_cq_user(cq, context);
720 else
721 destroy_cq_kernel(dev, cq);
722
723err_create:
724 kfree(cq);
725
726 return ERR_PTR(err);
727}
728
729
730int mlx5_ib_destroy_cq(struct ib_cq *cq)
731{
732 struct mlx5_ib_dev *dev = to_mdev(cq->device);
733 struct mlx5_ib_cq *mcq = to_mcq(cq);
734 struct ib_ucontext *context = NULL;
735
736 if (cq->uobject)
737 context = cq->uobject->context;
738
739 mlx5_core_destroy_cq(&dev->mdev, &mcq->mcq);
740 if (context)
741 destroy_cq_user(mcq, context);
742 else
743 destroy_cq_kernel(dev, mcq);
744
745 kfree(mcq);
746
747 return 0;
748}
749
750static int is_equal_rsn(struct mlx5_cqe64 *cqe64, struct mlx5_ib_srq *srq,
751 u32 rsn)
752{
753 u32 lrsn;
754
755 if (srq)
756 lrsn = be32_to_cpu(cqe64->srqn) & 0xffffff;
757 else
758 lrsn = be32_to_cpu(cqe64->sop_drop_qpn) & 0xffffff;
759
760 return rsn == lrsn;
761}
762
763void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq)
764{
765 struct mlx5_cqe64 *cqe64, *dest64;
766 void *cqe, *dest;
767 u32 prod_index;
768 int nfreed = 0;
769 u8 owner_bit;
770
771 if (!cq)
772 return;
773
774 /* First we need to find the current producer index, so we
775 * know where to start cleaning from. It doesn't matter if HW
776 * adds new entries after this loop -- the QP we're worried
777 * about is already in RESET, so the new entries won't come
778 * from our QP and therefore don't need to be checked.
779 */
780 for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++)
781 if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
782 break;
783
784 /* Now sweep backwards through the CQ, removing CQ entries
785 * that match our QP by copying older entries on top of them.
786 */
787 while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
788 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
789 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
790 if (is_equal_rsn(cqe64, srq, rsn)) {
791 if (srq)
792 mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter));
793 ++nfreed;
794 } else if (nfreed) {
795 dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
796 dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64;
797 owner_bit = dest64->op_own & MLX5_CQE_OWNER_MASK;
798 memcpy(dest, cqe, cq->mcq.cqe_sz);
799 dest64->op_own = owner_bit |
800 (dest64->op_own & ~MLX5_CQE_OWNER_MASK);
801 }
802 }
803
804 if (nfreed) {
805 cq->mcq.cons_index += nfreed;
806 /* Make sure update of buffer contents is done before
807 * updating consumer index.
808 */
809 wmb();
810 mlx5_cq_set_ci(&cq->mcq);
811 }
812}
813
814void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
815{
816 if (!cq)
817 return;
818
819 spin_lock_irq(&cq->lock);
820 __mlx5_ib_cq_clean(cq, qpn, srq);
821 spin_unlock_irq(&cq->lock);
822}
823
824int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
825{
826 return -ENOSYS;
827}
828
829int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
830{
831 return -ENOSYS;
832}
833
834int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq)
835{
836 struct mlx5_ib_cq *cq;
837
838 if (!ibcq)
839 return 128;
840
841 cq = to_mcq(ibcq);
842 return cq->cqe_size;
843}
diff --git a/drivers/infiniband/hw/mlx5/doorbell.c b/drivers/infiniband/hw/mlx5/doorbell.c
new file mode 100644
index 000000000000..256a23344f28
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/doorbell.c
@@ -0,0 +1,100 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/kref.h>
34#include <linux/slab.h>
35#include <rdma/ib_umem.h>
36
37#include "mlx5_ib.h"
38
39struct mlx5_ib_user_db_page {
40 struct list_head list;
41 struct ib_umem *umem;
42 unsigned long user_virt;
43 int refcnt;
44};
45
46int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
47 struct mlx5_db *db)
48{
49 struct mlx5_ib_user_db_page *page;
50 struct ib_umem_chunk *chunk;
51 int err = 0;
52
53 mutex_lock(&context->db_page_mutex);
54
55 list_for_each_entry(page, &context->db_page_list, list)
56 if (page->user_virt == (virt & PAGE_MASK))
57 goto found;
58
59 page = kmalloc(sizeof(*page), GFP_KERNEL);
60 if (!page) {
61 err = -ENOMEM;
62 goto out;
63 }
64
65 page->user_virt = (virt & PAGE_MASK);
66 page->refcnt = 0;
67 page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
68 PAGE_SIZE, 0, 0);
69 if (IS_ERR(page->umem)) {
70 err = PTR_ERR(page->umem);
71 kfree(page);
72 goto out;
73 }
74
75 list_add(&page->list, &context->db_page_list);
76
77found:
78 chunk = list_entry(page->umem->chunk_list.next, struct ib_umem_chunk, list);
79 db->dma = sg_dma_address(chunk->page_list) + (virt & ~PAGE_MASK);
80 db->u.user_page = page;
81 ++page->refcnt;
82
83out:
84 mutex_unlock(&context->db_page_mutex);
85
86 return err;
87}
88
89void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db)
90{
91 mutex_lock(&context->db_page_mutex);
92
93 if (!--db->u.user_page->refcnt) {
94 list_del(&db->u.user_page->list);
95 ib_umem_release(db->u.user_page->umem);
96 kfree(db->u.user_page);
97 }
98
99 mutex_unlock(&context->db_page_mutex);
100}
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
new file mode 100644
index 000000000000..5c8938be0e08
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -0,0 +1,139 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/mlx5/cmd.h>
34#include <rdma/ib_mad.h>
35#include <rdma/ib_smi.h>
36#include "mlx5_ib.h"
37
38enum {
39 MLX5_IB_VENDOR_CLASS1 = 0x9,
40 MLX5_IB_VENDOR_CLASS2 = 0xa
41};
42
43int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
44 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
45 void *in_mad, void *response_mad)
46{
47 u8 op_modifier = 0;
48
49 /* Key check traps can't be generated unless we have in_wc to
50 * tell us where to send the trap.
51 */
52 if (ignore_mkey || !in_wc)
53 op_modifier |= 0x1;
54 if (ignore_bkey || !in_wc)
55 op_modifier |= 0x2;
56
57 return mlx5_core_mad_ifc(&dev->mdev, in_mad, response_mad, op_modifier, port);
58}
59
60int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
61 struct ib_wc *in_wc, struct ib_grh *in_grh,
62 struct ib_mad *in_mad, struct ib_mad *out_mad)
63{
64 u16 slid;
65 int err;
66
67 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
68
69 if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0)
70 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
71
72 if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
73 in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
74 if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
75 in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
76 in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
77 return IB_MAD_RESULT_SUCCESS;
78
79 /* Don't process SMInfo queries -- the SMA can't handle them.
80 */
81 if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
82 return IB_MAD_RESULT_SUCCESS;
83 } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
84 in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS1 ||
85 in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS2 ||
86 in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
87 if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
88 in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
89 return IB_MAD_RESULT_SUCCESS;
90 } else {
91 return IB_MAD_RESULT_SUCCESS;
92 }
93
94 err = mlx5_MAD_IFC(to_mdev(ibdev),
95 mad_flags & IB_MAD_IGNORE_MKEY,
96 mad_flags & IB_MAD_IGNORE_BKEY,
97 port_num, in_wc, in_grh, in_mad, out_mad);
98 if (err)
99 return IB_MAD_RESULT_FAILURE;
100
101 /* set return bit in status of directed route responses */
102 if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
103 out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);
104
105 if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
106 /* no response for trap repress */
107 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
108
109 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
110}
111
112int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
113{
114 struct ib_smp *in_mad = NULL;
115 struct ib_smp *out_mad = NULL;
116 int err = -ENOMEM;
117 u16 packet_error;
118
119 in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
120 out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
121 if (!in_mad || !out_mad)
122 goto out;
123
124 init_query_mad(in_mad);
125 in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
126 in_mad->attr_mod = cpu_to_be32(port);
127
128 err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
129
130 packet_error = be16_to_cpu(out_mad->status);
131
132 dev->mdev.caps.ext_port_cap[port - 1] = (!err && !packet_error) ?
133 MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;
134
135out:
136 kfree(in_mad);
137 kfree(out_mad);
138 return err;
139}
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
new file mode 100644
index 000000000000..6b1007f9bc29
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -0,0 +1,1504 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <asm-generic/kmap_types.h>
34#include <linux/module.h>
35#include <linux/init.h>
36#include <linux/errno.h>
37#include <linux/pci.h>
38#include <linux/dma-mapping.h>
39#include <linux/slab.h>
40#include <linux/io-mapping.h>
41#include <linux/sched.h>
42#include <rdma/ib_user_verbs.h>
43#include <rdma/ib_smi.h>
44#include <rdma/ib_umem.h>
45#include "user.h"
46#include "mlx5_ib.h"
47
48#define DRIVER_NAME "mlx5_ib"
49#define DRIVER_VERSION "1.0"
50#define DRIVER_RELDATE "June 2013"
51
52MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
53MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
54MODULE_LICENSE("Dual BSD/GPL");
55MODULE_VERSION(DRIVER_VERSION);
56
57static int prof_sel = 2;
58module_param_named(prof_sel, prof_sel, int, 0444);
59MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
60
61static char mlx5_version[] =
62 DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
63 DRIVER_VERSION " (" DRIVER_RELDATE ")\n";
64
65struct mlx5_profile profile[] = {
66 [0] = {
67 .mask = 0,
68 },
69 [1] = {
70 .mask = MLX5_PROF_MASK_QP_SIZE,
71 .log_max_qp = 12,
72 },
73 [2] = {
74 .mask = MLX5_PROF_MASK_QP_SIZE |
75 MLX5_PROF_MASK_MR_CACHE,
76 .log_max_qp = 17,
77 .mr_cache[0] = {
78 .size = 500,
79 .limit = 250
80 },
81 .mr_cache[1] = {
82 .size = 500,
83 .limit = 250
84 },
85 .mr_cache[2] = {
86 .size = 500,
87 .limit = 250
88 },
89 .mr_cache[3] = {
90 .size = 500,
91 .limit = 250
92 },
93 .mr_cache[4] = {
94 .size = 500,
95 .limit = 250
96 },
97 .mr_cache[5] = {
98 .size = 500,
99 .limit = 250
100 },
101 .mr_cache[6] = {
102 .size = 500,
103 .limit = 250
104 },
105 .mr_cache[7] = {
106 .size = 500,
107 .limit = 250
108 },
109 .mr_cache[8] = {
110 .size = 500,
111 .limit = 250
112 },
113 .mr_cache[9] = {
114 .size = 500,
115 .limit = 250
116 },
117 .mr_cache[10] = {
118 .size = 500,
119 .limit = 250
120 },
121 .mr_cache[11] = {
122 .size = 500,
123 .limit = 250
124 },
125 .mr_cache[12] = {
126 .size = 64,
127 .limit = 32
128 },
129 .mr_cache[13] = {
130 .size = 32,
131 .limit = 16
132 },
133 .mr_cache[14] = {
134 .size = 16,
135 .limit = 8
136 },
137 .mr_cache[15] = {
138 .size = 8,
139 .limit = 4
140 },
141 },
142};
143
144int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn)
145{
146 struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
147 struct mlx5_eq *eq, *n;
148 int err = -ENOENT;
149
150 spin_lock(&table->lock);
151 list_for_each_entry_safe(eq, n, &dev->eqs_list, list) {
152 if (eq->index == vector) {
153 *eqn = eq->eqn;
154 *irqn = eq->irqn;
155 err = 0;
156 break;
157 }
158 }
159 spin_unlock(&table->lock);
160
161 return err;
162}
163
164static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
165{
166 struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
167 struct mlx5_eq *eq, *n;
168 int ncomp_vec;
169 int nent;
170 int err;
171 int i;
172
173 INIT_LIST_HEAD(&dev->eqs_list);
174 ncomp_vec = table->num_comp_vectors;
175 nent = MLX5_COMP_EQ_SIZE;
176 for (i = 0; i < ncomp_vec; i++) {
177 eq = kzalloc(sizeof(*eq), GFP_KERNEL);
178 if (!eq) {
179 err = -ENOMEM;
180 goto clean;
181 }
182
183 snprintf(eq->name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
184 err = mlx5_create_map_eq(&dev->mdev, eq,
185 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
186 eq->name,
187 &dev->mdev.priv.uuari.uars[0]);
188 if (err) {
189 kfree(eq);
190 goto clean;
191 }
192 mlx5_ib_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
193 eq->index = i;
194 spin_lock(&table->lock);
195 list_add_tail(&eq->list, &dev->eqs_list);
196 spin_unlock(&table->lock);
197 }
198
199 dev->num_comp_vectors = ncomp_vec;
200 return 0;
201
202clean:
203 spin_lock(&table->lock);
204 list_for_each_entry_safe(eq, n, &dev->eqs_list, list) {
205 list_del(&eq->list);
206 spin_unlock(&table->lock);
207 if (mlx5_destroy_unmap_eq(&dev->mdev, eq))
208 mlx5_ib_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn);
209 kfree(eq);
210 spin_lock(&table->lock);
211 }
212 spin_unlock(&table->lock);
213 return err;
214}
215
216static void free_comp_eqs(struct mlx5_ib_dev *dev)
217{
218 struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
219 struct mlx5_eq *eq, *n;
220
221 spin_lock(&table->lock);
222 list_for_each_entry_safe(eq, n, &dev->eqs_list, list) {
223 list_del(&eq->list);
224 spin_unlock(&table->lock);
225 if (mlx5_destroy_unmap_eq(&dev->mdev, eq))
226 mlx5_ib_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn);
227 kfree(eq);
228 spin_lock(&table->lock);
229 }
230 spin_unlock(&table->lock);
231}
232
233static int mlx5_ib_query_device(struct ib_device *ibdev,
234 struct ib_device_attr *props)
235{
236 struct mlx5_ib_dev *dev = to_mdev(ibdev);
237 struct ib_smp *in_mad = NULL;
238 struct ib_smp *out_mad = NULL;
239 int err = -ENOMEM;
240 int max_rq_sg;
241 int max_sq_sg;
242 u64 flags;
243
244 in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
245 out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
246 if (!in_mad || !out_mad)
247 goto out;
248
249 init_query_mad(in_mad);
250 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
251
252 err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, out_mad);
253 if (err)
254 goto out;
255
256 memset(props, 0, sizeof(*props));
257
258 props->fw_ver = ((u64)fw_rev_maj(&dev->mdev) << 32) |
259 (fw_rev_min(&dev->mdev) << 16) |
260 fw_rev_sub(&dev->mdev);
261 props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
262 IB_DEVICE_PORT_ACTIVE_EVENT |
263 IB_DEVICE_SYS_IMAGE_GUID |
264 IB_DEVICE_RC_RNR_NAK_GEN |
265 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
266 flags = dev->mdev.caps.flags;
267 if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR)
268 props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
269 if (flags & MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR)
270 props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
271 if (flags & MLX5_DEV_CAP_FLAG_APM)
272 props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
273 props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
274 if (flags & MLX5_DEV_CAP_FLAG_XRC)
275 props->device_cap_flags |= IB_DEVICE_XRC;
276 props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
277
278 props->vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) &
279 0xffffff;
280 props->vendor_part_id = be16_to_cpup((__be16 *)(out_mad->data + 30));
281 props->hw_ver = be32_to_cpup((__be32 *)(out_mad->data + 32));
282 memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
283
284 props->max_mr_size = ~0ull;
285 props->page_size_cap = dev->mdev.caps.min_page_sz;
286 props->max_qp = 1 << dev->mdev.caps.log_max_qp;
287 props->max_qp_wr = dev->mdev.caps.max_wqes;
288 max_rq_sg = dev->mdev.caps.max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg);
289 max_sq_sg = (dev->mdev.caps.max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) /
290 sizeof(struct mlx5_wqe_data_seg);
291 props->max_sge = min(max_rq_sg, max_sq_sg);
292 props->max_cq = 1 << dev->mdev.caps.log_max_cq;
293 props->max_cqe = dev->mdev.caps.max_cqes - 1;
294 props->max_mr = 1 << dev->mdev.caps.log_max_mkey;
295 props->max_pd = 1 << dev->mdev.caps.log_max_pd;
296 props->max_qp_rd_atom = dev->mdev.caps.max_ra_req_qp;
297 props->max_qp_init_rd_atom = dev->mdev.caps.max_ra_res_qp;
298 props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
299 props->max_srq = 1 << dev->mdev.caps.log_max_srq;
300 props->max_srq_wr = dev->mdev.caps.max_srq_wqes - 1;
301 props->max_srq_sge = max_rq_sg - 1;
302 props->max_fast_reg_page_list_len = (unsigned int)-1;
303 props->local_ca_ack_delay = dev->mdev.caps.local_ca_ack_delay;
304 props->atomic_cap = dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_ATOMIC ?
305 IB_ATOMIC_HCA : IB_ATOMIC_NONE;
306 props->masked_atomic_cap = IB_ATOMIC_HCA;
307 props->max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));
308 props->max_mcast_grp = 1 << dev->mdev.caps.log_max_mcg;
309 props->max_mcast_qp_attach = dev->mdev.caps.max_qp_mcg;
310 props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
311 props->max_mcast_grp;
312 props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
313
314out:
315 kfree(in_mad);
316 kfree(out_mad);
317
318 return err;
319}
320
321int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
322 struct ib_port_attr *props)
323{
324 struct mlx5_ib_dev *dev = to_mdev(ibdev);
325 struct ib_smp *in_mad = NULL;
326 struct ib_smp *out_mad = NULL;
327 int ext_active_speed;
328 int err = -ENOMEM;
329
330 if (port < 1 || port > dev->mdev.caps.num_ports) {
331 mlx5_ib_warn(dev, "invalid port number %d\n", port);
332 return -EINVAL;
333 }
334
335 in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
336 out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
337 if (!in_mad || !out_mad)
338 goto out;
339
340 memset(props, 0, sizeof(*props));
341
342 init_query_mad(in_mad);
343 in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
344 in_mad->attr_mod = cpu_to_be32(port);
345
346 err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad);
347 if (err) {
348 mlx5_ib_warn(dev, "err %d\n", err);
349 goto out;
350 }
351
352
353 props->lid = be16_to_cpup((__be16 *)(out_mad->data + 16));
354 props->lmc = out_mad->data[34] & 0x7;
355 props->sm_lid = be16_to_cpup((__be16 *)(out_mad->data + 18));
356 props->sm_sl = out_mad->data[36] & 0xf;
357 props->state = out_mad->data[32] & 0xf;
358 props->phys_state = out_mad->data[33] >> 4;
359 props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20));
360 props->gid_tbl_len = out_mad->data[50];
361 props->max_msg_sz = 1 << to_mdev(ibdev)->mdev.caps.log_max_msg;
362 props->pkey_tbl_len = to_mdev(ibdev)->mdev.caps.port[port - 1].pkey_table_len;
363 props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46));
364 props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48));
365 props->active_width = out_mad->data[31] & 0xf;
366 props->active_speed = out_mad->data[35] >> 4;
367 props->max_mtu = out_mad->data[41] & 0xf;
368 props->active_mtu = out_mad->data[36] >> 4;
369 props->subnet_timeout = out_mad->data[51] & 0x1f;
370 props->max_vl_num = out_mad->data[37] >> 4;
371 props->init_type_reply = out_mad->data[41] >> 4;
372
373 /* Check if extended speeds (EDR/FDR/...) are supported */
374 if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
375 ext_active_speed = out_mad->data[62] >> 4;
376
377 switch (ext_active_speed) {
378 case 1:
379 props->active_speed = 16; /* FDR */
380 break;
381 case 2:
382 props->active_speed = 32; /* EDR */
383 break;
384 }
385 }
386
387 /* If reported active speed is QDR, check if is FDR-10 */
388 if (props->active_speed == 4) {
389 if (dev->mdev.caps.ext_port_cap[port - 1] &
390 MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
391 init_query_mad(in_mad);
392 in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
393 in_mad->attr_mod = cpu_to_be32(port);
394
395 err = mlx5_MAD_IFC(dev, 1, 1, port,
396 NULL, NULL, in_mad, out_mad);
397 if (err)
398 goto out;
399
400 /* Checking LinkSpeedActive for FDR-10 */
401 if (out_mad->data[15] & 0x1)
402 props->active_speed = 8;
403 }
404 }
405
406out:
407 kfree(in_mad);
408 kfree(out_mad);
409
410 return err;
411}
412
413static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
414 union ib_gid *gid)
415{
416 struct ib_smp *in_mad = NULL;
417 struct ib_smp *out_mad = NULL;
418 int err = -ENOMEM;
419
420 in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
421 out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
422 if (!in_mad || !out_mad)
423 goto out;
424
425 init_query_mad(in_mad);
426 in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
427 in_mad->attr_mod = cpu_to_be32(port);
428
429 err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
430 if (err)
431 goto out;
432
433 memcpy(gid->raw, out_mad->data + 8, 8);
434
435 init_query_mad(in_mad);
436 in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
437 in_mad->attr_mod = cpu_to_be32(index / 8);
438
439 err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
440 if (err)
441 goto out;
442
443 memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
444
445out:
446 kfree(in_mad);
447 kfree(out_mad);
448 return err;
449}
450
451static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
452 u16 *pkey)
453{
454 struct ib_smp *in_mad = NULL;
455 struct ib_smp *out_mad = NULL;
456 int err = -ENOMEM;
457
458 in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
459 out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
460 if (!in_mad || !out_mad)
461 goto out;
462
463 init_query_mad(in_mad);
464 in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
465 in_mad->attr_mod = cpu_to_be32(index / 32);
466
467 err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
468 if (err)
469 goto out;
470
471 *pkey = be16_to_cpu(((__be16 *)out_mad->data)[index % 32]);
472
473out:
474 kfree(in_mad);
475 kfree(out_mad);
476 return err;
477}
478
479struct mlx5_reg_node_desc {
480 u8 desc[64];
481};
482
483static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
484 struct ib_device_modify *props)
485{
486 struct mlx5_ib_dev *dev = to_mdev(ibdev);
487 struct mlx5_reg_node_desc in;
488 struct mlx5_reg_node_desc out;
489 int err;
490
491 if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
492 return -EOPNOTSUPP;
493
494 if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
495 return 0;
496
497 /*
498 * If possible, pass node desc to FW, so it can generate
499 * a 144 trap. If cmd fails, just ignore.
500 */
501 memcpy(&in, props->node_desc, 64);
502 err = mlx5_core_access_reg(&dev->mdev, &in, sizeof(in), &out,
503 sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
504 if (err)
505 return err;
506
507 memcpy(ibdev->node_desc, props->node_desc, 64);
508
509 return err;
510}
511
512static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
513 struct ib_port_modify *props)
514{
515 struct mlx5_ib_dev *dev = to_mdev(ibdev);
516 struct ib_port_attr attr;
517 u32 tmp;
518 int err;
519
520 mutex_lock(&dev->cap_mask_mutex);
521
522 err = mlx5_ib_query_port(ibdev, port, &attr);
523 if (err)
524 goto out;
525
526 tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
527 ~props->clr_port_cap_mask;
528
529 err = mlx5_set_port_caps(&dev->mdev, port, tmp);
530
531out:
532 mutex_unlock(&dev->cap_mask_mutex);
533 return err;
534}
535
536static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
537 struct ib_udata *udata)
538{
539 struct mlx5_ib_dev *dev = to_mdev(ibdev);
540 struct mlx5_ib_alloc_ucontext_req req;
541 struct mlx5_ib_alloc_ucontext_resp resp;
542 struct mlx5_ib_ucontext *context;
543 struct mlx5_uuar_info *uuari;
544 struct mlx5_uar *uars;
545 int num_uars;
546 int uuarn;
547 int err;
548 int i;
549
550 if (!dev->ib_active)
551 return ERR_PTR(-EAGAIN);
552
553 err = ib_copy_from_udata(&req, udata, sizeof(req));
554 if (err)
555 return ERR_PTR(err);
556
557 if (req.total_num_uuars > MLX5_MAX_UUARS)
558 return ERR_PTR(-ENOMEM);
559
560 if (req.total_num_uuars == 0)
561 return ERR_PTR(-EINVAL);
562
563 req.total_num_uuars = ALIGN(req.total_num_uuars, MLX5_BF_REGS_PER_PAGE);
564 if (req.num_low_latency_uuars > req.total_num_uuars - 1)
565 return ERR_PTR(-EINVAL);
566
567 num_uars = req.total_num_uuars / MLX5_BF_REGS_PER_PAGE;
568 resp.qp_tab_size = 1 << dev->mdev.caps.log_max_qp;
569 resp.bf_reg_size = dev->mdev.caps.bf_reg_size;
570 resp.cache_line_size = L1_CACHE_BYTES;
571 resp.max_sq_desc_sz = dev->mdev.caps.max_sq_desc_sz;
572 resp.max_rq_desc_sz = dev->mdev.caps.max_rq_desc_sz;
573 resp.max_send_wqebb = dev->mdev.caps.max_wqes;
574 resp.max_recv_wr = dev->mdev.caps.max_wqes;
575 resp.max_srq_recv_wr = dev->mdev.caps.max_srq_wqes;
576
577 context = kzalloc(sizeof(*context), GFP_KERNEL);
578 if (!context)
579 return ERR_PTR(-ENOMEM);
580
581 uuari = &context->uuari;
582 mutex_init(&uuari->lock);
583 uars = kcalloc(num_uars, sizeof(*uars), GFP_KERNEL);
584 if (!uars) {
585 err = -ENOMEM;
586 goto out_ctx;
587 }
588
589 uuari->bitmap = kcalloc(BITS_TO_LONGS(req.total_num_uuars),
590 sizeof(*uuari->bitmap),
591 GFP_KERNEL);
592 if (!uuari->bitmap) {
593 err = -ENOMEM;
594 goto out_uar_ctx;
595 }
596 /*
597 * clear all fast path uuars
598 */
599 for (i = 0; i < req.total_num_uuars; i++) {
600 uuarn = i & 3;
601 if (uuarn == 2 || uuarn == 3)
602 set_bit(i, uuari->bitmap);
603 }
604
605 uuari->count = kcalloc(req.total_num_uuars, sizeof(*uuari->count), GFP_KERNEL);
606 if (!uuari->count) {
607 err = -ENOMEM;
608 goto out_bitmap;
609 }
610
611 for (i = 0; i < num_uars; i++) {
612 err = mlx5_cmd_alloc_uar(&dev->mdev, &uars[i].index);
613 if (err)
614 goto out_count;
615 }
616
617 INIT_LIST_HEAD(&context->db_page_list);
618 mutex_init(&context->db_page_mutex);
619
620 resp.tot_uuars = req.total_num_uuars;
621 resp.num_ports = dev->mdev.caps.num_ports;
622 err = ib_copy_to_udata(udata, &resp, sizeof(resp));
623 if (err)
624 goto out_uars;
625
626 uuari->num_low_latency_uuars = req.num_low_latency_uuars;
627 uuari->uars = uars;
628 uuari->num_uars = num_uars;
629 return &context->ibucontext;
630
631out_uars:
632 for (i--; i >= 0; i--)
633 mlx5_cmd_free_uar(&dev->mdev, uars[i].index);
634out_count:
635 kfree(uuari->count);
636
637out_bitmap:
638 kfree(uuari->bitmap);
639
640out_uar_ctx:
641 kfree(uars);
642
643out_ctx:
644 kfree(context);
645 return ERR_PTR(err);
646}
647
648static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
649{
650 struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
651 struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
652 struct mlx5_uuar_info *uuari = &context->uuari;
653 int i;
654
655 for (i = 0; i < uuari->num_uars; i++) {
656 if (mlx5_cmd_free_uar(&dev->mdev, uuari->uars[i].index))
657 mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index);
658 }
659
660 kfree(uuari->count);
661 kfree(uuari->bitmap);
662 kfree(uuari->uars);
663 kfree(context);
664
665 return 0;
666}
667
668static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, int index)
669{
670 return (pci_resource_start(dev->mdev.pdev, 0) >> PAGE_SHIFT) + index;
671}
672
673static int get_command(unsigned long offset)
674{
675 return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
676}
677
678static int get_arg(unsigned long offset)
679{
680 return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
681}
682
683static int get_index(unsigned long offset)
684{
685 return get_arg(offset);
686}
687
688static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
689{
690 struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
691 struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
692 struct mlx5_uuar_info *uuari = &context->uuari;
693 unsigned long command;
694 unsigned long idx;
695 phys_addr_t pfn;
696
697 command = get_command(vma->vm_pgoff);
698 switch (command) {
699 case MLX5_IB_MMAP_REGULAR_PAGE:
700 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
701 return -EINVAL;
702
703 idx = get_index(vma->vm_pgoff);
704 if (idx >= uuari->num_uars)
705 return -EINVAL;
706
707 pfn = uar_index2pfn(dev, uuari->uars[idx].index);
708 mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn 0x%llx\n", idx,
709 (unsigned long long)pfn);
710
711 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
712 if (io_remap_pfn_range(vma, vma->vm_start, pfn,
713 PAGE_SIZE, vma->vm_page_prot))
714 return -EAGAIN;
715
716 mlx5_ib_dbg(dev, "mapped WC at 0x%lx, PA 0x%llx\n",
717 vma->vm_start,
718 (unsigned long long)pfn << PAGE_SHIFT);
719 break;
720
721 case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
722 return -ENOSYS;
723
724 default:
725 return -EINVAL;
726 }
727
728 return 0;
729}
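The mmap path above decodes vma->vm_pgoff into a command (upper bits) and an argument (lower bits). As a hedged sketch of the other side of that contract, the snippet below shows how a userspace library could compose such an offset; the constant values mirror MLX5_IB_MMAP_CMD_SHIFT and MLX5_IB_MMAP_REGULAR_PAGE from mlx5_ib.h, while the ex_-prefixed names and the file descriptor handling are assumptions made only for this illustration.

#include <stddef.h>
#include <sys/types.h>
#include <sys/mman.h>

#define EX_MMAP_CMD_SHIFT    8	/* mirrors MLX5_IB_MMAP_CMD_SHIFT */
#define EX_MMAP_REGULAR_PAGE 0	/* mirrors MLX5_IB_MMAP_REGULAR_PAGE */

/* Example only, not part of the patch.  The kernel sees
 * vm_pgoff = offset / page_size, i.e. (cmd << 8) | index, which
 * get_command()/get_index() above then pull apart. */
static void *ex_map_uar(int fd, unsigned int uar_index, size_t page_size)
{
	off_t offset = (off_t)((EX_MMAP_REGULAR_PAGE << EX_MMAP_CMD_SHIFT) |
			       uar_index) * page_size;

	return mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd, offset);
}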
730
731static int alloc_pa_mkey(struct mlx5_ib_dev *dev, u32 *key, u32 pdn)
732{
733 struct mlx5_create_mkey_mbox_in *in;
734 struct mlx5_mkey_seg *seg;
735 struct mlx5_core_mr mr;
736 int err;
737
738 in = kzalloc(sizeof(*in), GFP_KERNEL);
739 if (!in)
740 return -ENOMEM;
741
742 seg = &in->seg;
743 seg->flags = MLX5_PERM_LOCAL_READ | MLX5_ACCESS_MODE_PA;
744 seg->flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
745 seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
746 seg->start_addr = 0;
747
748 err = mlx5_core_create_mkey(&dev->mdev, &mr, in, sizeof(*in));
749 if (err) {
750 mlx5_ib_warn(dev, "failed to create mkey, %d\n", err);
751 goto err_in;
752 }
753
754 kfree(in);
755 *key = mr.key;
756
757 return 0;
758
759err_in:
760 kfree(in);
761
762 return err;
763}
764
765static void free_pa_mkey(struct mlx5_ib_dev *dev, u32 key)
766{
767 struct mlx5_core_mr mr;
768 int err;
769
770 memset(&mr, 0, sizeof(mr));
771 mr.key = key;
772 err = mlx5_core_destroy_mkey(&dev->mdev, &mr);
773 if (err)
774 mlx5_ib_warn(dev, "failed to destroy mkey 0x%x\n", key);
775}
776
777static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
778 struct ib_ucontext *context,
779 struct ib_udata *udata)
780{
781 struct mlx5_ib_alloc_pd_resp resp;
782 struct mlx5_ib_pd *pd;
783 int err;
784
785 pd = kmalloc(sizeof(*pd), GFP_KERNEL);
786 if (!pd)
787 return ERR_PTR(-ENOMEM);
788
789 err = mlx5_core_alloc_pd(&to_mdev(ibdev)->mdev, &pd->pdn);
790 if (err) {
791 kfree(pd);
792 return ERR_PTR(err);
793 }
794
795 if (context) {
796 resp.pdn = pd->pdn;
797 if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
798 mlx5_core_dealloc_pd(&to_mdev(ibdev)->mdev, pd->pdn);
799 kfree(pd);
800 return ERR_PTR(-EFAULT);
801 }
802 } else {
803 err = alloc_pa_mkey(to_mdev(ibdev), &pd->pa_lkey, pd->pdn);
804 if (err) {
805 mlx5_core_dealloc_pd(&to_mdev(ibdev)->mdev, pd->pdn);
806 kfree(pd);
807 return ERR_PTR(err);
808 }
809 }
810
811 return &pd->ibpd;
812}
813
814static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
815{
816 struct mlx5_ib_dev *mdev = to_mdev(pd->device);
817 struct mlx5_ib_pd *mpd = to_mpd(pd);
818
819 if (!pd->uobject)
820 free_pa_mkey(mdev, mpd->pa_lkey);
821
822 mlx5_core_dealloc_pd(&mdev->mdev, mpd->pdn);
823 kfree(mpd);
824
825 return 0;
826}
827
828static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
829{
830 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
831 int err;
832
833 err = mlx5_core_attach_mcg(&dev->mdev, gid, ibqp->qp_num);
834 if (err)
835 mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
836 ibqp->qp_num, gid->raw);
837
838 return err;
839}
840
841static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
842{
843 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
844 int err;
845
846 err = mlx5_core_detach_mcg(&dev->mdev, gid, ibqp->qp_num);
847 if (err)
848 mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
849 ibqp->qp_num, gid->raw);
850
851 return err;
852}
853
854static int init_node_data(struct mlx5_ib_dev *dev)
855{
856 struct ib_smp *in_mad = NULL;
857 struct ib_smp *out_mad = NULL;
858 int err = -ENOMEM;
859
860 in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
861 out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
862 if (!in_mad || !out_mad)
863 goto out;
864
865 init_query_mad(in_mad);
866 in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
867
868 err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
869 if (err)
870 goto out;
871
872 memcpy(dev->ib_dev.node_desc, out_mad->data, 64);
873
874 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
875
876 err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
877 if (err)
878 goto out;
879
880 dev->mdev.rev_id = be32_to_cpup((__be32 *)(out_mad->data + 32));
881 memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
882
883out:
884 kfree(in_mad);
885 kfree(out_mad);
886 return err;
887}
888
889static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr,
890 char *buf)
891{
892 struct mlx5_ib_dev *dev =
893 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
894
895 return sprintf(buf, "%d\n", dev->mdev.priv.fw_pages);
896}
897
898static ssize_t show_reg_pages(struct device *device,
899 struct device_attribute *attr, char *buf)
900{
901 struct mlx5_ib_dev *dev =
902 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
903
904 return sprintf(buf, "%d\n", dev->mdev.priv.reg_pages);
905}
906
907static ssize_t show_hca(struct device *device, struct device_attribute *attr,
908 char *buf)
909{
910 struct mlx5_ib_dev *dev =
911 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
912 return sprintf(buf, "MT%d\n", dev->mdev.pdev->device);
913}
914
915static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
916 char *buf)
917{
918 struct mlx5_ib_dev *dev =
919 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
920 return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(&dev->mdev),
921 fw_rev_min(&dev->mdev), fw_rev_sub(&dev->mdev));
922}
923
924static ssize_t show_rev(struct device *device, struct device_attribute *attr,
925 char *buf)
926{
927 struct mlx5_ib_dev *dev =
928 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
929 return sprintf(buf, "%x\n", dev->mdev.rev_id);
930}
931
932static ssize_t show_board(struct device *device, struct device_attribute *attr,
933 char *buf)
934{
935 struct mlx5_ib_dev *dev =
936 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
937 return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
938 dev->mdev.board_id);
939}
940
941static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
942static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
943static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
944static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
945static DEVICE_ATTR(fw_pages, S_IRUGO, show_fw_pages, NULL);
946static DEVICE_ATTR(reg_pages, S_IRUGO, show_reg_pages, NULL);
947
948static struct device_attribute *mlx5_class_attributes[] = {
949 &dev_attr_hw_rev,
950 &dev_attr_fw_ver,
951 &dev_attr_hca_type,
952 &dev_attr_board_id,
953 &dev_attr_fw_pages,
954 &dev_attr_reg_pages,
955};
956
957static void mlx5_ib_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
958 void *data)
959{
960 struct mlx5_ib_dev *ibdev = container_of(dev, struct mlx5_ib_dev, mdev);
961 struct ib_event ibev;
962 u8 port = 0;
963
964 switch (event) {
965 case MLX5_DEV_EVENT_SYS_ERROR:
966 ibdev->ib_active = false;
967 ibev.event = IB_EVENT_DEVICE_FATAL;
968 break;
969
970 case MLX5_DEV_EVENT_PORT_UP:
971 ibev.event = IB_EVENT_PORT_ACTIVE;
972 port = *(u8 *)data;
973 break;
974
975 case MLX5_DEV_EVENT_PORT_DOWN:
976 ibev.event = IB_EVENT_PORT_ERR;
977 port = *(u8 *)data;
978 break;
979
980 case MLX5_DEV_EVENT_PORT_INITIALIZED:
981 /* not used by ULPs */
982 return;
983
984 case MLX5_DEV_EVENT_LID_CHANGE:
985 ibev.event = IB_EVENT_LID_CHANGE;
986 port = *(u8 *)data;
987 break;
988
989 case MLX5_DEV_EVENT_PKEY_CHANGE:
990 ibev.event = IB_EVENT_PKEY_CHANGE;
991 port = *(u8 *)data;
992 break;
993
994 case MLX5_DEV_EVENT_GUID_CHANGE:
995 ibev.event = IB_EVENT_GID_CHANGE;
996 port = *(u8 *)data;
997 break;
998
999 case MLX5_DEV_EVENT_CLIENT_REREG:
1000 ibev.event = IB_EVENT_CLIENT_REREGISTER;
1001 port = *(u8 *)data;
1002 break;
1003 }
1004
1005 ibev.device = &ibdev->ib_dev;
1006 ibev.element.port_num = port;
1007
1008 if (ibdev->ib_active)
1009 ib_dispatch_event(&ibev);
1010}
1011
1012static void get_ext_port_caps(struct mlx5_ib_dev *dev)
1013{
1014 int port;
1015
1016 for (port = 1; port <= dev->mdev.caps.num_ports; port++)
1017 mlx5_query_ext_port_caps(dev, port);
1018}
1019
1020static int get_port_caps(struct mlx5_ib_dev *dev)
1021{
1022 struct ib_device_attr *dprops = NULL;
1023 struct ib_port_attr *pprops = NULL;
1024 int err = -ENOMEM;
1025 int port;
1026
1027 pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
1028 if (!pprops)
1029 goto out;
1030
1031 dprops = kmalloc(sizeof(*dprops), GFP_KERNEL);
1032 if (!dprops)
1033 goto out;
1034
1035 err = mlx5_ib_query_device(&dev->ib_dev, dprops);
1036 if (err) {
1037 mlx5_ib_warn(dev, "query_device failed %d\n", err);
1038 goto out;
1039 }
1040
1041 for (port = 1; port <= dev->mdev.caps.num_ports; port++) {
1042 err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
1043 if (err) {
1044 mlx5_ib_warn(dev, "query_port %d failed %d\n", port, err);
1045 break;
1046 }
1047 dev->mdev.caps.port[port - 1].pkey_table_len = dprops->max_pkeys;
1048 dev->mdev.caps.port[port - 1].gid_table_len = pprops->gid_tbl_len;
1049 mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
1050 dprops->max_pkeys, pprops->gid_tbl_len);
1051 }
1052
1053out:
1054 kfree(pprops);
1055 kfree(dprops);
1056
1057 return err;
1058}
1059
1060static void destroy_umrc_res(struct mlx5_ib_dev *dev)
1061{
1062 int err;
1063
1064 err = mlx5_mr_cache_cleanup(dev);
1065 if (err)
1066 mlx5_ib_warn(dev, "mr cache cleanup failed\n");
1067
1068 mlx5_ib_destroy_qp(dev->umrc.qp);
1069 ib_destroy_cq(dev->umrc.cq);
1070 ib_dereg_mr(dev->umrc.mr);
1071 ib_dealloc_pd(dev->umrc.pd);
1072}
1073
1074enum {
1075 MAX_UMR_WR = 128,
1076};
1077
1078static int create_umr_res(struct mlx5_ib_dev *dev)
1079{
1080 struct ib_qp_init_attr *init_attr = NULL;
1081 struct ib_qp_attr *attr = NULL;
1082 struct ib_pd *pd;
1083 struct ib_cq *cq;
1084 struct ib_qp *qp;
1085 struct ib_mr *mr;
1086 int ret;
1087
1088 attr = kzalloc(sizeof(*attr), GFP_KERNEL);
1089 init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
1090 if (!attr || !init_attr) {
1091 ret = -ENOMEM;
1092 goto error_0;
1093 }
1094
1095 pd = ib_alloc_pd(&dev->ib_dev);
1096 if (IS_ERR(pd)) {
1097 mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
1098 ret = PTR_ERR(pd);
1099 goto error_0;
1100 }
1101
1102 mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
1103 if (IS_ERR(mr)) {
1104 mlx5_ib_dbg(dev, "Couldn't create DMA MR for sync UMR QP\n");
1105 ret = PTR_ERR(mr);
1106 goto error_1;
1107 }
1108
1109 cq = ib_create_cq(&dev->ib_dev, mlx5_umr_cq_handler, NULL, NULL, 128,
1110 0);
1111 if (IS_ERR(cq)) {
1112 mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
1113 ret = PTR_ERR(cq);
1114 goto error_2;
1115 }
1116 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1117
1118 init_attr->send_cq = cq;
1119 init_attr->recv_cq = cq;
1120 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
1121 init_attr->cap.max_send_wr = MAX_UMR_WR;
1122 init_attr->cap.max_send_sge = 1;
1123 init_attr->qp_type = MLX5_IB_QPT_REG_UMR;
1124 init_attr->port_num = 1;
1125 qp = mlx5_ib_create_qp(pd, init_attr, NULL);
1126 if (IS_ERR(qp)) {
1127 mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
1128 ret = PTR_ERR(qp);
1129 goto error_3;
1130 }
1131 qp->device = &dev->ib_dev;
1132 qp->real_qp = qp;
1133 qp->uobject = NULL;
1134 qp->qp_type = MLX5_IB_QPT_REG_UMR;
1135
1136 attr->qp_state = IB_QPS_INIT;
1137 attr->port_num = 1;
1138 ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
1139 IB_QP_PORT, NULL);
1140 if (ret) {
1141 mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
1142 goto error_4;
1143 }
1144
1145 memset(attr, 0, sizeof(*attr));
1146 attr->qp_state = IB_QPS_RTR;
1147 attr->path_mtu = IB_MTU_256;
1148
1149 ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
1150 if (ret) {
1151 mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
1152 goto error_4;
1153 }
1154
1155 memset(attr, 0, sizeof(*attr));
1156 attr->qp_state = IB_QPS_RTS;
1157 ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
1158 if (ret) {
1159 mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
1160 goto error_4;
1161 }
1162
1163 dev->umrc.qp = qp;
1164 dev->umrc.cq = cq;
1165 dev->umrc.mr = mr;
1166 dev->umrc.pd = pd;
1167
1168 sema_init(&dev->umrc.sem, MAX_UMR_WR);
1169 ret = mlx5_mr_cache_init(dev);
1170 if (ret) {
1171 mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
1172 goto error_4;
1173 }
1174
1175 kfree(attr);
1176 kfree(init_attr);
1177
1178 return 0;
1179
1180error_4:
1181 mlx5_ib_destroy_qp(qp);
1182
1183error_3:
1184 ib_destroy_cq(cq);
1185
1186error_2:
1187 ib_dereg_mr(mr);
1188
1189error_1:
1190 ib_dealloc_pd(pd);
1191
1192error_0:
1193 kfree(attr);
1194 kfree(init_attr);
1195 return ret;
1196}
1197
1198static int create_dev_resources(struct mlx5_ib_resources *devr)
1199{
1200 struct ib_srq_init_attr attr;
1201 struct mlx5_ib_dev *dev;
1202 int ret = 0;
1203
1204 dev = container_of(devr, struct mlx5_ib_dev, devr);
1205
1206 devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
1207 if (IS_ERR(devr->p0)) {
1208 ret = PTR_ERR(devr->p0);
1209 goto error0;
1210 }
1211 devr->p0->device = &dev->ib_dev;
1212 devr->p0->uobject = NULL;
1213 atomic_set(&devr->p0->usecnt, 0);
1214
1215 devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, 1, 0, NULL, NULL);
1216 if (IS_ERR(devr->c0)) {
1217 ret = PTR_ERR(devr->c0);
1218 goto error1;
1219 }
1220 devr->c0->device = &dev->ib_dev;
1221 devr->c0->uobject = NULL;
1222 devr->c0->comp_handler = NULL;
1223 devr->c0->event_handler = NULL;
1224 devr->c0->cq_context = NULL;
1225 atomic_set(&devr->c0->usecnt, 0);
1226
1227 devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
1228 if (IS_ERR(devr->x0)) {
1229 ret = PTR_ERR(devr->x0);
1230 goto error2;
1231 }
1232 devr->x0->device = &dev->ib_dev;
1233 devr->x0->inode = NULL;
1234 atomic_set(&devr->x0->usecnt, 0);
1235 mutex_init(&devr->x0->tgt_qp_mutex);
1236 INIT_LIST_HEAD(&devr->x0->tgt_qp_list);
1237
1238 devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
1239 if (IS_ERR(devr->x1)) {
1240 ret = PTR_ERR(devr->x1);
1241 goto error3;
1242 }
1243 devr->x1->device = &dev->ib_dev;
1244 devr->x1->inode = NULL;
1245 atomic_set(&devr->x1->usecnt, 0);
1246 mutex_init(&devr->x1->tgt_qp_mutex);
1247 INIT_LIST_HEAD(&devr->x1->tgt_qp_list);
1248
1249 memset(&attr, 0, sizeof(attr));
1250 attr.attr.max_sge = 1;
1251 attr.attr.max_wr = 1;
1252 attr.srq_type = IB_SRQT_XRC;
1253 attr.ext.xrc.cq = devr->c0;
1254 attr.ext.xrc.xrcd = devr->x0;
1255
1256 devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
1257 if (IS_ERR(devr->s0)) {
1258 ret = PTR_ERR(devr->s0);
1259 goto error4;
1260 }
1261 devr->s0->device = &dev->ib_dev;
1262 devr->s0->pd = devr->p0;
1263 devr->s0->uobject = NULL;
1264 devr->s0->event_handler = NULL;
1265 devr->s0->srq_context = NULL;
1266 devr->s0->srq_type = IB_SRQT_XRC;
1267 devr->s0->ext.xrc.xrcd = devr->x0;
1268 devr->s0->ext.xrc.cq = devr->c0;
1269 atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
1270 atomic_inc(&devr->s0->ext.xrc.cq->usecnt);
1271 atomic_inc(&devr->p0->usecnt);
1272 atomic_set(&devr->s0->usecnt, 0);
1273
1274 return 0;
1275
1276error4:
1277 mlx5_ib_dealloc_xrcd(devr->x1);
1278error3:
1279 mlx5_ib_dealloc_xrcd(devr->x0);
1280error2:
1281 mlx5_ib_destroy_cq(devr->c0);
1282error1:
1283 mlx5_ib_dealloc_pd(devr->p0);
1284error0:
1285 return ret;
1286}
1287
1288static void destroy_dev_resources(struct mlx5_ib_resources *devr)
1289{
1290 mlx5_ib_destroy_srq(devr->s0);
1291 mlx5_ib_dealloc_xrcd(devr->x0);
1292 mlx5_ib_dealloc_xrcd(devr->x1);
1293 mlx5_ib_destroy_cq(devr->c0);
1294 mlx5_ib_dealloc_pd(devr->p0);
1295}
1296
1297static int init_one(struct pci_dev *pdev,
1298 const struct pci_device_id *id)
1299{
1300 struct mlx5_core_dev *mdev;
1301 struct mlx5_ib_dev *dev;
1302 int err;
1303 int i;
1304
1305 printk_once(KERN_INFO "%s", mlx5_version);
1306
1307 dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
1308 if (!dev)
1309 return -ENOMEM;
1310
1311 mdev = &dev->mdev;
1312 mdev->event = mlx5_ib_event;
1313 if (prof_sel >= ARRAY_SIZE(profile)) {
1314 pr_warn("selected profile out of range, selecting default\n");
1315 prof_sel = 0;
1316 }
1317 mdev->profile = &profile[prof_sel];
1318 err = mlx5_dev_init(mdev, pdev);
1319 if (err)
1320 goto err_free;
1321
1322 err = get_port_caps(dev);
1323 if (err)
1324 goto err_cleanup;
1325
1326 get_ext_port_caps(dev);
1327
1328 err = alloc_comp_eqs(dev);
1329 if (err)
1330 goto err_cleanup;
1331
1332 MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);
1333
1334 strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
1335 dev->ib_dev.owner = THIS_MODULE;
1336 dev->ib_dev.node_type = RDMA_NODE_IB_CA;
1337 dev->ib_dev.local_dma_lkey = mdev->caps.reserved_lkey;
1338 dev->num_ports = mdev->caps.num_ports;
1339 dev->ib_dev.phys_port_cnt = dev->num_ports;
1340 dev->ib_dev.num_comp_vectors = dev->num_comp_vectors;
1341 dev->ib_dev.dma_device = &mdev->pdev->dev;
1342
1343 dev->ib_dev.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION;
1344 dev->ib_dev.uverbs_cmd_mask =
1345 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
1346 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
1347 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
1348 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
1349 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
1350 (1ull << IB_USER_VERBS_CMD_REG_MR) |
1351 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
1352 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
1353 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
1354 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
1355 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
1356 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
1357 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
1358 (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
1359 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
1360 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
1361 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
1362 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
1363 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
1364 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
1365 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
1366 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
1367 (1ull << IB_USER_VERBS_CMD_OPEN_QP);
1368
1369 dev->ib_dev.query_device = mlx5_ib_query_device;
1370 dev->ib_dev.query_port = mlx5_ib_query_port;
1371 dev->ib_dev.query_gid = mlx5_ib_query_gid;
1372 dev->ib_dev.query_pkey = mlx5_ib_query_pkey;
1373 dev->ib_dev.modify_device = mlx5_ib_modify_device;
1374 dev->ib_dev.modify_port = mlx5_ib_modify_port;
1375 dev->ib_dev.alloc_ucontext = mlx5_ib_alloc_ucontext;
1376 dev->ib_dev.dealloc_ucontext = mlx5_ib_dealloc_ucontext;
1377 dev->ib_dev.mmap = mlx5_ib_mmap;
1378 dev->ib_dev.alloc_pd = mlx5_ib_alloc_pd;
1379 dev->ib_dev.dealloc_pd = mlx5_ib_dealloc_pd;
1380 dev->ib_dev.create_ah = mlx5_ib_create_ah;
1381 dev->ib_dev.query_ah = mlx5_ib_query_ah;
1382 dev->ib_dev.destroy_ah = mlx5_ib_destroy_ah;
1383 dev->ib_dev.create_srq = mlx5_ib_create_srq;
1384 dev->ib_dev.modify_srq = mlx5_ib_modify_srq;
1385 dev->ib_dev.query_srq = mlx5_ib_query_srq;
1386 dev->ib_dev.destroy_srq = mlx5_ib_destroy_srq;
1387 dev->ib_dev.post_srq_recv = mlx5_ib_post_srq_recv;
1388 dev->ib_dev.create_qp = mlx5_ib_create_qp;
1389 dev->ib_dev.modify_qp = mlx5_ib_modify_qp;
1390 dev->ib_dev.query_qp = mlx5_ib_query_qp;
1391 dev->ib_dev.destroy_qp = mlx5_ib_destroy_qp;
1392 dev->ib_dev.post_send = mlx5_ib_post_send;
1393 dev->ib_dev.post_recv = mlx5_ib_post_recv;
1394 dev->ib_dev.create_cq = mlx5_ib_create_cq;
1395 dev->ib_dev.modify_cq = mlx5_ib_modify_cq;
1396 dev->ib_dev.resize_cq = mlx5_ib_resize_cq;
1397 dev->ib_dev.destroy_cq = mlx5_ib_destroy_cq;
1398 dev->ib_dev.poll_cq = mlx5_ib_poll_cq;
1399 dev->ib_dev.req_notify_cq = mlx5_ib_arm_cq;
1400 dev->ib_dev.get_dma_mr = mlx5_ib_get_dma_mr;
1401 dev->ib_dev.reg_user_mr = mlx5_ib_reg_user_mr;
1402 dev->ib_dev.dereg_mr = mlx5_ib_dereg_mr;
1403 dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach;
1404 dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach;
1405 dev->ib_dev.process_mad = mlx5_ib_process_mad;
1406 dev->ib_dev.alloc_fast_reg_mr = mlx5_ib_alloc_fast_reg_mr;
1407 dev->ib_dev.alloc_fast_reg_page_list = mlx5_ib_alloc_fast_reg_page_list;
1408 dev->ib_dev.free_fast_reg_page_list = mlx5_ib_free_fast_reg_page_list;
1409
1410 if (mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC) {
1411 dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
1412 dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
1413 dev->ib_dev.uverbs_cmd_mask |=
1414 (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
1415 (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
1416 }
1417
1418 err = init_node_data(dev);
1419 if (err)
1420 goto err_eqs;
1421
1422 mutex_init(&dev->cap_mask_mutex);
1423 spin_lock_init(&dev->mr_lock);
1424
1425 err = create_dev_resources(&dev->devr);
1426 if (err)
1427 goto err_eqs;
1428
1429 err = ib_register_device(&dev->ib_dev, NULL);
1430 if (err)
1431 goto err_rsrc;
1432 err = create_umr_res(dev);
1433 if (err)
1434 goto err_dev;
1435
1436 for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
1437 err = device_create_file(&dev->ib_dev.dev, mlx5_class_attributes[i]);
1438 if (err)
1439 goto err_umrc;
1440 }
1441
1442 dev->ib_active = true;
1443
1444 return 0;
1445
1446err_umrc:
1447 destroy_umrc_res(dev);
1448
1449err_dev:
1450 ib_unregister_device(&dev->ib_dev);
1451
1452err_rsrc:
1453 destroy_dev_resources(&dev->devr);
1454
1455err_eqs:
1456 free_comp_eqs(dev);
1457
1458err_cleanup:
1459 mlx5_dev_cleanup(mdev);
1460
1461err_free:
1462 ib_dealloc_device((struct ib_device *)dev);
1463
1464 return err;
1465}
1466
1467static void remove_one(struct pci_dev *pdev)
1468{
1469 struct mlx5_ib_dev *dev = mlx5_pci2ibdev(pdev);
1470
1471 destroy_umrc_res(dev);
1472 ib_unregister_device(&dev->ib_dev);
1473 destroy_dev_resources(&dev->devr);
1474 free_comp_eqs(dev);
1475 mlx5_dev_cleanup(&dev->mdev);
1476 ib_dealloc_device(&dev->ib_dev);
1477}
1478
1479static DEFINE_PCI_DEVICE_TABLE(mlx5_ib_pci_table) = {
1480 { PCI_VDEVICE(MELLANOX, 4113) }, /* MT4113 Connect-IB */
1481 { 0, }
1482};
1483
1484MODULE_DEVICE_TABLE(pci, mlx5_ib_pci_table);
1485
1486static struct pci_driver mlx5_ib_driver = {
1487 .name = DRIVER_NAME,
1488 .id_table = mlx5_ib_pci_table,
1489 .probe = init_one,
1490 .remove = remove_one
1491};
1492
1493static int __init mlx5_ib_init(void)
1494{
1495 return pci_register_driver(&mlx5_ib_driver);
1496}
1497
1498static void __exit mlx5_ib_cleanup(void)
1499{
1500 pci_unregister_driver(&mlx5_ib_driver);
1501}
1502
1503module_init(mlx5_ib_init);
1504module_exit(mlx5_ib_cleanup);
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
new file mode 100644
index 000000000000..3a5322870b96
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -0,0 +1,162 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/module.h>
34#include <rdma/ib_umem.h>
35#include "mlx5_ib.h"
36
37/* @umem: umem object to scan
38 * @addr: ib virtual address requested by the user
39 * @count: number of PAGE_SIZE pages covered by umem
40 * @shift: page shift for the compound pages found in the region
41 * @ncont: number of compound pages
42 * @order: log2 of the number of compound pages
43 */
44void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
45 int *ncont, int *order)
46{
47 struct ib_umem_chunk *chunk;
48 unsigned long tmp;
49 unsigned long m;
50 int i, j, k;
51 u64 base = 0;
52 int p = 0;
53 int skip;
54 int mask;
55 u64 len;
56 u64 pfn;
57
58 addr = addr >> PAGE_SHIFT;
59 tmp = (unsigned long)addr;
60 m = find_first_bit(&tmp, sizeof(tmp));
61 skip = 1 << m;
62 mask = skip - 1;
63 i = 0;
64 list_for_each_entry(chunk, &umem->chunk_list, list)
65 for (j = 0; j < chunk->nmap; j++) {
66 len = sg_dma_len(&chunk->page_list[j]) >> PAGE_SHIFT;
67 pfn = sg_dma_address(&chunk->page_list[j]) >> PAGE_SHIFT;
68 for (k = 0; k < len; k++) {
69 if (!(i & mask)) {
70 tmp = (unsigned long)pfn;
71 m = min(m, find_first_bit(&tmp, sizeof(tmp)));
72 skip = 1 << m;
73 mask = skip - 1;
74 base = pfn;
75 p = 0;
76 } else {
77 if (base + p != pfn) {
78 tmp = (unsigned long)p;
79 m = find_first_bit(&tmp, sizeof(tmp));
80 skip = 1 << m;
81 mask = skip - 1;
82 base = pfn;
83 p = 0;
84 }
85 }
86 p++;
87 i++;
88 }
89 }
90
91 if (i) {
92 m = min_t(unsigned long, ilog2(roundup_pow_of_two(i)), m);
93
94 if (order)
95 *order = ilog2(roundup_pow_of_two(i) >> m);
96
97 *ncont = DIV_ROUND_UP(i, (1 << m));
98 } else {
99 m = 0;
100
101 if (order)
102 *order = 0;
103
104 *ncont = 0;
105 }
106 *shift = PAGE_SHIFT + m;
107 *count = i;
108}
109
110void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
111 int page_shift, __be64 *pas, int umr)
112{
113 int shift = page_shift - PAGE_SHIFT;
114 int mask = (1 << shift) - 1;
115 struct ib_umem_chunk *chunk;
116 int i, j, k;
117 u64 cur = 0;
118 u64 base;
119 int len;
120
121 i = 0;
122 list_for_each_entry(chunk, &umem->chunk_list, list)
123 for (j = 0; j < chunk->nmap; j++) {
124 len = sg_dma_len(&chunk->page_list[j]) >> PAGE_SHIFT;
125 base = sg_dma_address(&chunk->page_list[j]);
126 for (k = 0; k < len; k++) {
127 if (!(i & mask)) {
128 cur = base + (k << PAGE_SHIFT);
129 if (umr)
130 cur |= 3;
131
132 pas[i >> shift] = cpu_to_be64(cur);
133 mlx5_ib_dbg(dev, "pas[%d] 0x%llx\n",
134 i >> shift, be64_to_cpu(pas[i >> shift]));
135 } else
136 mlx5_ib_dbg(dev, "=====> 0x%llx\n",
137 base + (k << PAGE_SHIFT));
138 i++;
139 }
140 }
141}
142
143int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset)
144{
145 u64 page_size;
146 u64 page_mask;
147 u64 off_size;
148 u64 off_mask;
149 u64 buf_off;
150
151 page_size = 1 << page_shift;
152 page_mask = page_size - 1;
153 buf_off = addr & page_mask;
154 off_size = page_size >> 6;
155 off_mask = off_size - 1;
156
157 if (buf_off & off_mask)
158 return -EINVAL;
159
160 *offset = buf_off >> ilog2(off_size);
161 return 0;
162}
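mlx5_ib_get_buf_offset() above accepts a buffer only if its offset within the hardware page is a multiple of page_size/64 and reports that offset in those units. A hedged, self-contained restatement of the arithmetic follows; the ex_-prefixed names are invented for the example (with page_shift 12 the granularity is 4096/64 = 64 bytes, so an address 128 bytes into the page yields offset 2).

#include <stdint.h>
#include <stdio.h>

/* Standalone re-statement of the offset rule (example only). */
static int ex_get_buf_offset(uint64_t addr, int page_shift, uint32_t *offset)
{
	uint64_t page_size = 1ULL << page_shift;
	uint64_t buf_off = addr & (page_size - 1);	/* offset inside the page */
	uint64_t off_size = page_size >> 6;		/* 64 bytes for 4K pages */

	if (buf_off & (off_size - 1))
		return -1;		/* not aligned to the offset granularity */
	*offset = buf_off / off_size;
	return 0;
}

int main(void)
{
	uint32_t off;

	/* 0x1080 & 0xfff = 0x80 = 128 bytes into the page -> 128/64 = 2 */
	if (!ex_get_buf_offset(0x1080, 12, &off))
		printf("offset %u\n", off);	/* prints "offset 2" */
	return 0;
}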
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
new file mode 100644
index 000000000000..836be9157242
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -0,0 +1,545 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef MLX5_IB_H
34#define MLX5_IB_H
35
36#include <linux/kernel.h>
37#include <linux/sched.h>
38#include <rdma/ib_verbs.h>
39#include <rdma/ib_smi.h>
40#include <linux/mlx5/driver.h>
41#include <linux/mlx5/cq.h>
42#include <linux/mlx5/qp.h>
43#include <linux/mlx5/srq.h>
44#include <linux/types.h>
45
46#define mlx5_ib_dbg(dev, format, arg...) \
47pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
48 __LINE__, current->pid, ##arg)
49
50#define mlx5_ib_err(dev, format, arg...) \
51pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
52 __LINE__, current->pid, ##arg)
53
54#define mlx5_ib_warn(dev, format, arg...) \
55pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
56 __LINE__, current->pid, ##arg)
57
58enum {
59 MLX5_IB_MMAP_CMD_SHIFT = 8,
60 MLX5_IB_MMAP_CMD_MASK = 0xff,
61};
62
63enum mlx5_ib_mmap_cmd {
64 MLX5_IB_MMAP_REGULAR_PAGE = 0,
65 MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES = 1, /* always last */
66};
67
68enum {
69 MLX5_RES_SCAT_DATA32_CQE = 0x1,
70 MLX5_RES_SCAT_DATA64_CQE = 0x2,
71 MLX5_REQ_SCAT_DATA32_CQE = 0x11,
72 MLX5_REQ_SCAT_DATA64_CQE = 0x22,
73};
74
75enum mlx5_ib_latency_class {
76 MLX5_IB_LATENCY_CLASS_LOW,
77 MLX5_IB_LATENCY_CLASS_MEDIUM,
78 MLX5_IB_LATENCY_CLASS_HIGH,
79 MLX5_IB_LATENCY_CLASS_FAST_PATH
80};
81
82enum mlx5_ib_mad_ifc_flags {
83 MLX5_MAD_IFC_IGNORE_MKEY = 1,
84 MLX5_MAD_IFC_IGNORE_BKEY = 2,
85 MLX5_MAD_IFC_NET_VIEW = 4,
86};
87
88struct mlx5_ib_ucontext {
89 struct ib_ucontext ibucontext;
90 struct list_head db_page_list;
91
92 /* protect doorbell record alloc/free
93 */
94 struct mutex db_page_mutex;
95 struct mlx5_uuar_info uuari;
96};
97
98static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
99{
100 return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
101}
102
103struct mlx5_ib_pd {
104 struct ib_pd ibpd;
105 u32 pdn;
106 u32 pa_lkey;
107};
108
109/* Use macros here so that we don't have to duplicate
110 * enum ib_send_flags and enum ib_qp_type for the low-level driver
111 */
112
113#define MLX5_IB_SEND_UMR_UNREG IB_SEND_RESERVED_START
114#define MLX5_IB_QPT_REG_UMR IB_QPT_RESERVED1
115#define MLX5_IB_WR_UMR IB_WR_RESERVED1
116
117struct wr_list {
118 u16 opcode;
119 u16 next;
120};
121
122struct mlx5_ib_wq {
123 u64 *wrid;
124 u32 *wr_data;
125 struct wr_list *w_list;
126 unsigned *wqe_head;
127 u16 unsig_count;
128
129 /* serialize post to the work queue
130 */
131 spinlock_t lock;
132 int wqe_cnt;
133 int max_post;
134 int max_gs;
135 int offset;
136 int wqe_shift;
137 unsigned head;
138 unsigned tail;
139 u16 cur_post;
140 u16 last_poll;
141 void *qend;
142};
143
144enum {
145 MLX5_QP_USER,
146 MLX5_QP_KERNEL,
147 MLX5_QP_EMPTY
148};
149
150struct mlx5_ib_qp {
151 struct ib_qp ibqp;
152 struct mlx5_core_qp mqp;
153 struct mlx5_buf buf;
154
155 struct mlx5_db db;
156 struct mlx5_ib_wq rq;
157
158 u32 doorbell_qpn;
159 u8 sq_signal_bits;
160 u8 fm_cache;
161 int sq_max_wqes_per_wr;
162 int sq_spare_wqes;
163 struct mlx5_ib_wq sq;
164
165 struct ib_umem *umem;
166 int buf_size;
167
168 /* serialize qp state modifications
169 */
170 struct mutex mutex;
171 u16 xrcdn;
172 u32 flags;
173 u8 port;
174 u8 alt_port;
175 u8 atomic_rd_en;
176 u8 resp_depth;
177 u8 state;
178 int mlx_type;
179 int wq_sig;
180 int scat_cqe;
181 int max_inline_data;
182 struct mlx5_bf *bf;
183 int has_rq;
184
185 /* only for user space QPs. For kernel
186 * we have it from the bf object
187 */
188 int uuarn;
189
190 int create_type;
191 u32 pa_lkey;
192};
193
194struct mlx5_ib_cq_buf {
195 struct mlx5_buf buf;
196 struct ib_umem *umem;
197 int cqe_size;
198};
199
200enum mlx5_ib_qp_flags {
201 MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK = 1 << 0,
202 MLX5_IB_QP_SIGNATURE_HANDLING = 1 << 1,
203};
204
205struct mlx5_shared_mr_info {
206 int mr_id;
207 struct ib_umem *umem;
208};
209
210struct mlx5_ib_cq {
211 struct ib_cq ibcq;
212 struct mlx5_core_cq mcq;
213 struct mlx5_ib_cq_buf buf;
214 struct mlx5_db db;
215
216 /* serialize access to the CQ
217 */
218 spinlock_t lock;
219
220 /* protect resize cq
221 */
222 struct mutex resize_mutex;
223 struct mlx5_ib_cq_resize *resize_buf;
224 struct ib_umem *resize_umem;
225 int cqe_size;
226};
227
228struct mlx5_ib_srq {
229 struct ib_srq ibsrq;
230 struct mlx5_core_srq msrq;
231 struct mlx5_buf buf;
232 struct mlx5_db db;
233 u64 *wrid;
234 /* protect SRQ handling
235 */
236 spinlock_t lock;
237 int head;
238 int tail;
239 u16 wqe_ctr;
240 struct ib_umem *umem;
241 /* serialize arming a SRQ
242 */
243 struct mutex mutex;
244 int wq_sig;
245};
246
247struct mlx5_ib_xrcd {
248 struct ib_xrcd ibxrcd;
249 u32 xrcdn;
250};
251
252struct mlx5_ib_mr {
253 struct ib_mr ibmr;
254 struct mlx5_core_mr mmr;
255 struct ib_umem *umem;
256 struct mlx5_shared_mr_info *smr_info;
257 struct list_head list;
258 int order;
259 int umred;
260 __be64 *pas;
261 dma_addr_t dma;
262 int npages;
263 struct completion done;
264 enum ib_wc_status status;
265};
266
267struct mlx5_ib_fast_reg_page_list {
268 struct ib_fast_reg_page_list ibfrpl;
269 __be64 *mapped_page_list;
270 dma_addr_t map;
271};
272
273struct umr_common {
274 struct ib_pd *pd;
275 struct ib_cq *cq;
276 struct ib_qp *qp;
277 struct ib_mr *mr;
278 /* control access to UMR QP
279 */
280 struct semaphore sem;
281};
282
283enum {
284 MLX5_FMR_INVALID,
285 MLX5_FMR_VALID,
286 MLX5_FMR_BUSY,
287};
288
289struct mlx5_ib_fmr {
290 struct ib_fmr ibfmr;
291 struct mlx5_core_mr mr;
292 int access_flags;
293 int state;
294 /* protect fmr state
295 */
296 spinlock_t lock;
297 u64 wrid;
298 struct ib_send_wr wr[2];
299 u8 page_shift;
300 struct ib_fast_reg_page_list page_list;
301};
302
303struct mlx5_cache_ent {
304 struct list_head head;
305 /* sync access to the cache entry
306 */
307 spinlock_t lock;
308
309
310 struct dentry *dir;
311 char name[4];
312 u32 order;
313 u32 size;
314 u32 cur;
315 u32 miss;
316 u32 limit;
317
318 struct dentry *fsize;
319 struct dentry *fcur;
320 struct dentry *fmiss;
321 struct dentry *flimit;
322
323 struct mlx5_ib_dev *dev;
324 struct work_struct work;
325 struct delayed_work dwork;
326};
327
328struct mlx5_mr_cache {
329 struct workqueue_struct *wq;
330 struct mlx5_cache_ent ent[MAX_MR_CACHE_ENTRIES];
331 int stopped;
332 struct dentry *root;
333 unsigned long last_add;
334};
335
336struct mlx5_ib_resources {
337 struct ib_cq *c0;
338 struct ib_xrcd *x0;
339 struct ib_xrcd *x1;
340 struct ib_pd *p0;
341 struct ib_srq *s0;
342};
343
344struct mlx5_ib_dev {
345 struct ib_device ib_dev;
346 struct mlx5_core_dev mdev;
347 MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
348 struct list_head eqs_list;
349 int num_ports;
350 int num_comp_vectors;
351 /* serialize update of capability mask
352 */
353 struct mutex cap_mask_mutex;
354 bool ib_active;
355 struct umr_common umrc;
356 /* sync used page count stats
357 */
358 spinlock_t mr_lock;
359 struct mlx5_ib_resources devr;
360 struct mlx5_mr_cache cache;
361};
362
363static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
364{
365 return container_of(mcq, struct mlx5_ib_cq, mcq);
366}
367
368static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
369{
370 return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
371}
372
373static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
374{
375 return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
376}
377
378static inline struct mlx5_ib_fmr *to_mfmr(struct ib_fmr *ibfmr)
379{
380 return container_of(ibfmr, struct mlx5_ib_fmr, ibfmr);
381}
382
383static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
384{
385 return container_of(ibcq, struct mlx5_ib_cq, ibcq);
386}
387
388static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
389{
390 return container_of(mqp, struct mlx5_ib_qp, mqp);
391}
392
393static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
394{
395 return container_of(ibpd, struct mlx5_ib_pd, ibpd);
396}
397
398static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
399{
400 return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
401}
402
403static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
404{
405 return container_of(ibqp, struct mlx5_ib_qp, ibqp);
406}
407
408static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
409{
410 return container_of(msrq, struct mlx5_ib_srq, msrq);
411}
412
413static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
414{
415 return container_of(ibmr, struct mlx5_ib_mr, ibmr);
416}
417
418static inline struct mlx5_ib_fast_reg_page_list *to_mfrpl(struct ib_fast_reg_page_list *ibfrpl)
419{
420 return container_of(ibfrpl, struct mlx5_ib_fast_reg_page_list, ibfrpl);
421}
422
423struct mlx5_ib_ah {
424 struct ib_ah ibah;
425 struct mlx5_av av;
426};
427
428static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
429{
430 return container_of(ibah, struct mlx5_ib_ah, ibah);
431}
432
433static inline struct mlx5_ib_dev *mlx5_core2ibdev(struct mlx5_core_dev *dev)
434{
435 return container_of(dev, struct mlx5_ib_dev, mdev);
436}
437
438static inline struct mlx5_ib_dev *mlx5_pci2ibdev(struct pci_dev *pdev)
439{
440 return mlx5_core2ibdev(pci2mlx5_core_dev(pdev));
441}
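All of the helpers above (to_mucontext(), to_mpd(), mlx5_core2ibdev(), and the rest) use the same pattern: the generic ib_* or mlx5_core_* object is embedded inside the driver-private structure, and container_of() recovers the outer structure from a pointer to the embedded member. A minimal userspace sketch of that pattern is shown below; every ex_-prefixed name is invented for the illustration.

#include <stddef.h>
#include <stdio.h>

/* Stand-ins for the embedded-struct pattern (example only). */
struct ex_ib_pd   { int handle; };
struct ex_mlx5_pd { struct ex_ib_pd ibpd; unsigned int pdn; };

#define ex_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct ex_mlx5_pd *ex_to_mpd(struct ex_ib_pd *ibpd)
{
	return ex_container_of(ibpd, struct ex_mlx5_pd, ibpd);
}

int main(void)
{
	struct ex_mlx5_pd pd = { .ibpd = { .handle = 7 }, .pdn = 42 };
	struct ex_ib_pd *generic = &pd.ibpd;	/* what the core layer would hold */

	printf("pdn %u\n", ex_to_mpd(generic)->pdn);	/* prints "pdn 42" */
	return 0;
}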
442
443int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
444 struct mlx5_db *db);
445void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
446void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
447void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
448void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
449int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
450 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
451 void *in_mad, void *response_mad);
452struct ib_ah *create_ib_ah(struct ib_ah_attr *ah_attr,
453 struct mlx5_ib_ah *ah);
454struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
455int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
456int mlx5_ib_destroy_ah(struct ib_ah *ah);
457struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
458 struct ib_srq_init_attr *init_attr,
459 struct ib_udata *udata);
460int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
461 enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
462int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
463int mlx5_ib_destroy_srq(struct ib_srq *srq);
464int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
465 struct ib_recv_wr **bad_wr);
466struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
467 struct ib_qp_init_attr *init_attr,
468 struct ib_udata *udata);
469int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
470 int attr_mask, struct ib_udata *udata);
471int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
472 struct ib_qp_init_attr *qp_init_attr);
473int mlx5_ib_destroy_qp(struct ib_qp *qp);
474int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
475 struct ib_send_wr **bad_wr);
476int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
477 struct ib_recv_wr **bad_wr);
478void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
479struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
480 int vector, struct ib_ucontext *context,
481 struct ib_udata *udata);
482int mlx5_ib_destroy_cq(struct ib_cq *cq);
483int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
484int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
485int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
486int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
487struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
488struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
489 u64 virt_addr, int access_flags,
490 struct ib_udata *udata);
491int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
492struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
493 int max_page_list_len);
494struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
495 int page_list_len);
496void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
497struct ib_fmr *mlx5_ib_fmr_alloc(struct ib_pd *pd, int acc,
498 struct ib_fmr_attr *fmr_attr);
499int mlx5_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
500 int npages, u64 iova);
501int mlx5_ib_unmap_fmr(struct list_head *fmr_list);
502int mlx5_ib_fmr_dealloc(struct ib_fmr *ibfmr);
503int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
504 struct ib_wc *in_wc, struct ib_grh *in_grh,
505 struct ib_mad *in_mad, struct ib_mad *out_mad);
506struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
507 struct ib_ucontext *context,
508 struct ib_udata *udata);
509int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
510int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn);
511int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
512int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
513int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
514 struct ib_port_attr *props);
515int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
516void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
517void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
518 int *ncont, int *order);
519void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
520 int page_shift, __be64 *pas, int umr);
521void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
522int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
523int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
524int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
525int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
526void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context);
527
528static inline void init_query_mad(struct ib_smp *mad)
529{
530 mad->base_version = 1;
531 mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
532 mad->class_version = 1;
533 mad->method = IB_MGMT_METHOD_GET;
534}
535
536static inline u8 convert_access(int acc)
537{
538 return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |
539 (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) |
540 (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) |
541 (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) |
542 MLX5_PERM_LOCAL_READ;
543}
544
545#endif /* MLX5_IB_H */
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
new file mode 100644
index 000000000000..e2daa8f02476
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -0,0 +1,1007 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33
34#include <linux/kref.h>
35#include <linux/random.h>
36#include <linux/debugfs.h>
37#include <linux/export.h>
38#include <rdma/ib_umem.h>
39#include "mlx5_ib.h"
40
41enum {
42 DEF_CACHE_SIZE = 10,
43};
44
45static __be64 *mr_align(__be64 *ptr, int align)
46{
47 unsigned long mask = align - 1;
48
49 return (__be64 *)(((unsigned long)ptr + mask) & ~mask);
50}
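mr_align() supports the over-allocate-and-round-up pattern used later in add_keys(): the page-address array is kmalloc'ed 0x3f bytes larger than needed, the DMA mapping uses the 64-byte-aligned view returned by mr_align(), and the original pointer is kept for kfree(). A hedged userspace sketch of the same idea follows; the ex_-prefixed names are invented for the example.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Round ptr up to the next 'align' boundary; align must be a power of two. */
static void *ex_align_up(void *ptr, size_t align)
{
	uintptr_t mask = align - 1;

	return (void *)(((uintptr_t)ptr + mask) & ~mask);
}

int main(void)
{
	size_t size = 512 * sizeof(uint64_t);
	void *raw = malloc(size + 0x3f);	/* over-allocate by align - 1 */
	uint64_t *pas;

	if (!raw)
		return 1;
	pas = ex_align_up(raw, 0x40);		/* 64-byte aligned view of the same buffer */
	memset(pas, 0, size);			/* the aligned view still holds 'size' bytes */
	free(raw);				/* free the original pointer, not the aligned one */
	return 0;
}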
51
52static int order2idx(struct mlx5_ib_dev *dev, int order)
53{
54 struct mlx5_mr_cache *cache = &dev->cache;
55
56 if (order < cache->ent[0].order)
57 return 0;
58 else
59 return order - cache->ent[0].order;
60}
61
62static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
63{
64 struct device *ddev = dev->ib_dev.dma_device;
65 struct mlx5_mr_cache *cache = &dev->cache;
66 struct mlx5_cache_ent *ent = &cache->ent[c];
67 struct mlx5_create_mkey_mbox_in *in;
68 struct mlx5_ib_mr *mr;
69 int npages = 1 << ent->order;
70 int size = sizeof(u64) * npages;
71 int err = 0;
72 int i;
73
74 in = kzalloc(sizeof(*in), GFP_KERNEL);
75 if (!in)
76 return -ENOMEM;
77
78 for (i = 0; i < num; i++) {
79 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
80 if (!mr) {
81 err = -ENOMEM;
82 goto out;
83 }
84 mr->order = ent->order;
85 mr->umred = 1;
86 mr->pas = kmalloc(size + 0x3f, GFP_KERNEL);
87 if (!mr->pas) {
88 kfree(mr);
89 err = -ENOMEM;
90 goto out;
91 }
92 mr->dma = dma_map_single(ddev, mr_align(mr->pas, 0x40), size,
93 DMA_TO_DEVICE);
94 if (dma_mapping_error(ddev, mr->dma)) {
95 kfree(mr->pas);
96 kfree(mr);
97 err = -ENOMEM;
98 goto out;
99 }
100
101 in->seg.status = 1 << 6;
102 in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
103 in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
104 in->seg.flags = MLX5_ACCESS_MODE_MTT | MLX5_PERM_UMR_EN;
105 in->seg.log2_page_size = 12;
106
107 err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in,
108 sizeof(*in));
109 if (err) {
110 mlx5_ib_warn(dev, "create mkey failed %d\n", err);
111 dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
112 kfree(mr->pas);
113 kfree(mr);
114 goto out;
115 }
116 cache->last_add = jiffies;
117
118 spin_lock(&ent->lock);
119 list_add_tail(&mr->list, &ent->head);
120 ent->cur++;
121 ent->size++;
122 spin_unlock(&ent->lock);
123 }
124
125out:
126 kfree(in);
127 return err;
128}
129
130static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
131{
132 struct device *ddev = dev->ib_dev.dma_device;
133 struct mlx5_mr_cache *cache = &dev->cache;
134 struct mlx5_cache_ent *ent = &cache->ent[c];
135 struct mlx5_ib_mr *mr;
136 int size;
137 int err;
138 int i;
139
140 for (i = 0; i < num; i++) {
141 spin_lock(&ent->lock);
142 if (list_empty(&ent->head)) {
143 spin_unlock(&ent->lock);
144 return;
145 }
146 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
147 list_del(&mr->list);
148 ent->cur--;
149 ent->size--;
150 spin_unlock(&ent->lock);
151 err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
152 if (err) {
153 mlx5_ib_warn(dev, "failed destroy mkey\n");
154 } else {
155 size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40);
156 dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
157 kfree(mr->pas);
158 kfree(mr);
159 }
160 }
161}
162
163static ssize_t size_write(struct file *filp, const char __user *buf,
164 size_t count, loff_t *pos)
165{
166 struct mlx5_cache_ent *ent = filp->private_data;
167 struct mlx5_ib_dev *dev = ent->dev;
168 char lbuf[20];
169 u32 var;
170 int err;
171 int c;
172
173 if (copy_from_user(lbuf, buf, sizeof(lbuf)))
174 return -EPERM;
175
176 c = order2idx(dev, ent->order);
177 lbuf[sizeof(lbuf) - 1] = 0;
178
179 if (sscanf(lbuf, "%u", &var) != 1)
180 return -EINVAL;
181
182 if (var < ent->limit)
183 return -EINVAL;
184
185 if (var > ent->size) {
186 err = add_keys(dev, c, var - ent->size);
187 if (err)
188 return err;
189 } else if (var < ent->size) {
190 remove_keys(dev, c, ent->size - var);
191 }
192
193 return count;
194}
195
196static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
197 loff_t *pos)
198{
199 struct mlx5_cache_ent *ent = filp->private_data;
200 char lbuf[20];
201 int err;
202
203 if (*pos)
204 return 0;
205
206 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
207 if (err < 0)
208 return err;
209
210 if (copy_to_user(buf, lbuf, err))
211 return -EPERM;
212
213 *pos += err;
214
215 return err;
216}
217
218static const struct file_operations size_fops = {
219 .owner = THIS_MODULE,
220 .open = simple_open,
221 .write = size_write,
222 .read = size_read,
223};
224
225static ssize_t limit_write(struct file *filp, const char __user *buf,
226 size_t count, loff_t *pos)
227{
228 struct mlx5_cache_ent *ent = filp->private_data;
229 struct mlx5_ib_dev *dev = ent->dev;
230 char lbuf[20];
231 u32 var;
232 int err;
233 int c;
234
235 if (copy_from_user(lbuf, buf, sizeof(lbuf)))
236 return -EPERM;
237
238 c = order2idx(dev, ent->order);
239 lbuf[sizeof(lbuf) - 1] = 0;
240
241 if (sscanf(lbuf, "%u", &var) != 1)
242 return -EINVAL;
243
244 if (var > ent->size)
245 return -EINVAL;
246
247 ent->limit = var;
248
249 if (ent->cur < ent->limit) {
250 err = add_keys(dev, c, 2 * ent->limit - ent->cur);
251 if (err)
252 return err;
253 }
254
255 return count;
256}
257
258static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
259 loff_t *pos)
260{
261 struct mlx5_cache_ent *ent = filp->private_data;
262 char lbuf[20];
263 int err;
264
265 if (*pos)
266 return 0;
267
268 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
269 if (err < 0)
270 return err;
271
272 if (copy_to_user(buf, lbuf, err))
273 return -EPERM;
274
275 *pos += err;
276
277 return err;
278}
279
280static const struct file_operations limit_fops = {
281 .owner = THIS_MODULE,
282 .open = simple_open,
283 .write = limit_write,
284 .read = limit_read,
285};
286
287static int someone_adding(struct mlx5_mr_cache *cache)
288{
289 int i;
290
291 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
292 if (cache->ent[i].cur < cache->ent[i].limit)
293 return 1;
294 }
295
296 return 0;
297}
298
299static void __cache_work_func(struct mlx5_cache_ent *ent)
300{
301 struct mlx5_ib_dev *dev = ent->dev;
302 struct mlx5_mr_cache *cache = &dev->cache;
303 int i = order2idx(dev, ent->order);
304
305 if (cache->stopped)
306 return;
307
308 ent = &dev->cache.ent[i];
309 if (ent->cur < 2 * ent->limit) {
310 add_keys(dev, i, 1);
311 if (ent->cur < 2 * ent->limit)
312 queue_work(cache->wq, &ent->work);
313 } else if (ent->cur > 2 * ent->limit) {
314 if (!someone_adding(cache) &&
315 time_after(jiffies, cache->last_add + 60 * HZ)) {
316 remove_keys(dev, i, 1);
317 if (ent->cur > ent->limit)
318 queue_work(cache->wq, &ent->work);
319 } else {
320 queue_delayed_work(cache->wq, &ent->dwork, 60 * HZ);
321 }
322 }
323}
324
325static void delayed_cache_work_func(struct work_struct *work)
326{
327 struct mlx5_cache_ent *ent;
328
329 ent = container_of(work, struct mlx5_cache_ent, dwork.work);
330 __cache_work_func(ent);
331}
332
333static void cache_work_func(struct work_struct *work)
334{
335 struct mlx5_cache_ent *ent;
336
337 ent = container_of(work, struct mlx5_cache_ent, work);
338 __cache_work_func(ent);
339}
340
341static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
342{
343 struct mlx5_mr_cache *cache = &dev->cache;
344 struct mlx5_ib_mr *mr = NULL;
345 struct mlx5_cache_ent *ent;
346 int c;
347 int i;
348
349 c = order2idx(dev, order);
350 if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
351 mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
352 return NULL;
353 }
354
355 for (i = c; i < MAX_MR_CACHE_ENTRIES; i++) {
356 ent = &cache->ent[i];
357
358 mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);
359
360 spin_lock(&ent->lock);
361 if (!list_empty(&ent->head)) {
362 mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
363 list);
364 list_del(&mr->list);
365 ent->cur--;
366 spin_unlock(&ent->lock);
367 if (ent->cur < ent->limit)
368 queue_work(cache->wq, &ent->work);
369 break;
370 }
371 spin_unlock(&ent->lock);
372
373 queue_work(cache->wq, &ent->work);
374
375 if (mr)
376 break;
377 }
378
379 if (!mr)
380 cache->ent[c].miss++;
381
382 return mr;
383}
384
385static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
386{
387 struct mlx5_mr_cache *cache = &dev->cache;
388 struct mlx5_cache_ent *ent;
389 int shrink = 0;
390 int c;
391
392 c = order2idx(dev, mr->order);
393 if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
394 mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
395 return;
396 }
397 ent = &cache->ent[c];
398 spin_lock(&ent->lock);
399 list_add_tail(&mr->list, &ent->head);
400 ent->cur++;
401 if (ent->cur > 2 * ent->limit)
402 shrink = 1;
403 spin_unlock(&ent->lock);
404
405 if (shrink)
406 queue_work(cache->wq, &ent->work);
407}
408
409static void clean_keys(struct mlx5_ib_dev *dev, int c)
410{
411 struct device *ddev = dev->ib_dev.dma_device;
412 struct mlx5_mr_cache *cache = &dev->cache;
413 struct mlx5_cache_ent *ent = &cache->ent[c];
414 struct mlx5_ib_mr *mr;
415 int size;
416 int err;
417
418 while (1) {
419 spin_lock(&ent->lock);
420 if (list_empty(&ent->head)) {
421 spin_unlock(&ent->lock);
422 return;
423 }
424 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
425 list_del(&mr->list);
426 ent->cur--;
427 ent->size--;
428 spin_unlock(&ent->lock);
429 err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
430 if (err) {
431 mlx5_ib_warn(dev, "failed destroy mkey\n");
432 } else {
433 size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40);
434 dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
435 kfree(mr->pas);
436 kfree(mr);
437 }
438 }
439}
440
441static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
442{
443 struct mlx5_mr_cache *cache = &dev->cache;
444 struct mlx5_cache_ent *ent;
445 int i;
446
447 if (!mlx5_debugfs_root)
448 return 0;
449
450 cache->root = debugfs_create_dir("mr_cache", dev->mdev.priv.dbg_root);
451 if (!cache->root)
452 return -ENOMEM;
453
454 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
455 ent = &cache->ent[i];
456 sprintf(ent->name, "%d", ent->order);
457 ent->dir = debugfs_create_dir(ent->name, cache->root);
458 if (!ent->dir)
459 return -ENOMEM;
460
461 ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
462 &size_fops);
463 if (!ent->fsize)
464 return -ENOMEM;
465
466 ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
467 &limit_fops);
468 if (!ent->flimit)
469 return -ENOMEM;
470
471 ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
472 &ent->cur);
473 if (!ent->fcur)
474 return -ENOMEM;
475
476 ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
477 &ent->miss);
478 if (!ent->fmiss)
479 return -ENOMEM;
480 }
481
482 return 0;
483}
484
485static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
486{
487 if (!mlx5_debugfs_root)
488 return;
489
490 debugfs_remove_recursive(dev->cache.root);
491}
492
493int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
494{
495 struct mlx5_mr_cache *cache = &dev->cache;
496 struct mlx5_cache_ent *ent;
497 int limit;
498 int size;
499 int err;
500 int i;
501
502 cache->wq = create_singlethread_workqueue("mkey_cache");
503 if (!cache->wq) {
504 mlx5_ib_warn(dev, "failed to create work queue\n");
505 return -ENOMEM;
506 }
507
508 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
509 INIT_LIST_HEAD(&cache->ent[i].head);
510 spin_lock_init(&cache->ent[i].lock);
511
512 ent = &cache->ent[i];
513 INIT_LIST_HEAD(&ent->head);
514 spin_lock_init(&ent->lock);
515 ent->order = i + 2;
516 ent->dev = dev;
517
518 if (dev->mdev.profile->mask & MLX5_PROF_MASK_MR_CACHE) {
519 size = dev->mdev.profile->mr_cache[i].size;
520 limit = dev->mdev.profile->mr_cache[i].limit;
521 } else {
522 size = DEF_CACHE_SIZE;
523 limit = 0;
524 }
525 INIT_WORK(&ent->work, cache_work_func);
526 INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
527 ent->limit = limit;
528 queue_work(cache->wq, &ent->work);
529 }
530
531 err = mlx5_mr_cache_debugfs_init(dev);
532 if (err)
533 mlx5_ib_warn(dev, "cache debugfs failure\n");
534
535 return 0;
536}
537
538int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
539{
540 int i;
541
542 dev->cache.stopped = 1;
543 destroy_workqueue(dev->cache.wq);
544
545 mlx5_mr_cache_debugfs_cleanup(dev);
546
547 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
548 clean_keys(dev, i);
549
550 return 0;
551}
552
553struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
554{
555 struct mlx5_ib_dev *dev = to_mdev(pd->device);
556 struct mlx5_core_dev *mdev = &dev->mdev;
557 struct mlx5_create_mkey_mbox_in *in;
558 struct mlx5_mkey_seg *seg;
559 struct mlx5_ib_mr *mr;
560 int err;
561
562 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
563 if (!mr)
564 return ERR_PTR(-ENOMEM);
565
566 in = kzalloc(sizeof(*in), GFP_KERNEL);
567 if (!in) {
568 err = -ENOMEM;
569 goto err_free;
570 }
571
572 seg = &in->seg;
573 seg->flags = convert_access(acc) | MLX5_ACCESS_MODE_PA;
574 seg->flags_pd = cpu_to_be32(to_mpd(pd)->pdn | MLX5_MKEY_LEN64);
575 seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
576 seg->start_addr = 0;
577
578 err = mlx5_core_create_mkey(mdev, &mr->mmr, in, sizeof(*in));
579 if (err)
580 goto err_in;
581
582 kfree(in);
583 mr->ibmr.lkey = mr->mmr.key;
584 mr->ibmr.rkey = mr->mmr.key;
585 mr->umem = NULL;
586
587 return &mr->ibmr;
588
589err_in:
590 kfree(in);
591
592err_free:
593 kfree(mr);
594
595 return ERR_PTR(err);
596}
597
598static int get_octo_len(u64 addr, u64 len, int page_size)
599{
600 u64 offset;
601 int npages;
602
603 offset = addr & (page_size - 1);
604 npages = ALIGN(len + offset, page_size) >> ilog2(page_size);
605 return (npages + 1) / 2;
606}
607
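/*
 * Only registrations whose page-list order is at most 17 go through the
 * UMR path (reg_umr() below) backed by the mkey cache; larger regions
 * fall back to a firmware CREATE_MKEY command in reg_create().
 */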
608static int use_umr(int order)
609{
610 return order <= 17;
611}
612
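/*
 * prep_umr_reg_wqe() describes the translation table with a single SGE
 * pointing at the DMA-mapped PAS array and reuses the fast_reg fields
 * of ib_send_wr to carry the UMR parameters; note that the PD pointer
 * is stashed in the otherwise unused page_list field for later use when
 * the UMR WQE is built.
 */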
613static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
614 struct ib_sge *sg, u64 dma, int n, u32 key,
615 int page_shift, u64 virt_addr, u64 len,
616 int access_flags)
617{
618 struct mlx5_ib_dev *dev = to_mdev(pd->device);
619 struct ib_mr *mr = dev->umrc.mr;
620
621 sg->addr = dma;
622 sg->length = ALIGN(sizeof(u64) * n, 64);
623 sg->lkey = mr->lkey;
624
625 wr->next = NULL;
626 wr->send_flags = 0;
627 wr->sg_list = sg;
628 if (n)
629 wr->num_sge = 1;
630 else
631 wr->num_sge = 0;
632
633 wr->opcode = MLX5_IB_WR_UMR;
634 wr->wr.fast_reg.page_list_len = n;
635 wr->wr.fast_reg.page_shift = page_shift;
636 wr->wr.fast_reg.rkey = key;
637 wr->wr.fast_reg.iova_start = virt_addr;
638 wr->wr.fast_reg.length = len;
639 wr->wr.fast_reg.access_flags = access_flags;
640 wr->wr.fast_reg.page_list = (struct ib_fast_reg_page_list *)pd;
641}
642
643static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
644 struct ib_send_wr *wr, u32 key)
645{
646 wr->send_flags = MLX5_IB_SEND_UMR_UNREG;
647 wr->opcode = MLX5_IB_WR_UMR;
648 wr->wr.fast_reg.rkey = key;
649}
650
651void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
652{
653 struct mlx5_ib_mr *mr;
654 struct ib_wc wc;
655 int err;
656
657 while (1) {
658 err = ib_poll_cq(cq, 1, &wc);
659 if (err < 0) {
660 pr_warn("poll cq error %d\n", err);
661 return;
662 }
663 if (err == 0)
664 break;
665
666 mr = (struct mlx5_ib_mr *)(unsigned long)wc.wr_id;
667 mr->status = wc.status;
668 complete(&mr->done);
669 }
670 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
671}
672
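/*
 * reg_umr() pulls a pre-created mkey from the cache (retrying a few
 * times while asking the cache to add keys), fills in the PAS array and
 * posts a single UMR work request on the driver's internal QP.  The
 * completion is reaped by mlx5_umr_cq_handler() above; umrc->sem
 * serializes callers so each one consumes only its own completion.
 */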
673static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
674 u64 virt_addr, u64 len, int npages,
675 int page_shift, int order, int access_flags)
676{
677 struct mlx5_ib_dev *dev = to_mdev(pd->device);
678 struct umr_common *umrc = &dev->umrc;
679 struct ib_send_wr wr, *bad;
680 struct mlx5_ib_mr *mr;
681 struct ib_sge sg;
682 int err;
683 int i;
684
685 for (i = 0; i < 10; i++) {
686 mr = alloc_cached_mr(dev, order);
687 if (mr)
688 break;
689
690 err = add_keys(dev, order2idx(dev, order), 1);
691 if (err) {
692 mlx5_ib_warn(dev, "add_keys failed\n");
693 break;
694 }
695 }
696
697 if (!mr)
698 return ERR_PTR(-EAGAIN);
699
700 mlx5_ib_populate_pas(dev, umem, page_shift, mr_align(mr->pas, 0x40), 1);
701
702 memset(&wr, 0, sizeof(wr));
703 wr.wr_id = (u64)(unsigned long)mr;
704 prep_umr_reg_wqe(pd, &wr, &sg, mr->dma, npages, mr->mmr.key, page_shift, virt_addr, len, access_flags);
705
706 /* We serialize polls so one process does not kidnap another's
707 * completion. This is not a problem since wr is completed in
708 * around 1 usec
709 */
710 down(&umrc->sem);
711 init_completion(&mr->done);
712 err = ib_post_send(umrc->qp, &wr, &bad);
713 if (err) {
714 mlx5_ib_warn(dev, "post send failed, err %d\n", err);
715 up(&umrc->sem);
716 goto error;
717 }
718 wait_for_completion(&mr->done);
719 up(&umrc->sem);
720
721 if (mr->status != IB_WC_SUCCESS) {
722 mlx5_ib_warn(dev, "reg umr failed\n");
723 err = -EFAULT;
724 goto error;
725 }
726
727 return mr;
728
729error:
730 free_cached_mr(dev, mr);
731 return ERR_PTR(err);
732}
733
734static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
735 u64 length, struct ib_umem *umem,
736 int npages, int page_shift,
737 int access_flags)
738{
739 struct mlx5_ib_dev *dev = to_mdev(pd->device);
740 struct mlx5_create_mkey_mbox_in *in;
741 struct mlx5_ib_mr *mr;
742 int inlen;
743 int err;
744
745 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
746 if (!mr)
747 return ERR_PTR(-ENOMEM);
748
749 inlen = sizeof(*in) + sizeof(*in->pas) * ((npages + 1) / 2) * 2;
750 in = mlx5_vzalloc(inlen);
751 if (!in) {
752 err = -ENOMEM;
753 goto err_1;
754 }
755 mlx5_ib_populate_pas(dev, umem, page_shift, in->pas, 0);
756
757 in->seg.flags = convert_access(access_flags) |
758 MLX5_ACCESS_MODE_MTT;
759 in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
760 in->seg.start_addr = cpu_to_be64(virt_addr);
761 in->seg.len = cpu_to_be64(length);
762 in->seg.bsfs_octo_size = 0;
763 in->seg.xlt_oct_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift));
764 in->seg.log2_page_size = page_shift;
765 in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
766 in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift));
767 err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, inlen);
768 if (err) {
769 mlx5_ib_warn(dev, "create mkey failed\n");
770 goto err_2;
771 }
772 mr->umem = umem;
773 mlx5_vfree(in);
774
775 mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmr.key);
776
777 return mr;
778
779err_2:
780 mlx5_vfree(in);
781
782err_1:
783 kfree(mr);
784
785 return ERR_PTR(err);
786}
787
788struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
789 u64 virt_addr, int access_flags,
790 struct ib_udata *udata)
791{
792 struct mlx5_ib_dev *dev = to_mdev(pd->device);
793 struct mlx5_ib_mr *mr = NULL;
794 struct ib_umem *umem;
795 int page_shift;
796 int npages;
797 int ncont;
798 int order;
799 int err;
800
801 mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx\n",
802 start, virt_addr, length);
803 umem = ib_umem_get(pd->uobject->context, start, length, access_flags,
804 0);
805 if (IS_ERR(umem)) {
806 mlx5_ib_dbg(dev, "umem get failed\n");
807 return (void *)umem;
808 }
809
810 mlx5_ib_cont_pages(umem, start, &npages, &page_shift, &ncont, &order);
811 if (!npages) {
812 mlx5_ib_warn(dev, "avoid zero region\n");
813 err = -EINVAL;
814 goto error;
815 }
816
817 mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
818 npages, ncont, order, page_shift);
819
820 if (use_umr(order)) {
821 mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
822 order, access_flags);
823 if (PTR_ERR(mr) == -EAGAIN) {
824			mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
825 mr = NULL;
826 }
827 }
828
829 if (!mr)
830 mr = reg_create(pd, virt_addr, length, umem, ncont, page_shift,
831 access_flags);
832
833 if (IS_ERR(mr)) {
834 err = PTR_ERR(mr);
835 goto error;
836 }
837
838 mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmr.key);
839
840 mr->umem = umem;
841 mr->npages = npages;
842 spin_lock(&dev->mr_lock);
843 dev->mdev.priv.reg_pages += npages;
844 spin_unlock(&dev->mr_lock);
845 mr->ibmr.lkey = mr->mmr.key;
846 mr->ibmr.rkey = mr->mmr.key;
847
848 return &mr->ibmr;
849
850error:
851 ib_umem_release(umem);
852 return ERR_PTR(err);
853}
854
855static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
856{
857 struct umr_common *umrc = &dev->umrc;
858 struct ib_send_wr wr, *bad;
859 int err;
860
861 memset(&wr, 0, sizeof(wr));
862 wr.wr_id = (u64)(unsigned long)mr;
863 prep_umr_unreg_wqe(dev, &wr, mr->mmr.key);
864
865 down(&umrc->sem);
866 init_completion(&mr->done);
867 err = ib_post_send(umrc->qp, &wr, &bad);
868 if (err) {
869 up(&umrc->sem);
870 mlx5_ib_dbg(dev, "err %d\n", err);
871 goto error;
872 }
873 wait_for_completion(&mr->done);
874 up(&umrc->sem);
875 if (mr->status != IB_WC_SUCCESS) {
876 mlx5_ib_warn(dev, "unreg umr failed\n");
877 err = -EFAULT;
878 goto error;
879 }
880 return 0;
881
882error:
883 return err;
884}
885
886int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
887{
888 struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
889 struct mlx5_ib_mr *mr = to_mmr(ibmr);
890 struct ib_umem *umem = mr->umem;
891 int npages = mr->npages;
892 int umred = mr->umred;
893 int err;
894
895 if (!umred) {
896 err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
897 if (err) {
898 mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
899 mr->mmr.key, err);
900 return err;
901 }
902 } else {
903 err = unreg_umr(dev, mr);
904 if (err) {
905 mlx5_ib_warn(dev, "failed unregister\n");
906 return err;
907 }
908 free_cached_mr(dev, mr);
909 }
910
911 if (umem) {
912 ib_umem_release(umem);
913 spin_lock(&dev->mr_lock);
914 dev->mdev.priv.reg_pages -= npages;
915 spin_unlock(&dev->mr_lock);
916 }
917
918 if (!umred)
919 kfree(mr);
920
921 return 0;
922}
923
924struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
925 int max_page_list_len)
926{
927 struct mlx5_ib_dev *dev = to_mdev(pd->device);
928 struct mlx5_create_mkey_mbox_in *in;
929 struct mlx5_ib_mr *mr;
930 int err;
931
932 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
933 if (!mr)
934 return ERR_PTR(-ENOMEM);
935
936 in = kzalloc(sizeof(*in), GFP_KERNEL);
937 if (!in) {
938 err = -ENOMEM;
939 goto err_free;
940 }
941
942 in->seg.status = 1 << 6; /* free */
943 in->seg.xlt_oct_size = cpu_to_be32((max_page_list_len + 1) / 2);
944 in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
945 in->seg.flags = MLX5_PERM_UMR_EN | MLX5_ACCESS_MODE_MTT;
946 in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
947 /*
948 * TBD not needed - issue 197292 */
949 in->seg.log2_page_size = PAGE_SHIFT;
950
951 err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in));
952 kfree(in);
953 if (err)
954 goto err_free;
955
956 mr->ibmr.lkey = mr->mmr.key;
957 mr->ibmr.rkey = mr->mmr.key;
958 mr->umem = NULL;
959
960 return &mr->ibmr;
961
962err_free:
963 kfree(mr);
964 return ERR_PTR(err);
965}
966
967struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
968 int page_list_len)
969{
970 struct mlx5_ib_fast_reg_page_list *mfrpl;
971 int size = page_list_len * sizeof(u64);
972
973 mfrpl = kmalloc(sizeof(*mfrpl), GFP_KERNEL);
974 if (!mfrpl)
975 return ERR_PTR(-ENOMEM);
976
977 mfrpl->ibfrpl.page_list = kmalloc(size, GFP_KERNEL);
978 if (!mfrpl->ibfrpl.page_list)
979 goto err_free;
980
981 mfrpl->mapped_page_list = dma_alloc_coherent(ibdev->dma_device,
982 size, &mfrpl->map,
983 GFP_KERNEL);
984 if (!mfrpl->mapped_page_list)
985 goto err_free;
986
987 WARN_ON(mfrpl->map & 0x3f);
988
989 return &mfrpl->ibfrpl;
990
991err_free:
992 kfree(mfrpl->ibfrpl.page_list);
993 kfree(mfrpl);
994 return ERR_PTR(-ENOMEM);
995}
996
997void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
998{
999 struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
1000 struct mlx5_ib_dev *dev = to_mdev(page_list->device);
1001 int size = page_list->max_page_list_len * sizeof(u64);
1002
1003 dma_free_coherent(&dev->mdev.pdev->dev, size, mfrpl->mapped_page_list,
1004 mfrpl->map);
1005 kfree(mfrpl->ibfrpl.page_list);
1006 kfree(mfrpl);
1007}
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
new file mode 100644
index 000000000000..16ac54c9819f
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -0,0 +1,2524 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/module.h>
34#include <rdma/ib_umem.h>
35#include "mlx5_ib.h"
36#include "user.h"
37
38/* not supported currently */
39static int wq_signature;
40
41enum {
42 MLX5_IB_ACK_REQ_FREQ = 8,
43};
44
45enum {
46 MLX5_IB_DEFAULT_SCHED_QUEUE = 0x83,
47 MLX5_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f,
48 MLX5_IB_LINK_TYPE_IB = 0,
49 MLX5_IB_LINK_TYPE_ETH = 1
50};
51
52enum {
53 MLX5_IB_SQ_STRIDE = 6,
54 MLX5_IB_CACHE_LINE_SIZE = 64,
55};
56
57static const u32 mlx5_ib_opcode[] = {
58 [IB_WR_SEND] = MLX5_OPCODE_SEND,
59 [IB_WR_SEND_WITH_IMM] = MLX5_OPCODE_SEND_IMM,
60 [IB_WR_RDMA_WRITE] = MLX5_OPCODE_RDMA_WRITE,
61 [IB_WR_RDMA_WRITE_WITH_IMM] = MLX5_OPCODE_RDMA_WRITE_IMM,
62 [IB_WR_RDMA_READ] = MLX5_OPCODE_RDMA_READ,
63 [IB_WR_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_CS,
64 [IB_WR_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_FA,
65 [IB_WR_SEND_WITH_INV] = MLX5_OPCODE_SEND_INVAL,
66 [IB_WR_LOCAL_INV] = MLX5_OPCODE_UMR,
67 [IB_WR_FAST_REG_MR] = MLX5_OPCODE_UMR,
68 [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_MASKED_CS,
69 [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_MASKED_FA,
70 [MLX5_IB_WR_UMR] = MLX5_OPCODE_UMR,
71};
72
73struct umr_wr {
74 u64 virt_addr;
75 struct ib_pd *pd;
76 unsigned int page_shift;
77 unsigned int npages;
78 u32 length;
79 int access_flags;
80 u32 mkey;
81};
82
83static int is_qp0(enum ib_qp_type qp_type)
84{
85 return qp_type == IB_QPT_SMI;
86}
87
88static int is_qp1(enum ib_qp_type qp_type)
89{
90 return qp_type == IB_QPT_GSI;
91}
92
93static int is_sqp(enum ib_qp_type qp_type)
94{
95 return is_qp0(qp_type) || is_qp1(qp_type);
96}
97
98static void *get_wqe(struct mlx5_ib_qp *qp, int offset)
99{
100 return mlx5_buf_offset(&qp->buf, offset);
101}
102
103static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n)
104{
105 return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
106}
107
108void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
109{
110 return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
111}
112
113static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
114{
115 struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
116 struct ib_event event;
117
118 if (type == MLX5_EVENT_TYPE_PATH_MIG)
119 to_mibqp(qp)->port = to_mibqp(qp)->alt_port;
120
121 if (ibqp->event_handler) {
122 event.device = ibqp->device;
123 event.element.qp = ibqp;
124 switch (type) {
125 case MLX5_EVENT_TYPE_PATH_MIG:
126 event.event = IB_EVENT_PATH_MIG;
127 break;
128 case MLX5_EVENT_TYPE_COMM_EST:
129 event.event = IB_EVENT_COMM_EST;
130 break;
131 case MLX5_EVENT_TYPE_SQ_DRAINED:
132 event.event = IB_EVENT_SQ_DRAINED;
133 break;
134 case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
135 event.event = IB_EVENT_QP_LAST_WQE_REACHED;
136 break;
137 case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
138 event.event = IB_EVENT_QP_FATAL;
139 break;
140 case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
141 event.event = IB_EVENT_PATH_MIG_ERR;
142 break;
143 case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
144 event.event = IB_EVENT_QP_REQ_ERR;
145 break;
146 case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
147 event.event = IB_EVENT_QP_ACCESS_ERR;
148 break;
149 default:
150 pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
151 return;
152 }
153
154 ibqp->event_handler(&event, ibqp->qp_context);
155 }
156}
157
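/*
 * set_rq_size() sizes the receive queue.  For userspace QPs the WQE
 * count and stride are taken from the ucmd; for kernel QPs the WQE is
 * sized from max_recv_sge (plus an optional signature segment), rounded
 * up to a power of two and checked against max_rq_desc_sz.
 */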
158static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
159 int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
160{
161 int wqe_size;
162 int wq_size;
163
164 /* Sanity check RQ size before proceeding */
165 if (cap->max_recv_wr > dev->mdev.caps.max_wqes)
166 return -EINVAL;
167
168 if (!has_rq) {
169 qp->rq.max_gs = 0;
170 qp->rq.wqe_cnt = 0;
171 qp->rq.wqe_shift = 0;
172 } else {
173 if (ucmd) {
174 qp->rq.wqe_cnt = ucmd->rq_wqe_count;
175 qp->rq.wqe_shift = ucmd->rq_wqe_shift;
176 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
177 qp->rq.max_post = qp->rq.wqe_cnt;
178 } else {
179 wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
180 wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
181 wqe_size = roundup_pow_of_two(wqe_size);
182 wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
183 wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
184 qp->rq.wqe_cnt = wq_size / wqe_size;
185 if (wqe_size > dev->mdev.caps.max_rq_desc_sz) {
186 mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
187 wqe_size,
188 dev->mdev.caps.max_rq_desc_sz);
189 return -EINVAL;
190 }
191 qp->rq.wqe_shift = ilog2(wqe_size);
192 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
193 qp->rq.max_post = qp->rq.wqe_cnt;
194 }
195 }
196
197 return 0;
198}
199
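/*
 * sq_overhead() returns the fixed per-WQE overhead of a QP type: the
 * control segment plus the address/atomic/datagram/UMR segments that
 * transport may emit.  The fall-through from XRC_INI into RC is
 * intentional and adds the XRC segment on top of the RC overhead.
 */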
200static int sq_overhead(enum ib_qp_type qp_type)
201{
202	int size = 0;
203
204 switch (qp_type) {
205 case IB_QPT_XRC_INI:
206 size = sizeof(struct mlx5_wqe_xrc_seg);
207 /* fall through */
208 case IB_QPT_RC:
209 size += sizeof(struct mlx5_wqe_ctrl_seg) +
210 sizeof(struct mlx5_wqe_atomic_seg) +
211 sizeof(struct mlx5_wqe_raddr_seg);
212 break;
213
214 case IB_QPT_UC:
215 size = sizeof(struct mlx5_wqe_ctrl_seg) +
216 sizeof(struct mlx5_wqe_raddr_seg);
217 break;
218
219 case IB_QPT_UD:
220 case IB_QPT_SMI:
221 case IB_QPT_GSI:
222 size = sizeof(struct mlx5_wqe_ctrl_seg) +
223 sizeof(struct mlx5_wqe_datagram_seg);
224 break;
225
226 case MLX5_IB_QPT_REG_UMR:
227 size = sizeof(struct mlx5_wqe_ctrl_seg) +
228 sizeof(struct mlx5_wqe_umr_ctrl_seg) +
229 sizeof(struct mlx5_mkey_seg);
230 break;
231
232 default:
233 return -EINVAL;
234 }
235
236 return size;
237}
238
239static int calc_send_wqe(struct ib_qp_init_attr *attr)
240{
241 int inl_size = 0;
242 int size;
243
244 size = sq_overhead(attr->qp_type);
245 if (size < 0)
246 return size;
247
248 if (attr->cap.max_inline_data) {
249 inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
250 attr->cap.max_inline_data;
251 }
252
253 size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
254
255 return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
256}
257
258static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
259 struct mlx5_ib_qp *qp)
260{
261 int wqe_size;
262 int wq_size;
263
264 if (!attr->cap.max_send_wr)
265 return 0;
266
267 wqe_size = calc_send_wqe(attr);
268 mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);
269 if (wqe_size < 0)
270 return wqe_size;
271
272 if (wqe_size > dev->mdev.caps.max_sq_desc_sz) {
273		mlx5_ib_dbg(dev, "wqe_size %d > max_sq_desc_sz %d\n", wqe_size, dev->mdev.caps.max_sq_desc_sz);
274 return -EINVAL;
275 }
276
277 qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) -
278 sizeof(struct mlx5_wqe_inline_seg);
279 attr->cap.max_inline_data = qp->max_inline_data;
280
281 wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
282 qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
283 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
284 qp->sq.max_gs = attr->cap.max_send_sge;
285 qp->sq.max_post = 1 << ilog2(wq_size / wqe_size);
286
287 return wq_size;
288}
289
290static int set_user_buf_size(struct mlx5_ib_dev *dev,
291 struct mlx5_ib_qp *qp,
292 struct mlx5_ib_create_qp *ucmd)
293{
294 int desc_sz = 1 << qp->sq.wqe_shift;
295
296 if (desc_sz > dev->mdev.caps.max_sq_desc_sz) {
297 mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
298 desc_sz, dev->mdev.caps.max_sq_desc_sz);
299 return -EINVAL;
300 }
301
302 if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) {
303		mlx5_ib_warn(dev, "sq_wqe_count %d is not a power of two\n",
304			     ucmd->sq_wqe_count);
305 return -EINVAL;
306 }
307
308 qp->sq.wqe_cnt = ucmd->sq_wqe_count;
309
310 if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) {
311 mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
312 qp->sq.wqe_cnt, dev->mdev.caps.max_wqes);
313 return -EINVAL;
314 }
315
316 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
317 (qp->sq.wqe_cnt << 6);
318
319 return 0;
320}
321
322static int qp_has_rq(struct ib_qp_init_attr *attr)
323{
324 if (attr->qp_type == IB_QPT_XRC_INI ||
325 attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
326 attr->qp_type == MLX5_IB_QPT_REG_UMR ||
327 !attr->cap.max_recv_wr)
328 return 0;
329
330 return 1;
331}
332
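/*
 * UUAR (micro UAR) allocation: index 0 is a shared doorbell slot handed
 * out to the low latency class by reference count, the medium class
 * picks the least-used slot in the shared range while skipping
 * sub-slots 2 and 3 of every register page, and the high class claims
 * an exclusive slot from the dedicated low-latency range at the top of
 * the bitmap.
 */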
333static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
334{
335 int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
336 int start_uuar;
337 int i;
338
339 start_uuar = nuuars - uuari->num_low_latency_uuars;
340 for (i = start_uuar; i < nuuars; i++) {
341 if (!test_bit(i, uuari->bitmap)) {
342 set_bit(i, uuari->bitmap);
343 uuari->count[i]++;
344 return i;
345 }
346 }
347
348 return -ENOMEM;
349}
350
351static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari)
352{
353 int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
354 int minidx = 1;
355 int uuarn;
356 int end;
357 int i;
358
359 end = nuuars - uuari->num_low_latency_uuars;
360
361 for (i = 1; i < end; i++) {
362 uuarn = i & 3;
363 if (uuarn == 2 || uuarn == 3)
364 continue;
365
366 if (uuari->count[i] < uuari->count[minidx])
367 minidx = i;
368 }
369
370 uuari->count[minidx]++;
371 return minidx;
372}
373
374static int alloc_uuar(struct mlx5_uuar_info *uuari,
375 enum mlx5_ib_latency_class lat)
376{
377 int uuarn = -EINVAL;
378
379 mutex_lock(&uuari->lock);
380 switch (lat) {
381 case MLX5_IB_LATENCY_CLASS_LOW:
382 uuarn = 0;
383 uuari->count[uuarn]++;
384 break;
385
386 case MLX5_IB_LATENCY_CLASS_MEDIUM:
387 uuarn = alloc_med_class_uuar(uuari);
388 break;
389
390 case MLX5_IB_LATENCY_CLASS_HIGH:
391 uuarn = alloc_high_class_uuar(uuari);
392 break;
393
394 case MLX5_IB_LATENCY_CLASS_FAST_PATH:
395 uuarn = 2;
396 break;
397 }
398 mutex_unlock(&uuari->lock);
399
400 return uuarn;
401}
402
403static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
404{
405 clear_bit(uuarn, uuari->bitmap);
406 --uuari->count[uuarn];
407}
408
409static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
410{
411 clear_bit(uuarn, uuari->bitmap);
412 --uuari->count[uuarn];
413}
414
415static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn)
416{
417 int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
418 int high_uuar = nuuars - uuari->num_low_latency_uuars;
419
420 mutex_lock(&uuari->lock);
421 if (uuarn == 0) {
422 --uuari->count[uuarn];
423 goto out;
424 }
425
426 if (uuarn < high_uuar) {
427 free_med_class_uuar(uuari, uuarn);
428 goto out;
429 }
430
431 free_high_class_uuar(uuari, uuarn);
432
433out:
434 mutex_unlock(&uuari->lock);
435}
436
437static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
438{
439 switch (state) {
440 case IB_QPS_RESET: return MLX5_QP_STATE_RST;
441 case IB_QPS_INIT: return MLX5_QP_STATE_INIT;
442 case IB_QPS_RTR: return MLX5_QP_STATE_RTR;
443 case IB_QPS_RTS: return MLX5_QP_STATE_RTS;
444 case IB_QPS_SQD: return MLX5_QP_STATE_SQD;
445 case IB_QPS_SQE: return MLX5_QP_STATE_SQER;
446 case IB_QPS_ERR: return MLX5_QP_STATE_ERR;
447 default: return -1;
448 }
449}
450
451static int to_mlx5_st(enum ib_qp_type type)
452{
453 switch (type) {
454 case IB_QPT_RC: return MLX5_QP_ST_RC;
455 case IB_QPT_UC: return MLX5_QP_ST_UC;
456 case IB_QPT_UD: return MLX5_QP_ST_UD;
457 case MLX5_IB_QPT_REG_UMR: return MLX5_QP_ST_REG_UMR;
458 case IB_QPT_XRC_INI:
459 case IB_QPT_XRC_TGT: return MLX5_QP_ST_XRC;
460 case IB_QPT_SMI: return MLX5_QP_ST_QP0;
461 case IB_QPT_GSI: return MLX5_QP_ST_QP1;
462 case IB_QPT_RAW_IPV6: return MLX5_QP_ST_RAW_IPV6;
463 case IB_QPT_RAW_ETHERTYPE: return MLX5_QP_ST_RAW_ETHERTYPE;
464 case IB_QPT_RAW_PACKET:
465 case IB_QPT_MAX:
466 default: return -EINVAL;
467 }
468}
469
470static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
471{
472 return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
473}
474
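/*
 * create_user_qp(): the work queues live in user memory.  The command
 * from userspace supplies the buffer and doorbell addresses; the kernel
 * allocates a UUAR, pins the buffer with ib_umem_get(), translates it
 * into the PAS array of the CREATE_QP mailbox, maps the user doorbell
 * record and reports the chosen UUAR index back in the response.
 */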
475static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
476 struct mlx5_ib_qp *qp, struct ib_udata *udata,
477 struct mlx5_create_qp_mbox_in **in,
478 struct mlx5_ib_create_qp_resp *resp, int *inlen)
479{
480 struct mlx5_ib_ucontext *context;
481 struct mlx5_ib_create_qp ucmd;
482 int page_shift;
483 int uar_index;
484 int npages;
485 u32 offset;
486 int uuarn;
487 int ncont;
488 int err;
489
490 err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
491 if (err) {
492 mlx5_ib_dbg(dev, "copy failed\n");
493 return err;
494 }
495
496 context = to_mucontext(pd->uobject->context);
497 /*
498 * TBD: should come from the verbs when we have the API
499 */
500 uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH);
501 if (uuarn < 0) {
502 mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n");
503 mlx5_ib_dbg(dev, "reverting to high latency\n");
504 uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
505 if (uuarn < 0) {
506 mlx5_ib_dbg(dev, "uuar allocation failed\n");
507 return uuarn;
508 }
509 }
510
511 uar_index = uuarn_to_uar_index(&context->uuari, uuarn);
512 mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index);
513
514 err = set_user_buf_size(dev, qp, &ucmd);
515 if (err)
516 goto err_uuar;
517
518 qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
519 qp->buf_size, 0, 0);
520 if (IS_ERR(qp->umem)) {
521 mlx5_ib_dbg(dev, "umem_get failed\n");
522 err = PTR_ERR(qp->umem);
523 goto err_uuar;
524 }
525
526 mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift,
527 &ncont, NULL);
528 err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset);
529 if (err) {
530 mlx5_ib_warn(dev, "bad offset\n");
531 goto err_umem;
532 }
533 mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n",
534 ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset);
535
536 *inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
537 *in = mlx5_vzalloc(*inlen);
538 if (!*in) {
539 err = -ENOMEM;
540 goto err_umem;
541 }
542 mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0);
543 (*in)->ctx.log_pg_sz_remote_qpn =
544 cpu_to_be32((page_shift - PAGE_SHIFT) << 24);
545 (*in)->ctx.params2 = cpu_to_be32(offset << 6);
546
547 (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
548 resp->uuar_index = uuarn;
549 qp->uuarn = uuarn;
550
551 err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
552 if (err) {
553 mlx5_ib_dbg(dev, "map failed\n");
554 goto err_free;
555 }
556
557 err = ib_copy_to_udata(udata, resp, sizeof(*resp));
558 if (err) {
559 mlx5_ib_dbg(dev, "copy failed\n");
560 goto err_unmap;
561 }
562 qp->create_type = MLX5_QP_USER;
563
564 return 0;
565
566err_unmap:
567 mlx5_ib_db_unmap_user(context, &qp->db);
568
569err_free:
570 mlx5_vfree(*in);
571
572err_umem:
573 ib_umem_release(qp->umem);
574
575err_uuar:
576 free_uuar(&context->uuari, uuarn);
577 return err;
578}
579
580static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp)
581{
582 struct mlx5_ib_ucontext *context;
583
584 context = to_mucontext(pd->uobject->context);
585 mlx5_ib_db_unmap_user(context, &qp->db);
586 ib_umem_release(qp->umem);
587 free_uuar(&context->uuari, qp->uuarn);
588}
589
590static int create_kernel_qp(struct mlx5_ib_dev *dev,
591 struct ib_qp_init_attr *init_attr,
592 struct mlx5_ib_qp *qp,
593 struct mlx5_create_qp_mbox_in **in, int *inlen)
594{
595 enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
596 struct mlx5_uuar_info *uuari;
597 int uar_index;
598 int uuarn;
599 int err;
600
601 uuari = &dev->mdev.priv.uuari;
602 if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
603 qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
604
605 if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
606 lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;
607
608 uuarn = alloc_uuar(uuari, lc);
609 if (uuarn < 0) {
610		mlx5_ib_dbg(dev, "uuar allocation failed\n");
611 return -ENOMEM;
612 }
613
614 qp->bf = &uuari->bfs[uuarn];
615 uar_index = qp->bf->uar->index;
616
617 err = calc_sq_size(dev, init_attr, qp);
618 if (err < 0) {
619 mlx5_ib_dbg(dev, "err %d\n", err);
620 goto err_uuar;
621 }
622
623 qp->rq.offset = 0;
624 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
625 qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);
626
627 err = mlx5_buf_alloc(&dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf);
628 if (err) {
629 mlx5_ib_dbg(dev, "err %d\n", err);
630 goto err_uuar;
631 }
632
633 qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
634 *inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages;
635 *in = mlx5_vzalloc(*inlen);
636 if (!*in) {
637 err = -ENOMEM;
638 goto err_buf;
639 }
640 (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
641 (*in)->ctx.log_pg_sz_remote_qpn = cpu_to_be32((qp->buf.page_shift - PAGE_SHIFT) << 24);
642 /* Set "fast registration enabled" for all kernel QPs */
643 (*in)->ctx.params1 |= cpu_to_be32(1 << 11);
644 (*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4);
645
646 mlx5_fill_page_array(&qp->buf, (*in)->pas);
647
648 err = mlx5_db_alloc(&dev->mdev, &qp->db);
649 if (err) {
650 mlx5_ib_dbg(dev, "err %d\n", err);
651 goto err_free;
652 }
653
654 qp->db.db[0] = 0;
655 qp->db.db[1] = 0;
656
657 qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL);
658 qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL);
659 qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL);
660 qp->sq.w_list = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.w_list), GFP_KERNEL);
661 qp->sq.wqe_head = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head), GFP_KERNEL);
662
663 if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
664 !qp->sq.w_list || !qp->sq.wqe_head) {
665 err = -ENOMEM;
666 goto err_wrid;
667 }
668 qp->create_type = MLX5_QP_KERNEL;
669
670 return 0;
671
672err_wrid:
673 mlx5_db_free(&dev->mdev, &qp->db);
674 kfree(qp->sq.wqe_head);
675 kfree(qp->sq.w_list);
676 kfree(qp->sq.wrid);
677 kfree(qp->sq.wr_data);
678 kfree(qp->rq.wrid);
679
680err_free:
681 mlx5_vfree(*in);
682
683err_buf:
684 mlx5_buf_free(&dev->mdev, &qp->buf);
685
686err_uuar:
687 free_uuar(&dev->mdev.priv.uuari, uuarn);
688 return err;
689}
690
691static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
692{
693 mlx5_db_free(&dev->mdev, &qp->db);
694 kfree(qp->sq.wqe_head);
695 kfree(qp->sq.w_list);
696 kfree(qp->sq.wrid);
697 kfree(qp->sq.wr_data);
698 kfree(qp->rq.wrid);
699 mlx5_buf_free(&dev->mdev, &qp->buf);
700 free_uuar(&dev->mdev.priv.uuari, qp->bf->uuarn);
701}
702
703static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
704{
705 if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
706 (attr->qp_type == IB_QPT_XRC_INI))
707 return cpu_to_be32(MLX5_SRQ_RQ);
708 else if (!qp->has_rq)
709 return cpu_to_be32(MLX5_ZERO_LEN_RQ);
710 else
711 return cpu_to_be32(MLX5_NON_ZERO_RQ);
712}
713
714static int is_connected(enum ib_qp_type qp_type)
715{
716 if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC)
717 return 1;
718
719 return 0;
720}
721
722static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
723 struct ib_qp_init_attr *init_attr,
724 struct ib_udata *udata, struct mlx5_ib_qp *qp)
725{
726 struct mlx5_ib_resources *devr = &dev->devr;
727 struct mlx5_ib_create_qp_resp resp;
728 struct mlx5_create_qp_mbox_in *in;
729 struct mlx5_ib_create_qp ucmd;
730 int inlen = sizeof(*in);
731 int err;
732
733 mutex_init(&qp->mutex);
734 spin_lock_init(&qp->sq.lock);
735 spin_lock_init(&qp->rq.lock);
736
737 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
738 qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
739
740 if (pd && pd->uobject) {
741 if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
742 mlx5_ib_dbg(dev, "copy failed\n");
743 return -EFAULT;
744 }
745
746 qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
747 qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
748 } else {
749 qp->wq_sig = !!wq_signature;
750 }
751
752 qp->has_rq = qp_has_rq(init_attr);
753 err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
754 qp, (pd && pd->uobject) ? &ucmd : NULL);
755 if (err) {
756 mlx5_ib_dbg(dev, "err %d\n", err);
757 return err;
758 }
759
760 if (pd) {
761 if (pd->uobject) {
762 mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
763 if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
764 ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
765 mlx5_ib_dbg(dev, "invalid rq params\n");
766 return -EINVAL;
767 }
768 if (ucmd.sq_wqe_count > dev->mdev.caps.max_wqes) {
769 mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
770 ucmd.sq_wqe_count, dev->mdev.caps.max_wqes);
771 return -EINVAL;
772 }
773 err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
774 if (err)
775 mlx5_ib_dbg(dev, "err %d\n", err);
776 } else {
777 err = create_kernel_qp(dev, init_attr, qp, &in, &inlen);
778 if (err)
779 mlx5_ib_dbg(dev, "err %d\n", err);
780 else
781 qp->pa_lkey = to_mpd(pd)->pa_lkey;
782 }
783
784 if (err)
785 return err;
786 } else {
787 in = mlx5_vzalloc(sizeof(*in));
788 if (!in)
789 return -ENOMEM;
790
791 qp->create_type = MLX5_QP_EMPTY;
792 }
793
794 if (is_sqp(init_attr->qp_type))
795 qp->port = init_attr->port_num;
796
797 in->ctx.flags = cpu_to_be32(to_mlx5_st(init_attr->qp_type) << 16 |
798 MLX5_QP_PM_MIGRATED << 11);
799
800 if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
801 in->ctx.flags_pd = cpu_to_be32(to_mpd(pd ? pd : devr->p0)->pdn);
802 else
803 in->ctx.flags_pd = cpu_to_be32(MLX5_QP_LAT_SENSITIVE);
804
805 if (qp->wq_sig)
806 in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG);
807
808 if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
809 int rcqe_sz;
810 int scqe_sz;
811
812 rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq);
813 scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);
814
815 if (rcqe_sz == 128)
816 in->ctx.cs_res = MLX5_RES_SCAT_DATA64_CQE;
817 else
818 in->ctx.cs_res = MLX5_RES_SCAT_DATA32_CQE;
819
820 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) {
821 if (scqe_sz == 128)
822 in->ctx.cs_req = MLX5_REQ_SCAT_DATA64_CQE;
823 else
824 in->ctx.cs_req = MLX5_REQ_SCAT_DATA32_CQE;
825 }
826 }
827
828 if (qp->rq.wqe_cnt) {
829 in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4);
830 in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3;
831 }
832
833 in->ctx.rq_type_srqn = get_rx_type(qp, init_attr);
834
835 if (qp->sq.wqe_cnt)
836 in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11);
837 else
838 in->ctx.sq_crq_size |= cpu_to_be16(0x8000);
839
840 /* Set default resources */
841 switch (init_attr->qp_type) {
842 case IB_QPT_XRC_TGT:
843 in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
844 in->ctx.cqn_send = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
845 in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
846 in->ctx.xrcd = cpu_to_be32(to_mxrcd(init_attr->xrcd)->xrcdn);
847 break;
848 case IB_QPT_XRC_INI:
849 in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
850 in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
851 in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
852 break;
853 default:
854 if (init_attr->srq) {
855 in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x0)->xrcdn);
856 in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn);
857 } else {
858 in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
859 in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
860 }
861 }
862
863 if (init_attr->send_cq)
864 in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn);
865
866 if (init_attr->recv_cq)
867 in->ctx.cqn_recv = cpu_to_be32(to_mcq(init_attr->recv_cq)->mcq.cqn);
868
869 in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma);
870
871 err = mlx5_core_create_qp(&dev->mdev, &qp->mqp, in, inlen);
872 if (err) {
873 mlx5_ib_dbg(dev, "create qp failed\n");
874 goto err_create;
875 }
876
877 mlx5_vfree(in);
878 /* Hardware wants QPN written in big-endian order (after
879 * shifting) for send doorbell. Precompute this value to save
880 * a little bit when posting sends.
881 */
882 qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
883
884 qp->mqp.event = mlx5_ib_qp_event;
885
886 return 0;
887
888err_create:
889 if (qp->create_type == MLX5_QP_USER)
890 destroy_qp_user(pd, qp);
891 else if (qp->create_type == MLX5_QP_KERNEL)
892 destroy_qp_kernel(dev, qp);
893
894 mlx5_vfree(in);
895 return err;
896}
897
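/*
 * A destroyed QP must have its CQEs purged from both CQs.  The helpers
 * below always take the CQ spinlocks in ascending CQN order (and only
 * once when send and receive share a CQ) so concurrent destroys cannot
 * deadlock against each other.
 */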
898static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
899 __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
900{
901 if (send_cq) {
902 if (recv_cq) {
903 if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
904 spin_lock_irq(&send_cq->lock);
905 spin_lock_nested(&recv_cq->lock,
906 SINGLE_DEPTH_NESTING);
907 } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
908 spin_lock_irq(&send_cq->lock);
909 __acquire(&recv_cq->lock);
910 } else {
911 spin_lock_irq(&recv_cq->lock);
912 spin_lock_nested(&send_cq->lock,
913 SINGLE_DEPTH_NESTING);
914 }
915 } else {
916 spin_lock_irq(&send_cq->lock);
917 }
918 } else if (recv_cq) {
919 spin_lock_irq(&recv_cq->lock);
920 }
921}
922
923static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
924 __releases(&send_cq->lock) __releases(&recv_cq->lock)
925{
926 if (send_cq) {
927 if (recv_cq) {
928 if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
929 spin_unlock(&recv_cq->lock);
930 spin_unlock_irq(&send_cq->lock);
931 } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
932 __release(&recv_cq->lock);
933 spin_unlock_irq(&send_cq->lock);
934 } else {
935 spin_unlock(&send_cq->lock);
936 spin_unlock_irq(&recv_cq->lock);
937 }
938 } else {
939 spin_unlock_irq(&send_cq->lock);
940 }
941 } else if (recv_cq) {
942 spin_unlock_irq(&recv_cq->lock);
943 }
944}
945
946static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
947{
948 return to_mpd(qp->ibqp.pd);
949}
950
951static void get_cqs(struct mlx5_ib_qp *qp,
952 struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
953{
954 switch (qp->ibqp.qp_type) {
955 case IB_QPT_XRC_TGT:
956 *send_cq = NULL;
957 *recv_cq = NULL;
958 break;
959 case MLX5_IB_QPT_REG_UMR:
960 case IB_QPT_XRC_INI:
961 *send_cq = to_mcq(qp->ibqp.send_cq);
962 *recv_cq = NULL;
963 break;
964
965 case IB_QPT_SMI:
966 case IB_QPT_GSI:
967 case IB_QPT_RC:
968 case IB_QPT_UC:
969 case IB_QPT_UD:
970 case IB_QPT_RAW_IPV6:
971 case IB_QPT_RAW_ETHERTYPE:
972 *send_cq = to_mcq(qp->ibqp.send_cq);
973 *recv_cq = to_mcq(qp->ibqp.recv_cq);
974 break;
975
976 case IB_QPT_RAW_PACKET:
977 case IB_QPT_MAX:
978 default:
979 *send_cq = NULL;
980 *recv_cq = NULL;
981 break;
982 }
983}
984
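/*
 * destroy_qp_common() first modifies the QP to RESET so the hardware
 * stops generating completions, then, for kernel QPs, cleans stale CQEs
 * out of the send and receive CQs under both CQ locks before issuing
 * DESTROY_QP and releasing the queue buffers.
 */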
985static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
986{
987 struct mlx5_ib_cq *send_cq, *recv_cq;
988 struct mlx5_modify_qp_mbox_in *in;
989 int err;
990
991 in = kzalloc(sizeof(*in), GFP_KERNEL);
992 if (!in)
993 return;
994 if (qp->state != IB_QPS_RESET)
995 if (mlx5_core_qp_modify(&dev->mdev, to_mlx5_state(qp->state),
996 MLX5_QP_STATE_RST, in, sizeof(*in), &qp->mqp))
997 mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n",
998 qp->mqp.qpn);
999
1000 get_cqs(qp, &send_cq, &recv_cq);
1001
1002 if (qp->create_type == MLX5_QP_KERNEL) {
1003 mlx5_ib_lock_cqs(send_cq, recv_cq);
1004 __mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
1005 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
1006 if (send_cq != recv_cq)
1007 __mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
1008 mlx5_ib_unlock_cqs(send_cq, recv_cq);
1009 }
1010
1011 err = mlx5_core_destroy_qp(&dev->mdev, &qp->mqp);
1012 if (err)
1013 mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn);
1014 kfree(in);
1015
1016
1017 if (qp->create_type == MLX5_QP_KERNEL)
1018 destroy_qp_kernel(dev, qp);
1019 else if (qp->create_type == MLX5_QP_USER)
1020 destroy_qp_user(&get_pd(qp)->ibpd, qp);
1021}
1022
1023static const char *ib_qp_type_str(enum ib_qp_type type)
1024{
1025 switch (type) {
1026 case IB_QPT_SMI:
1027 return "IB_QPT_SMI";
1028 case IB_QPT_GSI:
1029 return "IB_QPT_GSI";
1030 case IB_QPT_RC:
1031 return "IB_QPT_RC";
1032 case IB_QPT_UC:
1033 return "IB_QPT_UC";
1034 case IB_QPT_UD:
1035 return "IB_QPT_UD";
1036 case IB_QPT_RAW_IPV6:
1037 return "IB_QPT_RAW_IPV6";
1038 case IB_QPT_RAW_ETHERTYPE:
1039 return "IB_QPT_RAW_ETHERTYPE";
1040 case IB_QPT_XRC_INI:
1041 return "IB_QPT_XRC_INI";
1042 case IB_QPT_XRC_TGT:
1043 return "IB_QPT_XRC_TGT";
1044 case IB_QPT_RAW_PACKET:
1045 return "IB_QPT_RAW_PACKET";
1046 case MLX5_IB_QPT_REG_UMR:
1047 return "MLX5_IB_QPT_REG_UMR";
1048 case IB_QPT_MAX:
1049 default:
1050 return "Invalid QP type";
1051 }
1052}
1053
1054struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
1055 struct ib_qp_init_attr *init_attr,
1056 struct ib_udata *udata)
1057{
1058 struct mlx5_ib_dev *dev;
1059 struct mlx5_ib_qp *qp;
1060 u16 xrcdn = 0;
1061 int err;
1062
1063 if (pd) {
1064 dev = to_mdev(pd->device);
1065 } else {
1066 /* being cautious here */
1067 if (init_attr->qp_type != IB_QPT_XRC_TGT &&
1068 init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
1069 pr_warn("%s: no PD for transport %s\n", __func__,
1070 ib_qp_type_str(init_attr->qp_type));
1071 return ERR_PTR(-EINVAL);
1072 }
1073 dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
1074 }
1075
1076 switch (init_attr->qp_type) {
1077 case IB_QPT_XRC_TGT:
1078 case IB_QPT_XRC_INI:
1079 if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_XRC)) {
1080 mlx5_ib_dbg(dev, "XRC not supported\n");
1081 return ERR_PTR(-ENOSYS);
1082 }
1083 init_attr->recv_cq = NULL;
1084 if (init_attr->qp_type == IB_QPT_XRC_TGT) {
1085 xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
1086 init_attr->send_cq = NULL;
1087 }
1088
1089 /* fall through */
1090 case IB_QPT_RC:
1091 case IB_QPT_UC:
1092 case IB_QPT_UD:
1093 case IB_QPT_SMI:
1094 case IB_QPT_GSI:
1095 case MLX5_IB_QPT_REG_UMR:
1096 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1097 if (!qp)
1098 return ERR_PTR(-ENOMEM);
1099
1100 err = create_qp_common(dev, pd, init_attr, udata, qp);
1101 if (err) {
1102 mlx5_ib_dbg(dev, "create_qp_common failed\n");
1103 kfree(qp);
1104 return ERR_PTR(err);
1105 }
1106
1107 if (is_qp0(init_attr->qp_type))
1108 qp->ibqp.qp_num = 0;
1109 else if (is_qp1(init_attr->qp_type))
1110 qp->ibqp.qp_num = 1;
1111 else
1112 qp->ibqp.qp_num = qp->mqp.qpn;
1113
1114 mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
1115 qp->ibqp.qp_num, qp->mqp.qpn, to_mcq(init_attr->recv_cq)->mcq.cqn,
1116 to_mcq(init_attr->send_cq)->mcq.cqn);
1117
1118 qp->xrcdn = xrcdn;
1119
1120 break;
1121
1122 case IB_QPT_RAW_IPV6:
1123 case IB_QPT_RAW_ETHERTYPE:
1124 case IB_QPT_RAW_PACKET:
1125 case IB_QPT_MAX:
1126 default:
1127 mlx5_ib_dbg(dev, "unsupported qp type %d\n",
1128 init_attr->qp_type);
1129 /* Don't support raw QPs */
1130 return ERR_PTR(-EINVAL);
1131 }
1132
1133 return &qp->ibqp;
1134}
1135
1136int mlx5_ib_destroy_qp(struct ib_qp *qp)
1137{
1138 struct mlx5_ib_dev *dev = to_mdev(qp->device);
1139 struct mlx5_ib_qp *mqp = to_mqp(qp);
1140
1141 destroy_qp_common(dev, mqp);
1142
1143 kfree(mqp);
1144
1145 return 0;
1146}
1147
1148static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr,
1149 int attr_mask)
1150{
1151 u32 hw_access_flags = 0;
1152 u8 dest_rd_atomic;
1153 u32 access_flags;
1154
1155 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1156 dest_rd_atomic = attr->max_dest_rd_atomic;
1157 else
1158 dest_rd_atomic = qp->resp_depth;
1159
1160 if (attr_mask & IB_QP_ACCESS_FLAGS)
1161 access_flags = attr->qp_access_flags;
1162 else
1163 access_flags = qp->atomic_rd_en;
1164
1165 if (!dest_rd_atomic)
1166 access_flags &= IB_ACCESS_REMOTE_WRITE;
1167
1168 if (access_flags & IB_ACCESS_REMOTE_READ)
1169 hw_access_flags |= MLX5_QP_BIT_RRE;
1170 if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
1171 hw_access_flags |= (MLX5_QP_BIT_RAE | MLX5_ATOMIC_MODE_CX);
1172 if (access_flags & IB_ACCESS_REMOTE_WRITE)
1173 hw_access_flags |= MLX5_QP_BIT_RWE;
1174
1175 return cpu_to_be32(hw_access_flags);
1176}
1177
1178enum {
1179 MLX5_PATH_FLAG_FL = 1 << 0,
1180 MLX5_PATH_FLAG_FREE_AR = 1 << 1,
1181 MLX5_PATH_FLAG_COUNTER = 1 << 2,
1182};
1183
1184static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
1185{
1186 if (rate == IB_RATE_PORT_CURRENT) {
1187 return 0;
1188 } else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
1189 return -EINVAL;
1190 } else {
1191 while (rate != IB_RATE_2_5_GBPS &&
1192 !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
1193 dev->mdev.caps.stat_rate_support))
1194 --rate;
1195 }
1196
1197 return rate + MLX5_STAT_RATE_OFFSET;
1198}
1199
1200static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
1201 struct mlx5_qp_path *path, u8 port, int attr_mask,
1202 u32 path_flags, const struct ib_qp_attr *attr)
1203{
1204 int err;
1205
1206 path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
1207 path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0;
1208
1209 if (attr_mask & IB_QP_PKEY_INDEX)
1210 path->pkey_index = attr->pkey_index;
1211
1212 path->grh_mlid = ah->src_path_bits & 0x7f;
1213 path->rlid = cpu_to_be16(ah->dlid);
1214
1215 if (ah->ah_flags & IB_AH_GRH) {
1216 path->grh_mlid |= 1 << 7;
1217 path->mgid_index = ah->grh.sgid_index;
1218 path->hop_limit = ah->grh.hop_limit;
1219 path->tclass_flowlabel =
1220 cpu_to_be32((ah->grh.traffic_class << 20) |
1221 (ah->grh.flow_label));
1222 memcpy(path->rgid, ah->grh.dgid.raw, 16);
1223 }
1224
1225 err = ib_rate_to_mlx5(dev, ah->static_rate);
1226 if (err < 0)
1227 return err;
1228 path->static_rate = err;
1229 path->port = port;
1230
1231 if (ah->ah_flags & IB_AH_GRH) {
1232 if (ah->grh.sgid_index >= dev->mdev.caps.port[port - 1].gid_table_len) {
1233			pr_err("sgid_index (%u) too large. max is %d\n",
1234 ah->grh.sgid_index, dev->mdev.caps.port[port - 1].gid_table_len);
1235 return -EINVAL;
1236 }
1237
1238 path->grh_mlid |= 1 << 7;
1239 path->mgid_index = ah->grh.sgid_index;
1240 path->hop_limit = ah->grh.hop_limit;
1241 path->tclass_flowlabel =
1242 cpu_to_be32((ah->grh.traffic_class << 20) |
1243 (ah->grh.flow_label));
1244 memcpy(path->rgid, ah->grh.dgid.raw, 16);
1245 }
1246
1247 if (attr_mask & IB_QP_TIMEOUT)
1248 path->ackto_lt = attr->timeout << 3;
1249
1250 path->sl = ah->sl & 0xf;
1251
1252 return 0;
1253}
1254
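/*
 * opt_mask[cur_state][new_state][transport] lists the optional QP
 * context fields the device may modify on that particular transition;
 * ib_mask_to_mlx5_opt() translates the verbs attr_mask and the result
 * is ANDed with this table before being written into in->optparam.
 */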
1255static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
1256 [MLX5_QP_STATE_INIT] = {
1257 [MLX5_QP_STATE_INIT] = {
1258 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE |
1259 MLX5_QP_OPTPAR_RAE |
1260 MLX5_QP_OPTPAR_RWE |
1261 MLX5_QP_OPTPAR_PKEY_INDEX |
1262 MLX5_QP_OPTPAR_PRI_PORT,
1263 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE |
1264 MLX5_QP_OPTPAR_PKEY_INDEX |
1265 MLX5_QP_OPTPAR_PRI_PORT,
1266 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
1267 MLX5_QP_OPTPAR_Q_KEY |
1268 MLX5_QP_OPTPAR_PRI_PORT,
1269 },
1270 [MLX5_QP_STATE_RTR] = {
1271 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1272 MLX5_QP_OPTPAR_RRE |
1273 MLX5_QP_OPTPAR_RAE |
1274 MLX5_QP_OPTPAR_RWE |
1275 MLX5_QP_OPTPAR_PKEY_INDEX,
1276 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1277 MLX5_QP_OPTPAR_RWE |
1278 MLX5_QP_OPTPAR_PKEY_INDEX,
1279 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
1280 MLX5_QP_OPTPAR_Q_KEY,
1281 [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX |
1282 MLX5_QP_OPTPAR_Q_KEY,
1283 },
1284 },
1285 [MLX5_QP_STATE_RTR] = {
1286 [MLX5_QP_STATE_RTS] = {
1287 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1288 MLX5_QP_OPTPAR_RRE |
1289 MLX5_QP_OPTPAR_RAE |
1290 MLX5_QP_OPTPAR_RWE |
1291 MLX5_QP_OPTPAR_PM_STATE |
1292 MLX5_QP_OPTPAR_RNR_TIMEOUT,
1293 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1294 MLX5_QP_OPTPAR_RWE |
1295 MLX5_QP_OPTPAR_PM_STATE,
1296 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
1297 },
1298 },
1299 [MLX5_QP_STATE_RTS] = {
1300 [MLX5_QP_STATE_RTS] = {
1301 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE |
1302 MLX5_QP_OPTPAR_RAE |
1303 MLX5_QP_OPTPAR_RWE |
1304 MLX5_QP_OPTPAR_RNR_TIMEOUT |
1305 MLX5_QP_OPTPAR_PM_STATE,
1306 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE |
1307 MLX5_QP_OPTPAR_PM_STATE,
1308 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY |
1309 MLX5_QP_OPTPAR_SRQN |
1310 MLX5_QP_OPTPAR_CQN_RCV,
1311 },
1312 },
1313 [MLX5_QP_STATE_SQER] = {
1314 [MLX5_QP_STATE_RTS] = {
1315 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
1316 [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
1317 },
1318 },
1319};
1320
1321static int ib_nr_to_mlx5_nr(int ib_mask)
1322{
1323 switch (ib_mask) {
1324 case IB_QP_STATE:
1325 return 0;
1326 case IB_QP_CUR_STATE:
1327 return 0;
1328 case IB_QP_EN_SQD_ASYNC_NOTIFY:
1329 return 0;
1330 case IB_QP_ACCESS_FLAGS:
1331 return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
1332 MLX5_QP_OPTPAR_RAE;
1333 case IB_QP_PKEY_INDEX:
1334 return MLX5_QP_OPTPAR_PKEY_INDEX;
1335 case IB_QP_PORT:
1336 return MLX5_QP_OPTPAR_PRI_PORT;
1337 case IB_QP_QKEY:
1338 return MLX5_QP_OPTPAR_Q_KEY;
1339 case IB_QP_AV:
1340 return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH |
1341 MLX5_QP_OPTPAR_PRI_PORT;
1342 case IB_QP_PATH_MTU:
1343 return 0;
1344 case IB_QP_TIMEOUT:
1345 return MLX5_QP_OPTPAR_ACK_TIMEOUT;
1346 case IB_QP_RETRY_CNT:
1347 return MLX5_QP_OPTPAR_RETRY_COUNT;
1348 case IB_QP_RNR_RETRY:
1349 return MLX5_QP_OPTPAR_RNR_RETRY;
1350 case IB_QP_RQ_PSN:
1351 return 0;
1352 case IB_QP_MAX_QP_RD_ATOMIC:
1353 return MLX5_QP_OPTPAR_SRA_MAX;
1354 case IB_QP_ALT_PATH:
1355 return MLX5_QP_OPTPAR_ALT_ADDR_PATH;
1356 case IB_QP_MIN_RNR_TIMER:
1357 return MLX5_QP_OPTPAR_RNR_TIMEOUT;
1358 case IB_QP_SQ_PSN:
1359 return 0;
1360 case IB_QP_MAX_DEST_RD_ATOMIC:
1361 return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE |
1362 MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE;
1363 case IB_QP_PATH_MIG_STATE:
1364 return MLX5_QP_OPTPAR_PM_STATE;
1365 case IB_QP_CAP:
1366 return 0;
1367 case IB_QP_DEST_QPN:
1368 return 0;
1369 }
1370 return 0;
1371}
1372
1373static int ib_mask_to_mlx5_opt(int ib_mask)
1374{
1375 int result = 0;
1376 int i;
1377
1378 for (i = 0; i < 8 * sizeof(int); i++) {
1379 if ((1 << i) & ib_mask)
1380 result |= ib_nr_to_mlx5_nr(1 << i);
1381 }
1382
1383 return result;
1384}
1385
1386static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
1387 const struct ib_qp_attr *attr, int attr_mask,
1388 enum ib_qp_state cur_state, enum ib_qp_state new_state)
1389{
1390 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
1391 struct mlx5_ib_qp *qp = to_mqp(ibqp);
1392 struct mlx5_ib_cq *send_cq, *recv_cq;
1393 struct mlx5_qp_context *context;
1394 struct mlx5_modify_qp_mbox_in *in;
1395 struct mlx5_ib_pd *pd;
1396 enum mlx5_qp_state mlx5_cur, mlx5_new;
1397 enum mlx5_qp_optpar optpar;
1398 int sqd_event;
1399 int mlx5_st;
1400 int err;
1401
1402 in = kzalloc(sizeof(*in), GFP_KERNEL);
1403 if (!in)
1404 return -ENOMEM;
1405
1406 context = &in->ctx;
1407 err = to_mlx5_st(ibqp->qp_type);
1408 if (err < 0)
1409 goto out;
1410
1411 context->flags = cpu_to_be32(err << 16);
1412
1413 if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
1414 context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
1415 } else {
1416 switch (attr->path_mig_state) {
1417 case IB_MIG_MIGRATED:
1418 context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
1419 break;
1420 case IB_MIG_REARM:
1421 context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11);
1422 break;
1423 case IB_MIG_ARMED:
1424 context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11);
1425 break;
1426 }
1427 }
1428
1429 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
1430 context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
1431 } else if (ibqp->qp_type == IB_QPT_UD ||
1432 ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
1433 context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
1434 } else if (attr_mask & IB_QP_PATH_MTU) {
1435 if (attr->path_mtu < IB_MTU_256 ||
1436 attr->path_mtu > IB_MTU_4096) {
1437 mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu);
1438 err = -EINVAL;
1439 goto out;
1440 }
1441 context->mtu_msgmax = (attr->path_mtu << 5) | dev->mdev.caps.log_max_msg;
1442 }
1443
1444 if (attr_mask & IB_QP_DEST_QPN)
1445 context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);
1446
1447 if (attr_mask & IB_QP_PKEY_INDEX)
1448 context->pri_path.pkey_index = attr->pkey_index;
1449
1450	/* TODO: implement counter_index functionality */
1451
1452 if (is_sqp(ibqp->qp_type))
1453 context->pri_path.port = qp->port;
1454
1455 if (attr_mask & IB_QP_PORT)
1456 context->pri_path.port = attr->port_num;
1457
1458 if (attr_mask & IB_QP_AV) {
1459 err = mlx5_set_path(dev, &attr->ah_attr, &context->pri_path,
1460 attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
1461 attr_mask, 0, attr);
1462 if (err)
1463 goto out;
1464 }
1465
1466 if (attr_mask & IB_QP_TIMEOUT)
1467 context->pri_path.ackto_lt |= attr->timeout << 3;
1468
1469 if (attr_mask & IB_QP_ALT_PATH) {
1470 err = mlx5_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
1471 attr->alt_port_num, attr_mask, 0, attr);
1472 if (err)
1473 goto out;
1474 }
1475
1476 pd = get_pd(qp);
1477 get_cqs(qp, &send_cq, &recv_cq);
1478
1479 context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
1480 context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
1481 context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0;
1482 context->params1 = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28);
1483
1484 if (attr_mask & IB_QP_RNR_RETRY)
1485 context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
1486
1487 if (attr_mask & IB_QP_RETRY_CNT)
1488 context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
1489
1490 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1491 if (attr->max_rd_atomic)
1492 context->params1 |=
1493 cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
1494 }
1495
1496 if (attr_mask & IB_QP_SQ_PSN)
1497 context->next_send_psn = cpu_to_be32(attr->sq_psn);
1498
1499 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1500 if (attr->max_dest_rd_atomic)
1501 context->params2 |=
1502 cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
1503 }
1504
1505 if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
1506 context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask);
1507
1508 if (attr_mask & IB_QP_MIN_RNR_TIMER)
1509 context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
1510
1511 if (attr_mask & IB_QP_RQ_PSN)
1512 context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);
1513
1514 if (attr_mask & IB_QP_QKEY)
1515 context->qkey = cpu_to_be32(attr->qkey);
1516
1517 if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1518 context->db_rec_addr = cpu_to_be64(qp->db.dma);
1519
1520 if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
1521 attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
1522 sqd_event = 1;
1523 else
1524 sqd_event = 0;
1525
1526 if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1527 context->sq_crq_size |= cpu_to_be16(1 << 4);
1528
1529
1530 mlx5_cur = to_mlx5_state(cur_state);
1531 mlx5_new = to_mlx5_state(new_state);
1532 mlx5_st = to_mlx5_st(ibqp->qp_type);
1533	if (mlx5_cur < 0 || mlx5_new < 0 || mlx5_st < 0) {
		err = -EINVAL;
1534		goto out;
	}
1535
1536 optpar = ib_mask_to_mlx5_opt(attr_mask);
1537 optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
1538 in->optparam = cpu_to_be32(optpar);
1539 err = mlx5_core_qp_modify(&dev->mdev, to_mlx5_state(cur_state),
1540 to_mlx5_state(new_state), in, sqd_event,
1541 &qp->mqp);
1542 if (err)
1543 goto out;
1544
1545 qp->state = new_state;
1546
1547 if (attr_mask & IB_QP_ACCESS_FLAGS)
1548 qp->atomic_rd_en = attr->qp_access_flags;
1549 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1550 qp->resp_depth = attr->max_dest_rd_atomic;
1551 if (attr_mask & IB_QP_PORT)
1552 qp->port = attr->port_num;
1553 if (attr_mask & IB_QP_ALT_PATH)
1554 qp->alt_port = attr->alt_port_num;
1555
1556 /*
1557 * If we moved a kernel QP to RESET, clean up all old CQ
1558 * entries and reinitialize the QP.
1559 */
1560 if (new_state == IB_QPS_RESET && !ibqp->uobject) {
1561 mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
1562 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
1563 if (send_cq != recv_cq)
1564 mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
1565
1566 qp->rq.head = 0;
1567 qp->rq.tail = 0;
1568 qp->sq.head = 0;
1569 qp->sq.tail = 0;
1570 qp->sq.cur_post = 0;
1571 qp->sq.last_poll = 0;
1572 qp->db.db[MLX5_RCV_DBR] = 0;
1573 qp->db.db[MLX5_SND_DBR] = 0;
1574 }
1575
1576out:
1577 kfree(in);
1578 return err;
1579}
1580
1581int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1582 int attr_mask, struct ib_udata *udata)
1583{
1584 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
1585 struct mlx5_ib_qp *qp = to_mqp(ibqp);
1586 enum ib_qp_state cur_state, new_state;
1587 int err = -EINVAL;
1588 int port;
1589
1590 mutex_lock(&qp->mutex);
1591
1592 cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
1593 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
1594
1595 if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR &&
1596 !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask))
1597 goto out;
1598
1599 if ((attr_mask & IB_QP_PORT) &&
1600 (attr->port_num == 0 || attr->port_num > dev->mdev.caps.num_ports))
1601 goto out;
1602
1603 if (attr_mask & IB_QP_PKEY_INDEX) {
1604 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
1605 if (attr->pkey_index >= dev->mdev.caps.port[port - 1].pkey_table_len)
1606 goto out;
1607 }
1608
1609 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
1610 attr->max_rd_atomic > dev->mdev.caps.max_ra_res_qp)
1611 goto out;
1612
1613 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
1614 attr->max_dest_rd_atomic > dev->mdev.caps.max_ra_req_qp)
1615 goto out;
1616
1617 if (cur_state == new_state && cur_state == IB_QPS_RESET) {
1618 err = 0;
1619 goto out;
1620 }
1621
1622 err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
1623
1624out:
1625 mutex_unlock(&qp->mutex);
1626 return err;
1627}
1628
1629static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
1630{
1631 struct mlx5_ib_cq *cq;
1632 unsigned cur;
1633
1634 cur = wq->head - wq->tail;
1635 if (likely(cur + nreq < wq->max_post))
1636 return 0;
1637
1638 cq = to_mcq(ib_cq);
1639 spin_lock(&cq->lock);
1640 cur = wq->head - wq->tail;
1641 spin_unlock(&cq->lock);
1642
1643 return cur + nreq >= wq->max_post;
1644}
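mlx5_wq_overflow() above counts outstanding WQEs as head - tail on free-running unsigned counters, so the subtraction stays correct even after either counter wraps; the CQ lock is only taken on the slow path to re-read a stable value. A small sketch of the wrap-around property (the numbers are illustrative):

#include <stdio.h>

int main(void)
{
	/* free-running producer/consumer counters, as in struct mlx5_ib_wq */
	unsigned int head = 0xfffffff8u;	/* about to wrap */
	unsigned int tail = 0xfffffff0u;
	unsigned int max_post = 64;
	unsigned int nreq = 3;

	head += 16;				/* wraps past zero */

	printf("outstanding = %u\n", head - tail);			/* still 24 */
	printf("overflow    = %d\n", head - tail + nreq >= max_post);	/* 0 */
	return 0;
}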
1645
1646static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
1647 u64 remote_addr, u32 rkey)
1648{
1649 rseg->raddr = cpu_to_be64(remote_addr);
1650 rseg->rkey = cpu_to_be32(rkey);
1651 rseg->reserved = 0;
1652}
1653
1654static void set_atomic_seg(struct mlx5_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
1655{
1656 if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
1657 aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
1658 aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
1659 } else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
1660 aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
1661 aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask);
1662 } else {
1663 aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
1664 aseg->compare = 0;
1665 }
1666}
1667
1668static void set_masked_atomic_seg(struct mlx5_wqe_masked_atomic_seg *aseg,
1669 struct ib_send_wr *wr)
1670{
1671 aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
1672 aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask);
1673 aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
1674 aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask);
1675}
1676
1677static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
1678 struct ib_send_wr *wr)
1679{
1680 memcpy(&dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof(struct mlx5_av));
1681 dseg->av.dqp_dct = cpu_to_be32(wr->wr.ud.remote_qpn | MLX5_EXTENDED_UD_AV);
1682 dseg->av.key.qkey.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
1683}
1684
1685static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
1686{
1687 dseg->byte_count = cpu_to_be32(sg->length);
1688 dseg->lkey = cpu_to_be32(sg->lkey);
1689 dseg->addr = cpu_to_be64(sg->addr);
1690}
1691
1692static __be16 get_klm_octo(int npages)
1693{
1694 return cpu_to_be16(ALIGN(npages, 8) / 2);
1695}
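get_klm_octo() above expresses a translation-list length in the 16-byte "octoword" units the UMR control segment expects: the entry count is first rounded up to a multiple of 8 and then halved, since each 16-byte octoword holds two 8-byte entries. The arithmetic as a standalone sketch (ALIGN_UP reimplements the kernel's ALIGN() for power-of-two alignments):

#include <stdio.h>

/* same rounding as the kernel's ALIGN() for a power-of-two alignment */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	int npages;

	for (npages = 1; npages <= 16; npages *= 2)
		printf("npages=%2d -> klm octowords=%d\n",
		       npages, ALIGN_UP(npages, 8) / 2);
	/* 1..8 pages all take 4 octowords, 9..16 take 8 */
	return 0;
}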
1696
1697static __be64 frwr_mkey_mask(void)
1698{
1699 u64 result;
1700
1701 result = MLX5_MKEY_MASK_LEN |
1702 MLX5_MKEY_MASK_PAGE_SIZE |
1703 MLX5_MKEY_MASK_START_ADDR |
1704 MLX5_MKEY_MASK_EN_RINVAL |
1705 MLX5_MKEY_MASK_KEY |
1706 MLX5_MKEY_MASK_LR |
1707 MLX5_MKEY_MASK_LW |
1708 MLX5_MKEY_MASK_RR |
1709 MLX5_MKEY_MASK_RW |
1710 MLX5_MKEY_MASK_A |
1711 MLX5_MKEY_MASK_SMALL_FENCE |
1712 MLX5_MKEY_MASK_FREE;
1713
1714 return cpu_to_be64(result);
1715}
1716
1717static void set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
1718 struct ib_send_wr *wr, int li)
1719{
1720 memset(umr, 0, sizeof(*umr));
1721
1722 if (li) {
1723 umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
1724 umr->flags = 1 << 7;
1725 return;
1726 }
1727
1728 umr->flags = (1 << 5); /* fail if not free */
1729 umr->klm_octowords = get_klm_octo(wr->wr.fast_reg.page_list_len);
1730 umr->mkey_mask = frwr_mkey_mask();
1731}
1732
1733static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
1734 struct ib_send_wr *wr)
1735{
1736 struct umr_wr *umrwr = (struct umr_wr *)&wr->wr.fast_reg;
1737 u64 mask;
1738
1739 memset(umr, 0, sizeof(*umr));
1740
1741 if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) {
1742 umr->flags = 1 << 5; /* fail if not free */
1743 umr->klm_octowords = get_klm_octo(umrwr->npages);
1744 mask = MLX5_MKEY_MASK_LEN |
1745 MLX5_MKEY_MASK_PAGE_SIZE |
1746 MLX5_MKEY_MASK_START_ADDR |
1747 MLX5_MKEY_MASK_PD |
1748 MLX5_MKEY_MASK_LR |
1749 MLX5_MKEY_MASK_LW |
1750 MLX5_MKEY_MASK_RR |
1751 MLX5_MKEY_MASK_RW |
1752 MLX5_MKEY_MASK_A |
1753 MLX5_MKEY_MASK_FREE;
1754 umr->mkey_mask = cpu_to_be64(mask);
1755 } else {
1756 umr->flags = 2 << 5; /* fail if free */
1757 mask = MLX5_MKEY_MASK_FREE;
1758 umr->mkey_mask = cpu_to_be64(mask);
1759 }
1760
1761 if (!wr->num_sge)
1762 umr->flags |= (1 << 7); /* inline */
1763}
1764
1765static u8 get_umr_flags(int acc)
1766{
1767 return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |
1768 (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) |
1769 (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) |
1770 (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) |
1771 MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN | MLX5_ACCESS_MODE_MTT;
1772}
1773
1774static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr,
1775 int li, int *writ)
1776{
1777 memset(seg, 0, sizeof(*seg));
1778 if (li) {
1779 seg->status = 1 << 6;
1780 return;
1781 }
1782
1783 seg->flags = get_umr_flags(wr->wr.fast_reg.access_flags);
1784 *writ = seg->flags & (MLX5_PERM_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE);
1785 seg->qpn_mkey7_0 = cpu_to_be32((wr->wr.fast_reg.rkey & 0xff) | 0xffffff00);
1786 seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
1787 seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
1788 seg->len = cpu_to_be64(wr->wr.fast_reg.length);
1789 seg->xlt_oct_size = cpu_to_be32((wr->wr.fast_reg.page_list_len + 1) / 2);
1790 seg->log2_page_size = wr->wr.fast_reg.page_shift;
1791}
1792
1793static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
1794{
1795 memset(seg, 0, sizeof(*seg));
1796 if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) {
1797 seg->status = 1 << 6;
1798 return;
1799 }
1800
1801 seg->flags = convert_access(wr->wr.fast_reg.access_flags);
1802 seg->flags_pd = cpu_to_be32(to_mpd((struct ib_pd *)wr->wr.fast_reg.page_list)->pdn);
1803 seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
1804 seg->len = cpu_to_be64(wr->wr.fast_reg.length);
1805 seg->log2_page_size = wr->wr.fast_reg.page_shift;
1806 seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
1807}
1808
1809static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg,
1810 struct ib_send_wr *wr,
1811 struct mlx5_core_dev *mdev,
1812 struct mlx5_ib_pd *pd,
1813 int writ)
1814{
1815 struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
1816 u64 *page_list = wr->wr.fast_reg.page_list->page_list;
1817 u64 perm = MLX5_EN_RD | (writ ? MLX5_EN_WR : 0);
1818 int i;
1819
1820 for (i = 0; i < wr->wr.fast_reg.page_list_len; i++)
1821 mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm);
1822 dseg->addr = cpu_to_be64(mfrpl->map);
1823 dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64));
1824 dseg->lkey = cpu_to_be32(pd->pa_lkey);
1825}
1826
1827static __be32 send_ieth(struct ib_send_wr *wr)
1828{
1829 switch (wr->opcode) {
1830 case IB_WR_SEND_WITH_IMM:
1831 case IB_WR_RDMA_WRITE_WITH_IMM:
1832 return wr->ex.imm_data;
1833
1834 case IB_WR_SEND_WITH_INV:
1835 return cpu_to_be32(wr->ex.invalidate_rkey);
1836
1837 default:
1838 return 0;
1839 }
1840}
1841
1842static u8 calc_sig(void *wqe, int size)
1843{
1844 u8 *p = wqe;
1845 u8 res = 0;
1846 int i;
1847
1848 for (i = 0; i < size; i++)
1849 res ^= p[i];
1850
1851 return ~res;
1852}
1853
1854static u8 wq_sig(void *wqe)
1855{
1856 return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
1857}
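calc_sig() folds a region with byte-wise XOR and returns the complement, so if the signature byte lies inside the covered region and is zero when the checksum is taken, XOR-ing the signed region back over itself yields 0xff; wq_sig() only derives the number of covered bytes from a 6-bit field read out of byte 8 of the WQE, scaled to 16-byte units. A minimal userspace sketch of the checksum property (the signature byte position chosen here is arbitrary):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint8_t calc_sig(const void *wqe, int size)
{
	const uint8_t *p = wqe;
	uint8_t res = 0;
	int i;

	for (i = 0; i < size; i++)
		res ^= p[i];

	return ~res;				/* complement of the XOR fold */
}

int main(void)
{
	uint8_t wqe[16];
	uint8_t check = 0;
	int i;

	memset(wqe, 0xab, sizeof(wqe));
	wqe[8] = 0;				/* signature byte starts out cleared */
	wqe[8] = calc_sig(wqe, sizeof(wqe));	/* then gets the checksum */

	for (i = 0; i < (int)sizeof(wqe); i++)
		check ^= wqe[i];

	printf("check = 0x%02x\n", check);	/* 0xff for an intact WQE */
	return 0;
}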
1858
1859static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr,
1860 void *wqe, int *sz)
1861{
1862 struct mlx5_wqe_inline_seg *seg;
1863 void *qend = qp->sq.qend;
1864 void *addr;
1865 int inl = 0;
1866 int copy;
1867 int len;
1868 int i;
1869
1870 seg = wqe;
1871 wqe += sizeof(*seg);
1872 for (i = 0; i < wr->num_sge; i++) {
1873 addr = (void *)(unsigned long)(wr->sg_list[i].addr);
1874 len = wr->sg_list[i].length;
1875 inl += len;
1876
1877 if (unlikely(inl > qp->max_inline_data))
1878 return -ENOMEM;
1879
1880 if (unlikely(wqe + len > qend)) {
1881 copy = qend - wqe;
1882 memcpy(wqe, addr, copy);
1883 addr += copy;
1884 len -= copy;
1885 wqe = mlx5_get_send_wqe(qp, 0);
1886 }
1887 memcpy(wqe, addr, len);
1888 wqe += len;
1889 }
1890
1891 seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);
1892
1893 *sz = ALIGN(inl + sizeof(seg->byte_count), 16) / 16;
1894
1895 return 0;
1896}
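set_data_inl_seg() above copies the scatter list directly into the WQE behind a 4-byte byte_count header (tagged with MLX5_INLINE_SEG), wrapping the copy back to the start of the queue buffer when it reaches qend, and reports the consumed space in 16-byte units. The rounding as a standalone sketch:

#include <stdio.h>

/* same rounding as the kernel's ALIGN() for a power-of-two alignment */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	int lengths[] = { 1, 12, 20, 60 };
	int i;

	/* WQE space charged for inline data: payload plus the 4-byte
	 * byte_count header, rounded up to whole 16-byte units
	 */
	for (i = 0; i < 4; i++)
		printf("%2d bytes inline -> %d x 16-byte units\n",
		       lengths[i], ALIGN_UP(lengths[i] + 4, 16) / 16);
	return 0;
}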
1897
1898static int set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size,
1899 struct mlx5_core_dev *mdev, struct mlx5_ib_pd *pd, struct mlx5_ib_qp *qp)
1900{
1901 int writ = 0;
1902 int li;
1903
1904 li = wr->opcode == IB_WR_LOCAL_INV ? 1 : 0;
1905 if (unlikely(wr->send_flags & IB_SEND_INLINE))
1906 return -EINVAL;
1907
1908 set_frwr_umr_segment(*seg, wr, li);
1909 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
1910 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
1911 if (unlikely((*seg == qp->sq.qend)))
1912 *seg = mlx5_get_send_wqe(qp, 0);
1913 set_mkey_segment(*seg, wr, li, &writ);
1914 *seg += sizeof(struct mlx5_mkey_seg);
1915 *size += sizeof(struct mlx5_mkey_seg) / 16;
1916 if (unlikely((*seg == qp->sq.qend)))
1917 *seg = mlx5_get_send_wqe(qp, 0);
1918 if (!li) {
1919 set_frwr_pages(*seg, wr, mdev, pd, writ);
1920 *seg += sizeof(struct mlx5_wqe_data_seg);
1921 *size += (sizeof(struct mlx5_wqe_data_seg) / 16);
1922 }
1923 return 0;
1924}
1925
1926static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
1927{
1928 __be32 *p = NULL;
1929 int tidx = idx;
1930 int i, j;
1931
1932 pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx));
1933 for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
1934 if ((i & 0xf) == 0) {
1935 void *buf = mlx5_get_send_wqe(qp, tidx);
1936 tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1);
1937 p = buf;
1938 j = 0;
1939 }
1940 pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
1941 be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
1942 be32_to_cpu(p[j + 3]));
1943 }
1944}
1945
1946static void mlx5_bf_copy(u64 __iomem *dst, u64 *src,
1947 unsigned bytecnt, struct mlx5_ib_qp *qp)
1948{
1949 while (bytecnt > 0) {
1950 __iowrite64_copy(dst++, src++, 8);
1951 __iowrite64_copy(dst++, src++, 8);
1952 __iowrite64_copy(dst++, src++, 8);
1953 __iowrite64_copy(dst++, src++, 8);
1954 __iowrite64_copy(dst++, src++, 8);
1955 __iowrite64_copy(dst++, src++, 8);
1956 __iowrite64_copy(dst++, src++, 8);
1957 __iowrite64_copy(dst++, src++, 8);
1958 bytecnt -= 64;
1959 if (unlikely(src == qp->sq.qend))
1960 src = mlx5_get_send_wqe(qp, 0);
1961 }
1962}
1963
1964static u8 get_fence(u8 fence, struct ib_send_wr *wr)
1965{
1966 if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
1967 wr->send_flags & IB_SEND_FENCE))
1968 return MLX5_FENCE_MODE_STRONG_ORDERING;
1969
1970 if (unlikely(fence)) {
1971 if (wr->send_flags & IB_SEND_FENCE)
1972 return MLX5_FENCE_MODE_SMALL_AND_FENCE;
1973 else
1974 return fence;
1975
1976 } else {
1977 return 0;
1978 }
1979}
1980
1981int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1982 struct ib_send_wr **bad_wr)
1983{
1984 struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */
1985 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
1986 struct mlx5_core_dev *mdev = &dev->mdev;
1987 struct mlx5_ib_qp *qp = to_mqp(ibqp);
1988 struct mlx5_wqe_data_seg *dpseg;
1989 struct mlx5_wqe_xrc_seg *xrc;
1990 struct mlx5_bf *bf = qp->bf;
1991 int uninitialized_var(size);
1992 void *qend = qp->sq.qend;
1993 unsigned long flags;
1994 u32 mlx5_opcode;
1995 unsigned idx;
1996 int err = 0;
1997 int inl = 0;
1998 int num_sge;
1999 void *seg;
2000 int nreq;
2001 int i;
2002 u8 next_fence = 0;
2003 u8 opmod = 0;
2004 u8 fence;
2005
2006 spin_lock_irqsave(&qp->sq.lock, flags);
2007
2008 for (nreq = 0; wr; nreq++, wr = wr->next) {
2009 if (unlikely(wr->opcode >= sizeof(mlx5_ib_opcode) / sizeof(mlx5_ib_opcode[0]))) {
2010 mlx5_ib_warn(dev, "\n");
2011 err = -EINVAL;
2012 *bad_wr = wr;
2013 goto out;
2014 }
2015
2016 if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
2017 mlx5_ib_warn(dev, "\n");
2018 err = -ENOMEM;
2019 *bad_wr = wr;
2020 goto out;
2021 }
2022
2023 fence = qp->fm_cache;
2024 num_sge = wr->num_sge;
2025 if (unlikely(num_sge > qp->sq.max_gs)) {
2026 mlx5_ib_warn(dev, "\n");
2027 err = -ENOMEM;
2028 *bad_wr = wr;
2029 goto out;
2030 }
2031
2032 idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
2033 seg = mlx5_get_send_wqe(qp, idx);
2034 ctrl = seg;
2035 *(uint32_t *)(seg + 8) = 0;
2036 ctrl->imm = send_ieth(wr);
2037 ctrl->fm_ce_se = qp->sq_signal_bits |
2038 (wr->send_flags & IB_SEND_SIGNALED ?
2039 MLX5_WQE_CTRL_CQ_UPDATE : 0) |
2040 (wr->send_flags & IB_SEND_SOLICITED ?
2041 MLX5_WQE_CTRL_SOLICITED : 0);
2042
2043 seg += sizeof(*ctrl);
2044 size = sizeof(*ctrl) / 16;
2045
2046 switch (ibqp->qp_type) {
2047 case IB_QPT_XRC_INI:
2048 xrc = seg;
2049 xrc->xrc_srqn = htonl(wr->xrc_remote_srq_num);
2050 seg += sizeof(*xrc);
2051 size += sizeof(*xrc) / 16;
2052 /* fall through */
2053 case IB_QPT_RC:
2054 switch (wr->opcode) {
2055 case IB_WR_RDMA_READ:
2056 case IB_WR_RDMA_WRITE:
2057 case IB_WR_RDMA_WRITE_WITH_IMM:
2058 set_raddr_seg(seg, wr->wr.rdma.remote_addr,
2059 wr->wr.rdma.rkey);
2060 seg += sizeof(struct mlx5_wqe_raddr_seg);
2061 size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
2062 break;
2063
2064 case IB_WR_ATOMIC_CMP_AND_SWP:
2065 case IB_WR_ATOMIC_FETCH_AND_ADD:
2066 set_raddr_seg(seg, wr->wr.atomic.remote_addr,
2067 wr->wr.atomic.rkey);
2068 seg += sizeof(struct mlx5_wqe_raddr_seg);
2069
2070 set_atomic_seg(seg, wr);
2071 seg += sizeof(struct mlx5_wqe_atomic_seg);
2072
2073 size += (sizeof(struct mlx5_wqe_raddr_seg) +
2074 sizeof(struct mlx5_wqe_atomic_seg)) / 16;
2075 break;
2076
2077 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
2078 set_raddr_seg(seg, wr->wr.atomic.remote_addr,
2079 wr->wr.atomic.rkey);
2080 seg += sizeof(struct mlx5_wqe_raddr_seg);
2081
2082 set_masked_atomic_seg(seg, wr);
2083 seg += sizeof(struct mlx5_wqe_masked_atomic_seg);
2084
2085 size += (sizeof(struct mlx5_wqe_raddr_seg) +
2086 sizeof(struct mlx5_wqe_masked_atomic_seg)) / 16;
2087 break;
2088
2089 case IB_WR_LOCAL_INV:
2090 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
2091 qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
2092 ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
2093 err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
2094 if (err) {
2095 mlx5_ib_warn(dev, "\n");
2096 *bad_wr = wr;
2097 goto out;
2098 }
2099 num_sge = 0;
2100 break;
2101
2102 case IB_WR_FAST_REG_MR:
2103 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
2104 qp->sq.wr_data[idx] = IB_WR_FAST_REG_MR;
2105 ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
2106 err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
2107 if (err) {
2108 mlx5_ib_warn(dev, "\n");
2109 *bad_wr = wr;
2110 goto out;
2111 }
2112 num_sge = 0;
2113 break;
2114
2115 default:
2116 break;
2117 }
2118 break;
2119
2120 case IB_QPT_UC:
2121 switch (wr->opcode) {
2122 case IB_WR_RDMA_WRITE:
2123 case IB_WR_RDMA_WRITE_WITH_IMM:
2124 set_raddr_seg(seg, wr->wr.rdma.remote_addr,
2125 wr->wr.rdma.rkey);
2126 seg += sizeof(struct mlx5_wqe_raddr_seg);
2127 size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
2128 break;
2129
2130 default:
2131 break;
2132 }
2133 break;
2134
2135 case IB_QPT_UD:
2136 case IB_QPT_SMI:
2137 case IB_QPT_GSI:
2138 set_datagram_seg(seg, wr);
2139 seg += sizeof(struct mlx5_wqe_datagram_seg);
2140 size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
2141 if (unlikely((seg == qend)))
2142 seg = mlx5_get_send_wqe(qp, 0);
2143 break;
2144
2145 case MLX5_IB_QPT_REG_UMR:
2146 if (wr->opcode != MLX5_IB_WR_UMR) {
2147 err = -EINVAL;
2148 mlx5_ib_warn(dev, "bad opcode\n");
2149 goto out;
2150 }
2151 qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
2152 ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
2153 set_reg_umr_segment(seg, wr);
2154 seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
2155 size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
2156 if (unlikely((seg == qend)))
2157 seg = mlx5_get_send_wqe(qp, 0);
2158 set_reg_mkey_segment(seg, wr);
2159 seg += sizeof(struct mlx5_mkey_seg);
2160 size += sizeof(struct mlx5_mkey_seg) / 16;
2161 if (unlikely((seg == qend)))
2162 seg = mlx5_get_send_wqe(qp, 0);
2163 break;
2164
2165 default:
2166 break;
2167 }
2168
2169 if (wr->send_flags & IB_SEND_INLINE && num_sge) {
2170 int uninitialized_var(sz);
2171
2172 err = set_data_inl_seg(qp, wr, seg, &sz);
2173 if (unlikely(err)) {
2174 mlx5_ib_warn(dev, "\n");
2175 *bad_wr = wr;
2176 goto out;
2177 }
2178 inl = 1;
2179 size += sz;
2180 } else {
2181 dpseg = seg;
2182 for (i = 0; i < num_sge; i++) {
2183 if (unlikely(dpseg == qend)) {
2184 seg = mlx5_get_send_wqe(qp, 0);
2185 dpseg = seg;
2186 }
2187 if (likely(wr->sg_list[i].length)) {
2188 set_data_ptr_seg(dpseg, wr->sg_list + i);
2189 size += sizeof(struct mlx5_wqe_data_seg) / 16;
2190 dpseg++;
2191 }
2192 }
2193 }
2194
2195 mlx5_opcode = mlx5_ib_opcode[wr->opcode];
2196 ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
2197 mlx5_opcode |
2198 ((u32)opmod << 24));
2199 ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8));
2200 ctrl->fm_ce_se |= get_fence(fence, wr);
2201 qp->fm_cache = next_fence;
2202 if (unlikely(qp->wq_sig))
2203 ctrl->signature = wq_sig(ctrl);
2204
2205 qp->sq.wrid[idx] = wr->wr_id;
2206 qp->sq.w_list[idx].opcode = mlx5_opcode;
2207 qp->sq.wqe_head[idx] = qp->sq.head + nreq;
2208 qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
2209 qp->sq.w_list[idx].next = qp->sq.cur_post;
2210
2211 if (0)
2212 dump_wqe(qp, idx, size);
2213 }
2214
2215out:
2216 if (likely(nreq)) {
2217 qp->sq.head += nreq;
2218
2219 /* Make sure that descriptors are written before
2220 * updating doorbell record and ringing the doorbell
2221 */
2222 wmb();
2223
2224 qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
2225
2226 if (bf->need_lock)
2227 spin_lock(&bf->lock);
2228
2229 /* TBD enable WC */
2230 if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) {
2231 mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp);
2232 /* wc_wmb(); */
2233 } else {
2234 mlx5_write64((__be32 *)ctrl, bf->regreg + bf->offset,
2235 MLX5_GET_DOORBELL_LOCK(&bf->lock32));
2236 /* Make sure doorbells don't leak out of SQ spinlock
2237 * and reach the HCA out of order.
2238 */
2239 mmiowb();
2240 }
2241 bf->offset ^= bf->buf_size;
2242 if (bf->need_lock)
2243 spin_unlock(&bf->lock);
2244 }
2245
2246 spin_unlock_irqrestore(&qp->sq.lock, flags);
2247
2248 return err;
2249}
2250
2251static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
2252{
2253 sig->signature = calc_sig(sig, size);
2254}
2255
2256int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
2257 struct ib_recv_wr **bad_wr)
2258{
2259 struct mlx5_ib_qp *qp = to_mqp(ibqp);
2260 struct mlx5_wqe_data_seg *scat;
2261 struct mlx5_rwqe_sig *sig;
2262 unsigned long flags;
2263 int err = 0;
2264 int nreq;
2265 int ind;
2266 int i;
2267
2268 spin_lock_irqsave(&qp->rq.lock, flags);
2269
2270 ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
2271
2272 for (nreq = 0; wr; nreq++, wr = wr->next) {
2273 if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
2274 err = -ENOMEM;
2275 *bad_wr = wr;
2276 goto out;
2277 }
2278
2279 if (unlikely(wr->num_sge > qp->rq.max_gs)) {
2280 err = -EINVAL;
2281 *bad_wr = wr;
2282 goto out;
2283 }
2284
2285 scat = get_recv_wqe(qp, ind);
2286 if (qp->wq_sig)
2287 scat++;
2288
2289 for (i = 0; i < wr->num_sge; i++)
2290 set_data_ptr_seg(scat + i, wr->sg_list + i);
2291
2292 if (i < qp->rq.max_gs) {
2293 scat[i].byte_count = 0;
2294 scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
2295 scat[i].addr = 0;
2296 }
2297
2298 if (qp->wq_sig) {
2299 sig = (struct mlx5_rwqe_sig *)scat;
2300 set_sig_seg(sig, (qp->rq.max_gs + 1) << 2);
2301 }
2302
2303 qp->rq.wrid[ind] = wr->wr_id;
2304
2305 ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
2306 }
2307
2308out:
2309 if (likely(nreq)) {
2310 qp->rq.head += nreq;
2311
2312 /* Make sure that descriptors are written before
2313 * doorbell record.
2314 */
2315 wmb();
2316
2317 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
2318 }
2319
2320 spin_unlock_irqrestore(&qp->rq.lock, flags);
2321
2322 return err;
2323}
2324
2325static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
2326{
2327 switch (mlx5_state) {
2328 case MLX5_QP_STATE_RST: return IB_QPS_RESET;
2329 case MLX5_QP_STATE_INIT: return IB_QPS_INIT;
2330 case MLX5_QP_STATE_RTR: return IB_QPS_RTR;
2331 case MLX5_QP_STATE_RTS: return IB_QPS_RTS;
2332 case MLX5_QP_STATE_SQ_DRAINING:
2333 case MLX5_QP_STATE_SQD: return IB_QPS_SQD;
2334 case MLX5_QP_STATE_SQER: return IB_QPS_SQE;
2335 case MLX5_QP_STATE_ERR: return IB_QPS_ERR;
2336 default: return -1;
2337 }
2338}
2339
2340static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
2341{
2342 switch (mlx5_mig_state) {
2343 case MLX5_QP_PM_ARMED: return IB_MIG_ARMED;
2344 case MLX5_QP_PM_REARM: return IB_MIG_REARM;
2345 case MLX5_QP_PM_MIGRATED: return IB_MIG_MIGRATED;
2346 default: return -1;
2347 }
2348}
2349
2350static int to_ib_qp_access_flags(int mlx5_flags)
2351{
2352 int ib_flags = 0;
2353
2354 if (mlx5_flags & MLX5_QP_BIT_RRE)
2355 ib_flags |= IB_ACCESS_REMOTE_READ;
2356 if (mlx5_flags & MLX5_QP_BIT_RWE)
2357 ib_flags |= IB_ACCESS_REMOTE_WRITE;
2358 if (mlx5_flags & MLX5_QP_BIT_RAE)
2359 ib_flags |= IB_ACCESS_REMOTE_ATOMIC;
2360
2361 return ib_flags;
2362}
2363
2364static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
2365 struct mlx5_qp_path *path)
2366{
2367 struct mlx5_core_dev *dev = &ibdev->mdev;
2368
2369 memset(ib_ah_attr, 0, sizeof(*ib_ah_attr));
2370 ib_ah_attr->port_num = path->port;
2371
2372 if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
2373 return;
2374
2375 ib_ah_attr->sl = path->sl & 0xf;
2376
2377 ib_ah_attr->dlid = be16_to_cpu(path->rlid);
2378 ib_ah_attr->src_path_bits = path->grh_mlid & 0x7f;
2379 ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0;
2380 ib_ah_attr->ah_flags = (path->grh_mlid & (1 << 7)) ? IB_AH_GRH : 0;
2381 if (ib_ah_attr->ah_flags) {
2382 ib_ah_attr->grh.sgid_index = path->mgid_index;
2383 ib_ah_attr->grh.hop_limit = path->hop_limit;
2384 ib_ah_attr->grh.traffic_class =
2385 (be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
2386 ib_ah_attr->grh.flow_label =
2387 be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
2388 memcpy(ib_ah_attr->grh.dgid.raw,
2389 path->rgid, sizeof(ib_ah_attr->grh.dgid.raw));
2390 }
2391}
2392
2393int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
2394 struct ib_qp_init_attr *qp_init_attr)
2395{
2396 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
2397 struct mlx5_ib_qp *qp = to_mqp(ibqp);
2398 struct mlx5_query_qp_mbox_out *outb;
2399 struct mlx5_qp_context *context;
2400 int mlx5_state;
2401 int err = 0;
2402
2403 mutex_lock(&qp->mutex);
2404 outb = kzalloc(sizeof(*outb), GFP_KERNEL);
2405 if (!outb) {
2406 err = -ENOMEM;
2407 goto out;
2408 }
2409 context = &outb->ctx;
2410 err = mlx5_core_qp_query(&dev->mdev, &qp->mqp, outb, sizeof(*outb));
2411 if (err)
2412 goto out_free;
2413
2414 mlx5_state = be32_to_cpu(context->flags) >> 28;
2415
2416 qp->state = to_ib_qp_state(mlx5_state);
2417 qp_attr->qp_state = qp->state;
2418 qp_attr->path_mtu = context->mtu_msgmax >> 5;
2419 qp_attr->path_mig_state =
2420 to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
2421 qp_attr->qkey = be32_to_cpu(context->qkey);
2422 qp_attr->rq_psn = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
2423 qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff;
2424 qp_attr->dest_qp_num = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff;
2425 qp_attr->qp_access_flags =
2426 to_ib_qp_access_flags(be32_to_cpu(context->params2));
2427
2428 if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
2429 to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
2430 to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
2431 qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f;
2432 qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
2433 }
2434
2435 qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f;
2436 qp_attr->port_num = context->pri_path.port;
2437
2438 /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
2439 qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING;
2440
2441 qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);
2442
2443 qp_attr->max_dest_rd_atomic =
2444 1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
2445 qp_attr->min_rnr_timer =
2446 (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
2447 qp_attr->timeout = context->pri_path.ackto_lt >> 3;
2448 qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7;
2449 qp_attr->rnr_retry = (be32_to_cpu(context->params1) >> 13) & 0x7;
2450 qp_attr->alt_timeout = context->alt_path.ackto_lt >> 3;
2451 qp_attr->cur_qp_state = qp_attr->qp_state;
2452 qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
2453 qp_attr->cap.max_recv_sge = qp->rq.max_gs;
2454
2455 if (!ibqp->uobject) {
2456 qp_attr->cap.max_send_wr = qp->sq.wqe_cnt;
2457 qp_attr->cap.max_send_sge = qp->sq.max_gs;
2458 } else {
2459 qp_attr->cap.max_send_wr = 0;
2460 qp_attr->cap.max_send_sge = 0;
2461 }
2462
2463 /* We don't support inline sends for kernel QPs (yet), and we
2464 * don't know what userspace's value should be.
2465 */
2466 qp_attr->cap.max_inline_data = 0;
2467
2468 qp_init_attr->cap = qp_attr->cap;
2469
2470 qp_init_attr->create_flags = 0;
2471 if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
2472 qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
2473
2474 qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
2475 IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
2476
2477out_free:
2478 kfree(outb);
2479
2480out:
2481 mutex_unlock(&qp->mutex);
2482 return err;
2483}
2484
2485struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
2486 struct ib_ucontext *context,
2487 struct ib_udata *udata)
2488{
2489 struct mlx5_ib_dev *dev = to_mdev(ibdev);
2490 struct mlx5_ib_xrcd *xrcd;
2491 int err;
2492
2493 if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_XRC))
2494 return ERR_PTR(-ENOSYS);
2495
2496 xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
2497 if (!xrcd)
2498 return ERR_PTR(-ENOMEM);
2499
2500 err = mlx5_core_xrcd_alloc(&dev->mdev, &xrcd->xrcdn);
2501 if (err) {
2502 kfree(xrcd);
2503		return ERR_PTR(err);
2504 }
2505
2506 return &xrcd->ibxrcd;
2507}
2508
2509int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
2510{
2511 struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
2512 u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
2513 int err;
2514
2515 err = mlx5_core_xrcd_dealloc(&dev->mdev, xrcdn);
2516 if (err) {
2517 mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
2518 return err;
2519 }
2520
2521 kfree(xrcd);
2522
2523 return 0;
2524}
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
new file mode 100644
index 000000000000..84d297afd6a9
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -0,0 +1,473 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/module.h>
34#include <linux/mlx5/qp.h>
35#include <linux/mlx5/srq.h>
36#include <linux/slab.h>
37#include <rdma/ib_umem.h>
38
39#include "mlx5_ib.h"
40#include "user.h"
41
42/* not supported currently */
43static int srq_signature;
44
45static void *get_wqe(struct mlx5_ib_srq *srq, int n)
46{
47 return mlx5_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);
48}
49
50static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type)
51{
52 struct ib_event event;
53 struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;
54
55 if (ibsrq->event_handler) {
56 event.device = ibsrq->device;
57 event.element.srq = ibsrq;
58 switch (type) {
59 case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
60 event.event = IB_EVENT_SRQ_LIMIT_REACHED;
61 break;
62 case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
63 event.event = IB_EVENT_SRQ_ERR;
64 break;
65 default:
66 pr_warn("mlx5_ib: Unexpected event type %d on SRQ %06x\n",
67 type, srq->srqn);
68 return;
69 }
70
71 ibsrq->event_handler(&event, ibsrq->srq_context);
72 }
73}
74
75static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
76 struct mlx5_create_srq_mbox_in **in,
77 struct ib_udata *udata, int buf_size, int *inlen)
78{
79 struct mlx5_ib_dev *dev = to_mdev(pd->device);
80 struct mlx5_ib_create_srq ucmd;
81 int err;
82 int npages;
83 int page_shift;
84 int ncont;
85 u32 offset;
86
87 if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
88 mlx5_ib_dbg(dev, "failed copy udata\n");
89 return -EFAULT;
90 }
91 srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
92
93 srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size,
94 0, 0);
95 if (IS_ERR(srq->umem)) {
96 mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
97 err = PTR_ERR(srq->umem);
98 return err;
99 }
100
101 mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages,
102 &page_shift, &ncont, NULL);
103 err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift,
104 &offset);
105 if (err) {
106 mlx5_ib_warn(dev, "bad offset\n");
107 goto err_umem;
108 }
109
110 *inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
111 *in = mlx5_vzalloc(*inlen);
112 if (!(*in)) {
113 err = -ENOMEM;
114 goto err_umem;
115 }
116
117 mlx5_ib_populate_pas(dev, srq->umem, page_shift, (*in)->pas, 0);
118
119 err = mlx5_ib_db_map_user(to_mucontext(pd->uobject->context),
120 ucmd.db_addr, &srq->db);
121 if (err) {
122 mlx5_ib_dbg(dev, "map doorbell failed\n");
123 goto err_in;
124 }
125
126 (*in)->ctx.log_pg_sz = page_shift - PAGE_SHIFT;
127 (*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26);
128
129 return 0;
130
131err_in:
132 mlx5_vfree(*in);
133
134err_umem:
135 ib_umem_release(srq->umem);
136
137 return err;
138}
139
140static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
141 struct mlx5_create_srq_mbox_in **in, int buf_size,
142 int *inlen)
143{
144 int err;
145 int i;
146 struct mlx5_wqe_srq_next_seg *next;
147 int page_shift;
148 int npages;
149
150 err = mlx5_db_alloc(&dev->mdev, &srq->db);
151 if (err) {
152 mlx5_ib_warn(dev, "alloc dbell rec failed\n");
153 return err;
154 }
155
156 *srq->db.db = 0;
157
158 if (mlx5_buf_alloc(&dev->mdev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
159 mlx5_ib_dbg(dev, "buf alloc failed\n");
160 err = -ENOMEM;
161 goto err_db;
162 }
163 page_shift = srq->buf.page_shift;
164
165 srq->head = 0;
166 srq->tail = srq->msrq.max - 1;
167 srq->wqe_ctr = 0;
168
169 for (i = 0; i < srq->msrq.max; i++) {
170 next = get_wqe(srq, i);
171 next->next_wqe_index =
172 cpu_to_be16((i + 1) & (srq->msrq.max - 1));
173 }
174
175 npages = DIV_ROUND_UP(srq->buf.npages, 1 << (page_shift - PAGE_SHIFT));
176 mlx5_ib_dbg(dev, "buf_size %d, page_shift %d, npages %d, calc npages %d\n",
177 buf_size, page_shift, srq->buf.npages, npages);
178 *inlen = sizeof(**in) + sizeof(*(*in)->pas) * npages;
179 *in = mlx5_vzalloc(*inlen);
180 if (!*in) {
181 err = -ENOMEM;
182 goto err_buf;
183 }
184 mlx5_fill_page_array(&srq->buf, (*in)->pas);
185
186 srq->wrid = kmalloc(srq->msrq.max * sizeof(u64), GFP_KERNEL);
187 if (!srq->wrid) {
188 mlx5_ib_dbg(dev, "kmalloc failed %lu\n",
189 (unsigned long)(srq->msrq.max * sizeof(u64)));
190 err = -ENOMEM;
191 goto err_in;
192 }
193 srq->wq_sig = !!srq_signature;
194
195 (*in)->ctx.log_pg_sz = page_shift - PAGE_SHIFT;
196
197 return 0;
198
199err_in:
200 mlx5_vfree(*in);
201
202err_buf:
203 mlx5_buf_free(&dev->mdev, &srq->buf);
204
205err_db:
206 mlx5_db_free(&dev->mdev, &srq->db);
207 return err;
208}
209
210static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq)
211{
212 mlx5_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
213 ib_umem_release(srq->umem);
214}
215
216
217static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq)
218{
219 kfree(srq->wrid);
220 mlx5_buf_free(&dev->mdev, &srq->buf);
221 mlx5_db_free(&dev->mdev, &srq->db);
222}
223
224struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
225 struct ib_srq_init_attr *init_attr,
226 struct ib_udata *udata)
227{
228 struct mlx5_ib_dev *dev = to_mdev(pd->device);
229 struct mlx5_ib_srq *srq;
230 int desc_size;
231 int buf_size;
232 int err;
233 struct mlx5_create_srq_mbox_in *uninitialized_var(in);
234 int uninitialized_var(inlen);
235 int is_xrc;
236 u32 flgs, xrcdn;
237
238 /* Sanity check SRQ size before proceeding */
239 if (init_attr->attr.max_wr >= dev->mdev.caps.max_srq_wqes) {
240 mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
241 init_attr->attr.max_wr,
242 dev->mdev.caps.max_srq_wqes);
243 return ERR_PTR(-EINVAL);
244 }
245
246 srq = kmalloc(sizeof(*srq), GFP_KERNEL);
247 if (!srq)
248 return ERR_PTR(-ENOMEM);
249
250 mutex_init(&srq->mutex);
251 spin_lock_init(&srq->lock);
252 srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1);
253 srq->msrq.max_gs = init_attr->attr.max_sge;
254
255 desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
256 srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
257 desc_size = roundup_pow_of_two(desc_size);
258 desc_size = max_t(int, 32, desc_size);
259 srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
260 sizeof(struct mlx5_wqe_data_seg);
261 srq->msrq.wqe_shift = ilog2(desc_size);
262 buf_size = srq->msrq.max * desc_size;
263 mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n",
264 desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
265 srq->msrq.max_avail_gather);
266
267 if (pd->uobject)
268 err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen);
269 else
270 err = create_srq_kernel(dev, srq, &in, buf_size, &inlen);
271
272 if (err) {
273 mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
274 pd->uobject ? "user" : "kernel", err);
275 goto err_srq;
276 }
277
278 is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
279 in->ctx.state_log_sz = ilog2(srq->msrq.max);
280 flgs = ((srq->msrq.wqe_shift - 4) | (is_xrc << 5) | (srq->wq_sig << 7)) << 24;
281 xrcdn = 0;
282 if (is_xrc) {
283 xrcdn = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn;
284 in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(init_attr->ext.xrc.cq)->mcq.cqn);
285 } else if (init_attr->srq_type == IB_SRQT_BASIC) {
286 xrcdn = to_mxrcd(dev->devr.x0)->xrcdn;
287 in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(dev->devr.c0)->mcq.cqn);
288 }
289
290 in->ctx.flags_xrcd = cpu_to_be32((flgs & 0xFF000000) | (xrcdn & 0xFFFFFF));
291
292 in->ctx.pd = cpu_to_be32(to_mpd(pd)->pdn);
293 in->ctx.db_record = cpu_to_be64(srq->db.dma);
294 err = mlx5_core_create_srq(&dev->mdev, &srq->msrq, in, inlen);
295 mlx5_vfree(in);
296 if (err) {
297 mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
298 goto err_srq;
299 }
300
301 mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn);
302
303 srq->msrq.event = mlx5_ib_srq_event;
304 srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;
305
306 if (pd->uobject)
307 if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) {
308 mlx5_ib_dbg(dev, "copy to user failed\n");
309 err = -EFAULT;
310 goto err_core;
311 }
312
313 init_attr->attr.max_wr = srq->msrq.max - 1;
314
315 return &srq->ibsrq;
316
317err_core:
318 mlx5_core_destroy_srq(&dev->mdev, &srq->msrq);
319 if (pd->uobject)
320 destroy_srq_user(pd, srq);
321 else
322 destroy_srq_kernel(dev, srq);
323
324err_srq:
325 kfree(srq);
326
327 return ERR_PTR(err);
328}
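The descriptor sizing near the top of mlx5_ib_create_srq() is easiest to follow with concrete numbers. A standalone sketch (illustrative only; it assumes the 16-byte mlx5_wqe_srq_next_seg and mlx5_wqe_data_seg layouts instead of pulling in the kernel headers):

#include <stdio.h>

/* stand-ins for sizeof(struct mlx5_wqe_srq_next_seg) and
 * sizeof(struct mlx5_wqe_data_seg), both assumed to be 16 bytes
 */
#define NEXT_SEG_SIZE	16
#define DATA_SEG_SIZE	16

static unsigned int roundup_pow_of_two(unsigned int v)
{
	unsigned int r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int max_wr = 100, max_sge = 3;
	unsigned int max, desc_size, buf_size;

	max = roundup_pow_of_two(max_wr + 1);			/* 128 entries */
	desc_size = NEXT_SEG_SIZE + max_sge * DATA_SEG_SIZE;	/* 64 bytes */
	desc_size = roundup_pow_of_two(desc_size);
	if (desc_size < 32)
		desc_size = 32;
	buf_size = max * desc_size;

	printf("max=%u desc_size=%u wqe_shift=%u max_avail_gather=%u buf_size=%u\n",
	       max, desc_size, __builtin_ctz(desc_size),
	       (desc_size - NEXT_SEG_SIZE) / DATA_SEG_SIZE, buf_size);
	/* prints: max=128 desc_size=64 wqe_shift=6 max_avail_gather=3 buf_size=8192 */
	return 0;
}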
329
330int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
331 enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
332{
333 struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
334 struct mlx5_ib_srq *srq = to_msrq(ibsrq);
335 int ret;
336
337 /* We don't support resizing SRQs yet */
338 if (attr_mask & IB_SRQ_MAX_WR)
339 return -EINVAL;
340
341 if (attr_mask & IB_SRQ_LIMIT) {
342 if (attr->srq_limit >= srq->msrq.max)
343 return -EINVAL;
344
345 mutex_lock(&srq->mutex);
346 ret = mlx5_core_arm_srq(&dev->mdev, &srq->msrq, attr->srq_limit, 1);
347 mutex_unlock(&srq->mutex);
348
349 if (ret)
350 return ret;
351 }
352
353 return 0;
354}
355
356int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
357{
358 struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
359 struct mlx5_ib_srq *srq = to_msrq(ibsrq);
360 int ret;
361 struct mlx5_query_srq_mbox_out *out;
362
363 out = kzalloc(sizeof(*out), GFP_KERNEL);
364 if (!out)
365 return -ENOMEM;
366
367 ret = mlx5_core_query_srq(&dev->mdev, &srq->msrq, out);
368 if (ret)
369 goto out_box;
370
371 srq_attr->srq_limit = be16_to_cpu(out->ctx.lwm);
372 srq_attr->max_wr = srq->msrq.max - 1;
373 srq_attr->max_sge = srq->msrq.max_gs;
374
375out_box:
376 kfree(out);
377 return ret;
378}
379
380int mlx5_ib_destroy_srq(struct ib_srq *srq)
381{
382 struct mlx5_ib_dev *dev = to_mdev(srq->device);
383 struct mlx5_ib_srq *msrq = to_msrq(srq);
384
385 mlx5_core_destroy_srq(&dev->mdev, &msrq->msrq);
386
387 if (srq->uobject) {
388 mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
389 ib_umem_release(msrq->umem);
390 } else {
391 kfree(msrq->wrid);
392 mlx5_buf_free(&dev->mdev, &msrq->buf);
393 mlx5_db_free(&dev->mdev, &msrq->db);
394 }
395
396 kfree(srq);
397 return 0;
398}
399
400void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index)
401{
402 struct mlx5_wqe_srq_next_seg *next;
403
404 /* always called with interrupts disabled. */
405 spin_lock(&srq->lock);
406
407 next = get_wqe(srq, srq->tail);
408 next->next_wqe_index = cpu_to_be16(wqe_index);
409 srq->tail = wqe_index;
410
411 spin_unlock(&srq->lock);
412}
413
414int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
415 struct ib_recv_wr **bad_wr)
416{
417 struct mlx5_ib_srq *srq = to_msrq(ibsrq);
418 struct mlx5_wqe_srq_next_seg *next;
419 struct mlx5_wqe_data_seg *scat;
420 unsigned long flags;
421 int err = 0;
422 int nreq;
423 int i;
424
425 spin_lock_irqsave(&srq->lock, flags);
426
427 for (nreq = 0; wr; nreq++, wr = wr->next) {
428 if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
429 err = -EINVAL;
430 *bad_wr = wr;
431 break;
432 }
433
434 if (unlikely(srq->head == srq->tail)) {
435 err = -ENOMEM;
436 *bad_wr = wr;
437 break;
438 }
439
440 srq->wrid[srq->head] = wr->wr_id;
441
442 next = get_wqe(srq, srq->head);
443 srq->head = be16_to_cpu(next->next_wqe_index);
444 scat = (struct mlx5_wqe_data_seg *)(next + 1);
445
446 for (i = 0; i < wr->num_sge; i++) {
447 scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
448 scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey);
449 scat[i].addr = cpu_to_be64(wr->sg_list[i].addr);
450 }
451
452 if (i < srq->msrq.max_avail_gather) {
453 scat[i].byte_count = 0;
454 scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
455 scat[i].addr = 0;
456 }
457 }
458
459 if (likely(nreq)) {
460 srq->wqe_ctr += nreq;
461
462 /* Make sure that descriptors are written before
463 * doorbell record.
464 */
465 wmb();
466
467 *srq->db.db = cpu_to_be32(srq->wqe_ctr);
468 }
469
470 spin_unlock_irqrestore(&srq->lock, flags);
471
472 return err;
473}
diff --git a/drivers/infiniband/hw/mlx5/user.h b/drivers/infiniband/hw/mlx5/user.h
new file mode 100644
index 000000000000..a886de3e593c
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/user.h
@@ -0,0 +1,121 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef MLX5_IB_USER_H
34#define MLX5_IB_USER_H
35
36#include <linux/types.h>
37
38enum {
39 MLX5_QP_FLAG_SIGNATURE = 1 << 0,
40 MLX5_QP_FLAG_SCATTER_CQE = 1 << 1,
41};
42
43enum {
44 MLX5_SRQ_FLAG_SIGNATURE = 1 << 0,
45};
46
47
48/* Increment this value if any changes that break userspace ABI
49 * compatibility are made.
50 */
51#define MLX5_IB_UVERBS_ABI_VERSION 1
52
53/* Make sure that all structs defined in this file remain laid out so
54 * that they pack the same way on 32-bit and 64-bit architectures (to
55 * avoid incompatibility between 32-bit userspace and 64-bit kernels).
56 * In particular do not use pointer types -- pass pointers in __u64
57 * instead.
58 */
59
60struct mlx5_ib_alloc_ucontext_req {
61 __u32 total_num_uuars;
62 __u32 num_low_latency_uuars;
63};
64
65struct mlx5_ib_alloc_ucontext_resp {
66 __u32 qp_tab_size;
67 __u32 bf_reg_size;
68 __u32 tot_uuars;
69 __u32 cache_line_size;
70 __u16 max_sq_desc_sz;
71 __u16 max_rq_desc_sz;
72 __u32 max_send_wqebb;
73 __u32 max_recv_wr;
74 __u32 max_srq_recv_wr;
75 __u16 num_ports;
76 __u16 reserved;
77};
78
79struct mlx5_ib_alloc_pd_resp {
80 __u32 pdn;
81};
82
83struct mlx5_ib_create_cq {
84 __u64 buf_addr;
85 __u64 db_addr;
86 __u32 cqe_size;
87};
88
89struct mlx5_ib_create_cq_resp {
90 __u32 cqn;
91 __u32 reserved;
92};
93
94struct mlx5_ib_resize_cq {
95 __u64 buf_addr;
96};
97
98struct mlx5_ib_create_srq {
99 __u64 buf_addr;
100 __u64 db_addr;
101 __u32 flags;
102};
103
104struct mlx5_ib_create_srq_resp {
105 __u32 srqn;
106 __u32 reserved;
107};
108
109struct mlx5_ib_create_qp {
110 __u64 buf_addr;
111 __u64 db_addr;
112 __u32 sq_wqe_count;
113 __u32 rq_wqe_count;
114 __u32 rq_wqe_shift;
115 __u32 flags;
116};
117
118struct mlx5_ib_create_qp_resp {
119 __u32 uuar_index;
120};
121#endif /* MLX5_IB_USER_H */
diff --git a/drivers/net/ethernet/mellanox/Kconfig b/drivers/net/ethernet/mellanox/Kconfig
index bcdbc14aeff0..8cf7563a8d92 100644
--- a/drivers/net/ethernet/mellanox/Kconfig
+++ b/drivers/net/ethernet/mellanox/Kconfig
@@ -19,5 +19,6 @@ config NET_VENDOR_MELLANOX
19if NET_VENDOR_MELLANOX 19if NET_VENDOR_MELLANOX
20 20
21source "drivers/net/ethernet/mellanox/mlx4/Kconfig" 21source "drivers/net/ethernet/mellanox/mlx4/Kconfig"
22source "drivers/net/ethernet/mellanox/mlx5/core/Kconfig"
22 23
23endif # NET_VENDOR_MELLANOX 24endif # NET_VENDOR_MELLANOX
diff --git a/drivers/net/ethernet/mellanox/Makefile b/drivers/net/ethernet/mellanox/Makefile
index 37afb9683372..38fe32ef5e5f 100644
--- a/drivers/net/ethernet/mellanox/Makefile
+++ b/drivers/net/ethernet/mellanox/Makefile
@@ -3,3 +3,4 @@
3# 3#
4 4
5obj-$(CONFIG_MLX4_CORE) += mlx4/ 5obj-$(CONFIG_MLX4_CORE) += mlx4/
6obj-$(CONFIG_MLX5_CORE) += mlx5/core/
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
new file mode 100644
index 000000000000..21962828925a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -0,0 +1,18 @@
1#
2# Mellanox driver configuration
3#
4
5config MLX5_CORE
6 tristate
7 depends on PCI && X86
8 default n
9
10config MLX5_DEBUG
11 bool "Verbose debugging output" if (MLX5_CORE && EXPERT)
12 depends on MLX5_CORE
13 default y
14 ---help---
15 This option causes debugging code to be compiled into the
16 mlx5_core driver. The output can be turned on via the
17 debug_mask module parameter (which can also be set after
18 the driver is loaded through sysfs).
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
new file mode 100644
index 000000000000..105780bb980b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -0,0 +1,5 @@
1obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
2
3mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
4 health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
5 mad.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
new file mode 100644
index 000000000000..b215742b842f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -0,0 +1,238 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/errno.h>
34#include <linux/slab.h>
35#include <linux/mm.h>
36#include <linux/export.h>
37#include <linux/bitmap.h>
38#include <linux/dma-mapping.h>
39#include <linux/vmalloc.h>
40#include <linux/mlx5/driver.h>
41
42#include "mlx5_core.h"
43
44/* Handling for queue buffers -- we allocate a bunch of memory and
45 * register it in a memory region at HCA virtual address 0. If the
46 * requested size is > max_direct, we split the allocation into
47 * multiple pages, so we don't require too much contiguous memory.
48 */
49
50int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
51 struct mlx5_buf *buf)
52{
53 dma_addr_t t;
54
55 buf->size = size;
56 if (size <= max_direct) {
57 buf->nbufs = 1;
58 buf->npages = 1;
59 buf->page_shift = get_order(size) + PAGE_SHIFT;
60 buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev,
61 size, &t, GFP_KERNEL);
62 if (!buf->direct.buf)
63 return -ENOMEM;
64
65 buf->direct.map = t;
66
67 while (t & ((1 << buf->page_shift) - 1)) {
68 --buf->page_shift;
69 buf->npages *= 2;
70 }
71 } else {
72 int i;
73
74 buf->direct.buf = NULL;
75 buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
76 buf->npages = buf->nbufs;
77 buf->page_shift = PAGE_SHIFT;
78 buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
79 GFP_KERNEL);
80 if (!buf->page_list)
81 return -ENOMEM;
82
83 for (i = 0; i < buf->nbufs; i++) {
84 buf->page_list[i].buf =
85 dma_zalloc_coherent(&dev->pdev->dev, PAGE_SIZE,
86 &t, GFP_KERNEL);
87 if (!buf->page_list[i].buf)
88 goto err_free;
89
90 buf->page_list[i].map = t;
91 }
92
93 if (BITS_PER_LONG == 64) {
94 struct page **pages;
95 pages = kmalloc(sizeof(*pages) * buf->nbufs, GFP_KERNEL);
96 if (!pages)
97 goto err_free;
98 for (i = 0; i < buf->nbufs; i++)
99 pages[i] = virt_to_page(buf->page_list[i].buf);
100 buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
101 kfree(pages);
102 if (!buf->direct.buf)
103 goto err_free;
104 }
105 }
106
107 return 0;
108
109err_free:
110 mlx5_buf_free(dev, buf);
111
112 return -ENOMEM;
113}
114EXPORT_SYMBOL_GPL(mlx5_buf_alloc);
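The comment above mlx5_buf_alloc() describes the choice between one contiguous buffer and a list of single pages; the decision and the resulting page accounting can be traced with a standalone sketch (PAGE_SIZE is assumed to be 4 KB here, and max_direct mirrors the PAGE_SIZE * 2 that the SRQ code above passes in):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1 << PAGE_SHIFT)

/* log2 of the number of pages needed for size bytes, like get_order() */
static int order_of(int size)
{
	int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	int sizes[] = { 4096, 16384, 262144 };
	int max_direct = 2 * PAGE_SIZE;
	int i;

	for (i = 0; i < 3; i++) {
		int size = sizes[i];

		if (size <= max_direct)
			printf("%6d bytes: one contiguous buffer, page_shift starts at %d\n",
			       size, order_of(size) + PAGE_SHIFT);
		else
			printf("%6d bytes: split into %d separate pages\n",
			       size, (size + PAGE_SIZE - 1) / PAGE_SIZE);
	}
	return 0;
}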
115
116void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
117{
118 int i;
119
120 if (buf->nbufs == 1)
121 dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
122 buf->direct.map);
123 else {
124 if (BITS_PER_LONG == 64 && buf->direct.buf)
125 vunmap(buf->direct.buf);
126
127 for (i = 0; i < buf->nbufs; i++)
128 if (buf->page_list[i].buf)
129 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
130 buf->page_list[i].buf,
131 buf->page_list[i].map);
132 kfree(buf->page_list);
133 }
134}
135EXPORT_SYMBOL_GPL(mlx5_buf_free);
136
137static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct device *dma_device)
138{
139 struct mlx5_db_pgdir *pgdir;
140
141 pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
142 if (!pgdir)
143 return NULL;
144
145 bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE);
146 pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
147 &pgdir->db_dma, GFP_KERNEL);
148 if (!pgdir->db_page) {
149 kfree(pgdir);
150 return NULL;
151 }
152
153 return pgdir;
154}
155
156static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
157 struct mlx5_db *db)
158{
159 int offset;
160 int i;
161
162 i = find_first_bit(pgdir->bitmap, MLX5_DB_PER_PAGE);
163 if (i >= MLX5_DB_PER_PAGE)
164 return -ENOMEM;
165
166 __clear_bit(i, pgdir->bitmap);
167
168 db->u.pgdir = pgdir;
169 db->index = i;
170 offset = db->index * L1_CACHE_BYTES;
171 db->db = pgdir->db_page + offset / sizeof(*pgdir->db_page);
172 db->dma = pgdir->db_dma + offset;
173
174 return 0;
175}
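mlx5_alloc_db_from_pgdir() above hands out doorbell records from a shared DMA page, one L1 cache line per record, with a bitmap tracking the free slots. The offset arithmetic as a standalone sketch, assuming 4 KB pages and 64-byte cache lines (MLX5_DB_PER_PAGE is taken here to be PAGE_SIZE / L1_CACHE_BYTES, which is how the code above uses it):

#include <stdio.h>

int main(void)
{
	/* illustrative values: 4 KB doorbell page, 64-byte cache lines */
	int page_size = 4096;
	int cacheline = 64;
	int db_per_page = page_size / cacheline;	/* 64 records per page */
	int index;

	printf("%d doorbell records per DMA page\n", db_per_page);
	for (index = 0; index < 3; index++)
		printf("record %d -> byte offset %d within the page\n",
		       index, index * cacheline);
	return 0;
}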
176
177int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
178{
179 struct mlx5_db_pgdir *pgdir;
180 int ret = 0;
181
182 mutex_lock(&dev->priv.pgdir_mutex);
183
184 list_for_each_entry(pgdir, &dev->priv.pgdir_list, list)
185 if (!mlx5_alloc_db_from_pgdir(pgdir, db))
186 goto out;
187
188 pgdir = mlx5_alloc_db_pgdir(&(dev->pdev->dev));
189 if (!pgdir) {
190 ret = -ENOMEM;
191 goto out;
192 }
193
194 list_add(&pgdir->list, &dev->priv.pgdir_list);
195
196 /* This should never fail -- we just allocated an empty page: */
197 WARN_ON(mlx5_alloc_db_from_pgdir(pgdir, db));
198
199out:
200 mutex_unlock(&dev->priv.pgdir_mutex);
201
202 return ret;
203}
204EXPORT_SYMBOL_GPL(mlx5_db_alloc);
205
206void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
207{
208 mutex_lock(&dev->priv.pgdir_mutex);
209
210 __set_bit(db->index, db->u.pgdir->bitmap);
211
212 if (bitmap_full(db->u.pgdir->bitmap, MLX5_DB_PER_PAGE)) {
213 dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
214 db->u.pgdir->db_page, db->u.pgdir->db_dma);
215 list_del(&db->u.pgdir->list);
216 kfree(db->u.pgdir);
217 }
218
219 mutex_unlock(&dev->priv.pgdir_mutex);
220}
221EXPORT_SYMBOL_GPL(mlx5_db_free);
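/*
 * Doorbell records are carved out of shared DMA pages: each mlx5_db_pgdir
 * maps one page and hands out MLX5_DB_PER_PAGE records spaced one cache
 * line (L1_CACHE_BYTES) apart, tracked by the pgdir bitmap. A minimal usage
 * sketch follows; the surrounding resource code is assumed, not part of
 * this file:
 *
 *	struct mlx5_db db;
 *
 *	if (mlx5_db_alloc(dev, &db))
 *		return -ENOMEM;
 *	// db.db is the CPU pointer, db.dma the bus address handed to hardware
 *	...
 *	mlx5_db_free(dev, &db);
 */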
222
223
224void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas)
225{
226 u64 addr;
227 int i;
228
229 for (i = 0; i < buf->npages; i++) {
230 if (buf->nbufs == 1)
231 addr = buf->direct.map + (i << buf->page_shift);
232 else
233 addr = buf->page_list[i].map;
234
235 pas[i] = cpu_to_be64(addr);
236 }
237}
238EXPORT_SYMBOL_GPL(mlx5_fill_page_array);
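/*
 * Usage sketch for the buffer helpers above; illustrative only, since the
 * resource sizes and the "pas" destination are assumptions rather than taken
 * from this patch:
 *
 *	struct mlx5_buf buf;
 *	int err;
 *
 *	err = mlx5_buf_alloc(dev, nent * entry_size, 2 * PAGE_SIZE, &buf);
 *	if (err)
 *		return err;
 *	mlx5_fill_page_array(&buf, in->pas);	// "pas" assumed in the inbox
 *	...
 *	mlx5_buf_free(dev, &buf);
 */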
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
new file mode 100644
index 000000000000..c1c0eef89694
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -0,0 +1,1515 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <asm-generic/kmap_types.h>
34#include <linux/module.h>
35#include <linux/init.h>
36#include <linux/errno.h>
37#include <linux/pci.h>
38#include <linux/dma-mapping.h>
39#include <linux/slab.h>
40#include <linux/delay.h>
41#include <linux/random.h>
42#include <linux/io-mapping.h>
43#include <linux/mlx5/driver.h>
44#include <linux/debugfs.h>
45
46#include "mlx5_core.h"
47
48enum {
49 CMD_IF_REV = 3,
50};
51
52enum {
53 CMD_MODE_POLLING,
54 CMD_MODE_EVENTS
55};
56
57enum {
58 NUM_LONG_LISTS = 2,
59 NUM_MED_LISTS = 64,
60 LONG_LIST_SIZE = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 +
61 MLX5_CMD_DATA_BLOCK_SIZE,
62 MED_LIST_SIZE = 16 + MLX5_CMD_DATA_BLOCK_SIZE,
63};
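/*
 * LONG_LIST_SIZE looks to be sized for a worst-case MANAGE_PAGES command
 * covering 2 GB of memory (one 8-byte address per PAGE_SIZE page) plus the
 * inline header bytes and one extra data block, while MED_LIST_SIZE covers a
 * single data block; this reading is inferred from the arithmetic above
 * rather than stated anywhere in the patch.
 */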
64
65enum {
66 MLX5_CMD_DELIVERY_STAT_OK = 0x0,
67 MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1,
68 MLX5_CMD_DELIVERY_STAT_TOK_ERR = 0x2,
69 MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR = 0x3,
70 MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR = 0x4,
71 MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR = 0x5,
72 MLX5_CMD_DELIVERY_STAT_FW_ERR = 0x6,
73 MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR = 0x7,
74 MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR = 0x8,
75 MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR = 0x9,
76 MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10,
77};
78
79enum {
80 MLX5_CMD_STAT_OK = 0x0,
81 MLX5_CMD_STAT_INT_ERR = 0x1,
82 MLX5_CMD_STAT_BAD_OP_ERR = 0x2,
83 MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3,
84 MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4,
85 MLX5_CMD_STAT_BAD_RES_ERR = 0x5,
86 MLX5_CMD_STAT_RES_BUSY = 0x6,
87 MLX5_CMD_STAT_LIM_ERR = 0x8,
88 MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9,
89 MLX5_CMD_STAT_IX_ERR = 0xa,
90 MLX5_CMD_STAT_NO_RES_ERR = 0xf,
91 MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50,
92 MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51,
93 MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10,
94 MLX5_CMD_STAT_BAD_PKT_ERR = 0x30,
95 MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40,
96};
97
98static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
99 struct mlx5_cmd_msg *in,
100 struct mlx5_cmd_msg *out,
101 mlx5_cmd_cbk_t cbk,
102 void *context, int page_queue)
103{
104 gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
105 struct mlx5_cmd_work_ent *ent;
106
107 ent = kzalloc(sizeof(*ent), alloc_flags);
108 if (!ent)
109 return ERR_PTR(-ENOMEM);
110
111 ent->in = in;
112 ent->out = out;
113 ent->callback = cbk;
114 ent->context = context;
115 ent->cmd = cmd;
116 ent->page_queue = page_queue;
117
118 return ent;
119}
120
121static u8 alloc_token(struct mlx5_cmd *cmd)
122{
123 u8 token;
124
125 spin_lock(&cmd->token_lock);
126 token = cmd->token++ % 255 + 1;
127 spin_unlock(&cmd->token_lock);
128
129 return token;
130}
131
132static int alloc_ent(struct mlx5_cmd *cmd)
133{
134 unsigned long flags;
135 int ret;
136
137 spin_lock_irqsave(&cmd->alloc_lock, flags);
138 ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
139 if (ret < cmd->max_reg_cmds)
140 clear_bit(ret, &cmd->bitmask);
141 spin_unlock_irqrestore(&cmd->alloc_lock, flags);
142
143 return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
144}
145
146static void free_ent(struct mlx5_cmd *cmd, int idx)
147{
148 unsigned long flags;
149
150 spin_lock_irqsave(&cmd->alloc_lock, flags);
151 set_bit(idx, &cmd->bitmask);
152 spin_unlock_irqrestore(&cmd->alloc_lock, flags);
153}
154
155static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
156{
157 return cmd->cmd_buf + (idx << cmd->log_stride);
158}
159
160static u8 xor8_buf(void *buf, int len)
161{
162 u8 *ptr = buf;
163 u8 sum = 0;
164 int i;
165
166 for (i = 0; i < len; i++)
167 sum ^= ptr[i];
168
169 return sum;
170}
171
172static int verify_block_sig(struct mlx5_cmd_prot_block *block)
173{
174 if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
175 return -EINVAL;
176
177 if (xor8_buf(block, sizeof(*block)) != 0xff)
178 return -EINVAL;
179
180 return 0;
181}
182
183static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token)
184{
185 block->token = token;
186 block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 2);
187 block->sig = ~xor8_buf(block, sizeof(*block) - 1);
188}
189
190static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token)
191{
192 struct mlx5_cmd_mailbox *next = msg->next;
193
194 while (next) {
195 calc_block_sig(next->buf, token);
196 next = next->next;
197 }
198}
199
200static void set_signature(struct mlx5_cmd_work_ent *ent)
201{
202 ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
203 calc_chain_sig(ent->in, ent->token);
204 calc_chain_sig(ent->out, ent->token);
205}
206
207static void poll_timeout(struct mlx5_cmd_work_ent *ent)
208{
209 unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
210 u8 own;
211
212 do {
213 own = ent->lay->status_own;
214 if (!(own & CMD_OWNER_HW)) {
215 ent->ret = 0;
216 return;
217 }
218 usleep_range(5000, 10000);
219 } while (time_before(jiffies, poll_end));
220
221 ent->ret = -ETIMEDOUT;
222}
223
224static void free_cmd(struct mlx5_cmd_work_ent *ent)
225{
226 kfree(ent);
227}
228
229
230static int verify_signature(struct mlx5_cmd_work_ent *ent)
231{
232 struct mlx5_cmd_mailbox *next = ent->out->next;
233 int err;
234 u8 sig;
235
236 sig = xor8_buf(ent->lay, sizeof(*ent->lay));
237 if (sig != 0xff)
238 return -EINVAL;
239
240 while (next) {
241 err = verify_block_sig(next->buf);
242 if (err)
243 return err;
244
245 next = next->next;
246 }
247
248 return 0;
249}
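/*
 * The signatures above are a simple XOR-8 checksum: each signature byte is
 * the complement of the XOR of the other bytes in its region, so XOR-ing the
 * whole signed region, signature included, must give 0xff. A one-byte
 * illustration with made-up data:
 *
 *	data = 0x12, sig = (u8)~0x12 = 0xed, 0x12 ^ 0xed == 0xff
 *
 * set_signature()/calc_block_sig() write these values over the command
 * layout and each mailbox block; verify_signature()/verify_block_sig()
 * recompute the XOR and check for 0xff.
 */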
250
251static void dump_buf(void *buf, int size, int data_only, int offset)
252{
253 __be32 *p = buf;
254 int i;
255
256 for (i = 0; i < size; i += 16) {
257 pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
258 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
259 be32_to_cpu(p[3]));
260 p += 4;
261 offset += 16;
262 }
263 if (!data_only)
264 pr_debug("\n");
265}
266
267const char *mlx5_command_str(int command)
268{
269 switch (command) {
270 case MLX5_CMD_OP_QUERY_HCA_CAP:
271 return "QUERY_HCA_CAP";
272
273 case MLX5_CMD_OP_SET_HCA_CAP:
274 return "SET_HCA_CAP";
275
276 case MLX5_CMD_OP_QUERY_ADAPTER:
277 return "QUERY_ADAPTER";
278
279 case MLX5_CMD_OP_INIT_HCA:
280 return "INIT_HCA";
281
282 case MLX5_CMD_OP_TEARDOWN_HCA:
283 return "TEARDOWN_HCA";
284
285 case MLX5_CMD_OP_QUERY_PAGES:
286 return "QUERY_PAGES";
287
288 case MLX5_CMD_OP_MANAGE_PAGES:
289 return "MANAGE_PAGES";
290
291 case MLX5_CMD_OP_CREATE_MKEY:
292 return "CREATE_MKEY";
293
294 case MLX5_CMD_OP_QUERY_MKEY:
295 return "QUERY_MKEY";
296
297 case MLX5_CMD_OP_DESTROY_MKEY:
298 return "DESTROY_MKEY";
299
300 case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
301 return "QUERY_SPECIAL_CONTEXTS";
302
303 case MLX5_CMD_OP_CREATE_EQ:
304 return "CREATE_EQ";
305
306 case MLX5_CMD_OP_DESTROY_EQ:
307 return "DESTROY_EQ";
308
309 case MLX5_CMD_OP_QUERY_EQ:
310 return "QUERY_EQ";
311
312 case MLX5_CMD_OP_CREATE_CQ:
313 return "CREATE_CQ";
314
315 case MLX5_CMD_OP_DESTROY_CQ:
316 return "DESTROY_CQ";
317
318 case MLX5_CMD_OP_QUERY_CQ:
319 return "QUERY_CQ";
320
321 case MLX5_CMD_OP_MODIFY_CQ:
322 return "MODIFY_CQ";
323
324 case MLX5_CMD_OP_CREATE_QP:
325 return "CREATE_QP";
326
327 case MLX5_CMD_OP_DESTROY_QP:
328 return "DESTROY_QP";
329
330 case MLX5_CMD_OP_RST2INIT_QP:
331 return "RST2INIT_QP";
332
333 case MLX5_CMD_OP_INIT2RTR_QP:
334 return "INIT2RTR_QP";
335
336 case MLX5_CMD_OP_RTR2RTS_QP:
337 return "RTR2RTS_QP";
338
339 case MLX5_CMD_OP_RTS2RTS_QP:
340 return "RTS2RTS_QP";
341
342 case MLX5_CMD_OP_SQERR2RTS_QP:
343 return "SQERR2RTS_QP";
344
345 case MLX5_CMD_OP_2ERR_QP:
346 return "2ERR_QP";
347
348 case MLX5_CMD_OP_RTS2SQD_QP:
349 return "RTS2SQD_QP";
350
351 case MLX5_CMD_OP_SQD2RTS_QP:
352 return "SQD2RTS_QP";
353
354 case MLX5_CMD_OP_2RST_QP:
355 return "2RST_QP";
356
357 case MLX5_CMD_OP_QUERY_QP:
358 return "QUERY_QP";
359
360 case MLX5_CMD_OP_CONF_SQP:
361 return "CONF_SQP";
362
363 case MLX5_CMD_OP_MAD_IFC:
364 return "MAD_IFC";
365
366 case MLX5_CMD_OP_INIT2INIT_QP:
367 return "INIT2INIT_QP";
368
369 case MLX5_CMD_OP_SUSPEND_QP:
370 return "SUSPEND_QP";
371
372 case MLX5_CMD_OP_UNSUSPEND_QP:
373 return "UNSUSPEND_QP";
374
375 case MLX5_CMD_OP_SQD2SQD_QP:
376 return "SQD2SQD_QP";
377
378 case MLX5_CMD_OP_ALLOC_QP_COUNTER_SET:
379 return "ALLOC_QP_COUNTER_SET";
380
381 case MLX5_CMD_OP_DEALLOC_QP_COUNTER_SET:
382 return "DEALLOC_QP_COUNTER_SET";
383
384 case MLX5_CMD_OP_QUERY_QP_COUNTER_SET:
385 return "QUERY_QP_COUNTER_SET";
386
387 case MLX5_CMD_OP_CREATE_PSV:
388 return "CREATE_PSV";
389
390 case MLX5_CMD_OP_DESTROY_PSV:
391 return "DESTROY_PSV";
392
393 case MLX5_CMD_OP_QUERY_PSV:
394 return "QUERY_PSV";
395
396 case MLX5_CMD_OP_QUERY_SIG_RULE_TABLE:
397 return "QUERY_SIG_RULE_TABLE";
398
399 case MLX5_CMD_OP_QUERY_BLOCK_SIZE_TABLE:
400 return "QUERY_BLOCK_SIZE_TABLE";
401
402 case MLX5_CMD_OP_CREATE_SRQ:
403 return "CREATE_SRQ";
404
405 case MLX5_CMD_OP_DESTROY_SRQ:
406 return "DESTROY_SRQ";
407
408 case MLX5_CMD_OP_QUERY_SRQ:
409 return "QUERY_SRQ";
410
411 case MLX5_CMD_OP_ARM_RQ:
412 return "ARM_RQ";
413
414 case MLX5_CMD_OP_RESIZE_SRQ:
415 return "RESIZE_SRQ";
416
417 case MLX5_CMD_OP_ALLOC_PD:
418 return "ALLOC_PD";
419
420 case MLX5_CMD_OP_DEALLOC_PD:
421 return "DEALLOC_PD";
422
423 case MLX5_CMD_OP_ALLOC_UAR:
424 return "ALLOC_UAR";
425
426 case MLX5_CMD_OP_DEALLOC_UAR:
427 return "DEALLOC_UAR";
428
429 case MLX5_CMD_OP_ATTACH_TO_MCG:
430 return "ATTACH_TO_MCG";
431
432 case MLX5_CMD_OP_DETACH_FROM_MCG:
433 return "DETACH_FROM_MCG";
434
435 case MLX5_CMD_OP_ALLOC_XRCD:
436 return "ALLOC_XRCD";
437
438 case MLX5_CMD_OP_DEALLOC_XRCD:
439 return "DEALLOC_XRCD";
440
441 case MLX5_CMD_OP_ACCESS_REG:
442 return "ACCESS_REG";
443
444 default: return "unknown command opcode";
445 }
446}
447
448static void dump_command(struct mlx5_core_dev *dev,
449 struct mlx5_cmd_work_ent *ent, int input)
450{
451 u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode);
452 struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
453 struct mlx5_cmd_mailbox *next = msg->next;
454 int data_only;
455 int offset = 0;
456 int dump_len;
457
458 data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));
459
460 if (data_only)
461 mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
462 "dump command data %s(0x%x) %s\n",
463 mlx5_command_str(op), op,
464 input ? "INPUT" : "OUTPUT");
465 else
466 mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
467 mlx5_command_str(op), op,
468 input ? "INPUT" : "OUTPUT");
469
470 if (data_only) {
471 if (input) {
472 dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
473 offset += sizeof(ent->lay->in);
474 } else {
475 dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
476 offset += sizeof(ent->lay->out);
477 }
478 } else {
479 dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
480 offset += sizeof(*ent->lay);
481 }
482
483 while (next && offset < msg->len) {
484 if (data_only) {
485 dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
486 dump_buf(next->buf, dump_len, 1, offset);
487 offset += MLX5_CMD_DATA_BLOCK_SIZE;
488 } else {
489 mlx5_core_dbg(dev, "command block:\n");
490 dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset);
491 offset += sizeof(struct mlx5_cmd_prot_block);
492 }
493 next = next->next;
494 }
495
496 if (data_only)
497 pr_debug("\n");
498}
499
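/*
 * cmd_work_handler() runs once per command, normally from the command
 * workqueue: it takes a slot under the regular or page-queue semaphore,
 * copies the inline part of the input into the hardware descriptor, points
 * the descriptor at the input/output mailbox chains, signs it when checksums
 * are enabled and rings the doorbell. In polling mode it then busy-waits for
 * ownership to return to software and calls the completion handler itself.
 */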
500static void cmd_work_handler(struct work_struct *work)
501{
502 struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
503 struct mlx5_cmd *cmd = ent->cmd;
504 struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
505 struct mlx5_cmd_layout *lay;
506 struct semaphore *sem;
507
508 sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
509 down(sem);
510 if (!ent->page_queue) {
511 ent->idx = alloc_ent(cmd);
512 if (ent->idx < 0) {
513 mlx5_core_err(dev, "failed to allocate command entry\n");
514 up(sem);
515 return;
516 }
517 } else {
518 ent->idx = cmd->max_reg_cmds;
519 }
520
521 ent->token = alloc_token(cmd);
522 cmd->ent_arr[ent->idx] = ent;
523 lay = get_inst(cmd, ent->idx);
524 ent->lay = lay;
525 memset(lay, 0, sizeof(*lay));
526 memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
527 if (ent->in->next)
528 lay->in_ptr = cpu_to_be64(ent->in->next->dma);
529 lay->inlen = cpu_to_be32(ent->in->len);
530 if (ent->out->next)
531 lay->out_ptr = cpu_to_be64(ent->out->next->dma);
532 lay->outlen = cpu_to_be32(ent->out->len);
533 lay->type = MLX5_PCI_CMD_XPORT;
534 lay->token = ent->token;
535 lay->status_own = CMD_OWNER_HW;
536 if (!cmd->checksum_disabled)
537 set_signature(ent);
538 dump_command(dev, ent, 1);
539 ktime_get_ts(&ent->ts1);
540
541 /* ring doorbell after the descriptor is valid */
542 wmb();
543 iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
544 mlx5_core_dbg(dev, "write 0x%x to command doorbell\n", 1 << ent->idx);
545 mmiowb();
546 if (cmd->mode == CMD_MODE_POLLING) {
547 poll_timeout(ent);
548 /* make sure we read the descriptor after ownership is SW */
549 rmb();
550 mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
551 }
552}
553
554static const char *deliv_status_to_str(u8 status)
555{
556 switch (status) {
557 case MLX5_CMD_DELIVERY_STAT_OK:
558 return "no errors";
559 case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
560 return "signature error";
561 case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
562 return "token error";
563 case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
564 return "bad block number";
565 case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
566 return "output pointer not aligned to block size";
567 case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
568 return "input pointer not aligned to block size";
569 case MLX5_CMD_DELIVERY_STAT_FW_ERR:
570 return "firmware internal error";
571 case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
572 return "command input length error";
573 case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
574 return "command output length error";
575 case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
576 return "reserved fields not cleared";
577 case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
578 return "bad command descriptor type";
579 default:
580 return "unknown status code";
581 }
582}
583
584static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
585{
586 struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
587
588 return be16_to_cpu(hdr->opcode);
589}
590
591static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
592{
593 unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
594 struct mlx5_cmd *cmd = &dev->cmd;
595 int err;
596
597 if (cmd->mode == CMD_MODE_POLLING) {
598 wait_for_completion(&ent->done);
599 err = ent->ret;
600 } else {
601 if (!wait_for_completion_timeout(&ent->done, timeout))
602 err = -ETIMEDOUT;
603 else
604 err = 0;
605 }
606 if (err == -ETIMEDOUT) {
607 mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
608 mlx5_command_str(msg_to_opcode(ent->in)),
609 msg_to_opcode(ent->in));
610 }
611 mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", err,
612 deliv_status_to_str(ent->status), ent->status);
613
614 return err;
615}
616
617/* Notes:
618 * 1. Callback functions may not sleep
619 * 2. page queue commands do not support asynchronous completion
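 *    (page queue commands run cmd_work_handler() directly in the caller's
 *    context and use the single reserved entry at index max_reg_cmds, which
 *    is why mlx5_cmd_invoke() rejects a callback together with page_queue)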
620 */
621static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
622 struct mlx5_cmd_msg *out, mlx5_cmd_cbk_t callback,
623 void *context, int page_queue, u8 *status)
624{
625 struct mlx5_cmd *cmd = &dev->cmd;
626 struct mlx5_cmd_work_ent *ent;
627 ktime_t t1, t2, delta;
628 struct mlx5_cmd_stats *stats;
629 int err = 0;
630 s64 ds;
631 u16 op;
632
633 if (callback && page_queue)
634 return -EINVAL;
635
636 ent = alloc_cmd(cmd, in, out, callback, context, page_queue);
637 if (IS_ERR(ent))
638 return PTR_ERR(ent);
639
640 if (!callback)
641 init_completion(&ent->done);
642
643 INIT_WORK(&ent->work, cmd_work_handler);
644 if (page_queue) {
645 cmd_work_handler(&ent->work);
646 } else if (!queue_work(cmd->wq, &ent->work)) {
647 mlx5_core_warn(dev, "failed to queue work\n");
648 err = -ENOMEM;
649 goto out_free;
650 }
651
652 if (!callback) {
653 err = wait_func(dev, ent);
654 if (err == -ETIMEDOUT)
655 goto out;
656
657 t1 = timespec_to_ktime(ent->ts1);
658 t2 = timespec_to_ktime(ent->ts2);
659 delta = ktime_sub(t2, t1);
660 ds = ktime_to_ns(delta);
661 op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
662 if (op < ARRAY_SIZE(cmd->stats)) {
663 stats = &cmd->stats[op];
664 spin_lock(&stats->lock);
665 stats->sum += ds;
666 ++stats->n;
667 spin_unlock(&stats->lock);
668 }
669 mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
670 "fw exec time for %s is %lld nsec\n",
671 mlx5_command_str(op), ds);
672 *status = ent->status;
673 free_cmd(ent);
674 }
675
676 return err;
677
678out_free:
679 free_cmd(ent);
680out:
681 return err;
682}
683
684static ssize_t dbg_write(struct file *filp, const char __user *buf,
685 size_t count, loff_t *pos)
686{
687 struct mlx5_core_dev *dev = filp->private_data;
688 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
689 char lbuf[3];
690 int err;
691
692 if (!dbg->in_msg || !dbg->out_msg)
693 return -ENOMEM;
694
695 if (copy_from_user(lbuf, buf, sizeof(lbuf)))
696 return -EPERM;
697
698 lbuf[sizeof(lbuf) - 1] = 0;
699
700 if (strcmp(lbuf, "go"))
701 return -EINVAL;
702
703 err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);
704
705 return err ? err : count;
706}
707
708
709static const struct file_operations fops = {
710 .owner = THIS_MODULE,
711 .open = simple_open,
712 .write = dbg_write,
713};
714
715static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
716{
717 struct mlx5_cmd_prot_block *block;
718 struct mlx5_cmd_mailbox *next;
719 int copy;
720
721 if (!to || !from)
722 return -ENOMEM;
723
724 copy = min_t(int, size, sizeof(to->first.data));
725 memcpy(to->first.data, from, copy);
726 size -= copy;
727 from += copy;
728
729 next = to->next;
730 while (size) {
731 if (!next) {
732 /* this is a BUG */
733 return -ENOMEM;
734 }
735
736 copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
737 block = next->buf;
738 memcpy(block->data, from, copy);
739 from += copy;
740 size -= copy;
741 next = next->next;
742 }
743
744 return 0;
745}
746
747static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
748{
749 struct mlx5_cmd_prot_block *block;
750 struct mlx5_cmd_mailbox *next;
751 int copy;
752
753 if (!to || !from)
754 return -ENOMEM;
755
756 copy = min_t(int, size, sizeof(from->first.data));
757 memcpy(to, from->first.data, copy);
758 size -= copy;
759 to += copy;
760
761 next = from->next;
762 while (size) {
763 if (!next) {
764 /* this is a BUG */
765 return -ENOMEM;
766 }
767
768 copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
769 block = next->buf;
770 if (xor8_buf(block, sizeof(*block)) != 0xff)
771 return -EINVAL;
772
773 memcpy(to, block->data, copy);
774 to += copy;
775 size -= copy;
776 next = next->next;
777 }
778
779 return 0;
780}
781
782static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
783 gfp_t flags)
784{
785 struct mlx5_cmd_mailbox *mailbox;
786
787 mailbox = kmalloc(sizeof(*mailbox), flags);
788 if (!mailbox)
789 return ERR_PTR(-ENOMEM);
790
791 mailbox->buf = pci_pool_alloc(dev->cmd.pool, flags,
792 &mailbox->dma);
793 if (!mailbox->buf) {
794 mlx5_core_dbg(dev, "failed allocation\n");
795 kfree(mailbox);
796 return ERR_PTR(-ENOMEM);
797 }
798 memset(mailbox->buf, 0, sizeof(struct mlx5_cmd_prot_block));
799 mailbox->next = NULL;
800
801 return mailbox;
802}
803
804static void free_cmd_box(struct mlx5_core_dev *dev,
805 struct mlx5_cmd_mailbox *mailbox)
806{
807 pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
808 kfree(mailbox);
809}
810
811static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
812 gfp_t flags, int size)
813{
814 struct mlx5_cmd_mailbox *tmp, *head = NULL;
815 struct mlx5_cmd_prot_block *block;
816 struct mlx5_cmd_msg *msg;
817 int blen;
818 int err;
819 int n;
820 int i;
821
822 msg = kzalloc(sizeof(*msg), GFP_KERNEL);
823 if (!msg)
824 return ERR_PTR(-ENOMEM);
825
826 blen = size - min_t(int, sizeof(msg->first.data), size);
827 n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE;
828
829 for (i = 0; i < n; i++) {
830 tmp = alloc_cmd_box(dev, flags);
831 if (IS_ERR(tmp)) {
832 mlx5_core_warn(dev, "failed allocating block\n");
833 err = PTR_ERR(tmp);
834 goto err_alloc;
835 }
836
837 block = tmp->buf;
838 tmp->next = head;
839 block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
840 block->block_num = cpu_to_be32(n - i - 1);
841 head = tmp;
842 }
843 msg->next = head;
844 msg->len = size;
845 return msg;
846
847err_alloc:
848 while (head) {
849 tmp = head->next;
850 free_cmd_box(dev, head);
851 head = tmp;
852 }
853 kfree(msg);
854
855 return ERR_PTR(err);
856}
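/*
 * The resulting message keeps the head of the payload inline in first.data
 * and spreads the rest over a chain of MLX5_CMD_DATA_BLOCK_SIZE mailboxes,
 * built here back to front so that msg->next is block 0 and every block
 * holds the DMA address of the block after it; mlx5_copy_to_msg() and
 * mlx5_copy_from_msg() walk the same chain when marshalling data.
 */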
857
858static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
859 struct mlx5_cmd_msg *msg)
860{
861 struct mlx5_cmd_mailbox *head = msg->next;
862 struct mlx5_cmd_mailbox *next;
863
864 while (head) {
865 next = head->next;
866 free_cmd_box(dev, head);
867 head = next;
868 }
869 kfree(msg);
870}
871
872static ssize_t data_write(struct file *filp, const char __user *buf,
873 size_t count, loff_t *pos)
874{
875 struct mlx5_core_dev *dev = filp->private_data;
876 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
877 void *ptr;
878 int err;
879
880 if (*pos != 0)
881 return -EINVAL;
882
883 kfree(dbg->in_msg);
884 dbg->in_msg = NULL;
885 dbg->inlen = 0;
886
887 ptr = kzalloc(count, GFP_KERNEL);
888 if (!ptr)
889 return -ENOMEM;
890
891 if (copy_from_user(ptr, buf, count)) {
892 err = -EPERM;
893 goto out;
894 }
895 dbg->in_msg = ptr;
896 dbg->inlen = count;
897
898 *pos = count;
899
900 return count;
901
902out:
903 kfree(ptr);
904 return err;
905}
906
907static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
908 loff_t *pos)
909{
910 struct mlx5_core_dev *dev = filp->private_data;
911 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
912 int copy;
913
914 if (*pos)
915 return 0;
916
917 if (!dbg->out_msg)
918 return -ENOMEM;
919
920 copy = min_t(int, count, dbg->outlen);
921 if (copy_to_user(buf, dbg->out_msg, copy))
922 return -EPERM;
923
924 *pos += copy;
925
926 return copy;
927}
928
929static const struct file_operations dfops = {
930 .owner = THIS_MODULE,
931 .open = simple_open,
932 .write = data_write,
933 .read = data_read,
934};
935
936static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
937 loff_t *pos)
938{
939 struct mlx5_core_dev *dev = filp->private_data;
940 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
941 char outlen[8];
942 int err;
943
944 if (*pos)
945 return 0;
946
947 err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
948 if (err < 0)
949 return err;
950
951 if (copy_to_user(buf, &outlen, err))
952 return -EPERM;
953
954 *pos += err;
955
956 return err;
957}
958
959static ssize_t outlen_write(struct file *filp, const char __user *buf,
960 size_t count, loff_t *pos)
961{
962 struct mlx5_core_dev *dev = filp->private_data;
963 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
964 char outlen_str[8];
965 int outlen;
966 void *ptr;
967 int err;
968
969 if (*pos != 0 || count > 6)
970 return -EINVAL;
971
972 kfree(dbg->out_msg);
973 dbg->out_msg = NULL;
974 dbg->outlen = 0;
975
976 if (copy_from_user(outlen_str, buf, count))
977 return -EPERM;
978
979 outlen_str[7] = 0;
980
981 err = sscanf(outlen_str, "%d", &outlen);
982 if (err < 0)
983 return err;
984
985 ptr = kzalloc(outlen, GFP_KERNEL);
986 if (!ptr)
987 return -ENOMEM;
988
989 dbg->out_msg = ptr;
990 dbg->outlen = outlen;
991
992 *pos = count;
993
994 return count;
995}
996
997static const struct file_operations olfops = {
998 .owner = THIS_MODULE,
999 .open = simple_open,
1000 .write = outlen_write,
1001 .read = outlen_read,
1002};
1003
1004static void set_wqname(struct mlx5_core_dev *dev)
1005{
1006 struct mlx5_cmd *cmd = &dev->cmd;
1007
1008 snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
1009 dev_name(&dev->pdev->dev));
1010}
1011
1012static void clean_debug_files(struct mlx5_core_dev *dev)
1013{
1014 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1015
1016 if (!mlx5_debugfs_root)
1017 return;
1018
1019 mlx5_cmdif_debugfs_cleanup(dev);
1020 debugfs_remove_recursive(dbg->dbg_root);
1021}
1022
1023static int create_debugfs_files(struct mlx5_core_dev *dev)
1024{
1025 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1026 int err = -ENOMEM;
1027
1028 if (!mlx5_debugfs_root)
1029 return 0;
1030
1031 dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
1032 if (!dbg->dbg_root)
1033 return err;
1034
1035 dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root,
1036 dev, &dfops);
1037 if (!dbg->dbg_in)
1038 goto err_dbg;
1039
1040 dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root,
1041 dev, &dfops);
1042 if (!dbg->dbg_out)
1043 goto err_dbg;
1044
1045 dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root,
1046 dev, &olfops);
1047 if (!dbg->dbg_outlen)
1048 goto err_dbg;
1049
1050 dbg->dbg_status = debugfs_create_u8("status", 0600, dbg->dbg_root,
1051 &dbg->status);
1052 if (!dbg->dbg_status)
1053 goto err_dbg;
1054
1055 dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
1056 if (!dbg->dbg_run)
1057 goto err_dbg;
1058
1059 mlx5_cmdif_debugfs_init(dev);
1060
1061 return 0;
1062
1063err_dbg:
1064 clean_debug_files(dev);
1065 return err;
1066}
1067
1068void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
1069{
1070 struct mlx5_cmd *cmd = &dev->cmd;
1071 int i;
1072
1073 for (i = 0; i < cmd->max_reg_cmds; i++)
1074 down(&cmd->sem);
1075
1076 down(&cmd->pages_sem);
1077
1078 flush_workqueue(cmd->wq);
1079
1080 cmd->mode = CMD_MODE_EVENTS;
1081
1082 up(&cmd->pages_sem);
1083 for (i = 0; i < cmd->max_reg_cmds; i++)
1084 up(&cmd->sem);
1085}
1086
1087void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
1088{
1089 struct mlx5_cmd *cmd = &dev->cmd;
1090 int i;
1091
1092 for (i = 0; i < cmd->max_reg_cmds; i++)
1093 down(&cmd->sem);
1094
1095 down(&cmd->pages_sem);
1096
1097 flush_workqueue(cmd->wq);
1098 cmd->mode = CMD_MODE_POLLING;
1099
1100 up(&cmd->pages_sem);
1101 for (i = 0; i < cmd->max_reg_cmds; i++)
1102 up(&cmd->sem);
1103}
1104
1105void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
1106{
1107 struct mlx5_cmd *cmd = &dev->cmd;
1108 struct mlx5_cmd_work_ent *ent;
1109 mlx5_cmd_cbk_t callback;
1110 void *context;
1111 int err;
1112 int i;
1113
1114 for (i = 0; i < (1 << cmd->log_sz); i++) {
1115 if (test_bit(i, &vector)) {
1116 ent = cmd->ent_arr[i];
1117 ktime_get_ts(&ent->ts2);
1118 memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
1119 dump_command(dev, ent, 0);
1120 if (!ent->ret) {
1121 if (!cmd->checksum_disabled)
1122 ent->ret = verify_signature(ent);
1123 else
1124 ent->ret = 0;
1125 ent->status = ent->lay->status_own >> 1;
1126 mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
1127 ent->ret, deliv_status_to_str(ent->status), ent->status);
1128 }
1129 free_ent(cmd, ent->idx);
1130 if (ent->callback) {
1131 callback = ent->callback;
1132 context = ent->context;
1133 err = ent->ret;
1134 free_cmd(ent);
1135 callback(err, context);
1136 } else {
1137 complete(&ent->done);
1138 }
1139 if (ent->page_queue)
1140 up(&cmd->pages_sem);
1141 else
1142 up(&cmd->sem);
1143 }
1144 }
1145}
1146EXPORT_SYMBOL(mlx5_cmd_comp_handler);
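/*
 * "vector" is a bitmask of completed command entries: bit i set means the
 * entry at index i is done. The polling path above passes 1UL << ent->idx
 * directly; in event mode the command-completion EQ handler (outside this
 * file) is expected to pass the mask reported by the firmware event.
 */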
1147
1148static int status_to_err(u8 status)
1149{
1150 return status ? -1 : 0; /* TBD more meaningful codes */
1151}
1152
1153static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size)
1154{
1155 struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
1156 struct mlx5_cmd *cmd = &dev->cmd;
1157 struct cache_ent *ent = NULL;
1158
1159 if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE)
1160 ent = &cmd->cache.large;
1161 else if (in_size > 16 && in_size <= MED_LIST_SIZE)
1162 ent = &cmd->cache.med;
1163
1164 if (ent) {
1165 spin_lock(&ent->lock);
1166 if (!list_empty(&ent->head)) {
1167 msg = list_entry(ent->head.next, typeof(*msg), list);
1168 /* For cached lists, we must explicitly state the
1169 * real size of the message
1170 */
1171 msg->len = in_size;
1172 list_del(&msg->list);
1173 }
1174 spin_unlock(&ent->lock);
1175 }
1176
1177 if (IS_ERR(msg))
1178 msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, in_size);
1179
1180 return msg;
1181}
1182
1183static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
1184{
1185 if (msg->cache) {
1186 spin_lock(&msg->cache->lock);
1187 list_add_tail(&msg->list, &msg->cache->head);
1188 spin_unlock(&msg->cache->lock);
1189 } else {
1190 mlx5_free_cmd_msg(dev, msg);
1191 }
1192}
1193
1194static int is_manage_pages(struct mlx5_inbox_hdr *in)
1195{
1196 return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
1197}
1198
1199int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
1200 int out_size)
1201{
1202 struct mlx5_cmd_msg *inb;
1203 struct mlx5_cmd_msg *outb;
1204 int pages_queue;
1205 int err;
1206 u8 status = 0;
1207
1208 pages_queue = is_manage_pages(in);
1209
1210 inb = alloc_msg(dev, in_size);
1211 if (IS_ERR(inb)) {
1212 err = PTR_ERR(inb);
1213 return err;
1214 }
1215
1216 err = mlx5_copy_to_msg(inb, in, in_size);
1217 if (err) {
1218 mlx5_core_warn(dev, "err %d\n", err);
1219 goto out_in;
1220 }
1221
1222 outb = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, out_size);
1223 if (IS_ERR(outb)) {
1224 err = PTR_ERR(outb);
1225 goto out_in;
1226 }
1227
1228 err = mlx5_cmd_invoke(dev, inb, outb, NULL, NULL, pages_queue, &status);
1229 if (err)
1230 goto out_out;
1231
1232 mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
1233 if (status) {
1234 err = status_to_err(status);
1235 goto out_out;
1236 }
1237
1238 err = mlx5_copy_from_msg(out, outb, out_size);
1239
1240out_out:
1241 mlx5_free_cmd_msg(dev, outb);
1242
1243out_in:
1244 free_msg(dev, inb);
1245 return err;
1246}
1247EXPORT_SYMBOL(mlx5_cmd_exec);
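/*
 * Typical call pattern, mirroring mlx5_core_create_cq() in cq.c later in
 * this patch: zero the outbox, set the opcode in the inbox header, execute,
 * then translate a non-zero firmware status into an errno.
 *
 *	memset(&out, 0, sizeof(out));
 *	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_CQ);
 *	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
 *	if (err)
 *		return err;
 *	if (out.hdr.status)
 *		return mlx5_cmd_status_to_err(&out.hdr);
 */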
1248
1249static void destroy_msg_cache(struct mlx5_core_dev *dev)
1250{
1251 struct mlx5_cmd *cmd = &dev->cmd;
1252 struct mlx5_cmd_msg *msg;
1253 struct mlx5_cmd_msg *n;
1254
1255 list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) {
1256 list_del(&msg->list);
1257 mlx5_free_cmd_msg(dev, msg);
1258 }
1259
1260 list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) {
1261 list_del(&msg->list);
1262 mlx5_free_cmd_msg(dev, msg);
1263 }
1264}
1265
1266static int create_msg_cache(struct mlx5_core_dev *dev)
1267{
1268 struct mlx5_cmd *cmd = &dev->cmd;
1269 struct mlx5_cmd_msg *msg;
1270 int err;
1271 int i;
1272
1273 spin_lock_init(&cmd->cache.large.lock);
1274 INIT_LIST_HEAD(&cmd->cache.large.head);
1275 spin_lock_init(&cmd->cache.med.lock);
1276 INIT_LIST_HEAD(&cmd->cache.med.head);
1277
1278 for (i = 0; i < NUM_LONG_LISTS; i++) {
1279 msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
1280 if (IS_ERR(msg)) {
1281 err = PTR_ERR(msg);
1282 goto ex_err;
1283 }
1284 msg->cache = &cmd->cache.large;
1285 list_add_tail(&msg->list, &cmd->cache.large.head);
1286 }
1287
1288 for (i = 0; i < NUM_MED_LISTS; i++) {
1289 msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
1290 if (IS_ERR(msg)) {
1291 err = PTR_ERR(msg);
1292 goto ex_err;
1293 }
1294 msg->cache = &cmd->cache.med;
1295 list_add_tail(&msg->list, &cmd->cache.med.head);
1296 }
1297
1298 return 0;
1299
1300ex_err:
1301 destroy_msg_cache(dev);
1302 return err;
1303}
1304
1305int mlx5_cmd_init(struct mlx5_core_dev *dev)
1306{
1307 int size = sizeof(struct mlx5_cmd_prot_block);
1308 int align = roundup_pow_of_two(size);
1309 struct mlx5_cmd *cmd = &dev->cmd;
1310 u32 cmd_h, cmd_l;
1311 u16 cmd_if_rev;
1312 int err;
1313 int i;
1314
1315 cmd_if_rev = cmdif_rev(dev);
1316 if (cmd_if_rev != CMD_IF_REV) {
1317 dev_err(&dev->pdev->dev,
1318 "Driver cmdif rev (%d) differs from firmware's (%d)\n",
1319 CMD_IF_REV, cmd_if_rev);
1320 return -EINVAL;
1321 }
1322
1323 cmd->pool = pci_pool_create("mlx5_cmd", dev->pdev, size, align, 0);
1324 if (!cmd->pool)
1325 return -ENOMEM;
1326
1327 cmd->cmd_buf = (void *)__get_free_pages(GFP_ATOMIC, 0);
1328 if (!cmd->cmd_buf) {
1329 err = -ENOMEM;
1330 goto err_free_pool;
1331 }
1332 cmd->dma = dma_map_single(&dev->pdev->dev, cmd->cmd_buf, PAGE_SIZE,
1333 DMA_BIDIRECTIONAL);
1334 if (dma_mapping_error(&dev->pdev->dev, cmd->dma)) {
1335 err = -ENOMEM;
1336 goto err_free;
1337 }
1338
1339 cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
1340 cmd->log_sz = cmd_l >> 4 & 0xf;
1341 cmd->log_stride = cmd_l & 0xf;
1342 if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
1343 dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n",
1344 1 << cmd->log_sz);
1345 err = -EINVAL;
1346 goto err_map;
1347 }
1348
1349 if (cmd->log_sz + cmd->log_stride > PAGE_SHIFT) {
1350 dev_err(&dev->pdev->dev, "command queue size overflow\n");
1351 err = -EINVAL;
1352 goto err_map;
1353 }
1354
1355 cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
1356 cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;
1357
1358 cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
1359 if (cmd->cmdif_rev > CMD_IF_REV) {
1360 dev_err(&dev->pdev->dev, "driver does not support the firmware's command interface version: driver %d, firmware %d\n",
1361 CMD_IF_REV, cmd->cmdif_rev);
1362 err = -ENOTSUPP;
1363 goto err_map;
1364 }
1365
1366 spin_lock_init(&cmd->alloc_lock);
1367 spin_lock_init(&cmd->token_lock);
1368 for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
1369 spin_lock_init(&cmd->stats[i].lock);
1370
1371 sema_init(&cmd->sem, cmd->max_reg_cmds);
1372 sema_init(&cmd->pages_sem, 1);
1373
1374 cmd_h = (u32)((u64)(cmd->dma) >> 32);
1375 cmd_l = (u32)(cmd->dma);
1376 if (cmd_l & 0xfff) {
1377 dev_err(&dev->pdev->dev, "invalid command queue address\n");
1378 err = -ENOMEM;
1379 goto err_map;
1380 }
1381
1382 iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
1383 iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);
1384
1385 /* Make sure firmware sees the complete address before we proceed */
1386 wmb();
1387
1388 mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));
1389
1390 cmd->mode = CMD_MODE_POLLING;
1391
1392 err = create_msg_cache(dev);
1393 if (err) {
1394 dev_err(&dev->pdev->dev, "failed to create command cache\n");
1395 goto err_map;
1396 }
1397
1398 set_wqname(dev);
1399 cmd->wq = create_singlethread_workqueue(cmd->wq_name);
1400 if (!cmd->wq) {
1401 dev_err(&dev->pdev->dev, "failed to create command workqueue\n");
1402 err = -ENOMEM;
1403 goto err_cache;
1404 }
1405
1406 err = create_debugfs_files(dev);
1407 if (err) {
1408 err = -ENOMEM;
1409 goto err_wq;
1410 }
1411
1412 return 0;
1413
1414err_wq:
1415 destroy_workqueue(cmd->wq);
1416
1417err_cache:
1418 destroy_msg_cache(dev);
1419
1420err_map:
1421 dma_unmap_single(&dev->pdev->dev, cmd->dma, PAGE_SIZE,
1422 DMA_BIDIRECTIONAL);
1423err_free:
1424 free_pages((unsigned long)cmd->cmd_buf, 0);
1425
1426err_free_pool:
1427 pci_pool_destroy(cmd->pool);
1428
1429 return err;
1430}
1431EXPORT_SYMBOL(mlx5_cmd_init);
1432
1433void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
1434{
1435 struct mlx5_cmd *cmd = &dev->cmd;
1436
1437 clean_debug_files(dev);
1438 destroy_workqueue(cmd->wq);
1439 destroy_msg_cache(dev);
1440 dma_unmap_single(&dev->pdev->dev, cmd->dma, PAGE_SIZE,
1441 DMA_BIDIRECTIONAL);
1442 free_pages((unsigned long)cmd->cmd_buf, 0);
1443 pci_pool_destroy(cmd->pool);
1444}
1445EXPORT_SYMBOL(mlx5_cmd_cleanup);
1446
1447static const char *cmd_status_str(u8 status)
1448{
1449 switch (status) {
1450 case MLX5_CMD_STAT_OK:
1451 return "OK";
1452 case MLX5_CMD_STAT_INT_ERR:
1453 return "internal error";
1454 case MLX5_CMD_STAT_BAD_OP_ERR:
1455 return "bad operation";
1456 case MLX5_CMD_STAT_BAD_PARAM_ERR:
1457 return "bad parameter";
1458 case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
1459 return "bad system state";
1460 case MLX5_CMD_STAT_BAD_RES_ERR:
1461 return "bad resource";
1462 case MLX5_CMD_STAT_RES_BUSY:
1463 return "resource busy";
1464 case MLX5_CMD_STAT_LIM_ERR:
1465 return "limits exceeded";
1466 case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
1467 return "bad resource state";
1468 case MLX5_CMD_STAT_IX_ERR:
1469 return "bad index";
1470 case MLX5_CMD_STAT_NO_RES_ERR:
1471 return "no resources";
1472 case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
1473 return "bad input length";
1474 case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
1475 return "bad output length";
1476 case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
1477 return "bad QP state";
1478 case MLX5_CMD_STAT_BAD_PKT_ERR:
1479 return "bad packet (discarded)";
1480 case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
1481 return "bad size, too many outstanding CQEs";
1482 default:
1483 return "unknown status";
1484 }
1485}
1486
1487int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
1488{
1489 if (!hdr->status)
1490 return 0;
1491
1492 pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
1493 cmd_status_str(hdr->status), hdr->status,
1494 be32_to_cpu(hdr->syndrome));
1495
1496 switch (hdr->status) {
1497 case MLX5_CMD_STAT_OK: return 0;
1498 case MLX5_CMD_STAT_INT_ERR: return -EIO;
1499 case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL;
1500 case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL;
1501 case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO;
1502 case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL;
1503 case MLX5_CMD_STAT_RES_BUSY: return -EBUSY;
1504 case MLX5_CMD_STAT_LIM_ERR: return -EINVAL;
1505 case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL;
1506 case MLX5_CMD_STAT_IX_ERR: return -EINVAL;
1507 case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN;
1508 case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO;
1509 case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO;
1510 case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL;
1511 case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL;
1512 case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL;
1513 default: return -EIO;
1514 }
1515}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
new file mode 100644
index 000000000000..c2d660be6f76
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -0,0 +1,224 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/hardirq.h>
36#include <linux/mlx5/driver.h>
37#include <linux/mlx5/cmd.h>
38#include <rdma/ib_verbs.h>
39#include <linux/mlx5/cq.h>
40#include "mlx5_core.h"
41
42void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn)
43{
44 struct mlx5_core_cq *cq;
45 struct mlx5_cq_table *table = &dev->priv.cq_table;
46
47 spin_lock(&table->lock);
48 cq = radix_tree_lookup(&table->tree, cqn);
49 if (likely(cq))
50 atomic_inc(&cq->refcount);
51 spin_unlock(&table->lock);
52
53 if (!cq) {
54 mlx5_core_warn(dev, "Completion event for bogus CQ 0x%x\n", cqn);
55 return;
56 }
57
58 ++cq->arm_sn;
59
60 cq->comp(cq);
61
62 if (atomic_dec_and_test(&cq->refcount))
63 complete(&cq->free);
64}
65
66void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type)
67{
68 struct mlx5_cq_table *table = &dev->priv.cq_table;
69 struct mlx5_core_cq *cq;
70
71 spin_lock(&table->lock);
72
73 cq = radix_tree_lookup(&table->tree, cqn);
74 if (cq)
75 atomic_inc(&cq->refcount);
76
77 spin_unlock(&table->lock);
78
79 if (!cq) {
80 mlx5_core_warn(dev, "Async event for bogus CQ 0x%x\n", cqn);
81 return;
82 }
83
84 cq->event(cq, event_type);
85
86 if (atomic_dec_and_test(&cq->refcount))
87 complete(&cq->free);
88}
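/*
 * Both handlers above share one lifetime rule: a CQ found in the radix tree
 * is pinned by taking a reference under the table lock and released when the
 * handler is done; mlx5_core_destroy_cq() below waits on cq->free, which is
 * completed only when the last reference drops, so a CQ cannot disappear
 * while a completion or async event is still being delivered for it.
 */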
89
90
91int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
92 struct mlx5_create_cq_mbox_in *in, int inlen)
93{
94 int err;
95 struct mlx5_cq_table *table = &dev->priv.cq_table;
96 struct mlx5_create_cq_mbox_out out;
97 struct mlx5_destroy_cq_mbox_in din;
98 struct mlx5_destroy_cq_mbox_out dout;
99
100 in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_CQ);
101 memset(&out, 0, sizeof(out));
102 err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
103 if (err)
104 return err;
105
106 if (out.hdr.status)
107 return mlx5_cmd_status_to_err(&out.hdr);
108
109 cq->cqn = be32_to_cpu(out.cqn) & 0xffffff;
110 cq->cons_index = 0;
111 cq->arm_sn = 0;
112 atomic_set(&cq->refcount, 1);
113 init_completion(&cq->free);
114
115 spin_lock_irq(&table->lock);
116 err = radix_tree_insert(&table->tree, cq->cqn, cq);
117 spin_unlock_irq(&table->lock);
118 if (err)
119 goto err_cmd;
120
121 cq->pid = current->pid;
122 err = mlx5_debug_cq_add(dev, cq);
123 if (err)
124 mlx5_core_dbg(dev, "failed adding CQ 0x%x to debug file system\n",
125 cq->cqn);
126
127 return 0;
128
129err_cmd:
130 memset(&din, 0, sizeof(din));
131 memset(&dout, 0, sizeof(dout));
132 din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ);
133 mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));
134 return err;
135}
136EXPORT_SYMBOL(mlx5_core_create_cq);
137
138int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
139{
140 struct mlx5_cq_table *table = &dev->priv.cq_table;
141 struct mlx5_destroy_cq_mbox_in in;
142 struct mlx5_destroy_cq_mbox_out out;
143 struct mlx5_core_cq *tmp;
144 int err;
145
146 spin_lock_irq(&table->lock);
147 tmp = radix_tree_delete(&table->tree, cq->cqn);
148 spin_unlock_irq(&table->lock);
149 if (!tmp) {
150 mlx5_core_warn(dev, "cq 0x%x not found in tree\n", cq->cqn);
151 return -EINVAL;
152 }
153 if (tmp != cq) {
154 mlx5_core_warn(dev, "corruption on cqn 0x%x\n", cq->cqn);
155 return -EINVAL;
156 }
157
158 memset(&in, 0, sizeof(in));
159 memset(&out, 0, sizeof(out));
160 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ);
161 in.cqn = cpu_to_be32(cq->cqn);
162 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
163 if (err)
164 return err;
165
166 if (out.hdr.status)
167 return mlx5_cmd_status_to_err(&out.hdr);
168
169 synchronize_irq(cq->irqn);
170
171 mlx5_debug_cq_remove(dev, cq);
172 if (atomic_dec_and_test(&cq->refcount))
173 complete(&cq->free);
174 wait_for_completion(&cq->free);
175
176 return 0;
177}
178EXPORT_SYMBOL(mlx5_core_destroy_cq);
179
180int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
181 struct mlx5_query_cq_mbox_out *out)
182{
183 struct mlx5_query_cq_mbox_in in;
184 int err;
185
186 memset(&in, 0, sizeof(in));
187 memset(out, 0, sizeof(*out));
188
189 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_CQ);
190 in.cqn = cpu_to_be32(cq->cqn);
191 err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
192 if (err)
193 return err;
194
195 if (out->hdr.status)
196 return mlx5_cmd_status_to_err(&out->hdr);
197
198 return err;
199}
200EXPORT_SYMBOL(mlx5_core_query_cq);
201
202
203int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
204 int type, struct mlx5_cq_modify_params *params)
205{
206 return -ENOSYS;
207}
208
209int mlx5_init_cq_table(struct mlx5_core_dev *dev)
210{
211 struct mlx5_cq_table *table = &dev->priv.cq_table;
212 int err;
213
214 spin_lock_init(&table->lock);
215 INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
216 err = mlx5_cq_debugfs_init(dev);
217
218 return err;
219}
220
221void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev)
222{
223 mlx5_cq_debugfs_cleanup(dev);
224}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
new file mode 100644
index 000000000000..5e9cf2b9aaf7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -0,0 +1,587 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/module.h>
34#include <linux/debugfs.h>
35#include <linux/mlx5/qp.h>
36#include <linux/mlx5/cq.h>
37#include <linux/mlx5/driver.h>
38#include "mlx5_core.h"
39
40enum {
41 QP_PID,
42 QP_STATE,
43 QP_XPORT,
44 QP_MTU,
45 QP_N_RECV,
46 QP_RECV_SZ,
47 QP_N_SEND,
48 QP_LOG_PG_SZ,
49 QP_RQPN,
50};
51
52static char *qp_fields[] = {
53 [QP_PID] = "pid",
54 [QP_STATE] = "state",
55 [QP_XPORT] = "transport",
56 [QP_MTU] = "mtu",
57 [QP_N_RECV] = "num_recv",
58 [QP_RECV_SZ] = "rcv_wqe_sz",
59 [QP_N_SEND] = "num_send",
60 [QP_LOG_PG_SZ] = "log2_page_sz",
61 [QP_RQPN] = "remote_qpn",
62};
63
64enum {
65 EQ_NUM_EQES,
66 EQ_INTR,
67 EQ_LOG_PG_SZ,
68};
69
70static char *eq_fields[] = {
71 [EQ_NUM_EQES] = "num_eqes",
72 [EQ_INTR] = "intr",
73 [EQ_LOG_PG_SZ] = "log_page_size",
74};
75
76enum {
77 CQ_PID,
78 CQ_NUM_CQES,
79 CQ_LOG_PG_SZ,
80};
81
82static char *cq_fields[] = {
83 [CQ_PID] = "pid",
84 [CQ_NUM_CQES] = "num_cqes",
85 [CQ_LOG_PG_SZ] = "log_page_size",
86};
87
88struct dentry *mlx5_debugfs_root;
89EXPORT_SYMBOL(mlx5_debugfs_root);
90
91void mlx5_register_debugfs(void)
92{
93 mlx5_debugfs_root = debugfs_create_dir("mlx5", NULL);
94 if (IS_ERR_OR_NULL(mlx5_debugfs_root))
95 mlx5_debugfs_root = NULL;
96}
97
98void mlx5_unregister_debugfs(void)
99{
100 debugfs_remove(mlx5_debugfs_root);
101}
102
103int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev)
104{
105 if (!mlx5_debugfs_root)
106 return 0;
107
108 atomic_set(&dev->num_qps, 0);
109
110 dev->priv.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg_root);
111 if (!dev->priv.qp_debugfs)
112 return -ENOMEM;
113
114 return 0;
115}
116
117void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev)
118{
119 if (!mlx5_debugfs_root)
120 return;
121
122 debugfs_remove_recursive(dev->priv.qp_debugfs);
123}
124
125int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev)
126{
127 if (!mlx5_debugfs_root)
128 return 0;
129
130 dev->priv.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg_root);
131 if (!dev->priv.eq_debugfs)
132 return -ENOMEM;
133
134 return 0;
135}
136
137void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev)
138{
139 if (!mlx5_debugfs_root)
140 return;
141
142 debugfs_remove_recursive(dev->priv.eq_debugfs);
143}
144
145static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
146 loff_t *pos)
147{
148 struct mlx5_cmd_stats *stats;
149 u64 field = 0;
150 int ret;
151 int err;
152 char tbuf[22];
153
154 if (*pos)
155 return 0;
156
157 stats = filp->private_data;
158 spin_lock(&stats->lock);
159 if (stats->n)
160 field = stats->sum / stats->n;
161 spin_unlock(&stats->lock);
162 ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field);
163 if (ret > 0) {
164 err = copy_to_user(buf, tbuf, ret);
165 if (err)
166 return err;
167 }
168
169 *pos += ret;
170 return ret;
171}
172
173
174static ssize_t average_write(struct file *filp, const char __user *buf,
175 size_t count, loff_t *pos)
176{
177 struct mlx5_cmd_stats *stats;
178
179 stats = filp->private_data;
180 spin_lock(&stats->lock);
181 stats->sum = 0;
182 stats->n = 0;
183 spin_unlock(&stats->lock);
184
185 *pos += count;
186
187 return count;
188}
189
190static const struct file_operations stats_fops = {
191 .owner = THIS_MODULE,
192 .open = simple_open,
193 .read = average_read,
194 .write = average_write,
195};
196
197int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
198{
199 struct mlx5_cmd_stats *stats;
200 struct dentry **cmd;
201 const char *namep;
202 int err;
203 int i;
204
205 if (!mlx5_debugfs_root)
206 return 0;
207
208 cmd = &dev->priv.cmdif_debugfs;
209 *cmd = debugfs_create_dir("commands", dev->priv.dbg_root);
210 if (!*cmd)
211 return -ENOMEM;
212
213 for (i = 0; i < ARRAY_SIZE(dev->cmd.stats); i++) {
214 stats = &dev->cmd.stats[i];
215 namep = mlx5_command_str(i);
216 if (strcmp(namep, "unknown command opcode")) {
217 stats->root = debugfs_create_dir(namep, *cmd);
218 if (!stats->root) {
219 mlx5_core_warn(dev, "failed adding command %d\n",
220 i);
221 err = -ENOMEM;
222 goto out;
223 }
224
225 stats->avg = debugfs_create_file("average", 0400,
226 stats->root, stats,
227 &stats_fops);
228 if (!stats->avg) {
229 mlx5_core_warn(dev, "failed creating debugfs file\n");
230 err = -ENOMEM;
231 goto out;
232 }
233
234 stats->count = debugfs_create_u64("n", 0400,
235 stats->root,
236 &stats->n);
237 if (!stats->count) {
238 mlx5_core_warn(dev, "failed creating debugfs file\n");
239 err = -ENOMEM;
240 goto out;
241 }
242 }
243 }
244
245 return 0;
246out:
247 debugfs_remove_recursive(dev->priv.cmdif_debugfs);
248 return err;
249}
250
251void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev)
252{
253 if (!mlx5_debugfs_root)
254 return;
255
256 debugfs_remove_recursive(dev->priv.cmdif_debugfs);
257}
258
259int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev)
260{
261 if (!mlx5_debugfs_root)
262 return 0;
263
264 dev->priv.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg_root);
265 if (!dev->priv.cq_debugfs)
266 return -ENOMEM;
267
268 return 0;
269}
270
271void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev)
272{
273 if (!mlx5_debugfs_root)
274 return;
275
276 debugfs_remove_recursive(dev->priv.cq_debugfs);
277}
278
279static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
280 int index)
281{
282 struct mlx5_query_qp_mbox_out *out;
283 struct mlx5_qp_context *ctx;
284 u64 param = 0;
285 int err;
286 int no_sq;
287
288 out = kzalloc(sizeof(*out), GFP_KERNEL);
289 if (!out)
290 return param;
291
292 err = mlx5_core_qp_query(dev, qp, out, sizeof(*out));
293 if (err) {
294 mlx5_core_warn(dev, "failed to query qp\n");
295 goto out;
296 }
297
298 ctx = &out->ctx;
299 switch (index) {
300 case QP_PID:
301 param = qp->pid;
302 break;
303 case QP_STATE:
304 param = be32_to_cpu(ctx->flags) >> 28;
305 break;
306 case QP_XPORT:
307 param = (be32_to_cpu(ctx->flags) >> 16) & 0xff;
308 break;
309 case QP_MTU:
310 param = ctx->mtu_msgmax >> 5;
311 break;
312 case QP_N_RECV:
313 param = 1 << ((ctx->rq_size_stride >> 3) & 0xf);
314 break;
315 case QP_RECV_SZ:
316 param = 1 << ((ctx->rq_size_stride & 7) + 4);
317 break;
318 case QP_N_SEND:
319 no_sq = be16_to_cpu(ctx->sq_crq_size) >> 15;
320 if (!no_sq)
321 param = 1 << (be16_to_cpu(ctx->sq_crq_size) >> 11);
322 else
323 param = 0;
324 break;
325 case QP_LOG_PG_SZ:
326 param = ((cpu_to_be32(ctx->log_pg_sz_remote_qpn) >> 24) & 0x1f);
327 param += 12;
328 break;
329 case QP_RQPN:
330 param = cpu_to_be32(ctx->log_pg_sz_remote_qpn) & 0xffffff;
331 break;
332 }
333
334out:
335 kfree(out);
336 return param;
337}
338
339static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
340 int index)
341{
342 struct mlx5_query_eq_mbox_out *out;
343 struct mlx5_eq_context *ctx;
344 u64 param = 0;
345 int err;
346
347 out = kzalloc(sizeof(*out), GFP_KERNEL);
348 if (!out)
349 return param;
350
351 ctx = &out->ctx;
352
353 err = mlx5_core_eq_query(dev, eq, out, sizeof(*out));
354 if (err) {
355 mlx5_core_warn(dev, "failed to query eq\n");
356 goto out;
357 }
358
359 switch (index) {
360 case EQ_NUM_EQES:
361 param = 1 << ((be32_to_cpu(ctx->log_sz_usr_page) >> 24) & 0x1f);
362 break;
363 case EQ_INTR:
364 param = ctx->intr;
365 break;
366 case EQ_LOG_PG_SZ:
367 param = (ctx->log_page_size & 0x1f) + 12;
368 break;
369 }
370
371out:
372 kfree(out);
373 return param;
374}
375
376static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
377 int index)
378{
379 struct mlx5_query_cq_mbox_out *out;
380 struct mlx5_cq_context *ctx;
381 u64 param = 0;
382 int err;
383
384 out = kzalloc(sizeof(*out), GFP_KERNEL);
385 if (!out)
386 return param;
387
388 ctx = &out->ctx;
389
390 err = mlx5_core_query_cq(dev, cq, out);
391 if (err) {
392 mlx5_core_warn(dev, "failed to query cq\n");
393 goto out;
394 }
395
396 switch (index) {
397 case CQ_PID:
398 param = cq->pid;
399 break;
400 case CQ_NUM_CQES:
401 param = 1 << ((be32_to_cpu(ctx->log_sz_usr_page) >> 24) & 0x1f);
402 break;
403 case CQ_LOG_PG_SZ:
404 param = (ctx->log_pg_sz & 0x1f) + 12;
405 break;
406 }
407
408out:
409 kfree(out);
410 return param;
411}
412
413static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
414 loff_t *pos)
415{
416 struct mlx5_field_desc *desc;
417 struct mlx5_rsc_debug *d;
418 char tbuf[18];
419 u64 field;
420 int ret;
421 int err;
422
423 if (*pos)
424 return 0;
425
426 desc = filp->private_data;
427 d = (void *)(desc - desc->i) - sizeof(*d);
428 switch (d->type) {
429 case MLX5_DBG_RSC_QP:
430 field = qp_read_field(d->dev, d->object, desc->i);
431 break;
432
433 case MLX5_DBG_RSC_EQ:
434 field = eq_read_field(d->dev, d->object, desc->i);
435 break;
436
437 case MLX5_DBG_RSC_CQ:
438 field = cq_read_field(d->dev, d->object, desc->i);
439 break;
440
441 default:
442 mlx5_core_warn(d->dev, "invalid resource type %d\n", d->type);
443 return -EINVAL;
444 }
445
446 ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field);
447 if (ret > 0) {
448 err = copy_to_user(buf, tbuf, ret);
449 if (err)
450			return -EFAULT;
451 }
452
453 *pos += ret;
454 return ret;
455}
456
457static const struct file_operations fops = {
458 .owner = THIS_MODULE,
459 .open = simple_open,
460 .read = dbg_read,
461};
462
463static int add_res_tree(struct mlx5_core_dev *dev, enum dbg_rsc_type type,
464 struct dentry *root, struct mlx5_rsc_debug **dbg,
465 int rsn, char **field, int nfile, void *data)
466{
467 struct mlx5_rsc_debug *d;
468 char resn[32];
469 int err;
470 int i;
471
472 d = kzalloc(sizeof(*d) + nfile * sizeof(d->fields[0]), GFP_KERNEL);
473 if (!d)
474 return -ENOMEM;
475
476 d->dev = dev;
477 d->object = data;
478 d->type = type;
479 sprintf(resn, "0x%x", rsn);
480 d->root = debugfs_create_dir(resn, root);
481 if (!d->root) {
482 err = -ENOMEM;
483 goto out_free;
484 }
485
486 for (i = 0; i < nfile; i++) {
487 d->fields[i].i = i;
488 d->fields[i].dent = debugfs_create_file(field[i], 0400,
489 d->root, &d->fields[i],
490 &fops);
491 if (!d->fields[i].dent) {
492 err = -ENOMEM;
493 goto out_rem;
494 }
495 }
496 *dbg = d;
497
498 return 0;
499out_rem:
500 debugfs_remove_recursive(d->root);
501
502out_free:
503 kfree(d);
504 return err;
505}
506
507static void rem_res_tree(struct mlx5_rsc_debug *d)
508{
509 debugfs_remove_recursive(d->root);
510 kfree(d);
511}
512
513int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
514{
515 int err;
516
517 if (!mlx5_debugfs_root)
518 return 0;
519
520 err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.qp_debugfs,
521 &qp->dbg, qp->qpn, qp_fields,
522 ARRAY_SIZE(qp_fields), qp);
523 if (err)
524 qp->dbg = NULL;
525
526 return err;
527}
528
529void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
530{
531 if (!mlx5_debugfs_root)
532 return;
533
534 if (qp->dbg)
535 rem_res_tree(qp->dbg);
536}
537
538
539int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
540{
541 int err;
542
543 if (!mlx5_debugfs_root)
544 return 0;
545
546 err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.eq_debugfs,
547 &eq->dbg, eq->eqn, eq_fields,
548 ARRAY_SIZE(eq_fields), eq);
549 if (err)
550 eq->dbg = NULL;
551
552 return err;
553}
554
555void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
556{
557 if (!mlx5_debugfs_root)
558 return;
559
560 if (eq->dbg)
561 rem_res_tree(eq->dbg);
562}
563
564int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
565{
566 int err;
567
568 if (!mlx5_debugfs_root)
569 return 0;
570
571 err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.cq_debugfs,
572 &cq->dbg, cq->cqn, cq_fields,
573 ARRAY_SIZE(cq_fields), cq);
574 if (err)
575 cq->dbg = NULL;
576
577 return err;
578}
579
580void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
581{
582 if (!mlx5_debugfs_root)
583 return;
584
585 if (cq->dbg)
586 rem_res_tree(cq->dbg);
587}
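
The trickiest part of the debugfs code above is how dbg_read() recovers its container: each mlx5_field_desc records its own index i, so stepping back i descriptors lands on fields[0], and backing over the header yields the enclosing mlx5_rsc_debug. Below is a minimal user-space sketch of the same idea; the struct layout is a simplified stand-in for illustration, not the driver's, and offsetof() is used because the sketch has a fixed-size array rather than the driver's flexible array member.

#include <stdio.h>
#include <stddef.h>

struct field_desc {
	int i;                        /* index of this field within its container */
};

struct rsc_debug {
	void *object;
	struct field_desc fields[4];  /* a flexible array member in the driver */
};

/* Mirrors "d = (void *)(desc - desc->i) - sizeof(*d)": step back to
 * fields[0], then back over the header. */
static struct rsc_debug *desc_to_rsc(struct field_desc *desc)
{
	return (struct rsc_debug *)((char *)(desc - desc->i) -
				    offsetof(struct rsc_debug, fields));
}

int main(void)
{
	struct rsc_debug d = { .object = NULL };
	int i;

	for (i = 0; i < 4; i++)
		d.fields[i].i = i;

	/* prints the same address twice */
	printf("%p\n%p\n", (void *)&d, (void *)desc_to_rsc(&d.fields[2]));
	return 0;
}
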
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
new file mode 100644
index 000000000000..c02cbcfd0fb8
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -0,0 +1,521 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/interrupt.h>
34#include <linux/module.h>
35#include <linux/mlx5/driver.h>
36#include <linux/mlx5/cmd.h>
37#include "mlx5_core.h"
38
39enum {
40 MLX5_EQE_SIZE = sizeof(struct mlx5_eqe),
41 MLX5_EQE_OWNER_INIT_VAL = 0x1,
42};
43
44enum {
45 MLX5_EQ_STATE_ARMED = 0x9,
46 MLX5_EQ_STATE_FIRED = 0xa,
47 MLX5_EQ_STATE_ALWAYS_ARMED = 0xb,
48};
49
50enum {
51 MLX5_NUM_SPARE_EQE = 0x80,
52 MLX5_NUM_ASYNC_EQE = 0x100,
53 MLX5_NUM_CMD_EQE = 32,
54};
55
56enum {
57 MLX5_EQ_DOORBEL_OFFSET = 0x40,
58};
59
60#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG) | \
61 (1ull << MLX5_EVENT_TYPE_COMM_EST) | \
62 (1ull << MLX5_EVENT_TYPE_SQ_DRAINED) | \
63 (1ull << MLX5_EVENT_TYPE_CQ_ERROR) | \
64 (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR) | \
65 (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED) | \
66 (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
67 (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR) | \
68 (1ull << MLX5_EVENT_TYPE_PORT_CHANGE) | \
69 (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR) | \
70 (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE) | \
71 (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))
72
73struct map_eq_in {
74 u64 mask;
75 u32 reserved;
76 u32 unmap_eqn;
77};
78
79struct cre_des_eq {
80 u8 reserved[15];
81 u8 eqn;
82};
83
84static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
85{
86 struct mlx5_destroy_eq_mbox_in in;
87 struct mlx5_destroy_eq_mbox_out out;
88 int err;
89
90 memset(&in, 0, sizeof(in));
91 memset(&out, 0, sizeof(out));
92 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_EQ);
93 in.eqn = eqn;
94 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
95	if (err)
96 goto ex;
97
98 if (out.hdr.status)
99 err = mlx5_cmd_status_to_err(&out.hdr);
100
101ex:
102 return err;
103}
104
105static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
106{
107 return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
108}
109
110static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
111{
112 struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));
113
114 return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
115}
116
117static const char *eqe_type_str(u8 type)
118{
119 switch (type) {
120 case MLX5_EVENT_TYPE_COMP:
121 return "MLX5_EVENT_TYPE_COMP";
122 case MLX5_EVENT_TYPE_PATH_MIG:
123 return "MLX5_EVENT_TYPE_PATH_MIG";
124 case MLX5_EVENT_TYPE_COMM_EST:
125 return "MLX5_EVENT_TYPE_COMM_EST";
126 case MLX5_EVENT_TYPE_SQ_DRAINED:
127 return "MLX5_EVENT_TYPE_SQ_DRAINED";
128 case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
129 return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
130 case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
131 return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
132 case MLX5_EVENT_TYPE_CQ_ERROR:
133 return "MLX5_EVENT_TYPE_CQ_ERROR";
134 case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
135 return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
136 case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
137 return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
138 case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
139 return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
140 case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
141 return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
142 case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
143 return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
144 case MLX5_EVENT_TYPE_INTERNAL_ERROR:
145 return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
146 case MLX5_EVENT_TYPE_PORT_CHANGE:
147 return "MLX5_EVENT_TYPE_PORT_CHANGE";
148 case MLX5_EVENT_TYPE_GPIO_EVENT:
149 return "MLX5_EVENT_TYPE_GPIO_EVENT";
150 case MLX5_EVENT_TYPE_REMOTE_CONFIG:
151 return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
152 case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
153 return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
154 case MLX5_EVENT_TYPE_STALL_EVENT:
155 return "MLX5_EVENT_TYPE_STALL_EVENT";
156 case MLX5_EVENT_TYPE_CMD:
157 return "MLX5_EVENT_TYPE_CMD";
158 case MLX5_EVENT_TYPE_PAGE_REQUEST:
159 return "MLX5_EVENT_TYPE_PAGE_REQUEST";
160 default:
161 return "Unrecognized event";
162 }
163}
164
165static enum mlx5_dev_event port_subtype_event(u8 subtype)
166{
167 switch (subtype) {
168 case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
169 return MLX5_DEV_EVENT_PORT_DOWN;
170 case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
171 return MLX5_DEV_EVENT_PORT_UP;
172 case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
173 return MLX5_DEV_EVENT_PORT_INITIALIZED;
174 case MLX5_PORT_CHANGE_SUBTYPE_LID:
175 return MLX5_DEV_EVENT_LID_CHANGE;
176 case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
177 return MLX5_DEV_EVENT_PKEY_CHANGE;
178 case MLX5_PORT_CHANGE_SUBTYPE_GUID:
179 return MLX5_DEV_EVENT_GUID_CHANGE;
180 case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
181 return MLX5_DEV_EVENT_CLIENT_REREG;
182 }
183 return -1;
184}
185
186static void eq_update_ci(struct mlx5_eq *eq, int arm)
187{
188 __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
189 u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
190 __raw_writel((__force u32) cpu_to_be32(val), addr);
191 /* We still want ordering, just not swabbing, so add a barrier */
192 mb();
193}
194
195static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
196{
197 struct mlx5_eqe *eqe;
198 int eqes_found = 0;
199 int set_ci = 0;
200 u32 cqn;
201 u32 srqn;
202 u8 port;
203
204 while ((eqe = next_eqe_sw(eq))) {
205 /*
206 * Make sure we read EQ entry contents after we've
207 * checked the ownership bit.
208 */
209 rmb();
210
211 mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n", eq->eqn, eqe_type_str(eqe->type));
212 switch (eqe->type) {
213 case MLX5_EVENT_TYPE_COMP:
214 cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
215 mlx5_cq_completion(dev, cqn);
216 break;
217
218 case MLX5_EVENT_TYPE_PATH_MIG:
219 case MLX5_EVENT_TYPE_COMM_EST:
220 case MLX5_EVENT_TYPE_SQ_DRAINED:
221 case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
222 case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
223 case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
224 case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
225 case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
226 mlx5_core_dbg(dev, "event %s(%d) arrived\n",
227 eqe_type_str(eqe->type), eqe->type);
228 mlx5_qp_event(dev, be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff,
229 eqe->type);
230 break;
231
232 case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
233 case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
234 srqn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
235 mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
236 eqe_type_str(eqe->type), eqe->type, srqn);
237 mlx5_srq_event(dev, srqn, eqe->type);
238 break;
239
240 case MLX5_EVENT_TYPE_CMD:
241 mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector));
242 break;
243
244 case MLX5_EVENT_TYPE_PORT_CHANGE:
245 port = (eqe->data.port.port >> 4) & 0xf;
246 switch (eqe->sub_type) {
247 case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
248 case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
249 case MLX5_PORT_CHANGE_SUBTYPE_LID:
250 case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
251 case MLX5_PORT_CHANGE_SUBTYPE_GUID:
252 case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
253 case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
254 dev->event(dev, port_subtype_event(eqe->sub_type), &port);
255 break;
256 default:
257 mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
258 port, eqe->sub_type);
259 }
260 break;
261 case MLX5_EVENT_TYPE_CQ_ERROR:
262 cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
263			mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
264 cqn, eqe->data.cq_err.syndrome);
265 mlx5_cq_event(dev, cqn, eqe->type);
266 break;
267
268 case MLX5_EVENT_TYPE_PAGE_REQUEST:
269 {
270 u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
271 s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages);
272
273			mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n", func_id, npages);
274 mlx5_core_req_pages_handler(dev, func_id, npages);
275 }
276 break;
277
278
279 default:
280 mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", eqe->type, eq->eqn);
281 break;
282 }
283
284 ++eq->cons_index;
285 eqes_found = 1;
286 ++set_ci;
287
288 /* The HCA will think the queue has overflowed if we
289 * don't tell it we've been processing events. We
290 * create our EQs with MLX5_NUM_SPARE_EQE extra
291 * entries, so we must update our consumer index at
292 * least that often.
293 */
294 if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
295 eq_update_ci(eq, 0);
296 set_ci = 0;
297 }
298 }
299
300 eq_update_ci(eq, 1);
301
302 return eqes_found;
303}
304
305static irqreturn_t mlx5_msix_handler(int irq, void *eq_ptr)
306{
307 struct mlx5_eq *eq = eq_ptr;
308 struct mlx5_core_dev *dev = eq->dev;
309
310 mlx5_eq_int(dev, eq);
311
312 /* MSI-X vectors always belong to us */
313 return IRQ_HANDLED;
314}
315
316static void init_eq_buf(struct mlx5_eq *eq)
317{
318 struct mlx5_eqe *eqe;
319 int i;
320
321 for (i = 0; i < eq->nent; i++) {
322 eqe = get_eqe(eq, i);
323 eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
324 }
325}
326
327int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
328 int nent, u64 mask, const char *name, struct mlx5_uar *uar)
329{
330 struct mlx5_eq_table *table = &dev->priv.eq_table;
331 struct mlx5_create_eq_mbox_in *in;
332 struct mlx5_create_eq_mbox_out out;
333 int err;
334 int inlen;
335
336 eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
337 err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, 2 * PAGE_SIZE,
338 &eq->buf);
339 if (err)
340 return err;
341
342 init_eq_buf(eq);
343
344 inlen = sizeof(*in) + sizeof(in->pas[0]) * eq->buf.npages;
345 in = mlx5_vzalloc(inlen);
346 if (!in) {
347 err = -ENOMEM;
348 goto err_buf;
349 }
350 memset(&out, 0, sizeof(out));
351
352 mlx5_fill_page_array(&eq->buf, in->pas);
353
354 in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_EQ);
355 in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(eq->nent) << 24 | uar->index);
356 in->ctx.intr = vecidx;
357 in->ctx.log_page_size = PAGE_SHIFT - 12;
358 in->events_mask = cpu_to_be64(mask);
359
360 err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
361 if (err)
362 goto err_in;
363
364 if (out.hdr.status) {
365 err = mlx5_cmd_status_to_err(&out.hdr);
366 goto err_in;
367 }
368
369 eq->eqn = out.eq_number;
370 err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
371 name, eq);
372 if (err)
373 goto err_eq;
374
375 eq->irqn = vecidx;
376 eq->dev = dev;
377 eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
378
379 err = mlx5_debug_eq_add(dev, eq);
380 if (err)
381 goto err_irq;
382
383 /* EQs are created in ARMED state
384 */
385 eq_update_ci(eq, 1);
386
387 mlx5_vfree(in);
388 return 0;
389
390err_irq:
391 free_irq(table->msix_arr[vecidx].vector, eq);
392
393err_eq:
394 mlx5_cmd_destroy_eq(dev, eq->eqn);
395
396err_in:
397 mlx5_vfree(in);
398
399err_buf:
400 mlx5_buf_free(dev, &eq->buf);
401 return err;
402}
403EXPORT_SYMBOL_GPL(mlx5_create_map_eq);
404
405int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
406{
407 struct mlx5_eq_table *table = &dev->priv.eq_table;
408 int err;
409
410 mlx5_debug_eq_remove(dev, eq);
411 free_irq(table->msix_arr[eq->irqn].vector, eq);
412 err = mlx5_cmd_destroy_eq(dev, eq->eqn);
413 if (err)
414 mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
415 eq->eqn);
416 mlx5_buf_free(dev, &eq->buf);
417
418 return err;
419}
420EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq);
421
422int mlx5_eq_init(struct mlx5_core_dev *dev)
423{
424 int err;
425
426 spin_lock_init(&dev->priv.eq_table.lock);
427
428 err = mlx5_eq_debugfs_init(dev);
429
430 return err;
431}
432
433
434void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
435{
436 mlx5_eq_debugfs_cleanup(dev);
437}
438
439int mlx5_start_eqs(struct mlx5_core_dev *dev)
440{
441 struct mlx5_eq_table *table = &dev->priv.eq_table;
442 int err;
443
444 err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
445 MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
446 "mlx5_cmd_eq", &dev->priv.uuari.uars[0]);
447 if (err) {
448 mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
449 return err;
450 }
451
452 mlx5_cmd_use_events(dev);
453
454 err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
455 MLX5_NUM_ASYNC_EQE, MLX5_ASYNC_EVENT_MASK,
456 "mlx5_async_eq", &dev->priv.uuari.uars[0]);
457 if (err) {
458 mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
459 goto err1;
460 }
461
462 err = mlx5_create_map_eq(dev, &table->pages_eq,
463 MLX5_EQ_VEC_PAGES,
464 dev->caps.max_vf + 1,
465 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
466 &dev->priv.uuari.uars[0]);
467 if (err) {
468 mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
469 goto err2;
470 }
471
472 return err;
473
474err2:
475 mlx5_destroy_unmap_eq(dev, &table->async_eq);
476
477err1:
478 mlx5_cmd_use_polling(dev);
479 mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
480 return err;
481}
482
483int mlx5_stop_eqs(struct mlx5_core_dev *dev)
484{
485 struct mlx5_eq_table *table = &dev->priv.eq_table;
486 int err;
487
488 err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
489 if (err)
490 return err;
491
492 mlx5_destroy_unmap_eq(dev, &table->async_eq);
493 mlx5_cmd_use_polling(dev);
494
495 err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
496 if (err)
497 mlx5_cmd_use_events(dev);
498
499 return err;
500}
501
502int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
503 struct mlx5_query_eq_mbox_out *out, int outlen)
504{
505 struct mlx5_query_eq_mbox_in in;
506 int err;
507
508 memset(&in, 0, sizeof(in));
509 memset(out, 0, outlen);
510 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_EQ);
511 in.eqn = eq->eqn;
512 err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
513 if (err)
514 return err;
515
516 if (out->hdr.status)
517 err = mlx5_cmd_status_to_err(&out->hdr);
518
519 return err;
520}
521EXPORT_SYMBOL_GPL(mlx5_core_eq_query);
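
next_eqe_sw() above is the heart of the EQ polling loop: the ring size is a power of two, every entry is initialized with owner = 1, and, as that init value suggests, the hardware writes the opposite owner bit on each pass over the ring, so an entry belongs to software only when its owner bit matches the wrap parity of the consumer index. A small standalone sketch of that test follows; the ring size and sample values are made up, but the expression mirrors the one in next_eqe_sw().

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define NENT 8u   /* ring size, a power of two as in the driver */

/* true when the entry at cons_index is software-owned, i.e. the XOR in
 * next_eqe_sw() evaluates to zero */
static bool sw_owned(uint8_t owner, uint32_t cons_index)
{
	return ((owner & 1) ^ !!(cons_index & NENT)) == 0;
}

int main(void)
{
	/* first pass over the ring: hardware cleared the init value of 1 */
	printf("%d\n", sw_owned(0, 3));          /* 1 -> consume the entry */
	/* same owner bit seen after the consumer wrapped once: stale entry */
	printf("%d\n", sw_owned(0, 3 + NENT));   /* 0 -> stop polling */
	return 0;
}
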
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
new file mode 100644
index 000000000000..72a5222447f5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -0,0 +1,185 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/mlx5/driver.h>
34#include <linux/mlx5/cmd.h>
35#include <linux/module.h>
36#include "mlx5_core.h"
37
38int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev)
39{
40 struct mlx5_cmd_query_adapter_mbox_out *out;
41 struct mlx5_cmd_query_adapter_mbox_in in;
42 int err;
43
44 out = kzalloc(sizeof(*out), GFP_KERNEL);
45 if (!out)
46 return -ENOMEM;
47
48 memset(&in, 0, sizeof(in));
49 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_ADAPTER);
50 err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
51 if (err)
52 goto out_out;
53
54 if (out->hdr.status) {
55 err = mlx5_cmd_status_to_err(&out->hdr);
56 goto out_out;
57 }
58
59 memcpy(dev->board_id, out->vsd_psid, sizeof(out->vsd_psid));
60
61out_out:
62 kfree(out);
63
64 return err;
65}
66
67int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev,
68 struct mlx5_caps *caps)
69{
70 struct mlx5_cmd_query_hca_cap_mbox_out *out;
71 struct mlx5_cmd_query_hca_cap_mbox_in in;
72 struct mlx5_query_special_ctxs_mbox_out ctx_out;
73 struct mlx5_query_special_ctxs_mbox_in ctx_in;
74 int err;
75 u16 t16;
76
77 out = kzalloc(sizeof(*out), GFP_KERNEL);
78 if (!out)
79 return -ENOMEM;
80
81 memset(&in, 0, sizeof(in));
82 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_HCA_CAP);
83 in.hdr.opmod = cpu_to_be16(0x1);
84 err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
85 if (err)
86 goto out_out;
87
88 if (out->hdr.status) {
89 err = mlx5_cmd_status_to_err(&out->hdr);
90 goto out_out;
91 }
92
93
94 caps->log_max_eq = out->hca_cap.log_max_eq & 0xf;
95 caps->max_cqes = 1 << out->hca_cap.log_max_cq_sz;
96 caps->max_wqes = 1 << out->hca_cap.log_max_qp_sz;
97 caps->max_sq_desc_sz = be16_to_cpu(out->hca_cap.max_desc_sz_sq);
98 caps->max_rq_desc_sz = be16_to_cpu(out->hca_cap.max_desc_sz_rq);
99 caps->flags = be64_to_cpu(out->hca_cap.flags);
100 caps->stat_rate_support = be16_to_cpu(out->hca_cap.stat_rate_support);
101 caps->log_max_msg = out->hca_cap.log_max_msg & 0x1f;
102 caps->num_ports = out->hca_cap.num_ports & 0xf;
103 caps->log_max_cq = out->hca_cap.log_max_cq & 0x1f;
104 if (caps->num_ports > MLX5_MAX_PORTS) {
105 mlx5_core_err(dev, "device has %d ports while the driver supports max %d ports\n",
106 caps->num_ports, MLX5_MAX_PORTS);
107 err = -EINVAL;
108 goto out_out;
109 }
110 caps->log_max_qp = out->hca_cap.log_max_qp & 0x1f;
111 caps->log_max_mkey = out->hca_cap.log_max_mkey & 0x3f;
112 caps->log_max_pd = out->hca_cap.log_max_pd & 0x1f;
113 caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f;
114 caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f;
115 caps->log_max_mcg = out->hca_cap.log_max_mcg;
116 caps->max_qp_mcg = be16_to_cpu(out->hca_cap.max_qp_mcg);
117 caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f);
118 caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f);
119 caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz;
120 t16 = be16_to_cpu(out->hca_cap.bf_log_bf_reg_size);
121 if (t16 & 0x8000) {
122 caps->bf_reg_size = 1 << (t16 & 0x1f);
123 caps->bf_regs_per_page = MLX5_BF_REGS_PER_PAGE;
124 } else {
125 caps->bf_reg_size = 0;
126 caps->bf_regs_per_page = 0;
127 }
128 caps->min_page_sz = ~(u32)((1 << out->hca_cap.log_pg_sz) - 1);
129
130 memset(&ctx_in, 0, sizeof(ctx_in));
131 memset(&ctx_out, 0, sizeof(ctx_out));
132 ctx_in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
133 err = mlx5_cmd_exec(dev, &ctx_in, sizeof(ctx_in),
134 &ctx_out, sizeof(ctx_out));
135 if (err)
136 goto out_out;
137
138 if (ctx_out.hdr.status)
139 err = mlx5_cmd_status_to_err(&ctx_out.hdr);
140
141 caps->reserved_lkey = be32_to_cpu(ctx_out.reserved_lkey);
142
143out_out:
144 kfree(out);
145
146 return err;
147}
148
149int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
150{
151 struct mlx5_cmd_init_hca_mbox_in in;
152 struct mlx5_cmd_init_hca_mbox_out out;
153 int err;
154
155 memset(&in, 0, sizeof(in));
156 memset(&out, 0, sizeof(out));
157 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_INIT_HCA);
158 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
159 if (err)
160 return err;
161
162 if (out.hdr.status)
163 err = mlx5_cmd_status_to_err(&out.hdr);
164
165 return err;
166}
167
168int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
169{
170 struct mlx5_cmd_teardown_hca_mbox_in in;
171 struct mlx5_cmd_teardown_hca_mbox_out out;
172 int err;
173
174 memset(&in, 0, sizeof(in));
175 memset(&out, 0, sizeof(out));
176 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_TEARDOWN_HCA);
177 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
178 if (err)
179 return err;
180
181 if (out.hdr.status)
182 err = mlx5_cmd_status_to_err(&out.hdr);
183
184 return err;
185}
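
Most of mlx5_cmd_query_hca_cap() above is straightforward unpacking of log-sized fields; the one packed field is bf_log_bf_reg_size, where bit 15 gates the BlueFlame fields and the low five bits carry log2 of the register size. A tiny sketch of that decode, with an invented sample value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t bf_log_bf_reg_size = 0x8009;   /* bit 15 set, log size 9 */
	uint32_t bf_reg_size = 0;

	if (bf_log_bf_reg_size & 0x8000)        /* BlueFlame advertised */
		bf_reg_size = 1u << (bf_log_bf_reg_size & 0x1f);

	printf("bf_reg_size = %u bytes\n", bf_reg_size);   /* 512 */
	return 0;
}
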
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
new file mode 100644
index 000000000000..ea4b9bca6d4a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -0,0 +1,217 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/random.h>
36#include <linux/vmalloc.h>
37#include <linux/mlx5/driver.h>
38#include <linux/mlx5/cmd.h>
39#include "mlx5_core.h"
40
41enum {
42 MLX5_HEALTH_POLL_INTERVAL = 2 * HZ,
43 MAX_MISSES = 3,
44};
45
46enum {
47 MLX5_HEALTH_SYNDR_FW_ERR = 0x1,
48 MLX5_HEALTH_SYNDR_IRISC_ERR = 0x7,
49 MLX5_HEALTH_SYNDR_CRC_ERR = 0x9,
50 MLX5_HEALTH_SYNDR_FETCH_PCI_ERR = 0xa,
51 MLX5_HEALTH_SYNDR_HW_FTL_ERR = 0xb,
52 MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR = 0xc,
53 MLX5_HEALTH_SYNDR_EQ_ERR = 0xd,
54 MLX5_HEALTH_SYNDR_FFSER_ERR = 0xf,
55};
56
57static DEFINE_SPINLOCK(health_lock);
58
59static LIST_HEAD(health_list);
60static struct work_struct health_work;
61
62static health_handler_t reg_handler;
63int mlx5_register_health_report_handler(health_handler_t handler)
64{
65 spin_lock_irq(&health_lock);
66 if (reg_handler) {
67 spin_unlock_irq(&health_lock);
68 return -EEXIST;
69 }
70 reg_handler = handler;
71 spin_unlock_irq(&health_lock);
72
73 return 0;
74}
75EXPORT_SYMBOL(mlx5_register_health_report_handler);
76
77void mlx5_unregister_health_report_handler(void)
78{
79 spin_lock_irq(&health_lock);
80 reg_handler = NULL;
81 spin_unlock_irq(&health_lock);
82}
83EXPORT_SYMBOL(mlx5_unregister_health_report_handler);
84
85static void health_care(struct work_struct *work)
86{
87 struct mlx5_core_health *health, *n;
88 struct mlx5_core_dev *dev;
89 struct mlx5_priv *priv;
90 LIST_HEAD(tlist);
91
92 spin_lock_irq(&health_lock);
93 list_splice_init(&health_list, &tlist);
94
95 spin_unlock_irq(&health_lock);
96
97 list_for_each_entry_safe(health, n, &tlist, list) {
98 priv = container_of(health, struct mlx5_priv, health);
99 dev = container_of(priv, struct mlx5_core_dev, priv);
100 mlx5_core_warn(dev, "handling bad device here\n");
101 spin_lock_irq(&health_lock);
102 if (reg_handler)
103 reg_handler(dev->pdev, health->health,
104 sizeof(health->health));
105
106 list_del_init(&health->list);
107 spin_unlock_irq(&health_lock);
108 }
109}
110
111static const char *hsynd_str(u8 synd)
112{
113 switch (synd) {
114 case MLX5_HEALTH_SYNDR_FW_ERR:
115 return "firmware internal error";
116 case MLX5_HEALTH_SYNDR_IRISC_ERR:
117 return "irisc not responding";
118 case MLX5_HEALTH_SYNDR_CRC_ERR:
119 return "firmware CRC error";
120 case MLX5_HEALTH_SYNDR_FETCH_PCI_ERR:
121 return "ICM fetch PCI error";
122 case MLX5_HEALTH_SYNDR_HW_FTL_ERR:
123		return "HW fatal error";
124 case MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR:
125 return "async EQ buffer overrun";
126 case MLX5_HEALTH_SYNDR_EQ_ERR:
127 return "EQ error";
128 case MLX5_HEALTH_SYNDR_FFSER_ERR:
129 return "FFSER error";
130 default:
131 return "unrecognized error";
132 }
133}
134
135static void print_health_info(struct mlx5_core_dev *dev)
136{
137 struct mlx5_core_health *health = &dev->priv.health;
138 struct health_buffer __iomem *h = health->health;
139 int i;
140
141 for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
142 pr_info("assert_var[%d] 0x%08x\n", i, be32_to_cpu(h->assert_var[i]));
143
144 pr_info("assert_exit_ptr 0x%08x\n", be32_to_cpu(h->assert_exit_ptr));
145 pr_info("assert_callra 0x%08x\n", be32_to_cpu(h->assert_callra));
146 pr_info("fw_ver 0x%08x\n", be32_to_cpu(h->fw_ver));
147 pr_info("hw_id 0x%08x\n", be32_to_cpu(h->hw_id));
148 pr_info("irisc_index %d\n", h->irisc_index);
149 pr_info("synd 0x%x: %s\n", h->synd, hsynd_str(h->synd));
150 pr_info("ext_sync 0x%04x\n", be16_to_cpu(h->ext_sync));
151}
152
153static void poll_health(unsigned long data)
154{
155 struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data;
156 struct mlx5_core_health *health = &dev->priv.health;
157 unsigned long next;
158 u32 count;
159
160 count = ioread32be(health->health_counter);
161 if (count == health->prev)
162 ++health->miss_counter;
163 else
164 health->miss_counter = 0;
165
166 health->prev = count;
167 if (health->miss_counter == MAX_MISSES) {
168 mlx5_core_err(dev, "device's health compromised\n");
169 print_health_info(dev);
170 spin_lock_irq(&health_lock);
171 list_add_tail(&health->list, &health_list);
172 spin_unlock_irq(&health_lock);
173
174 queue_work(mlx5_core_wq, &health_work);
175 } else {
176 get_random_bytes(&next, sizeof(next));
177 next %= HZ;
178 next += jiffies + MLX5_HEALTH_POLL_INTERVAL;
179 mod_timer(&health->timer, next);
180 }
181}
182
183void mlx5_start_health_poll(struct mlx5_core_dev *dev)
184{
185 struct mlx5_core_health *health = &dev->priv.health;
186
187 INIT_LIST_HEAD(&health->list);
188 init_timer(&health->timer);
189 health->health = &dev->iseg->health;
190 health->health_counter = &dev->iseg->health_counter;
191
192 health->timer.data = (unsigned long)dev;
193 health->timer.function = poll_health;
194 health->timer.expires = round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL);
195 add_timer(&health->timer);
196}
197
198void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
199{
200 struct mlx5_core_health *health = &dev->priv.health;
201
202 del_timer_sync(&health->timer);
203
204 spin_lock_irq(&health_lock);
205 if (!list_empty(&health->list))
206 list_del_init(&health->list);
207 spin_unlock_irq(&health_lock);
208}
209
210void mlx5_health_cleanup(void)
211{
212}
213
214void __init mlx5_health_init(void)
215{
216 INIT_WORK(&health_work, health_care);
217}
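
The health poller above declares a device compromised after MAX_MISSES consecutive reads of an unchanged firmware counter, and it deliberately staggers its polls: the next timer expiry is the two-second interval plus a random sub-second offset, so several devices do not hit the bus in lockstep. A user-space sketch of that scheduling arithmetic, with HZ and jiffies standing in for the kernel's:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define HZ            250              /* assumed tick rate for the sketch */
#define POLL_INTERVAL (2 * HZ)

int main(void)
{
	unsigned long jiffies = 100000;    /* pretend current tick count */
	unsigned long next;

	srand((unsigned)time(NULL));

	next = (unsigned long)rand();
	next %= HZ;                        /* 0 .. HZ-1 ticks of jitter */
	next += jiffies + POLL_INTERVAL;

	printf("next poll at tick %lu (expected range %lu..%lu)\n",
	       next, jiffies + POLL_INTERVAL, jiffies + POLL_INTERVAL + HZ - 1);
	return 0;
}
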
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mad.c b/drivers/net/ethernet/mellanox/mlx5/core/mad.c
new file mode 100644
index 000000000000..18d6fd5dd90b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mad.c
@@ -0,0 +1,78 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/mlx5/driver.h>
36#include <linux/mlx5/cmd.h>
37#include "mlx5_core.h"
38
39int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb,
40 u16 opmod, int port)
41{
42 struct mlx5_mad_ifc_mbox_in *in = NULL;
43 struct mlx5_mad_ifc_mbox_out *out = NULL;
44 int err;
45
46 in = kzalloc(sizeof(*in), GFP_KERNEL);
47 if (!in)
48 return -ENOMEM;
49
50 out = kzalloc(sizeof(*out), GFP_KERNEL);
51 if (!out) {
52 err = -ENOMEM;
53 goto out;
54 }
55
56 in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MAD_IFC);
57 in->hdr.opmod = cpu_to_be16(opmod);
58 in->port = port;
59
60 memcpy(in->data, inb, sizeof(in->data));
61
62 err = mlx5_cmd_exec(dev, in, sizeof(*in), out, sizeof(*out));
63 if (err)
64 goto out;
65
66 if (out->hdr.status) {
67 err = mlx5_cmd_status_to_err(&out->hdr);
68 goto out;
69 }
70
71 memcpy(outb, out->data, sizeof(out->data));
72
73out:
74 kfree(out);
75 kfree(in);
76 return err;
77}
78EXPORT_SYMBOL_GPL(mlx5_core_mad_ifc);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
new file mode 100644
index 000000000000..f21cc397d1bc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -0,0 +1,475 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <asm-generic/kmap_types.h>
34#include <linux/module.h>
35#include <linux/init.h>
36#include <linux/errno.h>
37#include <linux/pci.h>
38#include <linux/dma-mapping.h>
39#include <linux/slab.h>
40#include <linux/io-mapping.h>
41#include <linux/mlx5/driver.h>
42#include <linux/mlx5/cq.h>
43#include <linux/mlx5/qp.h>
44#include <linux/mlx5/srq.h>
45#include <linux/debugfs.h>
46#include "mlx5_core.h"
47
48#define DRIVER_NAME "mlx5_core"
49#define DRIVER_VERSION "1.0"
50#define DRIVER_RELDATE "June 2013"
51
52MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
53MODULE_DESCRIPTION("Mellanox ConnectX-IB HCA core library");
54MODULE_LICENSE("Dual BSD/GPL");
55MODULE_VERSION(DRIVER_VERSION);
56
57int mlx5_core_debug_mask;
58module_param_named(debug_mask, mlx5_core_debug_mask, int, 0644);
59MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");
60
61struct workqueue_struct *mlx5_core_wq;
62
63static int set_dma_caps(struct pci_dev *pdev)
64{
65 int err;
66
67 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
68 if (err) {
69 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
70 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
71 if (err) {
72 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
73 return err;
74 }
75 }
76
77 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
78 if (err) {
79 dev_warn(&pdev->dev,
80 "Warning: couldn't set 64-bit consistent PCI DMA mask.\n");
81 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
82 if (err) {
83 dev_err(&pdev->dev,
84 "Can't set consistent PCI DMA mask, aborting.\n");
85 return err;
86 }
87 }
88
89 dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
90 return err;
91}
92
93static int request_bar(struct pci_dev *pdev)
94{
95 int err = 0;
96
97 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
98 dev_err(&pdev->dev, "Missing registers BAR, aborting.\n");
99 return -ENODEV;
100 }
101
102 err = pci_request_regions(pdev, DRIVER_NAME);
103 if (err)
104 dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
105
106 return err;
107}
108
109static void release_bar(struct pci_dev *pdev)
110{
111 pci_release_regions(pdev);
112}
113
114static int mlx5_enable_msix(struct mlx5_core_dev *dev)
115{
116 struct mlx5_eq_table *table = &dev->priv.eq_table;
117 int num_eqs = 1 << dev->caps.log_max_eq;
118 int nvec;
119 int err;
120 int i;
121
122 nvec = dev->caps.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE;
123 nvec = min_t(int, nvec, num_eqs);
124 if (nvec <= MLX5_EQ_VEC_COMP_BASE)
125 return -ENOMEM;
126
127 table->msix_arr = kzalloc(nvec * sizeof(*table->msix_arr), GFP_KERNEL);
128 if (!table->msix_arr)
129 return -ENOMEM;
130
131 for (i = 0; i < nvec; i++)
132 table->msix_arr[i].entry = i;
133
134retry:
135 table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
136 err = pci_enable_msix(dev->pdev, table->msix_arr, nvec);
137 if (err <= 0) {
138 return err;
139 } else if (err > 2) {
140 nvec = err;
141 goto retry;
142 }
143
144 mlx5_core_dbg(dev, "received %d MSI vectors out of %d requested\n", err, nvec);
145
146 return 0;
147}
148
149static void mlx5_disable_msix(struct mlx5_core_dev *dev)
150{
151 struct mlx5_eq_table *table = &dev->priv.eq_table;
152
153 pci_disable_msix(dev->pdev);
154 kfree(table->msix_arr);
155}
156
157struct mlx5_reg_host_endianess {
158 u8 he;
159 u8 rsvd[15];
160};
161
162static int handle_hca_cap(struct mlx5_core_dev *dev)
163{
164 struct mlx5_cmd_query_hca_cap_mbox_out *query_out = NULL;
165 struct mlx5_cmd_set_hca_cap_mbox_in *set_ctx = NULL;
166 struct mlx5_cmd_query_hca_cap_mbox_in query_ctx;
167 struct mlx5_cmd_set_hca_cap_mbox_out set_out;
168 struct mlx5_profile *prof = dev->profile;
169 u64 flags;
170 int csum = 1;
171 int err;
172
173 memset(&query_ctx, 0, sizeof(query_ctx));
174 query_out = kzalloc(sizeof(*query_out), GFP_KERNEL);
175 if (!query_out)
176 return -ENOMEM;
177
178 set_ctx = kzalloc(sizeof(*set_ctx), GFP_KERNEL);
179 if (!set_ctx) {
180 err = -ENOMEM;
181 goto query_ex;
182 }
183
184 query_ctx.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_HCA_CAP);
185 query_ctx.hdr.opmod = cpu_to_be16(0x1);
186 err = mlx5_cmd_exec(dev, &query_ctx, sizeof(query_ctx),
187 query_out, sizeof(*query_out));
188 if (err)
189 goto query_ex;
190
191 err = mlx5_cmd_status_to_err(&query_out->hdr);
192 if (err) {
193 mlx5_core_warn(dev, "query hca cap failed, %d\n", err);
194 goto query_ex;
195 }
196
197 memcpy(&set_ctx->hca_cap, &query_out->hca_cap,
198 sizeof(set_ctx->hca_cap));
199
200 if (prof->mask & MLX5_PROF_MASK_CMDIF_CSUM) {
201 csum = !!prof->cmdif_csum;
202 flags = be64_to_cpu(set_ctx->hca_cap.flags);
203 if (csum)
204 flags |= MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
205 else
206 flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
207
208 set_ctx->hca_cap.flags = cpu_to_be64(flags);
209 }
210
211 if (dev->profile->mask & MLX5_PROF_MASK_QP_SIZE)
212 set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp;
213
214 memset(&set_out, 0, sizeof(set_out));
215 set_ctx->hca_cap.uar_page_sz = cpu_to_be16(PAGE_SHIFT - 12);
216 set_ctx->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_HCA_CAP);
217 err = mlx5_cmd_exec(dev, set_ctx, sizeof(*set_ctx),
218 &set_out, sizeof(set_out));
219 if (err) {
220 mlx5_core_warn(dev, "set hca cap failed, %d\n", err);
221 goto query_ex;
222 }
223
224 err = mlx5_cmd_status_to_err(&set_out.hdr);
225 if (err)
226 goto query_ex;
227
228 if (!csum)
229 dev->cmd.checksum_disabled = 1;
230
231query_ex:
232 kfree(query_out);
233 kfree(set_ctx);
234
235 return err;
236}
237
238static int set_hca_ctrl(struct mlx5_core_dev *dev)
239{
240 struct mlx5_reg_host_endianess he_in;
241 struct mlx5_reg_host_endianess he_out;
242 int err;
243
244 memset(&he_in, 0, sizeof(he_in));
245 he_in.he = MLX5_SET_HOST_ENDIANNESS;
246 err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in),
247 &he_out, sizeof(he_out),
248 MLX5_REG_HOST_ENDIANNESS, 0, 1);
249 return err;
250}
251
252int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
253{
254 struct mlx5_priv *priv = &dev->priv;
255 int err;
256
257 dev->pdev = pdev;
258 pci_set_drvdata(dev->pdev, dev);
259 strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
260 priv->name[MLX5_MAX_NAME_LEN - 1] = 0;
261
262 mutex_init(&priv->pgdir_mutex);
263 INIT_LIST_HEAD(&priv->pgdir_list);
264 spin_lock_init(&priv->mkey_lock);
265
266 priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
267 if (!priv->dbg_root)
268 return -ENOMEM;
269
270 err = pci_enable_device(pdev);
271 if (err) {
272 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
273 goto err_dbg;
274 }
275
276 err = request_bar(pdev);
277 if (err) {
278 dev_err(&pdev->dev, "error requesting BARs, aborting.\n");
279 goto err_disable;
280 }
281
282 pci_set_master(pdev);
283
284 err = set_dma_caps(pdev);
285 if (err) {
286 dev_err(&pdev->dev, "Failed setting DMA capabilities mask, aborting\n");
287 goto err_clr_master;
288 }
289
290 dev->iseg_base = pci_resource_start(dev->pdev, 0);
291 dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
292 if (!dev->iseg) {
293 err = -ENOMEM;
294 dev_err(&pdev->dev, "Failed mapping initialization segment, aborting\n");
295 goto err_clr_master;
296 }
297 dev_info(&pdev->dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
298 fw_rev_min(dev), fw_rev_sub(dev));
299
300 err = mlx5_cmd_init(dev);
301 if (err) {
302 dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
303 goto err_unmap;
304 }
305
306 mlx5_pagealloc_init(dev);
307 err = set_hca_ctrl(dev);
308 if (err) {
309 dev_err(&pdev->dev, "set_hca_ctrl failed\n");
310 goto err_pagealloc_cleanup;
311 }
312
313 err = handle_hca_cap(dev);
314 if (err) {
315 dev_err(&pdev->dev, "handle_hca_cap failed\n");
316 goto err_pagealloc_cleanup;
317 }
318
319 err = mlx5_satisfy_startup_pages(dev);
320 if (err) {
321 dev_err(&pdev->dev, "failed to allocate startup pages\n");
322 goto err_pagealloc_cleanup;
323 }
324
325 err = mlx5_pagealloc_start(dev);
326 if (err) {
327 dev_err(&pdev->dev, "mlx5_pagealloc_start failed\n");
328 goto err_reclaim_pages;
329 }
330
331 err = mlx5_cmd_init_hca(dev);
332 if (err) {
333 dev_err(&pdev->dev, "init hca failed\n");
334 goto err_pagealloc_stop;
335 }
336
337 mlx5_start_health_poll(dev);
338
339 err = mlx5_cmd_query_hca_cap(dev, &dev->caps);
340 if (err) {
341 dev_err(&pdev->dev, "query hca failed\n");
342 goto err_stop_poll;
343 }
344
345 err = mlx5_cmd_query_adapter(dev);
346 if (err) {
347 dev_err(&pdev->dev, "query adapter failed\n");
348 goto err_stop_poll;
349 }
350
351 err = mlx5_enable_msix(dev);
352 if (err) {
353 dev_err(&pdev->dev, "enable msix failed\n");
354 goto err_stop_poll;
355 }
356
357 err = mlx5_eq_init(dev);
358 if (err) {
359 dev_err(&pdev->dev, "failed to initialize eq\n");
360 goto disable_msix;
361 }
362
363 err = mlx5_alloc_uuars(dev, &priv->uuari);
364 if (err) {
365 dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
366 goto err_eq_cleanup;
367 }
368
369 err = mlx5_start_eqs(dev);
370 if (err) {
371 dev_err(&pdev->dev, "Failed to start pages and async EQs\n");
372 goto err_free_uar;
373 }
374
375 MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
376
377 mlx5_init_cq_table(dev);
378 mlx5_init_qp_table(dev);
379 mlx5_init_srq_table(dev);
380
381 return 0;
382
383err_free_uar:
384 mlx5_free_uuars(dev, &priv->uuari);
385
386err_eq_cleanup:
387 mlx5_eq_cleanup(dev);
388
389disable_msix:
390 mlx5_disable_msix(dev);
391
392err_stop_poll:
393 mlx5_stop_health_poll(dev);
394 mlx5_cmd_teardown_hca(dev);
395
396err_pagealloc_stop:
397 mlx5_pagealloc_stop(dev);
398
399err_reclaim_pages:
400 mlx5_reclaim_startup_pages(dev);
401
402err_pagealloc_cleanup:
403 mlx5_pagealloc_cleanup(dev);
404 mlx5_cmd_cleanup(dev);
405
406err_unmap:
407 iounmap(dev->iseg);
408
409err_clr_master:
410 pci_clear_master(dev->pdev);
411 release_bar(dev->pdev);
412
413err_disable:
414 pci_disable_device(dev->pdev);
415
416err_dbg:
417 debugfs_remove(priv->dbg_root);
418 return err;
419}
420EXPORT_SYMBOL(mlx5_dev_init);
421
422void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
423{
424 struct mlx5_priv *priv = &dev->priv;
425
426 mlx5_cleanup_srq_table(dev);
427 mlx5_cleanup_qp_table(dev);
428 mlx5_cleanup_cq_table(dev);
429 mlx5_stop_eqs(dev);
430 mlx5_free_uuars(dev, &priv->uuari);
431 mlx5_eq_cleanup(dev);
432 mlx5_disable_msix(dev);
433 mlx5_stop_health_poll(dev);
434 mlx5_cmd_teardown_hca(dev);
435 mlx5_pagealloc_stop(dev);
436 mlx5_reclaim_startup_pages(dev);
437 mlx5_pagealloc_cleanup(dev);
438 mlx5_cmd_cleanup(dev);
439 iounmap(dev->iseg);
440 pci_clear_master(dev->pdev);
441 release_bar(dev->pdev);
442 pci_disable_device(dev->pdev);
443 debugfs_remove(priv->dbg_root);
444}
445EXPORT_SYMBOL(mlx5_dev_cleanup);
446
447static int __init init(void)
448{
449 int err;
450
451 mlx5_register_debugfs();
452 mlx5_core_wq = create_singlethread_workqueue("mlx5_core_wq");
453 if (!mlx5_core_wq) {
454 err = -ENOMEM;
455 goto err_debug;
456 }
457 mlx5_health_init();
458
459 return 0;
460
461 mlx5_health_cleanup();
462err_debug:
463 mlx5_unregister_debugfs();
464 return err;
465}
466
467static void __exit cleanup(void)
468{
469 mlx5_health_cleanup();
470 destroy_workqueue(mlx5_core_wq);
471 mlx5_unregister_debugfs();
472}
473
474module_init(init);
475module_exit(cleanup);
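
mlx5_enable_msix() above sizes its request as one completion vector per port per online CPU on top of the control vectors reserved for the command, async and pages EQs, capped by the number of EQs the device exposes. A back-of-the-envelope sketch of that sizing; the value of MLX5_EQ_VEC_COMP_BASE is an assumption here, as it is defined elsewhere in the driver headers.

#include <stdio.h>

/* assumed: vectors below this index are the pages, cmd and async EQs */
#define MLX5_EQ_VEC_COMP_BASE 3

int main(void)
{
	int num_ports = 2, online_cpus = 8, log_max_eq = 4;
	int num_eqs = 1 << log_max_eq;
	int nvec;

	nvec = num_ports * online_cpus + MLX5_EQ_VEC_COMP_BASE;   /* 19 */
	if (nvec > num_eqs)
		nvec = num_eqs;                                    /* capped to 16 */

	printf("request %d MSI-X vectors, %d of them for completions\n",
	       nvec, nvec - MLX5_EQ_VEC_COMP_BASE);
	return 0;
}
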
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
new file mode 100644
index 000000000000..44837640bd7c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
@@ -0,0 +1,106 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/mlx5/driver.h>
36#include <linux/mlx5/cmd.h>
37#include <rdma/ib_verbs.h>
38#include "mlx5_core.h"
39
40struct mlx5_attach_mcg_mbox_in {
41 struct mlx5_inbox_hdr hdr;
42 __be32 qpn;
43 __be32 rsvd;
44 u8 gid[16];
45};
46
47struct mlx5_attach_mcg_mbox_out {
48 struct mlx5_outbox_hdr hdr;
49 u8 rsvf[8];
50};
51
52struct mlx5_detach_mcg_mbox_in {
53 struct mlx5_inbox_hdr hdr;
54 __be32 qpn;
55 __be32 rsvd;
56 u8 gid[16];
57};
58
59struct mlx5_detach_mcg_mbox_out {
60 struct mlx5_outbox_hdr hdr;
61 u8 rsvf[8];
62};
63
64int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
65{
66 struct mlx5_attach_mcg_mbox_in in;
67 struct mlx5_attach_mcg_mbox_out out;
68 int err;
69
70 memset(&in, 0, sizeof(in));
71 memset(&out, 0, sizeof(out));
72 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ATTACH_TO_MCG);
73 memcpy(in.gid, mgid, sizeof(*mgid));
74 in.qpn = cpu_to_be32(qpn);
75 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
76 if (err)
77 return err;
78
79 if (out.hdr.status)
80 err = mlx5_cmd_status_to_err(&out.hdr);
81
82 return err;
83}
84EXPORT_SYMBOL(mlx5_core_attach_mcg);
85
86int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
87{
88 struct mlx5_detach_mcg_mbox_in in;
89 struct mlx5_detach_mcg_mbox_out out;
90 int err;
91
92 memset(&in, 0, sizeof(in));
93 memset(&out, 0, sizeof(out));
94 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETACH_FROM_MCG);
95 memcpy(in.gid, mgid, sizeof(*mgid));
96 in.qpn = cpu_to_be32(qpn);
97 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
98 if (err)
99 return err;
100
101 if (out.hdr.status)
102 err = mlx5_cmd_status_to_err(&out.hdr);
103
104 return err;
105}
106EXPORT_SYMBOL(mlx5_core_detach_mcg);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
new file mode 100644
index 000000000000..68b74e1ae1b0
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -0,0 +1,73 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef __MLX5_CORE_H__
34#define __MLX5_CORE_H__
35
36#include <linux/types.h>
37#include <linux/kernel.h>
38#include <linux/sched.h>
39
40extern int mlx5_core_debug_mask;
41
42#define mlx5_core_dbg(dev, format, arg...) \
43pr_debug("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \
44 current->pid, ##arg)
45
46#define mlx5_core_dbg_mask(dev, mask, format, arg...) \
47do { \
48 if ((mask) & mlx5_core_debug_mask) \
49 pr_debug("%s:%s:%d:(pid %d): " format, (dev)->priv.name, \
50 __func__, __LINE__, current->pid, ##arg); \
51} while (0)
52
53#define mlx5_core_err(dev, format, arg...) \
54pr_err("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \
55 current->pid, ##arg)
56
57#define mlx5_core_warn(dev, format, arg...) \
58pr_warn("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \
59 current->pid, ##arg)
60
61enum {
62 MLX5_CMD_DATA, /* print command payload only */
63 MLX5_CMD_TIME, /* print command execution time */
64};
65
66
67int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev,
68 struct mlx5_caps *caps);
69int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev);
70int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
71int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
72
73#endif /* __MLX5_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
new file mode 100644
index 000000000000..5b44e2e46daf
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -0,0 +1,136 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/mlx5/driver.h>
36#include <linux/mlx5/cmd.h>
37#include "mlx5_core.h"
38
39int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
40 struct mlx5_create_mkey_mbox_in *in, int inlen)
41{
42 struct mlx5_create_mkey_mbox_out out;
43 int err;
44 u8 key;
45
46 memset(&out, 0, sizeof(out));
47 spin_lock(&dev->priv.mkey_lock);
48 key = dev->priv.mkey_key++;
49 spin_unlock(&dev->priv.mkey_lock);
50 in->seg.qpn_mkey7_0 |= cpu_to_be32(key);
51 in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_MKEY);
52 err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
53 if (err) {
54		mlx5_core_dbg(dev, "cmd exec failed %d\n", err);
55 return err;
56 }
57
58 if (out.hdr.status) {
59 mlx5_core_dbg(dev, "status %d\n", out.hdr.status);
60 return mlx5_cmd_status_to_err(&out.hdr);
61 }
62
63 mr->key = mlx5_idx_to_mkey(be32_to_cpu(out.mkey) & 0xffffff) | key;
64 mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n", be32_to_cpu(out.mkey), key, mr->key);
65
66 return err;
67}
68EXPORT_SYMBOL(mlx5_core_create_mkey);
69
70int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr)
71{
72 struct mlx5_destroy_mkey_mbox_in in;
73 struct mlx5_destroy_mkey_mbox_out out;
74 int err;
75
76 memset(&in, 0, sizeof(in));
77 memset(&out, 0, sizeof(out));
78
79 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_MKEY);
80 in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key));
81 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
82 if (err)
83 return err;
84
85 if (out.hdr.status)
86 return mlx5_cmd_status_to_err(&out.hdr);
87
88 return err;
89}
90EXPORT_SYMBOL(mlx5_core_destroy_mkey);
91
92int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
93 struct mlx5_query_mkey_mbox_out *out, int outlen)
94{
95 struct mlx5_destroy_mkey_mbox_in in;
96 int err;
97
98 memset(&in, 0, sizeof(in));
99 memset(out, 0, outlen);
100
101 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_MKEY);
102 in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key));
103 err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
104 if (err)
105 return err;
106
107 if (out->hdr.status)
108 return mlx5_cmd_status_to_err(&out->hdr);
109
110 return err;
111}
112EXPORT_SYMBOL(mlx5_core_query_mkey);
113
114int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
115 u32 *mkey)
116{
117 struct mlx5_query_special_ctxs_mbox_in in;
118 struct mlx5_query_special_ctxs_mbox_out out;
119 int err;
120
121 memset(&in, 0, sizeof(in));
122 memset(&out, 0, sizeof(out));
123
124 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
125 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
126 if (err)
127 return err;
128
129 if (out.hdr.status)
130 return mlx5_cmd_status_to_err(&out.hdr);
131
132 *mkey = be32_to_cpu(out.dump_fill_mkey);
133
134 return err;
135}
136EXPORT_SYMBOL(mlx5_core_dump_fill_mkey);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
new file mode 100644
index 000000000000..f0bf46339b28
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -0,0 +1,435 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <asm-generic/kmap_types.h>
34#include <linux/kernel.h>
35#include <linux/module.h>
36#include <linux/mlx5/driver.h>
37#include <linux/mlx5/cmd.h>
38#include "mlx5_core.h"
39
40enum {
41 MLX5_PAGES_CANT_GIVE = 0,
42 MLX5_PAGES_GIVE = 1,
43 MLX5_PAGES_TAKE = 2
44};
45
46struct mlx5_pages_req {
47 struct mlx5_core_dev *dev;
48 u32 func_id;
49 s16 npages;
50 struct work_struct work;
51};
52
53struct fw_page {
54 struct rb_node rb_node;
55 u64 addr;
56 struct page *page;
57 u16 func_id;
58};
59
60struct mlx5_query_pages_inbox {
61 struct mlx5_inbox_hdr hdr;
62 u8 rsvd[8];
63};
64
65struct mlx5_query_pages_outbox {
66 struct mlx5_outbox_hdr hdr;
67 u8 reserved[2];
68 __be16 func_id;
69 __be16 init_pages;
70 __be16 num_pages;
71};
72
73struct mlx5_manage_pages_inbox {
74 struct mlx5_inbox_hdr hdr;
75 __be16 rsvd0;
76 __be16 func_id;
77 __be16 rsvd1;
78 __be16 num_entries;
79 u8 rsvd2[16];
80 __be64 pas[0];
81};
82
83struct mlx5_manage_pages_outbox {
84 struct mlx5_outbox_hdr hdr;
85 u8 rsvd0[2];
86 __be16 num_entries;
87 u8 rsvd1[20];
88 __be64 pas[0];
89};
90
91static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
92{
93 struct rb_root *root = &dev->priv.page_root;
94 struct rb_node **new = &root->rb_node;
95 struct rb_node *parent = NULL;
96 struct fw_page *nfp;
97 struct fw_page *tfp;
98
99 while (*new) {
100 parent = *new;
101 tfp = rb_entry(parent, struct fw_page, rb_node);
102 if (tfp->addr < addr)
103 new = &parent->rb_left;
104 else if (tfp->addr > addr)
105 new = &parent->rb_right;
106 else
107 return -EEXIST;
108 }
109
110 nfp = kmalloc(sizeof(*nfp), GFP_KERNEL);
111 if (!nfp)
112 return -ENOMEM;
113
114 nfp->addr = addr;
115 nfp->page = page;
116 nfp->func_id = func_id;
117
118 rb_link_node(&nfp->rb_node, parent, new);
119 rb_insert_color(&nfp->rb_node, root);
120
121 return 0;
122}
123
124static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr)
125{
126 struct rb_root *root = &dev->priv.page_root;
127 struct rb_node *tmp = root->rb_node;
128 struct page *result = NULL;
129 struct fw_page *tfp;
130
131 while (tmp) {
132 tfp = rb_entry(tmp, struct fw_page, rb_node);
133 if (tfp->addr < addr) {
134 tmp = tmp->rb_left;
135 } else if (tfp->addr > addr) {
136 tmp = tmp->rb_right;
137 } else {
138 rb_erase(&tfp->rb_node, root);
139 result = tfp->page;
140 kfree(tfp);
141 break;
142 }
143 }
144
145 return result;
146}
147
148static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
149 s16 *pages, s16 *init_pages)
150{
151 struct mlx5_query_pages_inbox in;
152 struct mlx5_query_pages_outbox out;
153 int err;
154
155 memset(&in, 0, sizeof(in));
156 memset(&out, 0, sizeof(out));
157 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES);
158 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
159 if (err)
160 return err;
161
162 if (out.hdr.status)
163 return mlx5_cmd_status_to_err(&out.hdr);
164
165 if (pages)
166 *pages = be16_to_cpu(out.num_pages);
167 if (init_pages)
168 *init_pages = be16_to_cpu(out.init_pages);
169 *func_id = be16_to_cpu(out.func_id);
170
171 return err;
172}
173
174static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
175 int notify_fail)
176{
177 struct mlx5_manage_pages_inbox *in;
178 struct mlx5_manage_pages_outbox out;
179 struct page *page;
180 int inlen;
181 u64 addr;
182 int err;
183 int i;
184
185 inlen = sizeof(*in) + npages * sizeof(in->pas[0]);
186 in = mlx5_vzalloc(inlen);
187 if (!in) {
188 mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
189 return -ENOMEM;
190 }
191 memset(&out, 0, sizeof(out));
192
193 for (i = 0; i < npages; i++) {
194 page = alloc_page(GFP_HIGHUSER);
195 if (!page) {
196 err = -ENOMEM;
197 mlx5_core_warn(dev, "failed to allocate page\n");
198 goto out_alloc;
199 }
200 addr = dma_map_page(&dev->pdev->dev, page, 0,
201 PAGE_SIZE, DMA_BIDIRECTIONAL);
202 if (dma_mapping_error(&dev->pdev->dev, addr)) {
203 mlx5_core_warn(dev, "failed dma mapping page\n");
204 __free_page(page);
205 err = -ENOMEM;
206 goto out_alloc;
207 }
208 err = insert_page(dev, addr, page, func_id);
209 if (err) {
210 mlx5_core_err(dev, "failed to track allocated page\n");
211 dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
212 __free_page(page);
213 err = -ENOMEM;
214 goto out_alloc;
215 }
216 in->pas[i] = cpu_to_be64(addr);
217 }
218
219 in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
220 in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
221 in->func_id = cpu_to_be16(func_id);
222 in->num_entries = cpu_to_be16(npages);
223 err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
224 mlx5_core_dbg(dev, "err %d\n", err);
225 if (err) {
226 mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", func_id, npages, err);
227 goto out_alloc;
228 }
229 dev->priv.fw_pages += npages;
230
231 if (out.hdr.status) {
232 err = mlx5_cmd_status_to_err(&out.hdr);
233 if (err) {
234 mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n", func_id, npages, out.hdr.status);
235 goto out_alloc;
236 }
237 }
238
239 mlx5_core_dbg(dev, "err %d\n", err);
240
241 goto out_free;
242
243out_alloc:
244 if (notify_fail) {
245 memset(in, 0, inlen);
246 memset(&out, 0, sizeof(out));
247 in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
248 in->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
249 if (mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out)))
250			mlx5_core_warn(dev, "page notify failed\n");
251 }
252 for (i--; i >= 0; i--) {
253 addr = be64_to_cpu(in->pas[i]);
254 page = remove_page(dev, addr);
255 if (!page) {
256 mlx5_core_err(dev, "BUG: can't remove page at addr 0x%llx\n",
257 addr);
258 continue;
259 }
260 dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
261 __free_page(page);
262 }
263
264out_free:
265 mlx5_vfree(in);
266 return err;
267}
268
269static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
270 int *nclaimed)
271{
272 struct mlx5_manage_pages_inbox in;
273 struct mlx5_manage_pages_outbox *out;
274 struct page *page;
275 int num_claimed;
276 int outlen;
277 u64 addr;
278 int err;
279 int i;
280
281 memset(&in, 0, sizeof(in));
282 outlen = sizeof(*out) + npages * sizeof(out->pas[0]);
283 out = mlx5_vzalloc(outlen);
284 if (!out)
285 return -ENOMEM;
286
287 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
288 in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
289 in.func_id = cpu_to_be16(func_id);
290 in.num_entries = cpu_to_be16(npages);
291 mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
292 err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
293 if (err) {
294		mlx5_core_err(dev, "failed reclaiming pages\n");
295 goto out_free;
296 }
297 dev->priv.fw_pages -= npages;
298
299 if (out->hdr.status) {
300 err = mlx5_cmd_status_to_err(&out->hdr);
301 goto out_free;
302 }
303
304 num_claimed = be16_to_cpu(out->num_entries);
305 if (nclaimed)
306 *nclaimed = num_claimed;
307
308 for (i = 0; i < num_claimed; i++) {
309 addr = be64_to_cpu(out->pas[i]);
310 page = remove_page(dev, addr);
311 if (!page) {
312 mlx5_core_warn(dev, "FW reported unknown DMA address 0x%llx\n", addr);
313 } else {
314 dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
315 __free_page(page);
316 }
317 }
318
319out_free:
320 mlx5_vfree(out);
321 return err;
322}
323
324static void pages_work_handler(struct work_struct *work)
325{
326 struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
327 struct mlx5_core_dev *dev = req->dev;
328 int err = 0;
329
330 if (req->npages < 0)
331 err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL);
332 else if (req->npages > 0)
333 err = give_pages(dev, req->func_id, req->npages, 1);
334
335 if (err)
336 mlx5_core_warn(dev, "%s fail %d\n", req->npages < 0 ?
337 "reclaim" : "give", err);
338
339 kfree(req);
340}
341
342void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
343 s16 npages)
344{
345 struct mlx5_pages_req *req;
346
347 req = kzalloc(sizeof(*req), GFP_ATOMIC);
348 if (!req) {
349 mlx5_core_warn(dev, "failed to allocate pages request\n");
350 return;
351 }
352
353 req->dev = dev;
354 req->func_id = func_id;
355 req->npages = npages;
356 INIT_WORK(&req->work, pages_work_handler);
357 queue_work(dev->priv.pg_wq, &req->work);
358}
359
360int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev)
361{
362 s16 uninitialized_var(init_pages);
363 u16 uninitialized_var(func_id);
364 int err;
365
366 err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages);
367 if (err)
368 return err;
369
370 mlx5_core_dbg(dev, "requested %d init pages for func_id 0x%x\n", init_pages, func_id);
371
372 return give_pages(dev, func_id, init_pages, 0);
373}
374
375static int optimal_reclaimed_pages(void)
376{
377 struct mlx5_cmd_prot_block *block;
378 struct mlx5_cmd_layout *lay;
379 int ret;
380
381 ret = (sizeof(lay->in) + sizeof(block->data) -
382 sizeof(struct mlx5_manage_pages_outbox)) / 8;
383
384 return ret;
385}
386
387int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
388{
389 unsigned long end = jiffies + msecs_to_jiffies(5000);
390 struct fw_page *fwp;
391 struct rb_node *p;
392 int err;
393
394 do {
395 p = rb_first(&dev->priv.page_root);
396 if (p) {
397 fwp = rb_entry(p, struct fw_page, rb_node);
398 err = reclaim_pages(dev, fwp->func_id, optimal_reclaimed_pages(), NULL);
399 if (err) {
400 mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err);
401 return err;
402 }
403 }
404 if (time_after(jiffies, end)) {
405 mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
406 break;
407 }
408 } while (p);
409
410 return 0;
411}
412
413void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
414{
415 dev->priv.page_root = RB_ROOT;
416}
417
418void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
419{
420 /* nothing */
421}
422
423int mlx5_pagealloc_start(struct mlx5_core_dev *dev)
424{
425 dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
426 if (!dev->priv.pg_wq)
427 return -ENOMEM;
428
429 return 0;
430}
431
432void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
433{
434 destroy_workqueue(dev->priv.pg_wq);
435}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pd.c b/drivers/net/ethernet/mellanox/mlx5/core/pd.c
new file mode 100644
index 000000000000..790da5c4ca4f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pd.c
@@ -0,0 +1,101 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/mlx5/driver.h>
36#include <linux/mlx5/cmd.h>
37#include "mlx5_core.h"
38
39struct mlx5_alloc_pd_mbox_in {
40 struct mlx5_inbox_hdr hdr;
41 u8 rsvd[8];
42};
43
44struct mlx5_alloc_pd_mbox_out {
45 struct mlx5_outbox_hdr hdr;
46 __be32 pdn;
47 u8 rsvd[4];
48};
49
50struct mlx5_dealloc_pd_mbox_in {
51 struct mlx5_inbox_hdr hdr;
52 __be32 pdn;
53 u8 rsvd[4];
54};
55
56struct mlx5_dealloc_pd_mbox_out {
57 struct mlx5_outbox_hdr hdr;
58 u8 rsvd[8];
59};
60
61int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn)
62{
63 struct mlx5_alloc_pd_mbox_in in;
64 struct mlx5_alloc_pd_mbox_out out;
65 int err;
66
67 memset(&in, 0, sizeof(in));
68 memset(&out, 0, sizeof(out));
69 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_PD);
70 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
71 if (err)
72 return err;
73
74 if (out.hdr.status)
75 return mlx5_cmd_status_to_err(&out.hdr);
76
77 *pdn = be32_to_cpu(out.pdn) & 0xffffff;
78 return err;
79}
80EXPORT_SYMBOL(mlx5_core_alloc_pd);
81
82int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn)
83{
84 struct mlx5_dealloc_pd_mbox_in in;
85 struct mlx5_dealloc_pd_mbox_out out;
86 int err;
87
88 memset(&in, 0, sizeof(in));
89 memset(&out, 0, sizeof(out));
90 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_PD);
91 in.pdn = cpu_to_be32(pdn);
92 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
93 if (err)
94 return err;
95
96 if (out.hdr.status)
97 return mlx5_cmd_status_to_err(&out.hdr);
98
99 return err;
100}
101EXPORT_SYMBOL(mlx5_core_dealloc_pd);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
new file mode 100644
index 000000000000..f6afe7b5a675
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -0,0 +1,104 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/module.h>
34#include <linux/mlx5/driver.h>
35#include <linux/mlx5/cmd.h>
36#include "mlx5_core.h"
37
38int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
39 int size_in, void *data_out, int size_out,
40 u16 reg_num, int arg, int write)
41{
42 struct mlx5_access_reg_mbox_in *in = NULL;
43 struct mlx5_access_reg_mbox_out *out = NULL;
44 int err = -ENOMEM;
45
46 in = mlx5_vzalloc(sizeof(*in) + size_in);
47 if (!in)
48 return -ENOMEM;
49
50 out = mlx5_vzalloc(sizeof(*out) + size_out);
51 if (!out)
52 goto ex1;
53
54 memcpy(in->data, data_in, size_in);
55 in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ACCESS_REG);
56 in->hdr.opmod = cpu_to_be16(!write);
57 in->arg = cpu_to_be32(arg);
58 in->register_id = cpu_to_be16(reg_num);
59 err = mlx5_cmd_exec(dev, in, sizeof(*in) + size_in, out,
60			    sizeof(*out) + size_out);
61 if (err)
62 goto ex2;
63
64 if (out->hdr.status)
65 err = mlx5_cmd_status_to_err(&out->hdr);
66
67 if (!err)
68 memcpy(data_out, out->data, size_out);
69
70ex2:
71 mlx5_vfree(out);
72ex1:
73 mlx5_vfree(in);
74 return err;
75}
76EXPORT_SYMBOL_GPL(mlx5_core_access_reg);
77
78
79struct mlx5_reg_pcap {
80 u8 rsvd0;
81 u8 port_num;
82 u8 rsvd1[2];
83 __be32 caps_127_96;
84 __be32 caps_95_64;
85 __be32 caps_63_32;
86 __be32 caps_31_0;
87};
88
89int mlx5_set_port_caps(struct mlx5_core_dev *dev, int port_num, u32 caps)
90{
91 struct mlx5_reg_pcap in;
92 struct mlx5_reg_pcap out;
93 int err;
94
95 memset(&in, 0, sizeof(in));
96 in.caps_127_96 = cpu_to_be32(caps);
97 in.port_num = port_num;
98
99 err = mlx5_core_access_reg(dev, &in, sizeof(in), &out,
100 sizeof(out), MLX5_REG_PCAP, 0, 1);
101
102 return err;
103}
104EXPORT_SYMBOL_GPL(mlx5_set_port_caps);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
new file mode 100644
index 000000000000..54faf8bfcaf4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -0,0 +1,301 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33
34#include <linux/gfp.h>
35#include <linux/export.h>
36#include <linux/mlx5/cmd.h>
37#include <linux/mlx5/qp.h>
38#include <linux/mlx5/driver.h>
39
40#include "mlx5_core.h"
41
42void mlx5_qp_event(struct mlx5_core_dev *dev, u32 qpn, int event_type)
43{
44 struct mlx5_qp_table *table = &dev->priv.qp_table;
45 struct mlx5_core_qp *qp;
46
47 spin_lock(&table->lock);
48
49 qp = radix_tree_lookup(&table->tree, qpn);
50 if (qp)
51 atomic_inc(&qp->refcount);
52
53 spin_unlock(&table->lock);
54
55 if (!qp) {
56 mlx5_core_warn(dev, "Async event for bogus QP 0x%x\n", qpn);
57 return;
58 }
59
60 qp->event(qp, event_type);
61
62 if (atomic_dec_and_test(&qp->refcount))
63 complete(&qp->free);
64}
65
66int mlx5_core_create_qp(struct mlx5_core_dev *dev,
67 struct mlx5_core_qp *qp,
68 struct mlx5_create_qp_mbox_in *in,
69 int inlen)
70{
71 struct mlx5_qp_table *table = &dev->priv.qp_table;
72 struct mlx5_create_qp_mbox_out out;
73 struct mlx5_destroy_qp_mbox_in din;
74 struct mlx5_destroy_qp_mbox_out dout;
75 int err;
76
77 memset(&dout, 0, sizeof(dout));
78 in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);
79
80 err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
81 if (err) {
82		mlx5_core_warn(dev, "ret %d\n", err);
83 return err;
84 }
85
86 if (out.hdr.status) {
87 pr_warn("current num of QPs 0x%x\n", atomic_read(&dev->num_qps));
88 return mlx5_cmd_status_to_err(&out.hdr);
89 }
90
91 qp->qpn = be32_to_cpu(out.qpn) & 0xffffff;
92 mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
93
94 spin_lock_irq(&table->lock);
95 err = radix_tree_insert(&table->tree, qp->qpn, qp);
96 spin_unlock_irq(&table->lock);
97 if (err) {
98		mlx5_core_warn(dev, "err %d\n", err);
99 goto err_cmd;
100 }
101
102 err = mlx5_debug_qp_add(dev, qp);
103 if (err)
104 mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n",
105 qp->qpn);
106
107 qp->pid = current->pid;
108 atomic_set(&qp->refcount, 1);
109 atomic_inc(&dev->num_qps);
110 init_completion(&qp->free);
111
112 return 0;
113
114err_cmd:
115 memset(&din, 0, sizeof(din));
116 memset(&dout, 0, sizeof(dout));
117 din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
118 din.qpn = cpu_to_be32(qp->qpn);
119	mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));
120
121 return err;
122}
123EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
124
125int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
126 struct mlx5_core_qp *qp)
127{
128 struct mlx5_destroy_qp_mbox_in in;
129 struct mlx5_destroy_qp_mbox_out out;
130 struct mlx5_qp_table *table = &dev->priv.qp_table;
131 unsigned long flags;
132 int err;
133
134 mlx5_debug_qp_remove(dev, qp);
135
136 spin_lock_irqsave(&table->lock, flags);
137 radix_tree_delete(&table->tree, qp->qpn);
138 spin_unlock_irqrestore(&table->lock, flags);
139
140 if (atomic_dec_and_test(&qp->refcount))
141 complete(&qp->free);
142 wait_for_completion(&qp->free);
143
144 memset(&in, 0, sizeof(in));
145 memset(&out, 0, sizeof(out));
146 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
147 in.qpn = cpu_to_be32(qp->qpn);
148 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
149 if (err)
150 return err;
151
152 if (out.hdr.status)
153 return mlx5_cmd_status_to_err(&out.hdr);
154
155 atomic_dec(&dev->num_qps);
156 return 0;
157}
158EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);
159
160int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state,
161 enum mlx5_qp_state new_state,
162 struct mlx5_modify_qp_mbox_in *in, int sqd_event,
163 struct mlx5_core_qp *qp)
164{
165 static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = {
166 [MLX5_QP_STATE_RST] = {
167 [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
168 [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
169 [MLX5_QP_STATE_INIT] = MLX5_CMD_OP_RST2INIT_QP,
170 },
171 [MLX5_QP_STATE_INIT] = {
172 [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
173 [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
174 [MLX5_QP_STATE_INIT] = MLX5_CMD_OP_INIT2INIT_QP,
175 [MLX5_QP_STATE_RTR] = MLX5_CMD_OP_INIT2RTR_QP,
176 },
177 [MLX5_QP_STATE_RTR] = {
178 [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
179 [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
180 [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTR2RTS_QP,
181 },
182 [MLX5_QP_STATE_RTS] = {
183 [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
184 [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
185 [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTS2RTS_QP,
186 [MLX5_QP_STATE_SQD] = MLX5_CMD_OP_RTS2SQD_QP,
187 },
188 [MLX5_QP_STATE_SQD] = {
189 [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
190 [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
191 [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_SQD2RTS_QP,
192 [MLX5_QP_STATE_SQD] = MLX5_CMD_OP_SQD2SQD_QP,
193 },
194 [MLX5_QP_STATE_SQER] = {
195 [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
196 [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
197 [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_SQERR2RTS_QP,
198 },
199 [MLX5_QP_STATE_ERR] = {
200 [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
201 [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
202 }
203 };
204
205 struct mlx5_modify_qp_mbox_out out;
206 int err = 0;
207 u16 op;
208
209 if (cur_state >= MLX5_QP_NUM_STATE || new_state >= MLX5_QP_NUM_STATE ||
210 !optab[cur_state][new_state])
211 return -EINVAL;
212
213 memset(&out, 0, sizeof(out));
214 op = optab[cur_state][new_state];
215 in->hdr.opcode = cpu_to_be16(op);
216 in->qpn = cpu_to_be32(qp->qpn);
217 err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
218 if (err)
219 return err;
220
221 return mlx5_cmd_status_to_err(&out.hdr);
222}
223EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);
224
225void mlx5_init_qp_table(struct mlx5_core_dev *dev)
226{
227 struct mlx5_qp_table *table = &dev->priv.qp_table;
228
229 spin_lock_init(&table->lock);
230 INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
231 mlx5_qp_debugfs_init(dev);
232}
233
234void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
235{
236 mlx5_qp_debugfs_cleanup(dev);
237}
238
239int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
240 struct mlx5_query_qp_mbox_out *out, int outlen)
241{
242 struct mlx5_query_qp_mbox_in in;
243 int err;
244
245 memset(&in, 0, sizeof(in));
246 memset(out, 0, outlen);
247 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_QP);
248 in.qpn = cpu_to_be32(qp->qpn);
249 err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
250 if (err)
251 return err;
252
253 if (out->hdr.status)
254 return mlx5_cmd_status_to_err(&out->hdr);
255
256 return err;
257}
258EXPORT_SYMBOL_GPL(mlx5_core_qp_query);
259
260int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
261{
262 struct mlx5_alloc_xrcd_mbox_in in;
263 struct mlx5_alloc_xrcd_mbox_out out;
264 int err;
265
266 memset(&in, 0, sizeof(in));
267 memset(&out, 0, sizeof(out));
268 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_XRCD);
269 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
270 if (err)
271 return err;
272
273 if (out.hdr.status)
274 err = mlx5_cmd_status_to_err(&out.hdr);
275 else
276 *xrcdn = be32_to_cpu(out.xrcdn);
277
278 return err;
279}
280EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);
281
282int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
283{
284 struct mlx5_dealloc_xrcd_mbox_in in;
285 struct mlx5_dealloc_xrcd_mbox_out out;
286 int err;
287
288 memset(&in, 0, sizeof(in));
289 memset(&out, 0, sizeof(out));
290 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_XRCD);
291 in.xrcdn = cpu_to_be32(xrcdn);
292 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
293 if (err)
294 return err;
295
296 if (out.hdr.status)
297 err = mlx5_cmd_status_to_err(&out.hdr);
298
299 return err;
300}
301EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
new file mode 100644
index 000000000000..38bce93f8314
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
@@ -0,0 +1,223 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/mlx5/driver.h>
36#include <linux/mlx5/cmd.h>
37#include <linux/mlx5/srq.h>
38#include <rdma/ib_verbs.h>
39#include "mlx5_core.h"
40
41void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
42{
43 struct mlx5_srq_table *table = &dev->priv.srq_table;
44 struct mlx5_core_srq *srq;
45
46 spin_lock(&table->lock);
47
48 srq = radix_tree_lookup(&table->tree, srqn);
49 if (srq)
50 atomic_inc(&srq->refcount);
51
52 spin_unlock(&table->lock);
53
54 if (!srq) {
55 mlx5_core_warn(dev, "Async event for bogus SRQ 0x%08x\n", srqn);
56 return;
57 }
58
59 srq->event(srq, event_type);
60
61 if (atomic_dec_and_test(&srq->refcount))
62 complete(&srq->free);
63}
64
65struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
66{
67 struct mlx5_srq_table *table = &dev->priv.srq_table;
68 struct mlx5_core_srq *srq;
69
70 spin_lock(&table->lock);
71
72 srq = radix_tree_lookup(&table->tree, srqn);
73 if (srq)
74 atomic_inc(&srq->refcount);
75
76 spin_unlock(&table->lock);
77
78 return srq;
79}
80EXPORT_SYMBOL(mlx5_core_get_srq);
81
82int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
83 struct mlx5_create_srq_mbox_in *in, int inlen)
84{
85 struct mlx5_create_srq_mbox_out out;
86 struct mlx5_srq_table *table = &dev->priv.srq_table;
87 struct mlx5_destroy_srq_mbox_in din;
88 struct mlx5_destroy_srq_mbox_out dout;
89 int err;
90
91 memset(&out, 0, sizeof(out));
92 in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_SRQ);
93 err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
94 if (err)
95 return err;
96
97 if (out.hdr.status)
98 return mlx5_cmd_status_to_err(&out.hdr);
99
100 srq->srqn = be32_to_cpu(out.srqn) & 0xffffff;
101
102 atomic_set(&srq->refcount, 1);
103 init_completion(&srq->free);
104
105 spin_lock_irq(&table->lock);
106 err = radix_tree_insert(&table->tree, srq->srqn, srq);
107 spin_unlock_irq(&table->lock);
108 if (err) {
109 mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn);
110 goto err_cmd;
111 }
112
113 return 0;
114
115err_cmd:
116 memset(&din, 0, sizeof(din));
117 memset(&dout, 0, sizeof(dout));
118 din.srqn = cpu_to_be32(srq->srqn);
119 din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
120 mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));
121 return err;
122}
123EXPORT_SYMBOL(mlx5_core_create_srq);
124
125int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
126{
127 struct mlx5_destroy_srq_mbox_in in;
128 struct mlx5_destroy_srq_mbox_out out;
129 struct mlx5_srq_table *table = &dev->priv.srq_table;
130 struct mlx5_core_srq *tmp;
131 int err;
132
133 spin_lock_irq(&table->lock);
134 tmp = radix_tree_delete(&table->tree, srq->srqn);
135 spin_unlock_irq(&table->lock);
136 if (!tmp) {
137 mlx5_core_warn(dev, "srq 0x%x not found in tree\n", srq->srqn);
138 return -EINVAL;
139 }
140 if (tmp != srq) {
141 mlx5_core_warn(dev, "corruption on srqn 0x%x\n", srq->srqn);
142 return -EINVAL;
143 }
144
145 memset(&in, 0, sizeof(in));
146 memset(&out, 0, sizeof(out));
147 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
148 in.srqn = cpu_to_be32(srq->srqn);
149 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
150 if (err)
151 return err;
152
153 if (out.hdr.status)
154 return mlx5_cmd_status_to_err(&out.hdr);
155
156 if (atomic_dec_and_test(&srq->refcount))
157 complete(&srq->free);
158 wait_for_completion(&srq->free);
159
160 return 0;
161}
162EXPORT_SYMBOL(mlx5_core_destroy_srq);
163
164int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
165 struct mlx5_query_srq_mbox_out *out)
166{
167 struct mlx5_query_srq_mbox_in in;
168 int err;
169
170 memset(&in, 0, sizeof(in));
171 memset(out, 0, sizeof(*out));
172
173 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ);
174 in.srqn = cpu_to_be32(srq->srqn);
175 err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
176 if (err)
177 return err;
178
179 if (out->hdr.status)
180 return mlx5_cmd_status_to_err(&out->hdr);
181
182 return err;
183}
184EXPORT_SYMBOL(mlx5_core_query_srq);
185
186int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
187 u16 lwm, int is_srq)
188{
189 struct mlx5_arm_srq_mbox_in in;
190 struct mlx5_arm_srq_mbox_out out;
191 int err;
192
193 memset(&in, 0, sizeof(in));
194 memset(&out, 0, sizeof(out));
195
196 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ);
197 in.hdr.opmod = cpu_to_be16(!!is_srq);
198 in.srqn = cpu_to_be32(srq->srqn);
199 in.lwm = cpu_to_be16(lwm);
200
201 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
202 if (err)
203 return err;
204
205 if (out.hdr.status)
206 return mlx5_cmd_status_to_err(&out.hdr);
207
208 return err;
209}
210EXPORT_SYMBOL(mlx5_core_arm_srq);
211
212void mlx5_init_srq_table(struct mlx5_core_dev *dev)
213{
214 struct mlx5_srq_table *table = &dev->priv.srq_table;
215
216 spin_lock_init(&table->lock);
217 INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
218}
219
220void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev)
221{
222 /* nothing */
223}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
new file mode 100644
index 000000000000..71d4a3937200
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -0,0 +1,223 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/mlx5/driver.h>
36#include <linux/mlx5/cmd.h>
37#include "mlx5_core.h"
38
39enum {
40 NUM_DRIVER_UARS = 4,
41 NUM_LOW_LAT_UUARS = 4,
42};
43
44
45struct mlx5_alloc_uar_mbox_in {
46 struct mlx5_inbox_hdr hdr;
47 u8 rsvd[8];
48};
49
50struct mlx5_alloc_uar_mbox_out {
51 struct mlx5_outbox_hdr hdr;
52 __be32 uarn;
53 u8 rsvd[4];
54};
55
56struct mlx5_free_uar_mbox_in {
57 struct mlx5_inbox_hdr hdr;
58 __be32 uarn;
59 u8 rsvd[4];
60};
61
62struct mlx5_free_uar_mbox_out {
63 struct mlx5_outbox_hdr hdr;
64 u8 rsvd[8];
65};
66
67int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
68{
69 struct mlx5_alloc_uar_mbox_in in;
70 struct mlx5_alloc_uar_mbox_out out;
71 int err;
72
73 memset(&in, 0, sizeof(in));
74 memset(&out, 0, sizeof(out));
75 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_UAR);
76 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
77 if (err)
78 goto ex;
79
80 if (out.hdr.status) {
81 err = mlx5_cmd_status_to_err(&out.hdr);
82 goto ex;
83 }
84
85 *uarn = be32_to_cpu(out.uarn) & 0xffffff;
86
87ex:
88 return err;
89}
90EXPORT_SYMBOL(mlx5_cmd_alloc_uar);
91
92int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
93{
94 struct mlx5_free_uar_mbox_in in;
95 struct mlx5_free_uar_mbox_out out;
96 int err;
97
98 memset(&in, 0, sizeof(in));
99 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_UAR);
100 in.uarn = cpu_to_be32(uarn);
101 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
102 if (err)
103 goto ex;
104
105 if (out.hdr.status)
106 err = mlx5_cmd_status_to_err(&out.hdr);
107
108ex:
109 return err;
110}
111EXPORT_SYMBOL(mlx5_cmd_free_uar);
112
113static int need_uuar_lock(int uuarn)
114{
115 int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE;
116
117 if (uuarn == 0 || tot_uuars - NUM_LOW_LAT_UUARS)
118 return 0;
119
120 return 1;
121}
122
123int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
124{
125 int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE;
126 struct mlx5_bf *bf;
127 phys_addr_t addr;
128 int err;
129 int i;
130
131 uuari->num_uars = NUM_DRIVER_UARS;
132 uuari->num_low_latency_uuars = NUM_LOW_LAT_UUARS;
133
134 mutex_init(&uuari->lock);
135 uuari->uars = kcalloc(uuari->num_uars, sizeof(*uuari->uars), GFP_KERNEL);
136 if (!uuari->uars)
137 return -ENOMEM;
138
139 uuari->bfs = kcalloc(tot_uuars, sizeof(*uuari->bfs), GFP_KERNEL);
140 if (!uuari->bfs) {
141 err = -ENOMEM;
142 goto out_uars;
143 }
144
145 uuari->bitmap = kcalloc(BITS_TO_LONGS(tot_uuars), sizeof(*uuari->bitmap),
146 GFP_KERNEL);
147 if (!uuari->bitmap) {
148 err = -ENOMEM;
149 goto out_bfs;
150 }
151
152 uuari->count = kcalloc(tot_uuars, sizeof(*uuari->count), GFP_KERNEL);
153 if (!uuari->count) {
154 err = -ENOMEM;
155 goto out_bitmap;
156 }
157
158 for (i = 0; i < uuari->num_uars; i++) {
159 err = mlx5_cmd_alloc_uar(dev, &uuari->uars[i].index);
160 if (err)
161 goto out_count;
162
163 addr = dev->iseg_base + ((phys_addr_t)(uuari->uars[i].index) << PAGE_SHIFT);
164 uuari->uars[i].map = ioremap(addr, PAGE_SIZE);
165 if (!uuari->uars[i].map) {
166 mlx5_cmd_free_uar(dev, uuari->uars[i].index);
167 goto out_count;
168 }
169 mlx5_core_dbg(dev, "allocated uar index 0x%x, mmaped at %p\n",
170 uuari->uars[i].index, uuari->uars[i].map);
171 }
172
173 for (i = 0; i < tot_uuars; i++) {
174 bf = &uuari->bfs[i];
175
176 bf->buf_size = dev->caps.bf_reg_size / 2;
177 bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE];
178 bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map;
179 bf->reg = NULL; /* Add WC support */
180 bf->offset = (i % MLX5_BF_REGS_PER_PAGE) * dev->caps.bf_reg_size +
181 MLX5_BF_OFFSET;
182 bf->need_lock = need_uuar_lock(i);
183 spin_lock_init(&bf->lock);
184 spin_lock_init(&bf->lock32);
185 bf->uuarn = i;
186 }
187
188 return 0;
189
190out_count:
191 for (i--; i >= 0; i--) {
192 iounmap(uuari->uars[i].map);
193 mlx5_cmd_free_uar(dev, uuari->uars[i].index);
194 }
195 kfree(uuari->count);
196
197out_bitmap:
198 kfree(uuari->bitmap);
199
200out_bfs:
201 kfree(uuari->bfs);
202
203out_uars:
204 kfree(uuari->uars);
205 return err;
206}
207
208int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
209{
210 int i = uuari->num_uars;
211
212 for (i--; i >= 0; i--) {
213 iounmap(uuari->uars[i].map);
214 mlx5_cmd_free_uar(dev, uuari->uars[i].index);
215 }
216
217 kfree(uuari->count);
218 kfree(uuari->bitmap);
219 kfree(uuari->bfs);
220 kfree(uuari->uars);
221
222 return 0;
223}
diff --git a/include/linux/mlx5/cmd.h b/include/linux/mlx5/cmd.h
new file mode 100644
index 000000000000..2826a4b6071e
--- /dev/null
+++ b/include/linux/mlx5/cmd.h
@@ -0,0 +1,51 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef MLX5_CMD_H
34#define MLX5_CMD_H
35
36#include <linux/types.h>
37
38struct manage_pages_layout {
39 u64 ptr;
40 u32 reserved;
41 u16 num_entries;
42 u16 func_id;
43};
44
45
46struct mlx5_cmd_alloc_uar_imm_out {
47 u32 rsvd[3];
48 u32 uarn;
49};
50
51#endif /* MLX5_CMD_H */
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
new file mode 100644
index 000000000000..3db67f73d96d
--- /dev/null
+++ b/include/linux/mlx5/cq.h
@@ -0,0 +1,165 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef MLX5_CORE_CQ_H
34#define MLX5_CORE_CQ_H
35
36#include <rdma/ib_verbs.h>
37#include <linux/mlx5/driver.h>
38
39
40struct mlx5_core_cq {
41 u32 cqn;
42 int cqe_sz;
43 __be32 *set_ci_db;
44 __be32 *arm_db;
45 atomic_t refcount;
46 struct completion free;
47 unsigned vector;
48 int irqn;
49 void (*comp) (struct mlx5_core_cq *);
50 void (*event) (struct mlx5_core_cq *, enum mlx5_event);
51 struct mlx5_uar *uar;
52 u32 cons_index;
53 unsigned arm_sn;
54 struct mlx5_rsc_debug *dbg;
55 int pid;
56};
57
58
59enum {
60 MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR = 0x01,
61 MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR = 0x02,
62 MLX5_CQE_SYNDROME_LOCAL_PROT_ERR = 0x04,
63 MLX5_CQE_SYNDROME_WR_FLUSH_ERR = 0x05,
64 MLX5_CQE_SYNDROME_MW_BIND_ERR = 0x06,
65 MLX5_CQE_SYNDROME_BAD_RESP_ERR = 0x10,
66 MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR = 0x11,
67 MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR = 0x12,
68 MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR = 0x13,
69 MLX5_CQE_SYNDROME_REMOTE_OP_ERR = 0x14,
70 MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR = 0x15,
71 MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR = 0x16,
72 MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR = 0x22,
73};
74
75enum {
76 MLX5_CQE_OWNER_MASK = 1,
77 MLX5_CQE_REQ = 0,
78 MLX5_CQE_RESP_WR_IMM = 1,
79 MLX5_CQE_RESP_SEND = 2,
80 MLX5_CQE_RESP_SEND_IMM = 3,
81 MLX5_CQE_RESP_SEND_INV = 4,
82 MLX5_CQE_RESIZE_CQ = 0xff, /* TBD */
83 MLX5_CQE_REQ_ERR = 13,
84 MLX5_CQE_RESP_ERR = 14,
85};
86
87enum {
88 MLX5_CQ_MODIFY_RESEIZE = 0,
89 MLX5_CQ_MODIFY_MODER = 1,
90 MLX5_CQ_MODIFY_MAPPING = 2,
91};
92
93struct mlx5_cq_modify_params {
94 int type;
95 union {
96 struct {
97 u32 page_offset;
98 u8 log_cq_size;
99 } resize;
100
101 struct {
102 } moder;
103
104 struct {
105 } mapping;
106 } params;
107};
108
109enum {
110 CQE_SIZE_64 = 0,
111 CQE_SIZE_128 = 1,
112};
113
114static inline int cqe_sz_to_mlx_sz(u8 size)
115{
116 return size == 64 ? CQE_SIZE_64 : CQE_SIZE_128;
117}
118
119static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq)
120{
121 *cq->set_ci_db = cpu_to_be32(cq->cons_index & 0xffffff);
122}
123
124enum {
125 MLX5_CQ_DB_REQ_NOT_SOL = 1 << 24,
126 MLX5_CQ_DB_REQ_NOT = 0 << 24
127};
128
129static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
130 void __iomem *uar_page,
131 spinlock_t *doorbell_lock)
132{
133 __be32 doorbell[2];
134 u32 sn;
135 u32 ci;
136
137 sn = cq->arm_sn & 3;
138 ci = cq->cons_index & 0xffffff;
139
140 *cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci);
141
142 /* Make sure that the doorbell record in host memory is
143 * written before ringing the doorbell via PCI MMIO.
144 */
145 wmb();
146
147 doorbell[0] = cpu_to_be32(sn << 28 | cmd | ci);
148 doorbell[1] = cpu_to_be32(cq->cqn);
149
150 mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, doorbell_lock);
151}
152
153int mlx5_init_cq_table(struct mlx5_core_dev *dev);
154void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev);
155int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
156 struct mlx5_create_cq_mbox_in *in, int inlen);
157int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
158int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
159 struct mlx5_query_cq_mbox_out *out);
160int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
161 int type, struct mlx5_cq_modify_params *params);
162int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
163void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
164
165#endif /* MLX5_CORE_CQ_H */
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
new file mode 100644
index 000000000000..51390915e538
--- /dev/null
+++ b/include/linux/mlx5/device.h
@@ -0,0 +1,893 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef MLX5_DEVICE_H
34#define MLX5_DEVICE_H
35
36#include <linux/types.h>
37#include <rdma/ib_verbs.h>
38
39#if defined(__LITTLE_ENDIAN)
40#define MLX5_SET_HOST_ENDIANNESS 0
41#elif defined(__BIG_ENDIAN)
42#define MLX5_SET_HOST_ENDIANNESS 0x80
43#else
44#error Host endianness not defined
45#endif
46
47enum {
48 MLX5_MAX_COMMANDS = 32,
49 MLX5_CMD_DATA_BLOCK_SIZE = 512,
50 MLX5_PCI_CMD_XPORT = 7,
51};
52
53enum {
54 MLX5_EXTENDED_UD_AV = 0x80000000,
55};
56
57enum {
58 MLX5_CQ_STATE_ARMED = 9,
59 MLX5_CQ_STATE_ALWAYS_ARMED = 0xb,
60 MLX5_CQ_STATE_FIRED = 0xa,
61};
62
63enum {
64 MLX5_STAT_RATE_OFFSET = 5,
65};
66
67enum {
68 MLX5_INLINE_SEG = 0x80000000,
69};
70
71enum {
72 MLX5_PERM_LOCAL_READ = 1 << 2,
73 MLX5_PERM_LOCAL_WRITE = 1 << 3,
74 MLX5_PERM_REMOTE_READ = 1 << 4,
75 MLX5_PERM_REMOTE_WRITE = 1 << 5,
76 MLX5_PERM_ATOMIC = 1 << 6,
77 MLX5_PERM_UMR_EN = 1 << 7,
78};
79
80enum {
81 MLX5_PCIE_CTRL_SMALL_FENCE = 1 << 0,
82 MLX5_PCIE_CTRL_RELAXED_ORDERING = 1 << 2,
83 MLX5_PCIE_CTRL_NO_SNOOP = 1 << 3,
84 MLX5_PCIE_CTRL_TLP_PROCE_EN = 1 << 6,
85 MLX5_PCIE_CTRL_TPH_MASK = 3 << 4,
86};
87
88enum {
89 MLX5_ACCESS_MODE_PA = 0,
90 MLX5_ACCESS_MODE_MTT = 1,
91 MLX5_ACCESS_MODE_KLM = 2
92};
93
94enum {
95 MLX5_MKEY_REMOTE_INVAL = 1 << 24,
96 MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
97 MLX5_MKEY_BSF_EN = 1 << 30,
98 MLX5_MKEY_LEN64 = 1 << 31,
99};
100
101enum {
102 MLX5_EN_RD = (u64)1,
103 MLX5_EN_WR = (u64)2
104};
105
106enum {
107 MLX5_BF_REGS_PER_PAGE = 4,
108 MLX5_MAX_UAR_PAGES = 1 << 8,
109 MLX5_MAX_UUARS = MLX5_MAX_UAR_PAGES * MLX5_BF_REGS_PER_PAGE,
110};
111
112enum {
113 MLX5_MKEY_MASK_LEN = 1ull << 0,
114 MLX5_MKEY_MASK_PAGE_SIZE = 1ull << 1,
115 MLX5_MKEY_MASK_START_ADDR = 1ull << 6,
116 MLX5_MKEY_MASK_PD = 1ull << 7,
117 MLX5_MKEY_MASK_EN_RINVAL = 1ull << 8,
118 MLX5_MKEY_MASK_BSF_EN = 1ull << 12,
119 MLX5_MKEY_MASK_KEY = 1ull << 13,
120 MLX5_MKEY_MASK_QPN = 1ull << 14,
121 MLX5_MKEY_MASK_LR = 1ull << 17,
122 MLX5_MKEY_MASK_LW = 1ull << 18,
123 MLX5_MKEY_MASK_RR = 1ull << 19,
124 MLX5_MKEY_MASK_RW = 1ull << 20,
125 MLX5_MKEY_MASK_A = 1ull << 21,
126 MLX5_MKEY_MASK_SMALL_FENCE = 1ull << 23,
127 MLX5_MKEY_MASK_FREE = 1ull << 29,
128};
129
130enum mlx5_event {
131 MLX5_EVENT_TYPE_COMP = 0x0,
132
133 MLX5_EVENT_TYPE_PATH_MIG = 0x01,
134 MLX5_EVENT_TYPE_COMM_EST = 0x02,
135 MLX5_EVENT_TYPE_SQ_DRAINED = 0x03,
136 MLX5_EVENT_TYPE_SRQ_LAST_WQE = 0x13,
137 MLX5_EVENT_TYPE_SRQ_RQ_LIMIT = 0x14,
138
139 MLX5_EVENT_TYPE_CQ_ERROR = 0x04,
140 MLX5_EVENT_TYPE_WQ_CATAS_ERROR = 0x05,
141 MLX5_EVENT_TYPE_PATH_MIG_FAILED = 0x07,
142 MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
143 MLX5_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11,
144 MLX5_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12,
145
146 MLX5_EVENT_TYPE_INTERNAL_ERROR = 0x08,
147 MLX5_EVENT_TYPE_PORT_CHANGE = 0x09,
148 MLX5_EVENT_TYPE_GPIO_EVENT = 0x15,
149 MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19,
150
151 MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a,
152 MLX5_EVENT_TYPE_STALL_EVENT = 0x1b,
153
154 MLX5_EVENT_TYPE_CMD = 0x0a,
155 MLX5_EVENT_TYPE_PAGE_REQUEST = 0xb,
156};
157
158enum {
159 MLX5_PORT_CHANGE_SUBTYPE_DOWN = 1,
160 MLX5_PORT_CHANGE_SUBTYPE_ACTIVE = 4,
161 MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED = 5,
162 MLX5_PORT_CHANGE_SUBTYPE_LID = 6,
163 MLX5_PORT_CHANGE_SUBTYPE_PKEY = 7,
164 MLX5_PORT_CHANGE_SUBTYPE_GUID = 8,
165 MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG = 9,
166};
167
168enum {
169 MLX5_DEV_CAP_FLAG_RC = 1LL << 0,
170 MLX5_DEV_CAP_FLAG_UC = 1LL << 1,
171 MLX5_DEV_CAP_FLAG_UD = 1LL << 2,
172 MLX5_DEV_CAP_FLAG_XRC = 1LL << 3,
173 MLX5_DEV_CAP_FLAG_SRQ = 1LL << 6,
174 MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8,
175 MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9,
176 MLX5_DEV_CAP_FLAG_APM = 1LL << 17,
177 MLX5_DEV_CAP_FLAG_ATOMIC = 1LL << 18,
178 MLX5_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24,
179 MLX5_DEV_CAP_FLAG_RESIZE_SRQ = 1LL << 32,
180 MLX5_DEV_CAP_FLAG_REMOTE_FENCE = 1LL << 38,
181 MLX5_DEV_CAP_FLAG_TLP_HINTS = 1LL << 39,
182 MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40,
183 MLX5_DEV_CAP_FLAG_DCT = 1LL << 41,
184 MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 1LL << 46,
185};
186
187enum {
188 MLX5_OPCODE_NOP = 0x00,
189 MLX5_OPCODE_SEND_INVAL = 0x01,
190 MLX5_OPCODE_RDMA_WRITE = 0x08,
191 MLX5_OPCODE_RDMA_WRITE_IMM = 0x09,
192 MLX5_OPCODE_SEND = 0x0a,
193 MLX5_OPCODE_SEND_IMM = 0x0b,
194 MLX5_OPCODE_RDMA_READ = 0x10,
195 MLX5_OPCODE_ATOMIC_CS = 0x11,
196 MLX5_OPCODE_ATOMIC_FA = 0x12,
197 MLX5_OPCODE_ATOMIC_MASKED_CS = 0x14,
198 MLX5_OPCODE_ATOMIC_MASKED_FA = 0x15,
199 MLX5_OPCODE_BIND_MW = 0x18,
200 MLX5_OPCODE_CONFIG_CMD = 0x1f,
201
202 MLX5_RECV_OPCODE_RDMA_WRITE_IMM = 0x00,
203 MLX5_RECV_OPCODE_SEND = 0x01,
204 MLX5_RECV_OPCODE_SEND_IMM = 0x02,
205 MLX5_RECV_OPCODE_SEND_INVAL = 0x03,
206
207 MLX5_CQE_OPCODE_ERROR = 0x1e,
208 MLX5_CQE_OPCODE_RESIZE = 0x16,
209
210 MLX5_OPCODE_SET_PSV = 0x20,
211 MLX5_OPCODE_GET_PSV = 0x21,
212 MLX5_OPCODE_CHECK_PSV = 0x22,
213 MLX5_OPCODE_RGET_PSV = 0x26,
214 MLX5_OPCODE_RCHECK_PSV = 0x27,
215
216 MLX5_OPCODE_UMR = 0x25,
217
218};
219
220enum {
221 MLX5_SET_PORT_RESET_QKEY = 0,
222 MLX5_SET_PORT_GUID0 = 16,
223 MLX5_SET_PORT_NODE_GUID = 17,
224 MLX5_SET_PORT_SYS_GUID = 18,
225 MLX5_SET_PORT_GID_TABLE = 19,
226 MLX5_SET_PORT_PKEY_TABLE = 20,
227};
228
229enum {
230 MLX5_MAX_PAGE_SHIFT = 31
231};
232
233struct mlx5_inbox_hdr {
234 __be16 opcode;
235 u8 rsvd[4];
236 __be16 opmod;
237};
238
239struct mlx5_outbox_hdr {
240 u8 status;
241 u8 rsvd[3];
242 __be32 syndrome;
243};
244
245struct mlx5_cmd_query_adapter_mbox_in {
246 struct mlx5_inbox_hdr hdr;
247 u8 rsvd[8];
248};
249
250struct mlx5_cmd_query_adapter_mbox_out {
251 struct mlx5_outbox_hdr hdr;
252 u8 rsvd0[24];
253 u8 intapin;
254 u8 rsvd1[13];
255 __be16 vsd_vendor_id;
256 u8 vsd[208];
257 u8 vsd_psid[16];
258};
259
260struct mlx5_hca_cap {
261 u8 rsvd1[16];
262 u8 log_max_srq_sz;
263 u8 log_max_qp_sz;
264 u8 rsvd2;
265 u8 log_max_qp;
266 u8 log_max_strq_sz;
267 u8 log_max_srqs;
268 u8 rsvd4[2];
269 u8 rsvd5;
270 u8 log_max_cq_sz;
271 u8 rsvd6;
272 u8 log_max_cq;
273 u8 log_max_eq_sz;
274 u8 log_max_mkey;
275 u8 rsvd7;
276 u8 log_max_eq;
277 u8 max_indirection;
278 u8 log_max_mrw_sz;
279 u8 log_max_bsf_list_sz;
280 u8 log_max_klm_list_sz;
281 u8 rsvd_8_0;
282 u8 log_max_ra_req_dc;
283 u8 rsvd_8_1;
284 u8 log_max_ra_res_dc;
285 u8 rsvd9;
286 u8 log_max_ra_req_qp;
287 u8 rsvd10;
288 u8 log_max_ra_res_qp;
289 u8 rsvd11[4];
290 __be16 max_qp_count;
291 __be16 rsvd12;
292 u8 rsvd13;
293 u8 local_ca_ack_delay;
294 u8 rsvd14;
295 u8 num_ports;
296 u8 log_max_msg;
297 u8 rsvd15[3];
298 __be16 stat_rate_support;
299 u8 rsvd16[2];
300 __be64 flags;
301 u8 rsvd17;
302 u8 uar_sz;
303 u8 rsvd18;
304 u8 log_pg_sz;
305 __be16 bf_log_bf_reg_size;
306 u8 rsvd19[4];
307 __be16 max_desc_sz_sq;
308 u8 rsvd20[2];
309 __be16 max_desc_sz_rq;
310 u8 rsvd21[2];
311 __be16 max_desc_sz_sq_dc;
312 u8 rsvd22[4];
313 __be16 max_qp_mcg;
314 u8 rsvd23;
315 u8 log_max_mcg;
316 u8 rsvd24;
317 u8 log_max_pd;
318 u8 rsvd25;
319 u8 log_max_xrcd;
320 u8 rsvd26[40];
321 __be32 uar_page_sz;
322 u8 rsvd27[28];
323 u8 log_msx_atomic_size_qp;
324 u8 rsvd28[2];
325 u8 log_msx_atomic_size_dc;
326 u8 rsvd29[76];
327};
328
329
330struct mlx5_cmd_query_hca_cap_mbox_in {
331 struct mlx5_inbox_hdr hdr;
332 u8 rsvd[8];
333};
334
335
336struct mlx5_cmd_query_hca_cap_mbox_out {
337 struct mlx5_outbox_hdr hdr;
338 u8 rsvd0[8];
339 struct mlx5_hca_cap hca_cap;
340};
341
342
343struct mlx5_cmd_set_hca_cap_mbox_in {
344 struct mlx5_inbox_hdr hdr;
345 u8 rsvd[8];
346 struct mlx5_hca_cap hca_cap;
347};
348
349
350struct mlx5_cmd_set_hca_cap_mbox_out {
351 struct mlx5_outbox_hdr hdr;
352 u8 rsvd0[8];
353};
354
355
356struct mlx5_cmd_init_hca_mbox_in {
357 struct mlx5_inbox_hdr hdr;
358 u8 rsvd0[2];
359 __be16 profile;
360 u8 rsvd1[4];
361};
362
363struct mlx5_cmd_init_hca_mbox_out {
364 struct mlx5_outbox_hdr hdr;
365 u8 rsvd[8];
366};
367
368struct mlx5_cmd_teardown_hca_mbox_in {
369 struct mlx5_inbox_hdr hdr;
370 u8 rsvd0[2];
371 __be16 profile;
372 u8 rsvd1[4];
373};
374
375struct mlx5_cmd_teardown_hca_mbox_out {
376 struct mlx5_outbox_hdr hdr;
377 u8 rsvd[8];
378};
379
380struct mlx5_cmd_layout {
381 u8 type;
382 u8 rsvd0[3];
383 __be32 inlen;
384 __be64 in_ptr;
385 __be32 in[4];
386 __be32 out[4];
387 __be64 out_ptr;
388 __be32 outlen;
389 u8 token;
390 u8 sig;
391 u8 rsvd1;
392 u8 status_own;
393};
394
395
396struct health_buffer {
397 __be32 assert_var[5];
398 __be32 rsvd0[3];
399 __be32 assert_exit_ptr;
400 __be32 assert_callra;
401 __be32 rsvd1[2];
402 __be32 fw_ver;
403 __be32 hw_id;
404 __be32 rsvd2;
405 u8 irisc_index;
406 u8 synd;
407 __be16 ext_sync;
408};
409
410struct mlx5_init_seg {
411 __be32 fw_rev;
412 __be32 cmdif_rev_fw_sub;
413 __be32 rsvd0[2];
414 __be32 cmdq_addr_h;
415 __be32 cmdq_addr_l_sz;
416 __be32 cmd_dbell;
417 __be32 rsvd1[121];
418 struct health_buffer health;
419 __be32 rsvd2[884];
420 __be32 health_counter;
421 __be32 rsvd3[1023];
422 __be64 ieee1588_clk;
423 __be32 ieee1588_clk_type;
424 __be32 clr_intx;
425};
426
427struct mlx5_eqe_comp {
428 __be32 reserved[6];
429 __be32 cqn;
430};
431
432struct mlx5_eqe_qp_srq {
433 __be32 reserved[6];
434 __be32 qp_srq_n;
435};
436
437struct mlx5_eqe_cq_err {
438 __be32 cqn;
439 u8 reserved1[7];
440 u8 syndrome;
441};
442
443struct mlx5_eqe_dropped_packet {
444};
445
446struct mlx5_eqe_port_state {
447 u8 reserved0[8];
448 u8 port;
449};
450
451struct mlx5_eqe_gpio {
452 __be32 reserved0[2];
453 __be64 gpio_event;
454};
455
456struct mlx5_eqe_congestion {
457 u8 type;
458 u8 rsvd0;
459 u8 congestion_level;
460};
461
462struct mlx5_eqe_stall_vl {
463 u8 rsvd0[3];
464 u8 port_vl;
465};
466
467struct mlx5_eqe_cmd {
468 __be32 vector;
469 __be32 rsvd[6];
470};
471
472struct mlx5_eqe_page_req {
473 u8 rsvd0[2];
474 __be16 func_id;
475 u8 rsvd1[2];
476 __be16 num_pages;
477 __be32 rsvd2[5];
478};
479
480union ev_data {
481 __be32 raw[7];
482 struct mlx5_eqe_cmd cmd;
483 struct mlx5_eqe_comp comp;
484 struct mlx5_eqe_qp_srq qp_srq;
485 struct mlx5_eqe_cq_err cq_err;
486 struct mlx5_eqe_dropped_packet dp;
487 struct mlx5_eqe_port_state port;
488 struct mlx5_eqe_gpio gpio;
489 struct mlx5_eqe_congestion cong;
490 struct mlx5_eqe_stall_vl stall_vl;
491 struct mlx5_eqe_page_req req_pages;
492} __packed;
493
494struct mlx5_eqe {
495 u8 rsvd0;
496 u8 type;
497 u8 rsvd1;
498 u8 sub_type;
499 __be32 rsvd2[7];
500 union ev_data data;
501 __be16 rsvd3;
502 u8 signature;
503 u8 owner;
504} __packed;
505
506struct mlx5_cmd_prot_block {
507 u8 data[MLX5_CMD_DATA_BLOCK_SIZE];
508 u8 rsvd0[48];
509 __be64 next;
510 __be32 block_num;
511 u8 rsvd1;
512 u8 token;
513 u8 ctrl_sig;
514 u8 sig;
515};
516
517struct mlx5_err_cqe {
518 u8 rsvd0[32];
519 __be32 srqn;
520 u8 rsvd1[18];
521 u8 vendor_err_synd;
522 u8 syndrome;
523 __be32 s_wqe_opcode_qpn;
524 __be16 wqe_counter;
525 u8 signature;
526 u8 op_own;
527};
528
529struct mlx5_cqe64 {
530 u8 rsvd0[17];
531 u8 ml_path;
532 u8 rsvd20[4];
533 __be16 slid;
534 __be32 flags_rqpn;
535 u8 rsvd28[4];
536 __be32 srqn;
537 __be32 imm_inval_pkey;
538 u8 rsvd40[4];
539 __be32 byte_cnt;
540 __be64 timestamp;
541 __be32 sop_drop_qpn;
542 __be16 wqe_counter;
543 u8 signature;
544 u8 op_own;
545};
546
547struct mlx5_wqe_srq_next_seg {
548 u8 rsvd0[2];
549 __be16 next_wqe_index;
550 u8 signature;
551 u8 rsvd1[11];
552};
553
554union mlx5_ext_cqe {
555 struct ib_grh grh;
556 u8 inl[64];
557};
558
559struct mlx5_cqe128 {
560 union mlx5_ext_cqe inl_grh;
561 struct mlx5_cqe64 cqe64;
562};
563
564struct mlx5_srq_ctx {
565 u8 state_log_sz;
566 u8 rsvd0[3];
567 __be32 flags_xrcd;
568 __be32 pgoff_cqn;
569 u8 rsvd1[4];
570 u8 log_pg_sz;
571 u8 rsvd2[7];
572 __be32 pd;
573 __be16 lwm;
574 __be16 wqe_cnt;
575 u8 rsvd3[8];
576 __be64 db_record;
577};
578
579struct mlx5_create_srq_mbox_in {
580 struct mlx5_inbox_hdr hdr;
581 __be32 input_srqn;
582 u8 rsvd0[4];
583 struct mlx5_srq_ctx ctx;
584 u8 rsvd1[208];
585 __be64 pas[0];
586};
587
588struct mlx5_create_srq_mbox_out {
589 struct mlx5_outbox_hdr hdr;
590 __be32 srqn;
591 u8 rsvd[4];
592};
593
594struct mlx5_destroy_srq_mbox_in {
595 struct mlx5_inbox_hdr hdr;
596 __be32 srqn;
597 u8 rsvd[4];
598};
599
600struct mlx5_destroy_srq_mbox_out {
601 struct mlx5_outbox_hdr hdr;
602 u8 rsvd[8];
603};
604
605struct mlx5_query_srq_mbox_in {
606 struct mlx5_inbox_hdr hdr;
607 __be32 srqn;
608 u8 rsvd0[4];
609};
610
611struct mlx5_query_srq_mbox_out {
612 struct mlx5_outbox_hdr hdr;
613 u8 rsvd0[8];
614 struct mlx5_srq_ctx ctx;
615 u8 rsvd1[32];
616 __be64 pas[0];
617};
618
619struct mlx5_arm_srq_mbox_in {
620 struct mlx5_inbox_hdr hdr;
621 __be32 srqn;
622 __be16 rsvd;
623 __be16 lwm;
624};
625
626struct mlx5_arm_srq_mbox_out {
627 struct mlx5_outbox_hdr hdr;
628 u8 rsvd[8];
629};
630
631struct mlx5_cq_context {
632 u8 status;
633 u8 cqe_sz_flags;
634 u8 st;
635 u8 rsvd3;
636 u8 rsvd4[6];
637 __be16 page_offset;
638 __be32 log_sz_usr_page;
639 __be16 cq_period;
640 __be16 cq_max_count;
641 __be16 rsvd20;
642 __be16 c_eqn;
643 u8 log_pg_sz;
644 u8 rsvd25[7];
645 __be32 last_notified_index;
646 __be32 solicit_producer_index;
647 __be32 consumer_counter;
648 __be32 producer_counter;
649 u8 rsvd48[8];
650 __be64 db_record_addr;
651};
652
653struct mlx5_create_cq_mbox_in {
654 struct mlx5_inbox_hdr hdr;
655 __be32 input_cqn;
656 u8 rsvdx[4];
657 struct mlx5_cq_context ctx;
658 u8 rsvd6[192];
659 __be64 pas[0];
660};
661
662struct mlx5_create_cq_mbox_out {
663 struct mlx5_outbox_hdr hdr;
664 __be32 cqn;
665 u8 rsvd0[4];
666};
667
668struct mlx5_destroy_cq_mbox_in {
669 struct mlx5_inbox_hdr hdr;
670 __be32 cqn;
671 u8 rsvd0[4];
672};
673
674struct mlx5_destroy_cq_mbox_out {
675 struct mlx5_outbox_hdr hdr;
676 u8 rsvd0[8];
677};
678
679struct mlx5_query_cq_mbox_in {
680 struct mlx5_inbox_hdr hdr;
681 __be32 cqn;
682 u8 rsvd0[4];
683};
684
685struct mlx5_query_cq_mbox_out {
686 struct mlx5_outbox_hdr hdr;
687 u8 rsvd0[8];
688 struct mlx5_cq_context ctx;
689 u8 rsvd6[16];
690 __be64 pas[0];
691};
692
693struct mlx5_eq_context {
694 u8 status;
695 u8 ec_oi;
696 u8 st;
697 u8 rsvd2[7];
698 __be16 page_pffset;
699 __be32 log_sz_usr_page;
700 u8 rsvd3[7];
701 u8 intr;
702 u8 log_page_size;
703 u8 rsvd4[15];
704 __be32 consumer_counter;
705 __be32 produser_counter;
706 u8 rsvd5[16];
707};
708
709struct mlx5_create_eq_mbox_in {
710 struct mlx5_inbox_hdr hdr;
711 u8 rsvd0[3];
712 u8 input_eqn;
713 u8 rsvd1[4];
714 struct mlx5_eq_context ctx;
715 u8 rsvd2[8];
716 __be64 events_mask;
717 u8 rsvd3[176];
718 __be64 pas[0];
719};
720
721struct mlx5_create_eq_mbox_out {
722 struct mlx5_outbox_hdr hdr;
723 u8 rsvd0[3];
724 u8 eq_number;
725 u8 rsvd1[4];
726};
727
728struct mlx5_destroy_eq_mbox_in {
729 struct mlx5_inbox_hdr hdr;
730 u8 rsvd0[3];
731 u8 eqn;
732 u8 rsvd1[4];
733};
734
735struct mlx5_destroy_eq_mbox_out {
736 struct mlx5_outbox_hdr hdr;
737 u8 rsvd[8];
738};
739
740struct mlx5_map_eq_mbox_in {
741 struct mlx5_inbox_hdr hdr;
742 __be64 mask;
743 u8 mu;
744 u8 rsvd0[2];
745 u8 eqn;
746 u8 rsvd1[24];
747};
748
749struct mlx5_map_eq_mbox_out {
750 struct mlx5_outbox_hdr hdr;
751 u8 rsvd[8];
752};
753
754struct mlx5_query_eq_mbox_in {
755 struct mlx5_inbox_hdr hdr;
756 u8 rsvd0[3];
757 u8 eqn;
758 u8 rsvd1[4];
759};
760
761struct mlx5_query_eq_mbox_out {
762 struct mlx5_outbox_hdr hdr;
763 u8 rsvd[8];
764 struct mlx5_eq_context ctx;
765};
766
767struct mlx5_mkey_seg {
768	/* This is a two-bit field occupying bits 31-30.
769	 * bit 31 is always 0,
770	 * bit 30 is zero for regular MRs and 1 (i.e. free) for UMRs that do not have translation
771 */
772 u8 status;
773 u8 pcie_control;
774 u8 flags;
775 u8 version;
776 __be32 qpn_mkey7_0;
777 u8 rsvd1[4];
778 __be32 flags_pd;
779 __be64 start_addr;
780 __be64 len;
781 __be32 bsfs_octo_size;
782 u8 rsvd2[16];
783 __be32 xlt_oct_size;
784 u8 rsvd3[3];
785 u8 log2_page_size;
786 u8 rsvd4[4];
787};
788
789struct mlx5_query_special_ctxs_mbox_in {
790 struct mlx5_inbox_hdr hdr;
791 u8 rsvd[8];
792};
793
794struct mlx5_query_special_ctxs_mbox_out {
795 struct mlx5_outbox_hdr hdr;
796 __be32 dump_fill_mkey;
797 __be32 reserved_lkey;
798};
799
800struct mlx5_create_mkey_mbox_in {
801 struct mlx5_inbox_hdr hdr;
802 __be32 input_mkey_index;
803 u8 rsvd0[4];
804 struct mlx5_mkey_seg seg;
805 u8 rsvd1[16];
806 __be32 xlat_oct_act_size;
807 __be32 bsf_coto_act_size;
808 u8 rsvd2[168];
809 __be64 pas[0];
810};
811
812struct mlx5_create_mkey_mbox_out {
813 struct mlx5_outbox_hdr hdr;
814 __be32 mkey;
815 u8 rsvd[4];
816};
817
818struct mlx5_destroy_mkey_mbox_in {
819 struct mlx5_inbox_hdr hdr;
820 __be32 mkey;
821 u8 rsvd[4];
822};
823
824struct mlx5_destroy_mkey_mbox_out {
825 struct mlx5_outbox_hdr hdr;
826 u8 rsvd[8];
827};
828
829struct mlx5_query_mkey_mbox_in {
830 struct mlx5_inbox_hdr hdr;
831 __be32 mkey;
832};
833
834struct mlx5_query_mkey_mbox_out {
835 struct mlx5_outbox_hdr hdr;
836 __be64 pas[0];
837};
838
839struct mlx5_modify_mkey_mbox_in {
840 struct mlx5_inbox_hdr hdr;
841 __be32 mkey;
842 __be64 pas[0];
843};
844
845struct mlx5_modify_mkey_mbox_out {
846 struct mlx5_outbox_hdr hdr;
847};
848
849struct mlx5_dump_mkey_mbox_in {
850 struct mlx5_inbox_hdr hdr;
851};
852
853struct mlx5_dump_mkey_mbox_out {
854 struct mlx5_outbox_hdr hdr;
855 __be32 mkey;
856};
857
858struct mlx5_mad_ifc_mbox_in {
859 struct mlx5_inbox_hdr hdr;
860 __be16 remote_lid;
861 u8 rsvd0;
862 u8 port;
863 u8 rsvd1[4];
864 u8 data[256];
865};
866
867struct mlx5_mad_ifc_mbox_out {
868 struct mlx5_outbox_hdr hdr;
869 u8 rsvd[8];
870 u8 data[256];
871};
872
873struct mlx5_access_reg_mbox_in {
874 struct mlx5_inbox_hdr hdr;
875 u8 rsvd0[2];
876 __be16 register_id;
877 __be32 arg;
878 __be32 data[0];
879};
880
881struct mlx5_access_reg_mbox_out {
882 struct mlx5_outbox_hdr hdr;
883 u8 rsvd[8];
884 __be32 data[0];
885};
886
887#define MLX5_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90)
888
889enum {
890 MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO = 1 << 0
891};
892
893#endif /* MLX5_DEVICE_H */
diff --git a/include/linux/mlx5/doorbell.h b/include/linux/mlx5/doorbell.h
new file mode 100644
index 000000000000..163a818411e7
--- /dev/null
+++ b/include/linux/mlx5/doorbell.h
@@ -0,0 +1,79 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef MLX5_DOORBELL_H
34#define MLX5_DOORBELL_H
35
36#define MLX5_BF_OFFSET 0x800
37#define MLX5_CQ_DOORBELL 0x20
38
39#if BITS_PER_LONG == 64
40/* Assume that we can just write a 64-bit doorbell atomically. s390
41 * actually doesn't have writeq() but S/390 systems don't even have
42 * PCI so we won't worry about it.
43 */
44
45#define MLX5_DECLARE_DOORBELL_LOCK(name)
46#define MLX5_INIT_DOORBELL_LOCK(ptr) do { } while (0)
47#define MLX5_GET_DOORBELL_LOCK(ptr) (NULL)
48
49static inline void mlx5_write64(__be32 val[2], void __iomem *dest,
50 spinlock_t *doorbell_lock)
51{
52 __raw_writeq(*(u64 *)val, dest);
53}
54
55#else
56
57/* Just fall back to a spinlock to protect the doorbell if
58 * BITS_PER_LONG is 32 -- there's no portable way to do atomic 64-bit
59 * MMIO writes.
60 */
61
62#define MLX5_DECLARE_DOORBELL_LOCK(name) spinlock_t name;
63#define MLX5_INIT_DOORBELL_LOCK(ptr) spin_lock_init(ptr)
64#define MLX5_GET_DOORBELL_LOCK(ptr) (ptr)
65
66static inline void mlx5_write64(__be32 val[2], void __iomem *dest,
67 spinlock_t *doorbell_lock)
68{
69 unsigned long flags;
70
71 spin_lock_irqsave(doorbell_lock, flags);
72 __raw_writel((__force u32) val[0], dest);
73 __raw_writel((__force u32) val[1], dest + 4);
74 spin_unlock_irqrestore(doorbell_lock, flags);
75}
76
77#endif
78
79#endif /* MLX5_DOORBELL_H */
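A minimal sketch of how a caller might use the helpers above to ring a completion-queue doorbell; the demo_cq structure, its fields and the demo_* functions are hypothetical and only illustrate the macros and mlx5_write64() declared in this header. On 64-bit kernels the lock macros compile away and the doorbell is a single atomic 64-bit MMIO store; on 32-bit kernels the embedded spinlock serializes the two 32-bit stores.

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/mlx5/doorbell.h>

struct demo_cq {
	void __iomem *uar_page;			/* UAR page used to ring this CQ */
	__be32 db[2];				/* two big-endian doorbell words */
	MLX5_DECLARE_DOORBELL_LOCK(db_lock);	/* only present when BITS_PER_LONG == 32 */
};

static void demo_cq_init(struct demo_cq *cq)
{
	MLX5_INIT_DOORBELL_LOCK(&cq->db_lock);	/* no-op on 64-bit builds */
}

static void demo_cq_arm(struct demo_cq *cq)
{
	/* one 64-bit store, or two 32-bit stores taken under the lock */
	mlx5_write64(cq->db, cq->uar_page + MLX5_CQ_DOORBELL,
		     MLX5_GET_DOORBELL_LOCK(&cq->db_lock));
}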
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
new file mode 100644
index 000000000000..e47f1e4c9b03
--- /dev/null
+++ b/include/linux/mlx5/driver.h
@@ -0,0 +1,769 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef MLX5_DRIVER_H
34#define MLX5_DRIVER_H
35
36#include <linux/kernel.h>
37#include <linux/completion.h>
38#include <linux/pci.h>
39#include <linux/spinlock_types.h>
40#include <linux/semaphore.h>
41#include <linux/vmalloc.h>
42#include <linux/radix-tree.h>
43#include <linux/mlx5/device.h>
44#include <linux/mlx5/doorbell.h>
45
46enum {
47 MLX5_BOARD_ID_LEN = 64,
48 MLX5_MAX_NAME_LEN = 16,
49};
50
51enum {
 52	/* a generous two-hour timeout for the sake of bringup. Generally,
 53	 * commands must always complete; this value may need tuning later
 54	 */
55 MLX5_CMD_TIMEOUT_MSEC = 7200 * 1000,
56 MLX5_CMD_WQ_MAX_NAME = 32,
57};
58
59enum {
60 CMD_OWNER_SW = 0x0,
61 CMD_OWNER_HW = 0x1,
62 CMD_STATUS_SUCCESS = 0,
63};
64
65enum mlx5_sqp_t {
66 MLX5_SQP_SMI = 0,
67 MLX5_SQP_GSI = 1,
68 MLX5_SQP_IEEE_1588 = 2,
69 MLX5_SQP_SNIFFER = 3,
70 MLX5_SQP_SYNC_UMR = 4,
71};
72
73enum {
74 MLX5_MAX_PORTS = 2,
75};
76
77enum {
78 MLX5_EQ_VEC_PAGES = 0,
79 MLX5_EQ_VEC_CMD = 1,
80 MLX5_EQ_VEC_ASYNC = 2,
81 MLX5_EQ_VEC_COMP_BASE,
82};
83
84enum {
85 MLX5_MAX_EQ_NAME = 20
86};
87
88enum {
89 MLX5_ATOMIC_MODE_IB_COMP = 1 << 16,
90 MLX5_ATOMIC_MODE_CX = 2 << 16,
91 MLX5_ATOMIC_MODE_8B = 3 << 16,
92 MLX5_ATOMIC_MODE_16B = 4 << 16,
93 MLX5_ATOMIC_MODE_32B = 5 << 16,
94 MLX5_ATOMIC_MODE_64B = 6 << 16,
95 MLX5_ATOMIC_MODE_128B = 7 << 16,
96 MLX5_ATOMIC_MODE_256B = 8 << 16,
97};
98
99enum {
100 MLX5_CMD_OP_QUERY_HCA_CAP = 0x100,
101 MLX5_CMD_OP_QUERY_ADAPTER = 0x101,
102 MLX5_CMD_OP_INIT_HCA = 0x102,
103 MLX5_CMD_OP_TEARDOWN_HCA = 0x103,
104 MLX5_CMD_OP_QUERY_PAGES = 0x107,
105 MLX5_CMD_OP_MANAGE_PAGES = 0x108,
106 MLX5_CMD_OP_SET_HCA_CAP = 0x109,
107
108 MLX5_CMD_OP_CREATE_MKEY = 0x200,
109 MLX5_CMD_OP_QUERY_MKEY = 0x201,
110 MLX5_CMD_OP_DESTROY_MKEY = 0x202,
111 MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203,
112
113 MLX5_CMD_OP_CREATE_EQ = 0x301,
114 MLX5_CMD_OP_DESTROY_EQ = 0x302,
115 MLX5_CMD_OP_QUERY_EQ = 0x303,
116
117 MLX5_CMD_OP_CREATE_CQ = 0x400,
118 MLX5_CMD_OP_DESTROY_CQ = 0x401,
119 MLX5_CMD_OP_QUERY_CQ = 0x402,
120 MLX5_CMD_OP_MODIFY_CQ = 0x403,
121
122 MLX5_CMD_OP_CREATE_QP = 0x500,
123 MLX5_CMD_OP_DESTROY_QP = 0x501,
124 MLX5_CMD_OP_RST2INIT_QP = 0x502,
125 MLX5_CMD_OP_INIT2RTR_QP = 0x503,
126 MLX5_CMD_OP_RTR2RTS_QP = 0x504,
127 MLX5_CMD_OP_RTS2RTS_QP = 0x505,
128 MLX5_CMD_OP_SQERR2RTS_QP = 0x506,
129 MLX5_CMD_OP_2ERR_QP = 0x507,
130 MLX5_CMD_OP_RTS2SQD_QP = 0x508,
131 MLX5_CMD_OP_SQD2RTS_QP = 0x509,
132 MLX5_CMD_OP_2RST_QP = 0x50a,
133 MLX5_CMD_OP_QUERY_QP = 0x50b,
134 MLX5_CMD_OP_CONF_SQP = 0x50c,
135 MLX5_CMD_OP_MAD_IFC = 0x50d,
136 MLX5_CMD_OP_INIT2INIT_QP = 0x50e,
137 MLX5_CMD_OP_SUSPEND_QP = 0x50f,
138 MLX5_CMD_OP_UNSUSPEND_QP = 0x510,
139 MLX5_CMD_OP_SQD2SQD_QP = 0x511,
140 MLX5_CMD_OP_ALLOC_QP_COUNTER_SET = 0x512,
141 MLX5_CMD_OP_DEALLOC_QP_COUNTER_SET = 0x513,
142 MLX5_CMD_OP_QUERY_QP_COUNTER_SET = 0x514,
143
144 MLX5_CMD_OP_CREATE_PSV = 0x600,
145 MLX5_CMD_OP_DESTROY_PSV = 0x601,
146 MLX5_CMD_OP_QUERY_PSV = 0x602,
147 MLX5_CMD_OP_QUERY_SIG_RULE_TABLE = 0x603,
148 MLX5_CMD_OP_QUERY_BLOCK_SIZE_TABLE = 0x604,
149
150 MLX5_CMD_OP_CREATE_SRQ = 0x700,
151 MLX5_CMD_OP_DESTROY_SRQ = 0x701,
152 MLX5_CMD_OP_QUERY_SRQ = 0x702,
153 MLX5_CMD_OP_ARM_RQ = 0x703,
154 MLX5_CMD_OP_RESIZE_SRQ = 0x704,
155
156 MLX5_CMD_OP_ALLOC_PD = 0x800,
157 MLX5_CMD_OP_DEALLOC_PD = 0x801,
158 MLX5_CMD_OP_ALLOC_UAR = 0x802,
159 MLX5_CMD_OP_DEALLOC_UAR = 0x803,
160
161 MLX5_CMD_OP_ATTACH_TO_MCG = 0x806,
162 MLX5_CMD_OP_DETACH_FROM_MCG = 0x807,
163
164
165 MLX5_CMD_OP_ALLOC_XRCD = 0x80e,
166 MLX5_CMD_OP_DEALLOC_XRCD = 0x80f,
167
168 MLX5_CMD_OP_ACCESS_REG = 0x805,
169 MLX5_CMD_OP_MAX = 0x810,
170};
171
172enum {
173 MLX5_REG_PCAP = 0x5001,
174 MLX5_REG_PMTU = 0x5003,
175 MLX5_REG_PTYS = 0x5004,
176 MLX5_REG_PAOS = 0x5006,
177 MLX5_REG_PMAOS = 0x5012,
178 MLX5_REG_PUDE = 0x5009,
179 MLX5_REG_PMPE = 0x5010,
180 MLX5_REG_PELC = 0x500e,
181 MLX5_REG_PMLP = 0, /* TBD */
182 MLX5_REG_NODE_DESC = 0x6001,
183 MLX5_REG_HOST_ENDIANNESS = 0x7004,
184};
185
186enum dbg_rsc_type {
187 MLX5_DBG_RSC_QP,
188 MLX5_DBG_RSC_EQ,
189 MLX5_DBG_RSC_CQ,
190};
191
192struct mlx5_field_desc {
193 struct dentry *dent;
194 int i;
195};
196
197struct mlx5_rsc_debug {
198 struct mlx5_core_dev *dev;
199 void *object;
200 enum dbg_rsc_type type;
201 struct dentry *root;
202 struct mlx5_field_desc fields[0];
203};
204
205enum mlx5_dev_event {
206 MLX5_DEV_EVENT_SYS_ERROR,
207 MLX5_DEV_EVENT_PORT_UP,
208 MLX5_DEV_EVENT_PORT_DOWN,
209 MLX5_DEV_EVENT_PORT_INITIALIZED,
210 MLX5_DEV_EVENT_LID_CHANGE,
211 MLX5_DEV_EVENT_PKEY_CHANGE,
212 MLX5_DEV_EVENT_GUID_CHANGE,
213 MLX5_DEV_EVENT_CLIENT_REREG,
214};
215
216struct mlx5_uuar_info {
217 struct mlx5_uar *uars;
218 int num_uars;
219 int num_low_latency_uuars;
220 unsigned long *bitmap;
221 unsigned int *count;
222 struct mlx5_bf *bfs;
223
224 /*
225 * protect uuar allocation data structs
226 */
227 struct mutex lock;
228};
229
230struct mlx5_bf {
231 void __iomem *reg;
232 void __iomem *regreg;
233 int buf_size;
234 struct mlx5_uar *uar;
235 unsigned long offset;
236 int need_lock;
237 /* protect blue flame buffer selection when needed
238 */
239 spinlock_t lock;
240
241 /* serialize 64 bit writes when done as two 32 bit accesses
242 */
243 spinlock_t lock32;
244 int uuarn;
245};
246
247struct mlx5_cmd_first {
248 __be32 data[4];
249};
250
251struct mlx5_cmd_msg {
252 struct list_head list;
253 struct cache_ent *cache;
254 u32 len;
255 struct mlx5_cmd_first first;
256 struct mlx5_cmd_mailbox *next;
257};
258
259struct mlx5_cmd_debug {
260 struct dentry *dbg_root;
261 struct dentry *dbg_in;
262 struct dentry *dbg_out;
263 struct dentry *dbg_outlen;
264 struct dentry *dbg_status;
265 struct dentry *dbg_run;
266 void *in_msg;
267 void *out_msg;
268 u8 status;
269 u16 inlen;
270 u16 outlen;
271};
272
273struct cache_ent {
274 /* protect block chain allocations
275 */
276 spinlock_t lock;
277 struct list_head head;
278};
279
280struct cmd_msg_cache {
281 struct cache_ent large;
282 struct cache_ent med;
283
284};
285
286struct mlx5_cmd_stats {
287 u64 sum;
288 u64 n;
289 struct dentry *root;
290 struct dentry *avg;
291 struct dentry *count;
292 /* protect command average calculations */
293 spinlock_t lock;
294};
295
296struct mlx5_cmd {
297 void *cmd_buf;
298 dma_addr_t dma;
299 u16 cmdif_rev;
300 u8 log_sz;
301 u8 log_stride;
302 int max_reg_cmds;
303 int events;
304 u32 __iomem *vector;
305
306 /* protect command queue allocations
307 */
308 spinlock_t alloc_lock;
309
310 /* protect token allocations
311 */
312 spinlock_t token_lock;
313 u8 token;
314 unsigned long bitmask;
315 char wq_name[MLX5_CMD_WQ_MAX_NAME];
316 struct workqueue_struct *wq;
317 struct semaphore sem;
318 struct semaphore pages_sem;
319 int mode;
320 struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
321 struct pci_pool *pool;
322 struct mlx5_cmd_debug dbg;
323 struct cmd_msg_cache cache;
324 int checksum_disabled;
325 struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
326};
327
328struct mlx5_port_caps {
329 int gid_table_len;
330 int pkey_table_len;
331};
332
333struct mlx5_caps {
334 u8 log_max_eq;
335 u8 log_max_cq;
336 u8 log_max_qp;
337 u8 log_max_mkey;
338 u8 log_max_pd;
339 u8 log_max_srq;
340 u32 max_cqes;
341 int max_wqes;
342 int max_sq_desc_sz;
343 int max_rq_desc_sz;
344 u64 flags;
345 u16 stat_rate_support;
346 int log_max_msg;
347 int num_ports;
348 int max_ra_res_qp;
349 int max_ra_req_qp;
350 int max_srq_wqes;
351 int bf_reg_size;
352 int bf_regs_per_page;
353 struct mlx5_port_caps port[MLX5_MAX_PORTS];
354 u8 ext_port_cap[MLX5_MAX_PORTS];
355 int max_vf;
356 u32 reserved_lkey;
357 u8 local_ca_ack_delay;
358 u8 log_max_mcg;
359 u16 max_qp_mcg;
360 int min_page_sz;
361};
362
363struct mlx5_cmd_mailbox {
364 void *buf;
365 dma_addr_t dma;
366 struct mlx5_cmd_mailbox *next;
367};
368
369struct mlx5_buf_list {
370 void *buf;
371 dma_addr_t map;
372};
373
374struct mlx5_buf {
375 struct mlx5_buf_list direct;
376 struct mlx5_buf_list *page_list;
377 int nbufs;
378 int npages;
379 int page_shift;
380 int size;
381};
382
383struct mlx5_eq {
384 struct mlx5_core_dev *dev;
385 __be32 __iomem *doorbell;
386 u32 cons_index;
387 struct mlx5_buf buf;
388 int size;
389 u8 irqn;
390 u8 eqn;
391 int nent;
392 u64 mask;
393 char name[MLX5_MAX_EQ_NAME];
394 struct list_head list;
395 int index;
396 struct mlx5_rsc_debug *dbg;
397};
398
399
400struct mlx5_core_mr {
401 u64 iova;
402 u64 size;
403 u32 key;
404 u32 pd;
405 u32 access;
406};
407
408struct mlx5_core_srq {
409 u32 srqn;
410 int max;
411 int max_gs;
412 int max_avail_gather;
413 int wqe_shift;
414 void (*event) (struct mlx5_core_srq *, enum mlx5_event);
415
416 atomic_t refcount;
417 struct completion free;
418};
419
420struct mlx5_eq_table {
421 void __iomem *update_ci;
422 void __iomem *update_arm_ci;
423 struct list_head *comp_eq_head;
424 struct mlx5_eq pages_eq;
425 struct mlx5_eq async_eq;
426 struct mlx5_eq cmd_eq;
427 struct msix_entry *msix_arr;
428 int num_comp_vectors;
429 /* protect EQs list
430 */
431 spinlock_t lock;
432};
433
434struct mlx5_uar {
435 u32 index;
436 struct list_head bf_list;
437 unsigned free_bf_bmap;
438 void __iomem *wc_map;
439 void __iomem *map;
440};
441
442
443struct mlx5_core_health {
444 struct health_buffer __iomem *health;
445 __be32 __iomem *health_counter;
446 struct timer_list timer;
447 struct list_head list;
448 u32 prev;
449 int miss_counter;
450};
451
452struct mlx5_cq_table {
453 /* protect radix tree
454 */
455 spinlock_t lock;
456 struct radix_tree_root tree;
457};
458
459struct mlx5_qp_table {
460 /* protect radix tree
461 */
462 spinlock_t lock;
463 struct radix_tree_root tree;
464};
465
466struct mlx5_srq_table {
467 /* protect radix tree
468 */
469 spinlock_t lock;
470 struct radix_tree_root tree;
471};
472
473struct mlx5_priv {
474 char name[MLX5_MAX_NAME_LEN];
475 struct mlx5_eq_table eq_table;
476 struct mlx5_uuar_info uuari;
477 MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
478
479 /* pages stuff */
480 struct workqueue_struct *pg_wq;
481 struct rb_root page_root;
482 int fw_pages;
483 int reg_pages;
484
485 struct mlx5_core_health health;
486
487 struct mlx5_srq_table srq_table;
488
489	/* start: qp stuff */
490 struct mlx5_qp_table qp_table;
491 struct dentry *qp_debugfs;
492 struct dentry *eq_debugfs;
493 struct dentry *cq_debugfs;
494 struct dentry *cmdif_debugfs;
495	/* end: qp stuff */
496
497	/* start: cq stuff */
498 struct mlx5_cq_table cq_table;
499	/* end: cq stuff */
500
501	/* start: alloc stuff */
502 struct mutex pgdir_mutex;
503 struct list_head pgdir_list;
504	/* end: alloc stuff */
505 struct dentry *dbg_root;
506
507 /* protect mkey key part */
508 spinlock_t mkey_lock;
509 u8 mkey_key;
510};
511
512struct mlx5_core_dev {
513 struct pci_dev *pdev;
514 u8 rev_id;
515 char board_id[MLX5_BOARD_ID_LEN];
516 struct mlx5_cmd cmd;
517 struct mlx5_caps caps;
518 phys_addr_t iseg_base;
519 struct mlx5_init_seg __iomem *iseg;
520 void (*event) (struct mlx5_core_dev *dev,
521 enum mlx5_dev_event event,
522 void *data);
523 struct mlx5_priv priv;
524 struct mlx5_profile *profile;
525 atomic_t num_qps;
526};
527
528struct mlx5_db {
529 __be32 *db;
530 union {
531 struct mlx5_db_pgdir *pgdir;
532 struct mlx5_ib_user_db_page *user_page;
533 } u;
534 dma_addr_t dma;
535 int index;
536};
537
538enum {
539 MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES,
540};
541
542enum {
543 MLX5_COMP_EQ_SIZE = 1024,
544};
545
546struct mlx5_db_pgdir {
547 struct list_head list;
548 DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
549 __be32 *db_page;
550 dma_addr_t db_dma;
551};
552
553typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
554
555struct mlx5_cmd_work_ent {
556 struct mlx5_cmd_msg *in;
557 struct mlx5_cmd_msg *out;
558 mlx5_cmd_cbk_t callback;
559 void *context;
560 int idx;
561 struct completion done;
562 struct mlx5_cmd *cmd;
563 struct work_struct work;
564 struct mlx5_cmd_layout *lay;
565 int ret;
566 int page_queue;
567 u8 status;
568 u8 token;
569 struct timespec ts1;
570 struct timespec ts2;
571};
572
573struct mlx5_pas {
574 u64 pa;
575 u8 log_sz;
576};
577
578static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
579{
580 if (likely(BITS_PER_LONG == 64 || buf->nbufs == 1))
581 return buf->direct.buf + offset;
582 else
583 return buf->page_list[offset >> PAGE_SHIFT].buf +
584 (offset & (PAGE_SIZE - 1));
585}
586
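mlx5_buf_offset() above hides whether mlx5_buf_alloc() produced one physically contiguous block or a list of pages; callers simply address queue entries by byte offset. A hypothetical helper sketching that use (demo_get_cqe and cqe_sz are assumptions; 64 or 128 would match struct mlx5_cqe64 / mlx5_cqe128 in device.h):

static inline void *demo_get_cqe(struct mlx5_buf *buf, int n, int cqe_sz)
{
	/* entry n starts at byte offset n * cqe_sz in either layout */
	return mlx5_buf_offset(buf, n * cqe_sz);
}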
587extern struct workqueue_struct *mlx5_core_wq;
588
589#define STRUCT_FIELD(header, field) \
590 .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
591 .struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field
592
593struct ib_field {
594 size_t struct_offset_bytes;
595 size_t struct_size_bytes;
596 int offset_bits;
597 int size_bits;
598};
599
600static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
601{
602 return pci_get_drvdata(pdev);
603}
604
605extern struct dentry *mlx5_debugfs_root;
606
607static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
608{
609 return ioread32be(&dev->iseg->fw_rev) & 0xffff;
610}
611
612static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
613{
614 return ioread32be(&dev->iseg->fw_rev) >> 16;
615}
616
617static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
618{
619 return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
620}
621
622static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
623{
624 return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
625}
626
627static inline void *mlx5_vzalloc(unsigned long size)
628{
629 void *rtn;
630
631 rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
632 if (!rtn)
633 rtn = vzalloc(size);
634 return rtn;
635}
636
637static inline void mlx5_vfree(const void *addr)
638{
639 if (addr && is_vmalloc_addr(addr))
640 vfree(addr);
641 else
642 kfree(addr);
643}
644
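mlx5_vzalloc()/mlx5_vfree() above try kzalloc() first and quietly fall back to vzalloc(), so large variable-length command mailboxes survive memory fragmentation. A hedged sketch of a typical caller (demo_alloc_srq_in and npas are hypothetical; struct mlx5_create_srq_mbox_in and its trailing pas[] array are declared in device.h):

static struct mlx5_create_srq_mbox_in *demo_alloc_srq_in(int npas)
{
	/* header plus npas page addresses; may come from kmalloc or vmalloc */
	return mlx5_vzalloc(sizeof(struct mlx5_create_srq_mbox_in) +
			    npas * sizeof(__be64));
}

/* whatever mlx5_vzalloc() handed out must go back through mlx5_vfree() */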
645int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev);
646void mlx5_dev_cleanup(struct mlx5_core_dev *dev);
647int mlx5_cmd_init(struct mlx5_core_dev *dev);
648void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
649void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
650void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
651int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
652int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
653 int out_size);
654int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
655int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
656int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
657int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
658void mlx5_health_cleanup(void);
659void __init mlx5_health_init(void);
660void mlx5_start_health_poll(struct mlx5_core_dev *dev);
661void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
662int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
663 struct mlx5_buf *buf);
664void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
665struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
666 gfp_t flags, int npages);
667void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
668 struct mlx5_cmd_mailbox *head);
669int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
670 struct mlx5_create_srq_mbox_in *in, int inlen);
671int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
672int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
673 struct mlx5_query_srq_mbox_out *out);
674int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
675 u16 lwm, int is_srq);
676int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
677 struct mlx5_create_mkey_mbox_in *in, int inlen);
678int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr);
679int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
680 struct mlx5_query_mkey_mbox_out *out, int outlen);
681int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
682 u32 *mkey);
683int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
684int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
685int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb,
686 u16 opmod, int port);
687void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
688void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
689int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
690void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
691void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
692 s16 npages);
693int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev);
694int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
695void mlx5_register_debugfs(void);
696void mlx5_unregister_debugfs(void);
697int mlx5_eq_init(struct mlx5_core_dev *dev);
698void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
699void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
700void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
701void mlx5_qp_event(struct mlx5_core_dev *dev, u32 qpn, int event_type);
702void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
703struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
704void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector);
705void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
706int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
707 int nent, u64 mask, const char *name, struct mlx5_uar *uar);
708int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
709int mlx5_start_eqs(struct mlx5_core_dev *dev);
710int mlx5_stop_eqs(struct mlx5_core_dev *dev);
711int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
712int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
713
714int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
715void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
716int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
717 int size_in, void *data_out, int size_out,
718 u16 reg_num, int arg, int write);
719int mlx5_set_port_caps(struct mlx5_core_dev *dev, int port_num, u32 caps);
720
721int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
722void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
723int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
724 struct mlx5_query_eq_mbox_out *out, int outlen);
725int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
726void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
727int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
728void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
729int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
730void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
731
732typedef void (*health_handler_t)(struct pci_dev *pdev, void *buf, int size);
733int mlx5_register_health_report_handler(health_handler_t handler);
734void mlx5_unregister_health_report_handler(void);
735const char *mlx5_command_str(int command);
736int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
737void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
738
739static inline u32 mlx5_mkey_to_idx(u32 mkey)
740{
741 return mkey >> 8;
742}
743
744static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
745{
746 return mkey_idx << 8;
747}
748
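The two helpers above imply the mkey layout: the 24-bit index lives in bits 31..8 and the low byte is a variable key chosen by the driver (see mkey_key, guarded by mkey_lock, in struct mlx5_priv). A hedged sketch of the composition (demo_build_mkey is hypothetical):

static inline u32 demo_build_mkey(u32 idx, u8 variable_key)
{
	/* index in bits 31..8, driver-chosen key byte in bits 7..0 */
	return mlx5_idx_to_mkey(idx) | variable_key;
}

/* mlx5_mkey_to_idx(demo_build_mkey(idx, k)) recovers idx for any 24-bit index */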
749enum {
750 MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
751 MLX5_PROF_MASK_CMDIF_CSUM = (u64)1 << 1,
752 MLX5_PROF_MASK_MR_CACHE = (u64)1 << 2,
753};
754
755enum {
756 MAX_MR_CACHE_ENTRIES = 16,
757};
758
759struct mlx5_profile {
760 u64 mask;
761 u32 log_max_qp;
762 int cmdif_csum;
763 struct {
764 int size;
765 int limit;
766 } mr_cache[MAX_MR_CACHE_ENTRIES];
767};
768
769#endif /* MLX5_DRIVER_H */
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
new file mode 100644
index 000000000000..d9e3eacb3a7f
--- /dev/null
+++ b/include/linux/mlx5/qp.h
@@ -0,0 +1,467 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef MLX5_QP_H
34#define MLX5_QP_H
35
36#include <linux/mlx5/device.h>
37#include <linux/mlx5/driver.h>
38
39#define MLX5_INVALID_LKEY 0x100
40
41enum mlx5_qp_optpar {
42 MLX5_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0,
43 MLX5_QP_OPTPAR_RRE = 1 << 1,
44 MLX5_QP_OPTPAR_RAE = 1 << 2,
45 MLX5_QP_OPTPAR_RWE = 1 << 3,
46 MLX5_QP_OPTPAR_PKEY_INDEX = 1 << 4,
47 MLX5_QP_OPTPAR_Q_KEY = 1 << 5,
48 MLX5_QP_OPTPAR_RNR_TIMEOUT = 1 << 6,
49 MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
50 MLX5_QP_OPTPAR_SRA_MAX = 1 << 8,
51 MLX5_QP_OPTPAR_RRA_MAX = 1 << 9,
52 MLX5_QP_OPTPAR_PM_STATE = 1 << 10,
53 MLX5_QP_OPTPAR_RETRY_COUNT = 1 << 12,
54 MLX5_QP_OPTPAR_RNR_RETRY = 1 << 13,
55 MLX5_QP_OPTPAR_ACK_TIMEOUT = 1 << 14,
56 MLX5_QP_OPTPAR_PRI_PORT = 1 << 16,
57 MLX5_QP_OPTPAR_SRQN = 1 << 18,
58 MLX5_QP_OPTPAR_CQN_RCV = 1 << 19,
59 MLX5_QP_OPTPAR_DC_HS = 1 << 20,
60 MLX5_QP_OPTPAR_DC_KEY = 1 << 21,
61};
62
63enum mlx5_qp_state {
64 MLX5_QP_STATE_RST = 0,
65 MLX5_QP_STATE_INIT = 1,
66 MLX5_QP_STATE_RTR = 2,
67 MLX5_QP_STATE_RTS = 3,
68 MLX5_QP_STATE_SQER = 4,
69 MLX5_QP_STATE_SQD = 5,
70 MLX5_QP_STATE_ERR = 6,
71 MLX5_QP_STATE_SQ_DRAINING = 7,
72 MLX5_QP_STATE_SUSPENDED = 9,
73 MLX5_QP_NUM_STATE
74};
75
76enum {
77 MLX5_QP_ST_RC = 0x0,
78 MLX5_QP_ST_UC = 0x1,
79 MLX5_QP_ST_UD = 0x2,
80 MLX5_QP_ST_XRC = 0x3,
81 MLX5_QP_ST_MLX = 0x4,
82 MLX5_QP_ST_DCI = 0x5,
83 MLX5_QP_ST_DCT = 0x6,
84 MLX5_QP_ST_QP0 = 0x7,
85 MLX5_QP_ST_QP1 = 0x8,
86 MLX5_QP_ST_RAW_ETHERTYPE = 0x9,
87 MLX5_QP_ST_RAW_IPV6 = 0xa,
88 MLX5_QP_ST_SNIFFER = 0xb,
89 MLX5_QP_ST_SYNC_UMR = 0xe,
90 MLX5_QP_ST_PTP_1588 = 0xd,
91 MLX5_QP_ST_REG_UMR = 0xc,
92 MLX5_QP_ST_MAX
93};
94
95enum {
96 MLX5_QP_PM_MIGRATED = 0x3,
97 MLX5_QP_PM_ARMED = 0x0,
98 MLX5_QP_PM_REARM = 0x1
99};
100
101enum {
102 MLX5_NON_ZERO_RQ = 0 << 24,
103 MLX5_SRQ_RQ = 1 << 24,
104 MLX5_CRQ_RQ = 2 << 24,
105 MLX5_ZERO_LEN_RQ = 3 << 24
106};
107
108enum {
109 /* params1 */
110 MLX5_QP_BIT_SRE = 1 << 15,
111 MLX5_QP_BIT_SWE = 1 << 14,
112 MLX5_QP_BIT_SAE = 1 << 13,
113 /* params2 */
114 MLX5_QP_BIT_RRE = 1 << 15,
115 MLX5_QP_BIT_RWE = 1 << 14,
116 MLX5_QP_BIT_RAE = 1 << 13,
117 MLX5_QP_BIT_RIC = 1 << 4,
118};
119
120enum {
121 MLX5_WQE_CTRL_CQ_UPDATE = 2 << 2,
122 MLX5_WQE_CTRL_SOLICITED = 1 << 1,
123};
124
125enum {
126 MLX5_SEND_WQE_BB = 64,
127};
128
129enum {
130 MLX5_WQE_FMR_PERM_LOCAL_READ = 1 << 27,
131 MLX5_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28,
132 MLX5_WQE_FMR_PERM_REMOTE_READ = 1 << 29,
133 MLX5_WQE_FMR_PERM_REMOTE_WRITE = 1 << 30,
134 MLX5_WQE_FMR_PERM_ATOMIC = 1 << 31
135};
136
137enum {
138 MLX5_FENCE_MODE_NONE = 0 << 5,
139 MLX5_FENCE_MODE_INITIATOR_SMALL = 1 << 5,
140 MLX5_FENCE_MODE_STRONG_ORDERING = 3 << 5,
141 MLX5_FENCE_MODE_SMALL_AND_FENCE = 4 << 5,
142};
143
144enum {
145 MLX5_QP_LAT_SENSITIVE = 1 << 28,
146 MLX5_QP_ENABLE_SIG = 1 << 31,
147};
148
149enum {
150 MLX5_RCV_DBR = 0,
151 MLX5_SND_DBR = 1,
152};
153
154struct mlx5_wqe_fmr_seg {
155 __be32 flags;
156 __be32 mem_key;
157 __be64 buf_list;
158 __be64 start_addr;
159 __be64 reg_len;
160 __be32 offset;
161 __be32 page_size;
162 u32 reserved[2];
163};
164
165struct mlx5_wqe_ctrl_seg {
166 __be32 opmod_idx_opcode;
167 __be32 qpn_ds;
168 u8 signature;
169 u8 rsvd[2];
170 u8 fm_ce_se;
171 __be32 imm;
172};
173
174struct mlx5_wqe_xrc_seg {
175 __be32 xrc_srqn;
176 u8 rsvd[12];
177};
178
179struct mlx5_wqe_masked_atomic_seg {
180 __be64 swap_add;
181 __be64 compare;
182 __be64 swap_add_mask;
183 __be64 compare_mask;
184};
185
186struct mlx5_av {
187 union {
188 struct {
189 __be32 qkey;
190 __be32 reserved;
191 } qkey;
192 __be64 dc_key;
193 } key;
194 __be32 dqp_dct;
195 u8 stat_rate_sl;
196 u8 fl_mlid;
197 __be16 rlid;
198 u8 reserved0[10];
199 u8 tclass;
200 u8 hop_limit;
201 __be32 grh_gid_fl;
202 u8 rgid[16];
203};
204
205struct mlx5_wqe_datagram_seg {
206 struct mlx5_av av;
207};
208
209struct mlx5_wqe_raddr_seg {
210 __be64 raddr;
211 __be32 rkey;
212 u32 reserved;
213};
214
215struct mlx5_wqe_atomic_seg {
216 __be64 swap_add;
217 __be64 compare;
218};
219
220struct mlx5_wqe_data_seg {
221 __be32 byte_count;
222 __be32 lkey;
223 __be64 addr;
224};
225
226struct mlx5_wqe_umr_ctrl_seg {
227 u8 flags;
228 u8 rsvd0[3];
229 __be16 klm_octowords;
230 __be16 bsf_octowords;
231 __be64 mkey_mask;
232 u8 rsvd1[32];
233};
234
235struct mlx5_seg_set_psv {
236 __be32 psv_num;
237 __be16 syndrome;
238 __be16 status;
239 __be32 transient_sig;
240 __be32 ref_tag;
241};
242
243struct mlx5_seg_get_psv {
244 u8 rsvd[19];
245 u8 num_psv;
246 __be32 l_key;
247 __be64 va;
248 __be32 psv_index[4];
249};
250
251struct mlx5_seg_check_psv {
252 u8 rsvd0[2];
253 __be16 err_coalescing_op;
254 u8 rsvd1[2];
255 __be16 xport_err_op;
256 u8 rsvd2[2];
257 __be16 xport_err_mask;
258 u8 rsvd3[7];
259 u8 num_psv;
260 __be32 l_key;
261 __be64 va;
262 __be32 psv_index[4];
263};
264
265struct mlx5_rwqe_sig {
266 u8 rsvd0[4];
267 u8 signature;
268 u8 rsvd1[11];
269};
270
271struct mlx5_wqe_signature_seg {
272 u8 rsvd0[4];
273 u8 signature;
274 u8 rsvd1[11];
275};
276
277struct mlx5_wqe_inline_seg {
278 __be32 byte_count;
279};
280
281struct mlx5_core_qp {
282 void (*event) (struct mlx5_core_qp *, int);
283 int qpn;
284 atomic_t refcount;
285 struct completion free;
286 struct mlx5_rsc_debug *dbg;
287 int pid;
288};
289
290struct mlx5_qp_path {
291 u8 fl;
292 u8 rsvd3;
293 u8 free_ar;
294 u8 pkey_index;
295 u8 rsvd0;
296 u8 grh_mlid;
297 __be16 rlid;
298 u8 ackto_lt;
299 u8 mgid_index;
300 u8 static_rate;
301 u8 hop_limit;
302 __be32 tclass_flowlabel;
303 u8 rgid[16];
304 u8 rsvd1[4];
305 u8 sl;
306 u8 port;
307 u8 rsvd2[6];
308};
309
310struct mlx5_qp_context {
311 __be32 flags;
312 __be32 flags_pd;
313 u8 mtu_msgmax;
314 u8 rq_size_stride;
315 __be16 sq_crq_size;
316 __be32 qp_counter_set_usr_page;
317 __be32 wire_qpn;
318 __be32 log_pg_sz_remote_qpn;
319 struct mlx5_qp_path pri_path;
320 struct mlx5_qp_path alt_path;
321 __be32 params1;
322 u8 reserved2[4];
323 __be32 next_send_psn;
324 __be32 cqn_send;
325 u8 reserved3[8];
326 __be32 last_acked_psn;
327 __be32 ssn;
328 __be32 params2;
329 __be32 rnr_nextrecvpsn;
330 __be32 xrcd;
331 __be32 cqn_recv;
332 __be64 db_rec_addr;
333 __be32 qkey;
334 __be32 rq_type_srqn;
335 __be32 rmsn;
336 __be16 hw_sq_wqe_counter;
337 __be16 sw_sq_wqe_counter;
338 __be16 hw_rcyclic_byte_counter;
339 __be16 hw_rq_counter;
340 __be16 sw_rcyclic_byte_counter;
341 __be16 sw_rq_counter;
342 u8 rsvd0[5];
343 u8 cgs;
344 u8 cs_req;
345 u8 cs_res;
346 __be64 dc_access_key;
347 u8 rsvd1[24];
348};
349
350struct mlx5_create_qp_mbox_in {
351 struct mlx5_inbox_hdr hdr;
352 __be32 input_qpn;
353 u8 rsvd0[4];
354 __be32 opt_param_mask;
355 u8 rsvd1[4];
356 struct mlx5_qp_context ctx;
357 u8 rsvd3[16];
358 __be64 pas[0];
359};
360
361struct mlx5_create_qp_mbox_out {
362 struct mlx5_outbox_hdr hdr;
363 __be32 qpn;
364 u8 rsvd0[4];
365};
366
367struct mlx5_destroy_qp_mbox_in {
368 struct mlx5_inbox_hdr hdr;
369 __be32 qpn;
370 u8 rsvd0[4];
371};
372
373struct mlx5_destroy_qp_mbox_out {
374 struct mlx5_outbox_hdr hdr;
375 u8 rsvd0[8];
376};
377
378struct mlx5_modify_qp_mbox_in {
379 struct mlx5_inbox_hdr hdr;
380 __be32 qpn;
381 u8 rsvd1[4];
382 __be32 optparam;
383 u8 rsvd0[4];
384 struct mlx5_qp_context ctx;
385};
386
387struct mlx5_modify_qp_mbox_out {
388 struct mlx5_outbox_hdr hdr;
389 u8 rsvd0[8];
390};
391
392struct mlx5_query_qp_mbox_in {
393 struct mlx5_inbox_hdr hdr;
394 __be32 qpn;
395 u8 rsvd[4];
396};
397
398struct mlx5_query_qp_mbox_out {
399 struct mlx5_outbox_hdr hdr;
400 u8 rsvd1[8];
401 __be32 optparam;
402 u8 rsvd0[4];
403 struct mlx5_qp_context ctx;
404 u8 rsvd2[16];
405 __be64 pas[0];
406};
407
408struct mlx5_conf_sqp_mbox_in {
409 struct mlx5_inbox_hdr hdr;
410 __be32 qpn;
411 u8 rsvd[3];
412 u8 type;
413};
414
415struct mlx5_conf_sqp_mbox_out {
416 struct mlx5_outbox_hdr hdr;
417 u8 rsvd[8];
418};
419
420struct mlx5_alloc_xrcd_mbox_in {
421 struct mlx5_inbox_hdr hdr;
422 u8 rsvd[8];
423};
424
425struct mlx5_alloc_xrcd_mbox_out {
426 struct mlx5_outbox_hdr hdr;
427 __be32 xrcdn;
428 u8 rsvd[4];
429};
430
431struct mlx5_dealloc_xrcd_mbox_in {
432 struct mlx5_inbox_hdr hdr;
433 __be32 xrcdn;
434 u8 rsvd[4];
435};
436
437struct mlx5_dealloc_xrcd_mbox_out {
438 struct mlx5_outbox_hdr hdr;
439 u8 rsvd[8];
440};
441
442static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn)
443{
444 return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
445}
446
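__mlx5_qp_lookup() above is only the bare radix-tree lookup; callers are expected to add locking and reference counting themselves. A hedged sketch of that pattern, using the qp_table lock and the refcount/free fields declared earlier (demo_get_qp and demo_put_qp are hypothetical names):

static struct mlx5_core_qp *demo_get_qp(struct mlx5_core_dev *dev, u32 qpn)
{
	struct mlx5_core_qp *qp;
	unsigned long flags;

	spin_lock_irqsave(&dev->priv.qp_table.lock, flags);	/* protects the radix tree */
	qp = __mlx5_qp_lookup(dev, qpn);
	if (qp)
		atomic_inc(&qp->refcount);	/* keep the QP alive past the unlock */
	spin_unlock_irqrestore(&dev->priv.qp_table.lock, flags);

	return qp;
}

static void demo_put_qp(struct mlx5_core_qp *qp)
{
	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);		/* the destroy path waits on this */
}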
447int mlx5_core_create_qp(struct mlx5_core_dev *dev,
448 struct mlx5_core_qp *qp,
449 struct mlx5_create_qp_mbox_in *in,
450 int inlen);
451int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state,
452 enum mlx5_qp_state new_state,
453 struct mlx5_modify_qp_mbox_in *in, int sqd_event,
454 struct mlx5_core_qp *qp);
455int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
456 struct mlx5_core_qp *qp);
457int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
458 struct mlx5_query_qp_mbox_out *out, int outlen);
459
460int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
461int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
462void mlx5_init_qp_table(struct mlx5_core_dev *dev);
463void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev);
464int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
465void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
466
467#endif /* MLX5_QP_H */
diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h
new file mode 100644
index 000000000000..e1a363a33663
--- /dev/null
+++ b/include/linux/mlx5/srq.h
@@ -0,0 +1,41 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef MLX5_SRQ_H
34#define MLX5_SRQ_H
35
36#include <linux/mlx5/driver.h>
37
38void mlx5_init_srq_table(struct mlx5_core_dev *dev);
39void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev);
40
41#endif /* MLX5_SRQ_H */