author     Linus Torvalds <torvalds@linux-foundation.org>  2013-07-13 15:57:21 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-07-13 15:57:21 -0400
commit     c55244137306b626bc64023fd7160985443205a7 (patch)
tree       459acfb5c9b41e3e1616fb36aafda68a07ddbf54 /include
parent     858655116bfc722837e3aec0909b8e9d08f96996 (diff)
parent     e04abfa2436e3ab016b23eb1afb2c5578b8dc2cf (diff)
Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
Pull InfiniBand/RDMA changes from Roland Dreier:
 - AF_IB (native IB addressing) for CMA from Sean Hefty
 - new mlx5 driver for Mellanox Connect-IB adapters (including post merge request fixes)
 - SRP fixes from Bart Van Assche (including fix to first merge request)
 - qib HW driver updates
 - resurrection of ocrdma HW driver development
 - uverbs conversion to create fds with O_CLOEXEC set
 - other small changes and fixes

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (66 commits)
  mlx5: Return -EFAULT instead of -EPERM
  IB/qib: Log all SDMA errors unconditionally
  IB/qib: Fix module-level leak
  mlx5_core: Adjust hca_cap.uar_page_sz to conform to Connect-IB spec
  IB/srp: Let srp_abort() return FAST_IO_FAIL if TL offline
  IB/uverbs: Use get_unused_fd_flags(O_CLOEXEC) instead of get_unused_fd()
  mlx5_core: Fixes for sparse warnings
  IB/mlx5: Make profile[] static in main.c
  mlx5: Fix parameter type of health_handler_t
  mlx5: Add driver for Mellanox Connect-IB adapters
  IB/core: Add reserved values to enums for low-level driver use
  IB/srp: Bump driver version and release date
  IB/srp: Make HCA completion vector configurable
  IB/srp: Maintain a single connection per I_T nexus
  IB/srp: Fail I/O fast if target offline
  IB/srp: Skip host settle delay
  IB/srp: Avoid skipping srp_reset_host() after a transport error
  IB/srp: Fix remove_one crash due to resource exhaustion
  IB/qib: New transmitter tunning settings for Dell 1.1 backplane
  IB/core: Fix error return code in add_port()
  ...
Diffstat (limited to 'include')
-rw-r--r--  include/linux/mlx5/cmd.h            51
-rw-r--r--  include/linux/mlx5/cq.h            165
-rw-r--r--  include/linux/mlx5/device.h        893
-rw-r--r--  include/linux/mlx5/doorbell.h       79
-rw-r--r--  include/linux/mlx5/driver.h        769
-rw-r--r--  include/linux/mlx5/qp.h            467
-rw-r--r--  include/linux/mlx5/srq.h            41
-rw-r--r--  include/linux/socket.h               2
-rw-r--r--  include/rdma/ib.h                   89
-rw-r--r--  include/rdma/ib_addr.h               6
-rw-r--r--  include/rdma/ib_sa.h                 7
-rw-r--r--  include/rdma/ib_verbs.h             35
-rw-r--r--  include/rdma/rdma_cm.h              13
-rw-r--r--  include/uapi/rdma/rdma_user_cm.h    73
14 files changed, 2673 insertions, 17 deletions
diff --git a/include/linux/mlx5/cmd.h b/include/linux/mlx5/cmd.h
new file mode 100644
index 000000000000..2826a4b6071e
--- /dev/null
+++ b/include/linux/mlx5/cmd.h
@@ -0,0 +1,51 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef MLX5_CMD_H
34#define MLX5_CMD_H
35
36#include <linux/types.h>
37
38struct manage_pages_layout {
39 u64 ptr;
40 u32 reserved;
41 u16 num_entries;
42 u16 func_id;
43};
44
45
46struct mlx5_cmd_alloc_uar_imm_out {
47 u32 rsvd[3];
48 u32 uarn;
49};
50
51#endif /* MLX5_CMD_H */
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
new file mode 100644
index 000000000000..3db67f73d96d
--- /dev/null
+++ b/include/linux/mlx5/cq.h
@@ -0,0 +1,165 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef MLX5_CORE_CQ_H
34#define MLX5_CORE_CQ_H
35
36#include <rdma/ib_verbs.h>
37#include <linux/mlx5/driver.h>
38
39
40struct mlx5_core_cq {
41 u32 cqn;
42 int cqe_sz;
43 __be32 *set_ci_db;
44 __be32 *arm_db;
45 atomic_t refcount;
46 struct completion free;
47 unsigned vector;
48 int irqn;
49 void (*comp) (struct mlx5_core_cq *);
50 void (*event) (struct mlx5_core_cq *, enum mlx5_event);
51 struct mlx5_uar *uar;
52 u32 cons_index;
53 unsigned arm_sn;
54 struct mlx5_rsc_debug *dbg;
55 int pid;
56};
57
58
59enum {
60 MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR = 0x01,
61 MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR = 0x02,
62 MLX5_CQE_SYNDROME_LOCAL_PROT_ERR = 0x04,
63 MLX5_CQE_SYNDROME_WR_FLUSH_ERR = 0x05,
64 MLX5_CQE_SYNDROME_MW_BIND_ERR = 0x06,
65 MLX5_CQE_SYNDROME_BAD_RESP_ERR = 0x10,
66 MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR = 0x11,
67 MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR = 0x12,
68 MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR = 0x13,
69 MLX5_CQE_SYNDROME_REMOTE_OP_ERR = 0x14,
70 MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR = 0x15,
71 MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR = 0x16,
72 MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR = 0x22,
73};
74
75enum {
76 MLX5_CQE_OWNER_MASK = 1,
77 MLX5_CQE_REQ = 0,
78 MLX5_CQE_RESP_WR_IMM = 1,
79 MLX5_CQE_RESP_SEND = 2,
80 MLX5_CQE_RESP_SEND_IMM = 3,
81 MLX5_CQE_RESP_SEND_INV = 4,
82 MLX5_CQE_RESIZE_CQ = 0xff, /* TBD */
83 MLX5_CQE_REQ_ERR = 13,
84 MLX5_CQE_RESP_ERR = 14,
85};
86
87enum {
88 MLX5_CQ_MODIFY_RESEIZE = 0,
89 MLX5_CQ_MODIFY_MODER = 1,
90 MLX5_CQ_MODIFY_MAPPING = 2,
91};
92
93struct mlx5_cq_modify_params {
94 int type;
95 union {
96 struct {
97 u32 page_offset;
98 u8 log_cq_size;
99 } resize;
100
101 struct {
102 } moder;
103
104 struct {
105 } mapping;
106 } params;
107};
108
109enum {
110 CQE_SIZE_64 = 0,
111 CQE_SIZE_128 = 1,
112};
113
114static inline int cqe_sz_to_mlx_sz(u8 size)
115{
116 return size == 64 ? CQE_SIZE_64 : CQE_SIZE_128;
117}
118
119static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq)
120{
121 *cq->set_ci_db = cpu_to_be32(cq->cons_index & 0xffffff);
122}
123
124enum {
125 MLX5_CQ_DB_REQ_NOT_SOL = 1 << 24,
126 MLX5_CQ_DB_REQ_NOT = 0 << 24
127};
128
129static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
130 void __iomem *uar_page,
131 spinlock_t *doorbell_lock)
132{
133 __be32 doorbell[2];
134 u32 sn;
135 u32 ci;
136
137 sn = cq->arm_sn & 3;
138 ci = cq->cons_index & 0xffffff;
139
140 *cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci);
141
142 /* Make sure that the doorbell record in host memory is
143 * written before ringing the doorbell via PCI MMIO.
144 */
145 wmb();
146
147 doorbell[0] = cpu_to_be32(sn << 28 | cmd | ci);
148 doorbell[1] = cpu_to_be32(cq->cqn);
149
150 mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, doorbell_lock);
151}
152
153int mlx5_init_cq_table(struct mlx5_core_dev *dev);
154void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev);
155int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
156 struct mlx5_create_cq_mbox_in *in, int inlen);
157int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
158int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
159 struct mlx5_query_cq_mbox_out *out);
160int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
161 int type, struct mlx5_cq_modify_params *params);
162int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
163void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
164
165#endif /* MLX5_CORE_CQ_H */
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
new file mode 100644
index 000000000000..8de8d8f22384
--- /dev/null
+++ b/include/linux/mlx5/device.h
@@ -0,0 +1,893 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef MLX5_DEVICE_H
34#define MLX5_DEVICE_H
35
36#include <linux/types.h>
37#include <rdma/ib_verbs.h>
38
39#if defined(__LITTLE_ENDIAN)
40#define MLX5_SET_HOST_ENDIANNESS 0
41#elif defined(__BIG_ENDIAN)
42#define MLX5_SET_HOST_ENDIANNESS 0x80
43#else
44#error Host endianness not defined
45#endif
46
47enum {
48 MLX5_MAX_COMMANDS = 32,
49 MLX5_CMD_DATA_BLOCK_SIZE = 512,
50 MLX5_PCI_CMD_XPORT = 7,
51};
52
53enum {
54 MLX5_EXTENDED_UD_AV = 0x80000000,
55};
56
57enum {
58 MLX5_CQ_STATE_ARMED = 9,
59 MLX5_CQ_STATE_ALWAYS_ARMED = 0xb,
60 MLX5_CQ_STATE_FIRED = 0xa,
61};
62
63enum {
64 MLX5_STAT_RATE_OFFSET = 5,
65};
66
67enum {
68 MLX5_INLINE_SEG = 0x80000000,
69};
70
71enum {
72 MLX5_PERM_LOCAL_READ = 1 << 2,
73 MLX5_PERM_LOCAL_WRITE = 1 << 3,
74 MLX5_PERM_REMOTE_READ = 1 << 4,
75 MLX5_PERM_REMOTE_WRITE = 1 << 5,
76 MLX5_PERM_ATOMIC = 1 << 6,
77 MLX5_PERM_UMR_EN = 1 << 7,
78};
79
80enum {
81 MLX5_PCIE_CTRL_SMALL_FENCE = 1 << 0,
82 MLX5_PCIE_CTRL_RELAXED_ORDERING = 1 << 2,
83 MLX5_PCIE_CTRL_NO_SNOOP = 1 << 3,
84 MLX5_PCIE_CTRL_TLP_PROCE_EN = 1 << 6,
85 MLX5_PCIE_CTRL_TPH_MASK = 3 << 4,
86};
87
88enum {
89 MLX5_ACCESS_MODE_PA = 0,
90 MLX5_ACCESS_MODE_MTT = 1,
91 MLX5_ACCESS_MODE_KLM = 2
92};
93
94enum {
95 MLX5_MKEY_REMOTE_INVAL = 1 << 24,
96 MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
97 MLX5_MKEY_BSF_EN = 1 << 30,
98 MLX5_MKEY_LEN64 = 1 << 31,
99};
100
101enum {
102 MLX5_EN_RD = (u64)1,
103 MLX5_EN_WR = (u64)2
104};
105
106enum {
107 MLX5_BF_REGS_PER_PAGE = 4,
108 MLX5_MAX_UAR_PAGES = 1 << 8,
109 MLX5_MAX_UUARS = MLX5_MAX_UAR_PAGES * MLX5_BF_REGS_PER_PAGE,
110};
111
112enum {
113 MLX5_MKEY_MASK_LEN = 1ull << 0,
114 MLX5_MKEY_MASK_PAGE_SIZE = 1ull << 1,
115 MLX5_MKEY_MASK_START_ADDR = 1ull << 6,
116 MLX5_MKEY_MASK_PD = 1ull << 7,
117 MLX5_MKEY_MASK_EN_RINVAL = 1ull << 8,
118 MLX5_MKEY_MASK_BSF_EN = 1ull << 12,
119 MLX5_MKEY_MASK_KEY = 1ull << 13,
120 MLX5_MKEY_MASK_QPN = 1ull << 14,
121 MLX5_MKEY_MASK_LR = 1ull << 17,
122 MLX5_MKEY_MASK_LW = 1ull << 18,
123 MLX5_MKEY_MASK_RR = 1ull << 19,
124 MLX5_MKEY_MASK_RW = 1ull << 20,
125 MLX5_MKEY_MASK_A = 1ull << 21,
126 MLX5_MKEY_MASK_SMALL_FENCE = 1ull << 23,
127 MLX5_MKEY_MASK_FREE = 1ull << 29,
128};
129
130enum mlx5_event {
131 MLX5_EVENT_TYPE_COMP = 0x0,
132
133 MLX5_EVENT_TYPE_PATH_MIG = 0x01,
134 MLX5_EVENT_TYPE_COMM_EST = 0x02,
135 MLX5_EVENT_TYPE_SQ_DRAINED = 0x03,
136 MLX5_EVENT_TYPE_SRQ_LAST_WQE = 0x13,
137 MLX5_EVENT_TYPE_SRQ_RQ_LIMIT = 0x14,
138
139 MLX5_EVENT_TYPE_CQ_ERROR = 0x04,
140 MLX5_EVENT_TYPE_WQ_CATAS_ERROR = 0x05,
141 MLX5_EVENT_TYPE_PATH_MIG_FAILED = 0x07,
142 MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
143 MLX5_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11,
144 MLX5_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12,
145
146 MLX5_EVENT_TYPE_INTERNAL_ERROR = 0x08,
147 MLX5_EVENT_TYPE_PORT_CHANGE = 0x09,
148 MLX5_EVENT_TYPE_GPIO_EVENT = 0x15,
149 MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19,
150
151 MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a,
152 MLX5_EVENT_TYPE_STALL_EVENT = 0x1b,
153
154 MLX5_EVENT_TYPE_CMD = 0x0a,
155 MLX5_EVENT_TYPE_PAGE_REQUEST = 0xb,
156};
157
158enum {
159 MLX5_PORT_CHANGE_SUBTYPE_DOWN = 1,
160 MLX5_PORT_CHANGE_SUBTYPE_ACTIVE = 4,
161 MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED = 5,
162 MLX5_PORT_CHANGE_SUBTYPE_LID = 6,
163 MLX5_PORT_CHANGE_SUBTYPE_PKEY = 7,
164 MLX5_PORT_CHANGE_SUBTYPE_GUID = 8,
165 MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG = 9,
166};
167
168enum {
169 MLX5_DEV_CAP_FLAG_RC = 1LL << 0,
170 MLX5_DEV_CAP_FLAG_UC = 1LL << 1,
171 MLX5_DEV_CAP_FLAG_UD = 1LL << 2,
172 MLX5_DEV_CAP_FLAG_XRC = 1LL << 3,
173 MLX5_DEV_CAP_FLAG_SRQ = 1LL << 6,
174 MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8,
175 MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9,
176 MLX5_DEV_CAP_FLAG_APM = 1LL << 17,
177 MLX5_DEV_CAP_FLAG_ATOMIC = 1LL << 18,
178 MLX5_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24,
179 MLX5_DEV_CAP_FLAG_RESIZE_SRQ = 1LL << 32,
180 MLX5_DEV_CAP_FLAG_REMOTE_FENCE = 1LL << 38,
181 MLX5_DEV_CAP_FLAG_TLP_HINTS = 1LL << 39,
182 MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40,
183 MLX5_DEV_CAP_FLAG_DCT = 1LL << 41,
184 MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 1LL << 46,
185};
186
187enum {
188 MLX5_OPCODE_NOP = 0x00,
189 MLX5_OPCODE_SEND_INVAL = 0x01,
190 MLX5_OPCODE_RDMA_WRITE = 0x08,
191 MLX5_OPCODE_RDMA_WRITE_IMM = 0x09,
192 MLX5_OPCODE_SEND = 0x0a,
193 MLX5_OPCODE_SEND_IMM = 0x0b,
194 MLX5_OPCODE_RDMA_READ = 0x10,
195 MLX5_OPCODE_ATOMIC_CS = 0x11,
196 MLX5_OPCODE_ATOMIC_FA = 0x12,
197 MLX5_OPCODE_ATOMIC_MASKED_CS = 0x14,
198 MLX5_OPCODE_ATOMIC_MASKED_FA = 0x15,
199 MLX5_OPCODE_BIND_MW = 0x18,
200 MLX5_OPCODE_CONFIG_CMD = 0x1f,
201
202 MLX5_RECV_OPCODE_RDMA_WRITE_IMM = 0x00,
203 MLX5_RECV_OPCODE_SEND = 0x01,
204 MLX5_RECV_OPCODE_SEND_IMM = 0x02,
205 MLX5_RECV_OPCODE_SEND_INVAL = 0x03,
206
207 MLX5_CQE_OPCODE_ERROR = 0x1e,
208 MLX5_CQE_OPCODE_RESIZE = 0x16,
209
210 MLX5_OPCODE_SET_PSV = 0x20,
211 MLX5_OPCODE_GET_PSV = 0x21,
212 MLX5_OPCODE_CHECK_PSV = 0x22,
213 MLX5_OPCODE_RGET_PSV = 0x26,
214 MLX5_OPCODE_RCHECK_PSV = 0x27,
215
216 MLX5_OPCODE_UMR = 0x25,
217
218};
219
220enum {
221 MLX5_SET_PORT_RESET_QKEY = 0,
222 MLX5_SET_PORT_GUID0 = 16,
223 MLX5_SET_PORT_NODE_GUID = 17,
224 MLX5_SET_PORT_SYS_GUID = 18,
225 MLX5_SET_PORT_GID_TABLE = 19,
226 MLX5_SET_PORT_PKEY_TABLE = 20,
227};
228
229enum {
230 MLX5_MAX_PAGE_SHIFT = 31
231};
232
233struct mlx5_inbox_hdr {
234 __be16 opcode;
235 u8 rsvd[4];
236 __be16 opmod;
237};
238
239struct mlx5_outbox_hdr {
240 u8 status;
241 u8 rsvd[3];
242 __be32 syndrome;
243};
244
245struct mlx5_cmd_query_adapter_mbox_in {
246 struct mlx5_inbox_hdr hdr;
247 u8 rsvd[8];
248};
249
250struct mlx5_cmd_query_adapter_mbox_out {
251 struct mlx5_outbox_hdr hdr;
252 u8 rsvd0[24];
253 u8 intapin;
254 u8 rsvd1[13];
255 __be16 vsd_vendor_id;
256 u8 vsd[208];
257 u8 vsd_psid[16];
258};
259
260struct mlx5_hca_cap {
261 u8 rsvd1[16];
262 u8 log_max_srq_sz;
263 u8 log_max_qp_sz;
264 u8 rsvd2;
265 u8 log_max_qp;
266 u8 log_max_strq_sz;
267 u8 log_max_srqs;
268 u8 rsvd4[2];
269 u8 rsvd5;
270 u8 log_max_cq_sz;
271 u8 rsvd6;
272 u8 log_max_cq;
273 u8 log_max_eq_sz;
274 u8 log_max_mkey;
275 u8 rsvd7;
276 u8 log_max_eq;
277 u8 max_indirection;
278 u8 log_max_mrw_sz;
279 u8 log_max_bsf_list_sz;
280 u8 log_max_klm_list_sz;
281 u8 rsvd_8_0;
282 u8 log_max_ra_req_dc;
283 u8 rsvd_8_1;
284 u8 log_max_ra_res_dc;
285 u8 rsvd9;
286 u8 log_max_ra_req_qp;
287 u8 rsvd10;
288 u8 log_max_ra_res_qp;
289 u8 rsvd11[4];
290 __be16 max_qp_count;
291 __be16 rsvd12;
292 u8 rsvd13;
293 u8 local_ca_ack_delay;
294 u8 rsvd14;
295 u8 num_ports;
296 u8 log_max_msg;
297 u8 rsvd15[3];
298 __be16 stat_rate_support;
299 u8 rsvd16[2];
300 __be64 flags;
301 u8 rsvd17;
302 u8 uar_sz;
303 u8 rsvd18;
304 u8 log_pg_sz;
305 __be16 bf_log_bf_reg_size;
306 u8 rsvd19[4];
307 __be16 max_desc_sz_sq;
308 u8 rsvd20[2];
309 __be16 max_desc_sz_rq;
310 u8 rsvd21[2];
311 __be16 max_desc_sz_sq_dc;
312 u8 rsvd22[4];
313 __be16 max_qp_mcg;
314 u8 rsvd23;
315 u8 log_max_mcg;
316 u8 rsvd24;
317 u8 log_max_pd;
318 u8 rsvd25;
319 u8 log_max_xrcd;
320 u8 rsvd26[42];
321 __be16 log_uar_page_sz;
322 u8 rsvd27[28];
323 u8 log_msx_atomic_size_qp;
324 u8 rsvd28[2];
325 u8 log_msx_atomic_size_dc;
326 u8 rsvd29[76];
327};
328
329
330struct mlx5_cmd_query_hca_cap_mbox_in {
331 struct mlx5_inbox_hdr hdr;
332 u8 rsvd[8];
333};
334
335
336struct mlx5_cmd_query_hca_cap_mbox_out {
337 struct mlx5_outbox_hdr hdr;
338 u8 rsvd0[8];
339 struct mlx5_hca_cap hca_cap;
340};
341
342
343struct mlx5_cmd_set_hca_cap_mbox_in {
344 struct mlx5_inbox_hdr hdr;
345 u8 rsvd[8];
346 struct mlx5_hca_cap hca_cap;
347};
348
349
350struct mlx5_cmd_set_hca_cap_mbox_out {
351 struct mlx5_outbox_hdr hdr;
352 u8 rsvd0[8];
353};
354
355
356struct mlx5_cmd_init_hca_mbox_in {
357 struct mlx5_inbox_hdr hdr;
358 u8 rsvd0[2];
359 __be16 profile;
360 u8 rsvd1[4];
361};
362
363struct mlx5_cmd_init_hca_mbox_out {
364 struct mlx5_outbox_hdr hdr;
365 u8 rsvd[8];
366};
367
368struct mlx5_cmd_teardown_hca_mbox_in {
369 struct mlx5_inbox_hdr hdr;
370 u8 rsvd0[2];
371 __be16 profile;
372 u8 rsvd1[4];
373};
374
375struct mlx5_cmd_teardown_hca_mbox_out {
376 struct mlx5_outbox_hdr hdr;
377 u8 rsvd[8];
378};
379
380struct mlx5_cmd_layout {
381 u8 type;
382 u8 rsvd0[3];
383 __be32 inlen;
384 __be64 in_ptr;
385 __be32 in[4];
386 __be32 out[4];
387 __be64 out_ptr;
388 __be32 outlen;
389 u8 token;
390 u8 sig;
391 u8 rsvd1;
392 u8 status_own;
393};
394
395
396struct health_buffer {
397 __be32 assert_var[5];
398 __be32 rsvd0[3];
399 __be32 assert_exit_ptr;
400 __be32 assert_callra;
401 __be32 rsvd1[2];
402 __be32 fw_ver;
403 __be32 hw_id;
404 __be32 rsvd2;
405 u8 irisc_index;
406 u8 synd;
407 __be16 ext_sync;
408};
409
410struct mlx5_init_seg {
411 __be32 fw_rev;
412 __be32 cmdif_rev_fw_sub;
413 __be32 rsvd0[2];
414 __be32 cmdq_addr_h;
415 __be32 cmdq_addr_l_sz;
416 __be32 cmd_dbell;
417 __be32 rsvd1[121];
418 struct health_buffer health;
419 __be32 rsvd2[884];
420 __be32 health_counter;
421 __be32 rsvd3[1023];
422 __be64 ieee1588_clk;
423 __be32 ieee1588_clk_type;
424 __be32 clr_intx;
425};
426
427struct mlx5_eqe_comp {
428 __be32 reserved[6];
429 __be32 cqn;
430};
431
432struct mlx5_eqe_qp_srq {
433 __be32 reserved[6];
434 __be32 qp_srq_n;
435};
436
437struct mlx5_eqe_cq_err {
438 __be32 cqn;
439 u8 reserved1[7];
440 u8 syndrome;
441};
442
443struct mlx5_eqe_dropped_packet {
444};
445
446struct mlx5_eqe_port_state {
447 u8 reserved0[8];
448 u8 port;
449};
450
451struct mlx5_eqe_gpio {
452 __be32 reserved0[2];
453 __be64 gpio_event;
454};
455
456struct mlx5_eqe_congestion {
457 u8 type;
458 u8 rsvd0;
459 u8 congestion_level;
460};
461
462struct mlx5_eqe_stall_vl {
463 u8 rsvd0[3];
464 u8 port_vl;
465};
466
467struct mlx5_eqe_cmd {
468 __be32 vector;
469 __be32 rsvd[6];
470};
471
472struct mlx5_eqe_page_req {
473 u8 rsvd0[2];
474 __be16 func_id;
475 u8 rsvd1[2];
476 __be16 num_pages;
477 __be32 rsvd2[5];
478};
479
480union ev_data {
481 __be32 raw[7];
482 struct mlx5_eqe_cmd cmd;
483 struct mlx5_eqe_comp comp;
484 struct mlx5_eqe_qp_srq qp_srq;
485 struct mlx5_eqe_cq_err cq_err;
486 struct mlx5_eqe_dropped_packet dp;
487 struct mlx5_eqe_port_state port;
488 struct mlx5_eqe_gpio gpio;
489 struct mlx5_eqe_congestion cong;
490 struct mlx5_eqe_stall_vl stall_vl;
491 struct mlx5_eqe_page_req req_pages;
492} __packed;
493
494struct mlx5_eqe {
495 u8 rsvd0;
496 u8 type;
497 u8 rsvd1;
498 u8 sub_type;
499 __be32 rsvd2[7];
500 union ev_data data;
501 __be16 rsvd3;
502 u8 signature;
503 u8 owner;
504} __packed;
505
506struct mlx5_cmd_prot_block {
507 u8 data[MLX5_CMD_DATA_BLOCK_SIZE];
508 u8 rsvd0[48];
509 __be64 next;
510 __be32 block_num;
511 u8 rsvd1;
512 u8 token;
513 u8 ctrl_sig;
514 u8 sig;
515};
516
517struct mlx5_err_cqe {
518 u8 rsvd0[32];
519 __be32 srqn;
520 u8 rsvd1[18];
521 u8 vendor_err_synd;
522 u8 syndrome;
523 __be32 s_wqe_opcode_qpn;
524 __be16 wqe_counter;
525 u8 signature;
526 u8 op_own;
527};
528
529struct mlx5_cqe64 {
530 u8 rsvd0[17];
531 u8 ml_path;
532 u8 rsvd20[4];
533 __be16 slid;
534 __be32 flags_rqpn;
535 u8 rsvd28[4];
536 __be32 srqn;
537 __be32 imm_inval_pkey;
538 u8 rsvd40[4];
539 __be32 byte_cnt;
540 __be64 timestamp;
541 __be32 sop_drop_qpn;
542 __be16 wqe_counter;
543 u8 signature;
544 u8 op_own;
545};
546
547struct mlx5_wqe_srq_next_seg {
548 u8 rsvd0[2];
549 __be16 next_wqe_index;
550 u8 signature;
551 u8 rsvd1[11];
552};
553
554union mlx5_ext_cqe {
555 struct ib_grh grh;
556 u8 inl[64];
557};
558
559struct mlx5_cqe128 {
560 union mlx5_ext_cqe inl_grh;
561 struct mlx5_cqe64 cqe64;
562};
563
564struct mlx5_srq_ctx {
565 u8 state_log_sz;
566 u8 rsvd0[3];
567 __be32 flags_xrcd;
568 __be32 pgoff_cqn;
569 u8 rsvd1[4];
570 u8 log_pg_sz;
571 u8 rsvd2[7];
572 __be32 pd;
573 __be16 lwm;
574 __be16 wqe_cnt;
575 u8 rsvd3[8];
576 __be64 db_record;
577};
578
579struct mlx5_create_srq_mbox_in {
580 struct mlx5_inbox_hdr hdr;
581 __be32 input_srqn;
582 u8 rsvd0[4];
583 struct mlx5_srq_ctx ctx;
584 u8 rsvd1[208];
585 __be64 pas[0];
586};
587
588struct mlx5_create_srq_mbox_out {
589 struct mlx5_outbox_hdr hdr;
590 __be32 srqn;
591 u8 rsvd[4];
592};
593
594struct mlx5_destroy_srq_mbox_in {
595 struct mlx5_inbox_hdr hdr;
596 __be32 srqn;
597 u8 rsvd[4];
598};
599
600struct mlx5_destroy_srq_mbox_out {
601 struct mlx5_outbox_hdr hdr;
602 u8 rsvd[8];
603};
604
605struct mlx5_query_srq_mbox_in {
606 struct mlx5_inbox_hdr hdr;
607 __be32 srqn;
608 u8 rsvd0[4];
609};
610
611struct mlx5_query_srq_mbox_out {
612 struct mlx5_outbox_hdr hdr;
613 u8 rsvd0[8];
614 struct mlx5_srq_ctx ctx;
615 u8 rsvd1[32];
616 __be64 pas[0];
617};
618
619struct mlx5_arm_srq_mbox_in {
620 struct mlx5_inbox_hdr hdr;
621 __be32 srqn;
622 __be16 rsvd;
623 __be16 lwm;
624};
625
626struct mlx5_arm_srq_mbox_out {
627 struct mlx5_outbox_hdr hdr;
628 u8 rsvd[8];
629};
630
631struct mlx5_cq_context {
632 u8 status;
633 u8 cqe_sz_flags;
634 u8 st;
635 u8 rsvd3;
636 u8 rsvd4[6];
637 __be16 page_offset;
638 __be32 log_sz_usr_page;
639 __be16 cq_period;
640 __be16 cq_max_count;
641 __be16 rsvd20;
642 __be16 c_eqn;
643 u8 log_pg_sz;
644 u8 rsvd25[7];
645 __be32 last_notified_index;
646 __be32 solicit_producer_index;
647 __be32 consumer_counter;
648 __be32 producer_counter;
649 u8 rsvd48[8];
650 __be64 db_record_addr;
651};
652
653struct mlx5_create_cq_mbox_in {
654 struct mlx5_inbox_hdr hdr;
655 __be32 input_cqn;
656 u8 rsvdx[4];
657 struct mlx5_cq_context ctx;
658 u8 rsvd6[192];
659 __be64 pas[0];
660};
661
662struct mlx5_create_cq_mbox_out {
663 struct mlx5_outbox_hdr hdr;
664 __be32 cqn;
665 u8 rsvd0[4];
666};
667
668struct mlx5_destroy_cq_mbox_in {
669 struct mlx5_inbox_hdr hdr;
670 __be32 cqn;
671 u8 rsvd0[4];
672};
673
674struct mlx5_destroy_cq_mbox_out {
675 struct mlx5_outbox_hdr hdr;
676 u8 rsvd0[8];
677};
678
679struct mlx5_query_cq_mbox_in {
680 struct mlx5_inbox_hdr hdr;
681 __be32 cqn;
682 u8 rsvd0[4];
683};
684
685struct mlx5_query_cq_mbox_out {
686 struct mlx5_outbox_hdr hdr;
687 u8 rsvd0[8];
688 struct mlx5_cq_context ctx;
689 u8 rsvd6[16];
690 __be64 pas[0];
691};
692
693struct mlx5_eq_context {
694 u8 status;
695 u8 ec_oi;
696 u8 st;
697 u8 rsvd2[7];
698 __be16 page_pffset;
699 __be32 log_sz_usr_page;
700 u8 rsvd3[7];
701 u8 intr;
702 u8 log_page_size;
703 u8 rsvd4[15];
704 __be32 consumer_counter;
705 __be32 produser_counter;
706 u8 rsvd5[16];
707};
708
709struct mlx5_create_eq_mbox_in {
710 struct mlx5_inbox_hdr hdr;
711 u8 rsvd0[3];
712 u8 input_eqn;
713 u8 rsvd1[4];
714 struct mlx5_eq_context ctx;
715 u8 rsvd2[8];
716 __be64 events_mask;
717 u8 rsvd3[176];
718 __be64 pas[0];
719};
720
721struct mlx5_create_eq_mbox_out {
722 struct mlx5_outbox_hdr hdr;
723 u8 rsvd0[3];
724 u8 eq_number;
725 u8 rsvd1[4];
726};
727
728struct mlx5_destroy_eq_mbox_in {
729 struct mlx5_inbox_hdr hdr;
730 u8 rsvd0[3];
731 u8 eqn;
732 u8 rsvd1[4];
733};
734
735struct mlx5_destroy_eq_mbox_out {
736 struct mlx5_outbox_hdr hdr;
737 u8 rsvd[8];
738};
739
740struct mlx5_map_eq_mbox_in {
741 struct mlx5_inbox_hdr hdr;
742 __be64 mask;
743 u8 mu;
744 u8 rsvd0[2];
745 u8 eqn;
746 u8 rsvd1[24];
747};
748
749struct mlx5_map_eq_mbox_out {
750 struct mlx5_outbox_hdr hdr;
751 u8 rsvd[8];
752};
753
754struct mlx5_query_eq_mbox_in {
755 struct mlx5_inbox_hdr hdr;
756 u8 rsvd0[3];
757 u8 eqn;
758 u8 rsvd1[4];
759};
760
761struct mlx5_query_eq_mbox_out {
762 struct mlx5_outbox_hdr hdr;
763 u8 rsvd[8];
764 struct mlx5_eq_context ctx;
765};
766
767struct mlx5_mkey_seg {
768 /* This is a two bit field occupying bits 31-30.
769 * bit 31 is always 0,
 770 * bit 30 is zero for regular MRs and 1 (e.g. free) for UMRs that do not have translation
771 */
772 u8 status;
773 u8 pcie_control;
774 u8 flags;
775 u8 version;
776 __be32 qpn_mkey7_0;
777 u8 rsvd1[4];
778 __be32 flags_pd;
779 __be64 start_addr;
780 __be64 len;
781 __be32 bsfs_octo_size;
782 u8 rsvd2[16];
783 __be32 xlt_oct_size;
784 u8 rsvd3[3];
785 u8 log2_page_size;
786 u8 rsvd4[4];
787};
788
789struct mlx5_query_special_ctxs_mbox_in {
790 struct mlx5_inbox_hdr hdr;
791 u8 rsvd[8];
792};
793
794struct mlx5_query_special_ctxs_mbox_out {
795 struct mlx5_outbox_hdr hdr;
796 __be32 dump_fill_mkey;
797 __be32 reserved_lkey;
798};
799
800struct mlx5_create_mkey_mbox_in {
801 struct mlx5_inbox_hdr hdr;
802 __be32 input_mkey_index;
803 u8 rsvd0[4];
804 struct mlx5_mkey_seg seg;
805 u8 rsvd1[16];
806 __be32 xlat_oct_act_size;
807 __be32 bsf_coto_act_size;
808 u8 rsvd2[168];
809 __be64 pas[0];
810};
811
812struct mlx5_create_mkey_mbox_out {
813 struct mlx5_outbox_hdr hdr;
814 __be32 mkey;
815 u8 rsvd[4];
816};
817
818struct mlx5_destroy_mkey_mbox_in {
819 struct mlx5_inbox_hdr hdr;
820 __be32 mkey;
821 u8 rsvd[4];
822};
823
824struct mlx5_destroy_mkey_mbox_out {
825 struct mlx5_outbox_hdr hdr;
826 u8 rsvd[8];
827};
828
829struct mlx5_query_mkey_mbox_in {
830 struct mlx5_inbox_hdr hdr;
831 __be32 mkey;
832};
833
834struct mlx5_query_mkey_mbox_out {
835 struct mlx5_outbox_hdr hdr;
836 __be64 pas[0];
837};
838
839struct mlx5_modify_mkey_mbox_in {
840 struct mlx5_inbox_hdr hdr;
841 __be32 mkey;
842 __be64 pas[0];
843};
844
845struct mlx5_modify_mkey_mbox_out {
846 struct mlx5_outbox_hdr hdr;
847};
848
849struct mlx5_dump_mkey_mbox_in {
850 struct mlx5_inbox_hdr hdr;
851};
852
853struct mlx5_dump_mkey_mbox_out {
854 struct mlx5_outbox_hdr hdr;
855 __be32 mkey;
856};
857
858struct mlx5_mad_ifc_mbox_in {
859 struct mlx5_inbox_hdr hdr;
860 __be16 remote_lid;
861 u8 rsvd0;
862 u8 port;
863 u8 rsvd1[4];
864 u8 data[256];
865};
866
867struct mlx5_mad_ifc_mbox_out {
868 struct mlx5_outbox_hdr hdr;
869 u8 rsvd[8];
870 u8 data[256];
871};
872
873struct mlx5_access_reg_mbox_in {
874 struct mlx5_inbox_hdr hdr;
875 u8 rsvd0[2];
876 __be16 register_id;
877 __be32 arg;
878 __be32 data[0];
879};
880
881struct mlx5_access_reg_mbox_out {
882 struct mlx5_outbox_hdr hdr;
883 u8 rsvd[8];
884 __be32 data[0];
885};
886
887#define MLX5_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90)
888
889enum {
890 MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO = 1 << 0
891};
892
893#endif /* MLX5_DEVICE_H */
diff --git a/include/linux/mlx5/doorbell.h b/include/linux/mlx5/doorbell.h
new file mode 100644
index 000000000000..163a818411e7
--- /dev/null
+++ b/include/linux/mlx5/doorbell.h
@@ -0,0 +1,79 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef MLX5_DOORBELL_H
34#define MLX5_DOORBELL_H
35
36#define MLX5_BF_OFFSET 0x800
37#define MLX5_CQ_DOORBELL 0x20
38
39#if BITS_PER_LONG == 64
40/* Assume that we can just write a 64-bit doorbell atomically. s390
41 * actually doesn't have writeq() but S/390 systems don't even have
42 * PCI so we won't worry about it.
43 */
44
45#define MLX5_DECLARE_DOORBELL_LOCK(name)
46#define MLX5_INIT_DOORBELL_LOCK(ptr) do { } while (0)
47#define MLX5_GET_DOORBELL_LOCK(ptr) (NULL)
48
49static inline void mlx5_write64(__be32 val[2], void __iomem *dest,
50 spinlock_t *doorbell_lock)
51{
52 __raw_writeq(*(u64 *)val, dest);
53}
54
55#else
56
57/* Just fall back to a spinlock to protect the doorbell if
58 * BITS_PER_LONG is 32 -- there's no portable way to do atomic 64-bit
59 * MMIO writes.
60 */
61
62#define MLX5_DECLARE_DOORBELL_LOCK(name) spinlock_t name;
63#define MLX5_INIT_DOORBELL_LOCK(ptr) spin_lock_init(ptr)
64#define MLX5_GET_DOORBELL_LOCK(ptr) (ptr)
65
66static inline void mlx5_write64(__be32 val[2], void __iomem *dest,
67 spinlock_t *doorbell_lock)
68{
69 unsigned long flags;
70
71 spin_lock_irqsave(doorbell_lock, flags);
72 __raw_writel((__force u32) val[0], dest);
73 __raw_writel((__force u32) val[1], dest + 4);
74 spin_unlock_irqrestore(doorbell_lock, flags);
75}
76
77#endif
78
79#endif /* MLX5_DOORBELL_H */
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
new file mode 100644
index 000000000000..f22e4419839b
--- /dev/null
+++ b/include/linux/mlx5/driver.h
@@ -0,0 +1,769 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef MLX5_DRIVER_H
34#define MLX5_DRIVER_H
35
36#include <linux/kernel.h>
37#include <linux/completion.h>
38#include <linux/pci.h>
39#include <linux/spinlock_types.h>
40#include <linux/semaphore.h>
41#include <linux/vmalloc.h>
42#include <linux/radix-tree.h>
43#include <linux/mlx5/device.h>
44#include <linux/mlx5/doorbell.h>
45
46enum {
47 MLX5_BOARD_ID_LEN = 64,
48 MLX5_MAX_NAME_LEN = 16,
49};
50
51enum {
52 /* one minute for the sake of bringup. Generally, commands must always
53 * complete and we may need to increase this timeout value
54 */
55 MLX5_CMD_TIMEOUT_MSEC = 7200 * 1000,
56 MLX5_CMD_WQ_MAX_NAME = 32,
57};
58
59enum {
60 CMD_OWNER_SW = 0x0,
61 CMD_OWNER_HW = 0x1,
62 CMD_STATUS_SUCCESS = 0,
63};
64
65enum mlx5_sqp_t {
66 MLX5_SQP_SMI = 0,
67 MLX5_SQP_GSI = 1,
68 MLX5_SQP_IEEE_1588 = 2,
69 MLX5_SQP_SNIFFER = 3,
70 MLX5_SQP_SYNC_UMR = 4,
71};
72
73enum {
74 MLX5_MAX_PORTS = 2,
75};
76
77enum {
78 MLX5_EQ_VEC_PAGES = 0,
79 MLX5_EQ_VEC_CMD = 1,
80 MLX5_EQ_VEC_ASYNC = 2,
81 MLX5_EQ_VEC_COMP_BASE,
82};
83
84enum {
85 MLX5_MAX_EQ_NAME = 20
86};
87
88enum {
89 MLX5_ATOMIC_MODE_IB_COMP = 1 << 16,
90 MLX5_ATOMIC_MODE_CX = 2 << 16,
91 MLX5_ATOMIC_MODE_8B = 3 << 16,
92 MLX5_ATOMIC_MODE_16B = 4 << 16,
93 MLX5_ATOMIC_MODE_32B = 5 << 16,
94 MLX5_ATOMIC_MODE_64B = 6 << 16,
95 MLX5_ATOMIC_MODE_128B = 7 << 16,
96 MLX5_ATOMIC_MODE_256B = 8 << 16,
97};
98
99enum {
100 MLX5_CMD_OP_QUERY_HCA_CAP = 0x100,
101 MLX5_CMD_OP_QUERY_ADAPTER = 0x101,
102 MLX5_CMD_OP_INIT_HCA = 0x102,
103 MLX5_CMD_OP_TEARDOWN_HCA = 0x103,
104 MLX5_CMD_OP_QUERY_PAGES = 0x107,
105 MLX5_CMD_OP_MANAGE_PAGES = 0x108,
106 MLX5_CMD_OP_SET_HCA_CAP = 0x109,
107
108 MLX5_CMD_OP_CREATE_MKEY = 0x200,
109 MLX5_CMD_OP_QUERY_MKEY = 0x201,
110 MLX5_CMD_OP_DESTROY_MKEY = 0x202,
111 MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203,
112
113 MLX5_CMD_OP_CREATE_EQ = 0x301,
114 MLX5_CMD_OP_DESTROY_EQ = 0x302,
115 MLX5_CMD_OP_QUERY_EQ = 0x303,
116
117 MLX5_CMD_OP_CREATE_CQ = 0x400,
118 MLX5_CMD_OP_DESTROY_CQ = 0x401,
119 MLX5_CMD_OP_QUERY_CQ = 0x402,
120 MLX5_CMD_OP_MODIFY_CQ = 0x403,
121
122 MLX5_CMD_OP_CREATE_QP = 0x500,
123 MLX5_CMD_OP_DESTROY_QP = 0x501,
124 MLX5_CMD_OP_RST2INIT_QP = 0x502,
125 MLX5_CMD_OP_INIT2RTR_QP = 0x503,
126 MLX5_CMD_OP_RTR2RTS_QP = 0x504,
127 MLX5_CMD_OP_RTS2RTS_QP = 0x505,
128 MLX5_CMD_OP_SQERR2RTS_QP = 0x506,
129 MLX5_CMD_OP_2ERR_QP = 0x507,
130 MLX5_CMD_OP_RTS2SQD_QP = 0x508,
131 MLX5_CMD_OP_SQD2RTS_QP = 0x509,
132 MLX5_CMD_OP_2RST_QP = 0x50a,
133 MLX5_CMD_OP_QUERY_QP = 0x50b,
134 MLX5_CMD_OP_CONF_SQP = 0x50c,
135 MLX5_CMD_OP_MAD_IFC = 0x50d,
136 MLX5_CMD_OP_INIT2INIT_QP = 0x50e,
137 MLX5_CMD_OP_SUSPEND_QP = 0x50f,
138 MLX5_CMD_OP_UNSUSPEND_QP = 0x510,
139 MLX5_CMD_OP_SQD2SQD_QP = 0x511,
140 MLX5_CMD_OP_ALLOC_QP_COUNTER_SET = 0x512,
141 MLX5_CMD_OP_DEALLOC_QP_COUNTER_SET = 0x513,
142 MLX5_CMD_OP_QUERY_QP_COUNTER_SET = 0x514,
143
144 MLX5_CMD_OP_CREATE_PSV = 0x600,
145 MLX5_CMD_OP_DESTROY_PSV = 0x601,
146 MLX5_CMD_OP_QUERY_PSV = 0x602,
147 MLX5_CMD_OP_QUERY_SIG_RULE_TABLE = 0x603,
148 MLX5_CMD_OP_QUERY_BLOCK_SIZE_TABLE = 0x604,
149
150 MLX5_CMD_OP_CREATE_SRQ = 0x700,
151 MLX5_CMD_OP_DESTROY_SRQ = 0x701,
152 MLX5_CMD_OP_QUERY_SRQ = 0x702,
153 MLX5_CMD_OP_ARM_RQ = 0x703,
154 MLX5_CMD_OP_RESIZE_SRQ = 0x704,
155
156 MLX5_CMD_OP_ALLOC_PD = 0x800,
157 MLX5_CMD_OP_DEALLOC_PD = 0x801,
158 MLX5_CMD_OP_ALLOC_UAR = 0x802,
159 MLX5_CMD_OP_DEALLOC_UAR = 0x803,
160
161 MLX5_CMD_OP_ATTACH_TO_MCG = 0x806,
162 MLX5_CMD_OP_DETACH_FROM_MCG = 0x807,
163
164
165 MLX5_CMD_OP_ALLOC_XRCD = 0x80e,
166 MLX5_CMD_OP_DEALLOC_XRCD = 0x80f,
167
168 MLX5_CMD_OP_ACCESS_REG = 0x805,
169 MLX5_CMD_OP_MAX = 0x810,
170};
171
172enum {
173 MLX5_REG_PCAP = 0x5001,
174 MLX5_REG_PMTU = 0x5003,
175 MLX5_REG_PTYS = 0x5004,
176 MLX5_REG_PAOS = 0x5006,
177 MLX5_REG_PMAOS = 0x5012,
178 MLX5_REG_PUDE = 0x5009,
179 MLX5_REG_PMPE = 0x5010,
180 MLX5_REG_PELC = 0x500e,
181 MLX5_REG_PMLP = 0, /* TBD */
182 MLX5_REG_NODE_DESC = 0x6001,
183 MLX5_REG_HOST_ENDIANNESS = 0x7004,
184};
185
186enum dbg_rsc_type {
187 MLX5_DBG_RSC_QP,
188 MLX5_DBG_RSC_EQ,
189 MLX5_DBG_RSC_CQ,
190};
191
192struct mlx5_field_desc {
193 struct dentry *dent;
194 int i;
195};
196
197struct mlx5_rsc_debug {
198 struct mlx5_core_dev *dev;
199 void *object;
200 enum dbg_rsc_type type;
201 struct dentry *root;
202 struct mlx5_field_desc fields[0];
203};
204
205enum mlx5_dev_event {
206 MLX5_DEV_EVENT_SYS_ERROR,
207 MLX5_DEV_EVENT_PORT_UP,
208 MLX5_DEV_EVENT_PORT_DOWN,
209 MLX5_DEV_EVENT_PORT_INITIALIZED,
210 MLX5_DEV_EVENT_LID_CHANGE,
211 MLX5_DEV_EVENT_PKEY_CHANGE,
212 MLX5_DEV_EVENT_GUID_CHANGE,
213 MLX5_DEV_EVENT_CLIENT_REREG,
214};
215
216struct mlx5_uuar_info {
217 struct mlx5_uar *uars;
218 int num_uars;
219 int num_low_latency_uuars;
220 unsigned long *bitmap;
221 unsigned int *count;
222 struct mlx5_bf *bfs;
223
224 /*
225 * protect uuar allocation data structs
226 */
227 struct mutex lock;
228};
229
230struct mlx5_bf {
231 void __iomem *reg;
232 void __iomem *regreg;
233 int buf_size;
234 struct mlx5_uar *uar;
235 unsigned long offset;
236 int need_lock;
237 /* protect blue flame buffer selection when needed
238 */
239 spinlock_t lock;
240
241 /* serialize 64 bit writes when done as two 32 bit accesses
242 */
243 spinlock_t lock32;
244 int uuarn;
245};
246
247struct mlx5_cmd_first {
248 __be32 data[4];
249};
250
251struct mlx5_cmd_msg {
252 struct list_head list;
253 struct cache_ent *cache;
254 u32 len;
255 struct mlx5_cmd_first first;
256 struct mlx5_cmd_mailbox *next;
257};
258
259struct mlx5_cmd_debug {
260 struct dentry *dbg_root;
261 struct dentry *dbg_in;
262 struct dentry *dbg_out;
263 struct dentry *dbg_outlen;
264 struct dentry *dbg_status;
265 struct dentry *dbg_run;
266 void *in_msg;
267 void *out_msg;
268 u8 status;
269 u16 inlen;
270 u16 outlen;
271};
272
273struct cache_ent {
274 /* protect block chain allocations
275 */
276 spinlock_t lock;
277 struct list_head head;
278};
279
280struct cmd_msg_cache {
281 struct cache_ent large;
282 struct cache_ent med;
283
284};
285
286struct mlx5_cmd_stats {
287 u64 sum;
288 u64 n;
289 struct dentry *root;
290 struct dentry *avg;
291 struct dentry *count;
292 /* protect command average calculations */
293 spinlock_t lock;
294};
295
296struct mlx5_cmd {
297 void *cmd_buf;
298 dma_addr_t dma;
299 u16 cmdif_rev;
300 u8 log_sz;
301 u8 log_stride;
302 int max_reg_cmds;
303 int events;
304 u32 __iomem *vector;
305
306 /* protect command queue allocations
307 */
308 spinlock_t alloc_lock;
309
310 /* protect token allocations
311 */
312 spinlock_t token_lock;
313 u8 token;
314 unsigned long bitmask;
315 char wq_name[MLX5_CMD_WQ_MAX_NAME];
316 struct workqueue_struct *wq;
317 struct semaphore sem;
318 struct semaphore pages_sem;
319 int mode;
320 struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
321 struct pci_pool *pool;
322 struct mlx5_cmd_debug dbg;
323 struct cmd_msg_cache cache;
324 int checksum_disabled;
325 struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
326};
327
328struct mlx5_port_caps {
329 int gid_table_len;
330 int pkey_table_len;
331};
332
333struct mlx5_caps {
334 u8 log_max_eq;
335 u8 log_max_cq;
336 u8 log_max_qp;
337 u8 log_max_mkey;
338 u8 log_max_pd;
339 u8 log_max_srq;
340 u32 max_cqes;
341 int max_wqes;
342 int max_sq_desc_sz;
343 int max_rq_desc_sz;
344 u64 flags;
345 u16 stat_rate_support;
346 int log_max_msg;
347 int num_ports;
348 int max_ra_res_qp;
349 int max_ra_req_qp;
350 int max_srq_wqes;
351 int bf_reg_size;
352 int bf_regs_per_page;
353 struct mlx5_port_caps port[MLX5_MAX_PORTS];
354 u8 ext_port_cap[MLX5_MAX_PORTS];
355 int max_vf;
356 u32 reserved_lkey;
357 u8 local_ca_ack_delay;
358 u8 log_max_mcg;
359 u16 max_qp_mcg;
360 int min_page_sz;
361};
362
363struct mlx5_cmd_mailbox {
364 void *buf;
365 dma_addr_t dma;
366 struct mlx5_cmd_mailbox *next;
367};
368
369struct mlx5_buf_list {
370 void *buf;
371 dma_addr_t map;
372};
373
374struct mlx5_buf {
375 struct mlx5_buf_list direct;
376 struct mlx5_buf_list *page_list;
377 int nbufs;
378 int npages;
379 int page_shift;
380 int size;
381};
382
383struct mlx5_eq {
384 struct mlx5_core_dev *dev;
385 __be32 __iomem *doorbell;
386 u32 cons_index;
387 struct mlx5_buf buf;
388 int size;
389 u8 irqn;
390 u8 eqn;
391 int nent;
392 u64 mask;
393 char name[MLX5_MAX_EQ_NAME];
394 struct list_head list;
395 int index;
396 struct mlx5_rsc_debug *dbg;
397};
398
399
400struct mlx5_core_mr {
401 u64 iova;
402 u64 size;
403 u32 key;
404 u32 pd;
405 u32 access;
406};
407
408struct mlx5_core_srq {
409 u32 srqn;
410 int max;
411 int max_gs;
412 int max_avail_gather;
413 int wqe_shift;
414 void (*event) (struct mlx5_core_srq *, enum mlx5_event);
415
416 atomic_t refcount;
417 struct completion free;
418};
419
420struct mlx5_eq_table {
421 void __iomem *update_ci;
422 void __iomem *update_arm_ci;
423 struct list_head *comp_eq_head;
424 struct mlx5_eq pages_eq;
425 struct mlx5_eq async_eq;
426 struct mlx5_eq cmd_eq;
427 struct msix_entry *msix_arr;
428 int num_comp_vectors;
429 /* protect EQs list
430 */
431 spinlock_t lock;
432};
433
434struct mlx5_uar {
435 u32 index;
436 struct list_head bf_list;
437 unsigned free_bf_bmap;
438 void __iomem *wc_map;
439 void __iomem *map;
440};
441
442
443struct mlx5_core_health {
444 struct health_buffer __iomem *health;
445 __be32 __iomem *health_counter;
446 struct timer_list timer;
447 struct list_head list;
448 u32 prev;
449 int miss_counter;
450};
451
452struct mlx5_cq_table {
453 /* protect radix tree
454 */
455 spinlock_t lock;
456 struct radix_tree_root tree;
457};
458
459struct mlx5_qp_table {
460 /* protect radix tree
461 */
462 spinlock_t lock;
463 struct radix_tree_root tree;
464};
465
466struct mlx5_srq_table {
467 /* protect radix tree
468 */
469 spinlock_t lock;
470 struct radix_tree_root tree;
471};
472
473struct mlx5_priv {
474 char name[MLX5_MAX_NAME_LEN];
475 struct mlx5_eq_table eq_table;
476 struct mlx5_uuar_info uuari;
477 MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
478
479 /* pages stuff */
480 struct workqueue_struct *pg_wq;
481 struct rb_root page_root;
482 int fw_pages;
483 int reg_pages;
484
485 struct mlx5_core_health health;
486
487 struct mlx5_srq_table srq_table;
488
 489 /* start: qp stuff */
490 struct mlx5_qp_table qp_table;
491 struct dentry *qp_debugfs;
492 struct dentry *eq_debugfs;
493 struct dentry *cq_debugfs;
494 struct dentry *cmdif_debugfs;
 495 /* end: qp stuff */
496
 497 /* start: cq stuff */
498 struct mlx5_cq_table cq_table;
 499 /* end: cq stuff */
500
 501 /* start: alloc stuff */
502 struct mutex pgdir_mutex;
503 struct list_head pgdir_list;
 504 /* end: alloc stuff */
505 struct dentry *dbg_root;
506
507 /* protect mkey key part */
508 spinlock_t mkey_lock;
509 u8 mkey_key;
510};
511
512struct mlx5_core_dev {
513 struct pci_dev *pdev;
514 u8 rev_id;
515 char board_id[MLX5_BOARD_ID_LEN];
516 struct mlx5_cmd cmd;
517 struct mlx5_caps caps;
518 phys_addr_t iseg_base;
519 struct mlx5_init_seg __iomem *iseg;
520 void (*event) (struct mlx5_core_dev *dev,
521 enum mlx5_dev_event event,
522 void *data);
523 struct mlx5_priv priv;
524 struct mlx5_profile *profile;
525 atomic_t num_qps;
526};
527
528struct mlx5_db {
529 __be32 *db;
530 union {
531 struct mlx5_db_pgdir *pgdir;
532 struct mlx5_ib_user_db_page *user_page;
533 } u;
534 dma_addr_t dma;
535 int index;
536};
537
538enum {
539 MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES,
540};
541
542enum {
543 MLX5_COMP_EQ_SIZE = 1024,
544};
545
546struct mlx5_db_pgdir {
547 struct list_head list;
548 DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
549 __be32 *db_page;
550 dma_addr_t db_dma;
551};
552
553typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
554
555struct mlx5_cmd_work_ent {
556 struct mlx5_cmd_msg *in;
557 struct mlx5_cmd_msg *out;
558 mlx5_cmd_cbk_t callback;
559 void *context;
560 int idx;
561 struct completion done;
562 struct mlx5_cmd *cmd;
563 struct work_struct work;
564 struct mlx5_cmd_layout *lay;
565 int ret;
566 int page_queue;
567 u8 status;
568 u8 token;
569 struct timespec ts1;
570 struct timespec ts2;
571};
572
573struct mlx5_pas {
574 u64 pa;
575 u8 log_sz;
576};
577
578static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
579{
580 if (likely(BITS_PER_LONG == 64 || buf->nbufs == 1))
581 return buf->direct.buf + offset;
582 else
583 return buf->page_list[offset >> PAGE_SHIFT].buf +
584 (offset & (PAGE_SIZE - 1));
585}
586
587extern struct workqueue_struct *mlx5_core_wq;
588
589#define STRUCT_FIELD(header, field) \
590 .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
591 .struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field
592
593struct ib_field {
594 size_t struct_offset_bytes;
595 size_t struct_size_bytes;
596 int offset_bits;
597 int size_bits;
598};
599
600static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
601{
602 return pci_get_drvdata(pdev);
603}
604
605extern struct dentry *mlx5_debugfs_root;
606
607static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
608{
609 return ioread32be(&dev->iseg->fw_rev) & 0xffff;
610}
611
612static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
613{
614 return ioread32be(&dev->iseg->fw_rev) >> 16;
615}
616
617static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
618{
619 return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
620}
621
622static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
623{
624 return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
625}
626
627static inline void *mlx5_vzalloc(unsigned long size)
628{
629 void *rtn;
630
631 rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
632 if (!rtn)
633 rtn = vzalloc(size);
634 return rtn;
635}
636
637static inline void mlx5_vfree(const void *addr)
638{
639 if (addr && is_vmalloc_addr(addr))
640 vfree(addr);
641 else
642 kfree(addr);
643}
644
645int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev);
646void mlx5_dev_cleanup(struct mlx5_core_dev *dev);
647int mlx5_cmd_init(struct mlx5_core_dev *dev);
648void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
649void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
650void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
651int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
652int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
653 int out_size);
654int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
655int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
656int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
657int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
658void mlx5_health_cleanup(void);
659void __init mlx5_health_init(void);
660void mlx5_start_health_poll(struct mlx5_core_dev *dev);
661void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
662int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
663 struct mlx5_buf *buf);
664void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
665struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
666 gfp_t flags, int npages);
667void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
668 struct mlx5_cmd_mailbox *head);
669int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
670 struct mlx5_create_srq_mbox_in *in, int inlen);
671int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
672int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
673 struct mlx5_query_srq_mbox_out *out);
674int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
675 u16 lwm, int is_srq);
676int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
677 struct mlx5_create_mkey_mbox_in *in, int inlen);
678int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr);
679int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
680 struct mlx5_query_mkey_mbox_out *out, int outlen);
681int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
682 u32 *mkey);
683int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
684int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
685int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb,
686 u16 opmod, int port);
687void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
688void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
689int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
690void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
691void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
692 s16 npages);
693int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev);
694int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
695void mlx5_register_debugfs(void);
696void mlx5_unregister_debugfs(void);
697int mlx5_eq_init(struct mlx5_core_dev *dev);
698void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
699void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
700void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
701void mlx5_qp_event(struct mlx5_core_dev *dev, u32 qpn, int event_type);
702void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
703struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
704void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector);
705void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
706int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
707 int nent, u64 mask, const char *name, struct mlx5_uar *uar);
708int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
709int mlx5_start_eqs(struct mlx5_core_dev *dev);
710int mlx5_stop_eqs(struct mlx5_core_dev *dev);
711int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
712int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
713
714int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
715void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
716int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
717 int size_in, void *data_out, int size_out,
718 u16 reg_num, int arg, int write);
719int mlx5_set_port_caps(struct mlx5_core_dev *dev, int port_num, u32 caps);
720
721int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
722void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
723int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
724 struct mlx5_query_eq_mbox_out *out, int outlen);
725int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
726void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
727int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
728void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
729int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
730void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
731
732typedef void (*health_handler_t)(struct pci_dev *pdev, struct health_buffer __iomem *buf, int size);
733int mlx5_register_health_report_handler(health_handler_t handler);
734void mlx5_unregister_health_report_handler(void);
735const char *mlx5_command_str(int command);
736int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
737void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
738
739static inline u32 mlx5_mkey_to_idx(u32 mkey)
740{
741 return mkey >> 8;
742}
743
744static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
745{
746 return mkey_idx << 8;
747}
748
749enum {
750 MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
751 MLX5_PROF_MASK_CMDIF_CSUM = (u64)1 << 1,
752 MLX5_PROF_MASK_MR_CACHE = (u64)1 << 2,
753};
754
755enum {
756 MAX_MR_CACHE_ENTRIES = 16,
757};
758
759struct mlx5_profile {
760 u64 mask;
761 u32 log_max_qp;
762 int cmdif_csum;
763 struct {
764 int size;
765 int limit;
766 } mr_cache[MAX_MR_CACHE_ENTRIES];
767};
768
769#endif /* MLX5_DRIVER_H */
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
new file mode 100644
index 000000000000..d9e3eacb3a7f
--- /dev/null
+++ b/include/linux/mlx5/qp.h
@@ -0,0 +1,467 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef MLX5_QP_H
34#define MLX5_QP_H
35
36#include <linux/mlx5/device.h>
37#include <linux/mlx5/driver.h>
38
39#define MLX5_INVALID_LKEY 0x100
40
41enum mlx5_qp_optpar {
42 MLX5_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0,
43 MLX5_QP_OPTPAR_RRE = 1 << 1,
44 MLX5_QP_OPTPAR_RAE = 1 << 2,
45 MLX5_QP_OPTPAR_RWE = 1 << 3,
46 MLX5_QP_OPTPAR_PKEY_INDEX = 1 << 4,
47 MLX5_QP_OPTPAR_Q_KEY = 1 << 5,
48 MLX5_QP_OPTPAR_RNR_TIMEOUT = 1 << 6,
49 MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
50 MLX5_QP_OPTPAR_SRA_MAX = 1 << 8,
51 MLX5_QP_OPTPAR_RRA_MAX = 1 << 9,
52 MLX5_QP_OPTPAR_PM_STATE = 1 << 10,
53 MLX5_QP_OPTPAR_RETRY_COUNT = 1 << 12,
54 MLX5_QP_OPTPAR_RNR_RETRY = 1 << 13,
55 MLX5_QP_OPTPAR_ACK_TIMEOUT = 1 << 14,
56 MLX5_QP_OPTPAR_PRI_PORT = 1 << 16,
57 MLX5_QP_OPTPAR_SRQN = 1 << 18,
58 MLX5_QP_OPTPAR_CQN_RCV = 1 << 19,
59 MLX5_QP_OPTPAR_DC_HS = 1 << 20,
60 MLX5_QP_OPTPAR_DC_KEY = 1 << 21,
61};
62
63enum mlx5_qp_state {
64 MLX5_QP_STATE_RST = 0,
65 MLX5_QP_STATE_INIT = 1,
66 MLX5_QP_STATE_RTR = 2,
67 MLX5_QP_STATE_RTS = 3,
68 MLX5_QP_STATE_SQER = 4,
69 MLX5_QP_STATE_SQD = 5,
70 MLX5_QP_STATE_ERR = 6,
71 MLX5_QP_STATE_SQ_DRAINING = 7,
72 MLX5_QP_STATE_SUSPENDED = 9,
73 MLX5_QP_NUM_STATE
74};
75
76enum {
77 MLX5_QP_ST_RC = 0x0,
78 MLX5_QP_ST_UC = 0x1,
79 MLX5_QP_ST_UD = 0x2,
80 MLX5_QP_ST_XRC = 0x3,
81 MLX5_QP_ST_MLX = 0x4,
82 MLX5_QP_ST_DCI = 0x5,
83 MLX5_QP_ST_DCT = 0x6,
84 MLX5_QP_ST_QP0 = 0x7,
85 MLX5_QP_ST_QP1 = 0x8,
86 MLX5_QP_ST_RAW_ETHERTYPE = 0x9,
87 MLX5_QP_ST_RAW_IPV6 = 0xa,
88 MLX5_QP_ST_SNIFFER = 0xb,
89 MLX5_QP_ST_SYNC_UMR = 0xe,
90 MLX5_QP_ST_PTP_1588 = 0xd,
91 MLX5_QP_ST_REG_UMR = 0xc,
92 MLX5_QP_ST_MAX
93};
94
95enum {
96 MLX5_QP_PM_MIGRATED = 0x3,
97 MLX5_QP_PM_ARMED = 0x0,
98 MLX5_QP_PM_REARM = 0x1
99};
100
101enum {
102 MLX5_NON_ZERO_RQ = 0 << 24,
103 MLX5_SRQ_RQ = 1 << 24,
104 MLX5_CRQ_RQ = 2 << 24,
105 MLX5_ZERO_LEN_RQ = 3 << 24
106};
107
108enum {
109 /* params1 */
110 MLX5_QP_BIT_SRE = 1 << 15,
111 MLX5_QP_BIT_SWE = 1 << 14,
112 MLX5_QP_BIT_SAE = 1 << 13,
113 /* params2 */
114 MLX5_QP_BIT_RRE = 1 << 15,
115 MLX5_QP_BIT_RWE = 1 << 14,
116 MLX5_QP_BIT_RAE = 1 << 13,
117 MLX5_QP_BIT_RIC = 1 << 4,
118};
119
120enum {
121 MLX5_WQE_CTRL_CQ_UPDATE = 2 << 2,
122 MLX5_WQE_CTRL_SOLICITED = 1 << 1,
123};
124
125enum {
126 MLX5_SEND_WQE_BB = 64,
127};
128
129enum {
130 MLX5_WQE_FMR_PERM_LOCAL_READ = 1 << 27,
131 MLX5_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28,
132 MLX5_WQE_FMR_PERM_REMOTE_READ = 1 << 29,
133 MLX5_WQE_FMR_PERM_REMOTE_WRITE = 1 << 30,
134 MLX5_WQE_FMR_PERM_ATOMIC = 1 << 31
135};
136
137enum {
138 MLX5_FENCE_MODE_NONE = 0 << 5,
139 MLX5_FENCE_MODE_INITIATOR_SMALL = 1 << 5,
140 MLX5_FENCE_MODE_STRONG_ORDERING = 3 << 5,
141 MLX5_FENCE_MODE_SMALL_AND_FENCE = 4 << 5,
142};
143
144enum {
145 MLX5_QP_LAT_SENSITIVE = 1 << 28,
146 MLX5_QP_ENABLE_SIG = 1 << 31,
147};
148
149enum {
150 MLX5_RCV_DBR = 0,
151 MLX5_SND_DBR = 1,
152};
153
154struct mlx5_wqe_fmr_seg {
155 __be32 flags;
156 __be32 mem_key;
157 __be64 buf_list;
158 __be64 start_addr;
159 __be64 reg_len;
160 __be32 offset;
161 __be32 page_size;
162 u32 reserved[2];
163};
164
165struct mlx5_wqe_ctrl_seg {
166 __be32 opmod_idx_opcode;
167 __be32 qpn_ds;
168 u8 signature;
169 u8 rsvd[2];
170 u8 fm_ce_se;
171 __be32 imm;
172};
173
174struct mlx5_wqe_xrc_seg {
175 __be32 xrc_srqn;
176 u8 rsvd[12];
177};
178
179struct mlx5_wqe_masked_atomic_seg {
180 __be64 swap_add;
181 __be64 compare;
182 __be64 swap_add_mask;
183 __be64 compare_mask;
184};
185
186struct mlx5_av {
187 union {
188 struct {
189 __be32 qkey;
190 __be32 reserved;
191 } qkey;
192 __be64 dc_key;
193 } key;
194 __be32 dqp_dct;
195 u8 stat_rate_sl;
196 u8 fl_mlid;
197 __be16 rlid;
198 u8 reserved0[10];
199 u8 tclass;
200 u8 hop_limit;
201 __be32 grh_gid_fl;
202 u8 rgid[16];
203};
204
205struct mlx5_wqe_datagram_seg {
206 struct mlx5_av av;
207};
208
209struct mlx5_wqe_raddr_seg {
210 __be64 raddr;
211 __be32 rkey;
212 u32 reserved;
213};
214
215struct mlx5_wqe_atomic_seg {
216 __be64 swap_add;
217 __be64 compare;
218};
219
220struct mlx5_wqe_data_seg {
221 __be32 byte_count;
222 __be32 lkey;
223 __be64 addr;
224};
225
226struct mlx5_wqe_umr_ctrl_seg {
227 u8 flags;
228 u8 rsvd0[3];
229 __be16 klm_octowords;
230 __be16 bsf_octowords;
231 __be64 mkey_mask;
232 u8 rsvd1[32];
233};
234
235struct mlx5_seg_set_psv {
236 __be32 psv_num;
237 __be16 syndrome;
238 __be16 status;
239 __be32 transient_sig;
240 __be32 ref_tag;
241};
242
243struct mlx5_seg_get_psv {
244 u8 rsvd[19];
245 u8 num_psv;
246 __be32 l_key;
247 __be64 va;
248 __be32 psv_index[4];
249};
250
251struct mlx5_seg_check_psv {
252 u8 rsvd0[2];
253 __be16 err_coalescing_op;
254 u8 rsvd1[2];
255 __be16 xport_err_op;
256 u8 rsvd2[2];
257 __be16 xport_err_mask;
258 u8 rsvd3[7];
259 u8 num_psv;
260 __be32 l_key;
261 __be64 va;
262 __be32 psv_index[4];
263};
264
265struct mlx5_rwqe_sig {
266 u8 rsvd0[4];
267 u8 signature;
268 u8 rsvd1[11];
269};
270
271struct mlx5_wqe_signature_seg {
272 u8 rsvd0[4];
273 u8 signature;
274 u8 rsvd1[11];
275};
276
277struct mlx5_wqe_inline_seg {
278 __be32 byte_count;
279};
280
281struct mlx5_core_qp {
282 void (*event) (struct mlx5_core_qp *, int);
283 int qpn;
284 atomic_t refcount;
285 struct completion free;
286 struct mlx5_rsc_debug *dbg;
287 int pid;
288};
289
290struct mlx5_qp_path {
291 u8 fl;
292 u8 rsvd3;
293 u8 free_ar;
294 u8 pkey_index;
295 u8 rsvd0;
296 u8 grh_mlid;
297 __be16 rlid;
298 u8 ackto_lt;
299 u8 mgid_index;
300 u8 static_rate;
301 u8 hop_limit;
302 __be32 tclass_flowlabel;
303 u8 rgid[16];
304 u8 rsvd1[4];
305 u8 sl;
306 u8 port;
307 u8 rsvd2[6];
308};
309
310struct mlx5_qp_context {
311 __be32 flags;
312 __be32 flags_pd;
313 u8 mtu_msgmax;
314 u8 rq_size_stride;
315 __be16 sq_crq_size;
316 __be32 qp_counter_set_usr_page;
317 __be32 wire_qpn;
318 __be32 log_pg_sz_remote_qpn;
319 struct mlx5_qp_path pri_path;
320 struct mlx5_qp_path alt_path;
321 __be32 params1;
322 u8 reserved2[4];
323 __be32 next_send_psn;
324 __be32 cqn_send;
325 u8 reserved3[8];
326 __be32 last_acked_psn;
327 __be32 ssn;
328 __be32 params2;
329 __be32 rnr_nextrecvpsn;
330 __be32 xrcd;
331 __be32 cqn_recv;
332 __be64 db_rec_addr;
333 __be32 qkey;
334 __be32 rq_type_srqn;
335 __be32 rmsn;
336 __be16 hw_sq_wqe_counter;
337 __be16 sw_sq_wqe_counter;
338 __be16 hw_rcyclic_byte_counter;
339 __be16 hw_rq_counter;
340 __be16 sw_rcyclic_byte_counter;
341 __be16 sw_rq_counter;
342 u8 rsvd0[5];
343 u8 cgs;
344 u8 cs_req;
345 u8 cs_res;
346 __be64 dc_access_key;
347 u8 rsvd1[24];
348};
349
350struct mlx5_create_qp_mbox_in {
351 struct mlx5_inbox_hdr hdr;
352 __be32 input_qpn;
353 u8 rsvd0[4];
354 __be32 opt_param_mask;
355 u8 rsvd1[4];
356 struct mlx5_qp_context ctx;
357 u8 rsvd3[16];
358 __be64 pas[0];
359};
360
361struct mlx5_create_qp_mbox_out {
362 struct mlx5_outbox_hdr hdr;
363 __be32 qpn;
364 u8 rsvd0[4];
365};
366
367struct mlx5_destroy_qp_mbox_in {
368 struct mlx5_inbox_hdr hdr;
369 __be32 qpn;
370 u8 rsvd0[4];
371};
372
373struct mlx5_destroy_qp_mbox_out {
374 struct mlx5_outbox_hdr hdr;
375 u8 rsvd0[8];
376};
377
378struct mlx5_modify_qp_mbox_in {
379 struct mlx5_inbox_hdr hdr;
380 __be32 qpn;
381 u8 rsvd1[4];
382 __be32 optparam;
383 u8 rsvd0[4];
384 struct mlx5_qp_context ctx;
385};
386
387struct mlx5_modify_qp_mbox_out {
388 struct mlx5_outbox_hdr hdr;
389 u8 rsvd0[8];
390};
391
392struct mlx5_query_qp_mbox_in {
393 struct mlx5_inbox_hdr hdr;
394 __be32 qpn;
395 u8 rsvd[4];
396};
397
398struct mlx5_query_qp_mbox_out {
399 struct mlx5_outbox_hdr hdr;
400 u8 rsvd1[8];
401 __be32 optparam;
402 u8 rsvd0[4];
403 struct mlx5_qp_context ctx;
404 u8 rsvd2[16];
405 __be64 pas[0];
406};
407
408struct mlx5_conf_sqp_mbox_in {
409 struct mlx5_inbox_hdr hdr;
410 __be32 qpn;
411 u8 rsvd[3];
412 u8 type;
413};
414
415struct mlx5_conf_sqp_mbox_out {
416 struct mlx5_outbox_hdr hdr;
417 u8 rsvd[8];
418};
419
420struct mlx5_alloc_xrcd_mbox_in {
421 struct mlx5_inbox_hdr hdr;
422 u8 rsvd[8];
423};
424
425struct mlx5_alloc_xrcd_mbox_out {
426 struct mlx5_outbox_hdr hdr;
427 __be32 xrcdn;
428 u8 rsvd[4];
429};
430
431struct mlx5_dealloc_xrcd_mbox_in {
432 struct mlx5_inbox_hdr hdr;
433 __be32 xrcdn;
434 u8 rsvd[4];
435};
436
437struct mlx5_dealloc_xrcd_mbox_out {
438 struct mlx5_outbox_hdr hdr;
439 u8 rsvd[8];
440};
441
442static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn)
443{
444 return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
445}
446
447int mlx5_core_create_qp(struct mlx5_core_dev *dev,
448 struct mlx5_core_qp *qp,
449 struct mlx5_create_qp_mbox_in *in,
450 int inlen);
451int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state,
452 enum mlx5_qp_state new_state,
453 struct mlx5_modify_qp_mbox_in *in, int sqd_event,
454 struct mlx5_core_qp *qp);
455int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
456 struct mlx5_core_qp *qp);
457int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
458 struct mlx5_query_qp_mbox_out *out, int outlen);
459
460int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
461int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
462void mlx5_init_qp_table(struct mlx5_core_dev *dev);
463void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev);
464int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
465void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
466
467#endif /* MLX5_QP_H */
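
Taken together, the mailbox structs and prototypes above form the low-level QP command interface. A hedged sketch of how a caller might exercise it: allocate a create mailbox with room for the page-address list, create the QP, attempt a RST->INIT transition, then destroy it. The page count and the (empty) context programming are placeholders, not a working configuration:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mlx5/qp.h>

/* Hedged sketch: npas and the unfilled QP context are placeholders,
 * not a working configuration; error handling is abbreviated. */
static int example_qp_lifecycle(struct mlx5_core_dev *dev,
				struct mlx5_core_qp *qp, int npas)
{
	struct mlx5_create_qp_mbox_in *in;
	struct mlx5_modify_qp_mbox_in *mod;
	int inlen = sizeof(*in) + npas * sizeof(__be64);
	int err;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;
	/* in->ctx and in->pas[] would be programmed from the WQ buffer here */
	err = mlx5_core_create_qp(dev, qp, in, inlen);
	kfree(in);
	if (err)
		return err;

	mod = kzalloc(sizeof(*mod), GFP_KERNEL);
	if (!mod) {
		err = -ENOMEM;
		goto out_destroy;
	}
	/* RST -> INIT; ctx/optparam fields left at zero for the sketch */
	err = mlx5_core_qp_modify(dev, MLX5_QP_STATE_RST, MLX5_QP_STATE_INIT,
				  mod, 0, qp);
	kfree(mod);

out_destroy:
	mlx5_core_destroy_qp(dev, qp);
	return err;
}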
diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h
new file mode 100644
index 000000000000..e1a363a33663
--- /dev/null
+++ b/include/linux/mlx5/srq.h
@@ -0,0 +1,41 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef MLX5_SRQ_H
34#define MLX5_SRQ_H
35
36#include <linux/mlx5/driver.h>
37
38void mlx5_init_srq_table(struct mlx5_core_dev *dev);
39void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev);
40
41#endif /* MLX5_SRQ_H */
diff --git a/include/linux/socket.h b/include/linux/socket.h
index b10ce4b341ea..230c04bda3e2 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -167,6 +167,7 @@ struct ucred {
 #define AF_PPPOX	24	/* PPPoX sockets		*/
 #define AF_WANPIPE	25	/* Wanpipe API Sockets */
 #define AF_LLC		26	/* Linux LLC			*/
+#define AF_IB		27	/* Native InfiniBand address	*/
 #define AF_CAN		29	/* Controller Area Network      */
 #define AF_TIPC		30	/* TIPC sockets			*/
 #define AF_BLUETOOTH	31	/* Bluetooth sockets		*/
@@ -211,6 +212,7 @@ struct ucred {
 #define PF_PPPOX	AF_PPPOX
 #define PF_WANPIPE	AF_WANPIPE
 #define PF_LLC		AF_LLC
+#define PF_IB		AF_IB
 #define PF_CAN		AF_CAN
 #define PF_TIPC		AF_TIPC
 #define PF_BLUETOOTH	AF_BLUETOOTH
diff --git a/include/rdma/ib.h b/include/rdma/ib.h
new file mode 100644
index 000000000000..cf8f9e700e48
--- /dev/null
+++ b/include/rdma/ib.h
@@ -0,0 +1,89 @@
1/*
2 * Copyright (c) 2010 Intel Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#if !defined(_RDMA_IB_H)
34#define _RDMA_IB_H
35
36#include <linux/types.h>
37
38struct ib_addr {
39 union {
40 __u8 uib_addr8[16];
41 __be16 uib_addr16[8];
42 __be32 uib_addr32[4];
43 __be64 uib_addr64[2];
44 } ib_u;
45#define sib_addr8 ib_u.uib_addr8
46#define sib_addr16 ib_u.uib_addr16
47#define sib_addr32 ib_u.uib_addr32
48#define sib_addr64 ib_u.uib_addr64
49#define sib_raw ib_u.uib_addr8
50#define sib_subnet_prefix ib_u.uib_addr64[0]
51#define sib_interface_id ib_u.uib_addr64[1]
52};
53
54static inline int ib_addr_any(const struct ib_addr *a)
55{
56 return ((a->sib_addr64[0] | a->sib_addr64[1]) == 0);
57}
58
59static inline int ib_addr_loopback(const struct ib_addr *a)
60{
61 return ((a->sib_addr32[0] | a->sib_addr32[1] |
62 a->sib_addr32[2] | (a->sib_addr32[3] ^ htonl(1))) == 0);
63}
64
65static inline void ib_addr_set(struct ib_addr *addr,
66 __be32 w1, __be32 w2, __be32 w3, __be32 w4)
67{
68 addr->sib_addr32[0] = w1;
69 addr->sib_addr32[1] = w2;
70 addr->sib_addr32[2] = w3;
71 addr->sib_addr32[3] = w4;
72}
73
74static inline int ib_addr_cmp(const struct ib_addr *a1, const struct ib_addr *a2)
75{
76 return memcmp(a1, a2, sizeof(struct ib_addr));
77}
78
79struct sockaddr_ib {
80 unsigned short int sib_family; /* AF_IB */
81 __be16 sib_pkey;
82 __be32 sib_flowinfo;
83 struct ib_addr sib_addr;
84 __be64 sib_sid;
85 __be64 sib_sid_mask;
86 __u64 sib_scope_id;
87};
88
89#endif /* _RDMA_IB_H */
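
A short sketch of how these helpers compose when filling a struct sockaddr_ib for an AF_IB endpoint; the GID words and P_Key below are placeholders, not real fabric values:

#include <linux/kernel.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <rdma/ib.h>

/* Sketch only: the GID halves below are placeholders, not a real address. */
static void example_fill_sockaddr_ib(struct sockaddr_ib *sib)
{
	memset(sib, 0, sizeof(*sib));
	sib->sib_family = AF_IB;
	sib->sib_pkey   = cpu_to_be16(0xffff);	/* default partition key */

	/* GID = 64-bit subnet prefix followed by 64-bit interface ID,
	 * written here as four 32-bit words via ib_addr_set(). */
	ib_addr_set(&sib->sib_addr,
		    cpu_to_be32(0xfe800000), 0,
		    cpu_to_be32(0x00020000), cpu_to_be32(0x00000001));
}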
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index 99965395c5f3..f3ac0f2c4c66 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -102,11 +102,7 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr);
 int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
 		   const unsigned char *dst_dev_addr);
 
-static inline int ip_addr_size(struct sockaddr *addr)
-{
-	return addr->sa_family == AF_INET6 ?
-	       sizeof(struct sockaddr_in6) : sizeof(struct sockaddr_in);
-}
+int rdma_addr_size(struct sockaddr *addr);
 
 static inline u16 ib_addr_get_pkey(struct rdma_dev_addr *dev_addr)
 {
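
The inline ip_addr_size() knew only AF_INET/AF_INET6; the exported rdma_addr_size() must also understand AF_IB, which is why it moves out of line. A hedged sketch of the kind of dispatch involved (the in-tree implementation lives in the addr/cma core; this is only the idea):

#include <linux/in.h>
#include <linux/in6.h>
#include <linux/socket.h>
#include <rdma/ib.h>

/* Sketch of the family-based size dispatch; not the in-tree rdma_addr_size(). */
static int example_addr_size(struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		return sizeof(struct sockaddr_in);
	case AF_INET6:
		return sizeof(struct sockaddr_in6);
	case AF_IB:
		return sizeof(struct sockaddr_ib);
	default:
		return 0;	/* unknown family */
	}
}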
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index 8275e539bace..125f8714301d 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -402,6 +402,12 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
 			 struct ib_ah_attr *ah_attr);
 
 /**
+ * ib_sa_pack_path - Convert a path record from struct ib_sa_path_rec
+ * to IB MAD wire format.
+ */
+void ib_sa_pack_path(struct ib_sa_path_rec *rec, void *attribute);
+
+/**
  * ib_sa_unpack_path - Convert a path record from MAD format to struct
  * ib_sa_path_rec.
  */
@@ -418,4 +424,5 @@ int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
 					      void *context),
 			      void *context,
 			      struct ib_sa_query **sa_query);
+
 #endif /* IB_SA_H */
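
ib_sa_pack_path() is the serializing counterpart of ib_sa_unpack_path(), letting the CM hand AF_IB users raw path records. A hedged usage sketch; the 64-byte buffer size is an assumption about the PathRecord MAD attribute (it matches the path_rec[] payload of struct ib_path_rec_data), not something stated in this header:

#include <linux/string.h>
#include <rdma/ib_sa.h>

/* Hedged sketch: assumes the PathRecord MAD attribute occupies 64 bytes. */
static void example_pack_path(struct ib_sa_path_rec *rec, void *dst)
{
	u8 wire[64];			/* assumed PathRecord wire size */

	ib_sa_pack_path(rec, wire);	/* serialize to MAD wire format */
	memcpy(dst, wire, sizeof(wire));
}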
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 98cc4b29fc5b..645c3cedce9c 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -610,7 +610,21 @@ enum ib_qp_type {
 	IB_QPT_RAW_PACKET = 8,
 	IB_QPT_XRC_INI = 9,
 	IB_QPT_XRC_TGT,
-	IB_QPT_MAX
+	IB_QPT_MAX,
+	/* Reserve a range for qp types internal to the low level driver.
+	 * These qp types will not be visible at the IB core layer, so the
+	 * IB_QPT_MAX usages should not be affected in the core layer
+	 */
+	IB_QPT_RESERVED1 = 0x1000,
+	IB_QPT_RESERVED2,
+	IB_QPT_RESERVED3,
+	IB_QPT_RESERVED4,
+	IB_QPT_RESERVED5,
+	IB_QPT_RESERVED6,
+	IB_QPT_RESERVED7,
+	IB_QPT_RESERVED8,
+	IB_QPT_RESERVED9,
+	IB_QPT_RESERVED10,
 };
 
 enum ib_qp_create_flags {
@@ -766,6 +780,19 @@ enum ib_wr_opcode {
 	IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
 	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
 	IB_WR_BIND_MW,
+	/* reserve values for low level drivers' internal use.
+	 * These values will not be used at all in the ib core layer.
+	 */
+	IB_WR_RESERVED1 = 0xf0,
+	IB_WR_RESERVED2,
+	IB_WR_RESERVED3,
+	IB_WR_RESERVED4,
+	IB_WR_RESERVED5,
+	IB_WR_RESERVED6,
+	IB_WR_RESERVED7,
+	IB_WR_RESERVED8,
+	IB_WR_RESERVED9,
+	IB_WR_RESERVED10,
 };
 
 enum ib_send_flags {
@@ -773,7 +800,11 @@ enum ib_send_flags {
 	IB_SEND_SIGNALED	= (1<<1),
 	IB_SEND_SOLICITED	= (1<<2),
 	IB_SEND_INLINE		= (1<<3),
-	IB_SEND_IP_CSUM		= (1<<4)
+	IB_SEND_IP_CSUM		= (1<<4),
+
+	/* reserve bits 26-31 for low level drivers' internal use */
+	IB_SEND_RESERVED_START	= (1 << 26),
+	IB_SEND_RESERVED_END	= (1 << 31),
 };
 
 struct ib_sge {
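
The reserved QP types, WR opcodes, and send-flag bits exist so low-level drivers can define private values without colliding with core verbs. A hedged sketch of how a driver might alias them; the EXAMPLE_* names are made up for illustration, not mlx5's actual definitions:

#include <rdma/ib_verbs.h>

/* Hypothetical driver-private values built on the reserved ranges. */
enum example_drv_qp_type {
	EXAMPLE_QPT_UMR		= IB_QPT_RESERVED1,
};

enum example_drv_wr_opcode {
	EXAMPLE_WR_UMR		= IB_WR_RESERVED1,
};

enum example_drv_send_flags {
	/* any bit in [IB_SEND_RESERVED_START, IB_SEND_RESERVED_END] */
	EXAMPLE_SEND_UMR_FENCE	= IB_SEND_RESERVED_START,
};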
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index ad3a3142383a..1ed2088dc9f5 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -70,6 +70,11 @@ enum rdma_port_space {
 	RDMA_PS_UDP   = 0x0111,
 };
 
+#define RDMA_IB_IP_PS_MASK   0xFFFFFFFFFFFF0000ULL
+#define RDMA_IB_IP_PS_TCP    0x0000000001060000ULL
+#define RDMA_IB_IP_PS_UDP    0x0000000001110000ULL
+#define RDMA_IB_IP_PS_IB     0x00000000013F0000ULL
+
 struct rdma_addr {
 	struct sockaddr_storage src_addr;
 	struct sockaddr_storage dst_addr;
@@ -93,6 +98,7 @@ struct rdma_conn_param {
 	/* Fields below ignored if a QP is created on the rdma_cm_id. */
 	u8 srq;
 	u32 qp_num;
+	u32 qkey;
 };
 
 struct rdma_ud_param {
@@ -367,4 +373,11 @@ int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse);
  */
 int rdma_set_afonly(struct rdma_cm_id *id, int afonly);
 
+/**
+ * rdma_get_service_id - Return the IB service ID for a specified address.
+ * @id: Communication identifier associated with the address.
+ * @addr: Address for the service ID.
+ */
+__be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr);
+
 #endif /* RDMA_CM_H */
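
The RDMA_IB_IP_PS_* constants place the legacy IP port space in the upper bits of a 64-bit IB service ID, with the 16-bit port number in the low bits; rdma_get_service_id() returns that combined value for the address bound to an rdma_cm_id. A hedged sketch of forming a TCP-port-space service ID by hand (illustrative only; callers normally just use rdma_get_service_id()):

#include <linux/kernel.h>
#include <rdma/rdma_cm.h>

/* Sketch: combine the TCP IP port space with a 16-bit port number. */
static __be64 example_tcp_service_id(u16 port)
{
	return cpu_to_be64(RDMA_IB_IP_PS_TCP | port);
}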
diff --git a/include/uapi/rdma/rdma_user_cm.h b/include/uapi/rdma/rdma_user_cm.h
index 1ee9239ff8c2..99b80abf360a 100644
--- a/include/uapi/rdma/rdma_user_cm.h
+++ b/include/uapi/rdma/rdma_user_cm.h
@@ -45,8 +45,8 @@
 enum {
 	RDMA_USER_CM_CMD_CREATE_ID,
 	RDMA_USER_CM_CMD_DESTROY_ID,
-	RDMA_USER_CM_CMD_BIND_ADDR,
-	RDMA_USER_CM_CMD_RESOLVE_ADDR,
+	RDMA_USER_CM_CMD_BIND_IP,
+	RDMA_USER_CM_CMD_RESOLVE_IP,
 	RDMA_USER_CM_CMD_RESOLVE_ROUTE,
 	RDMA_USER_CM_CMD_QUERY_ROUTE,
 	RDMA_USER_CM_CMD_CONNECT,
@@ -59,9 +59,13 @@ enum {
 	RDMA_USER_CM_CMD_GET_OPTION,
 	RDMA_USER_CM_CMD_SET_OPTION,
 	RDMA_USER_CM_CMD_NOTIFY,
-	RDMA_USER_CM_CMD_JOIN_MCAST,
+	RDMA_USER_CM_CMD_JOIN_IP_MCAST,
 	RDMA_USER_CM_CMD_LEAVE_MCAST,
-	RDMA_USER_CM_CMD_MIGRATE_ID
+	RDMA_USER_CM_CMD_MIGRATE_ID,
+	RDMA_USER_CM_CMD_QUERY,
+	RDMA_USER_CM_CMD_BIND,
+	RDMA_USER_CM_CMD_RESOLVE_ADDR,
+	RDMA_USER_CM_CMD_JOIN_MCAST
 };
 
 /*
@@ -95,28 +99,51 @@ struct rdma_ucm_destroy_id_resp {
 	__u32 events_reported;
 };
 
-struct rdma_ucm_bind_addr {
+struct rdma_ucm_bind_ip {
 	__u64 response;
 	struct sockaddr_in6 addr;
 	__u32 id;
 };
 
-struct rdma_ucm_resolve_addr {
+struct rdma_ucm_bind {
+	__u32 id;
+	__u16 addr_size;
+	__u16 reserved;
+	struct sockaddr_storage addr;
+};
+
+struct rdma_ucm_resolve_ip {
 	struct sockaddr_in6 src_addr;
 	struct sockaddr_in6 dst_addr;
 	__u32 id;
 	__u32 timeout_ms;
 };
 
+struct rdma_ucm_resolve_addr {
+	__u32 id;
+	__u32 timeout_ms;
+	__u16 src_size;
+	__u16 dst_size;
+	__u32 reserved;
+	struct sockaddr_storage src_addr;
+	struct sockaddr_storage dst_addr;
+};
+
 struct rdma_ucm_resolve_route {
 	__u32 id;
 	__u32 timeout_ms;
 };
 
-struct rdma_ucm_query_route {
+enum {
+	RDMA_USER_CM_QUERY_ADDR,
+	RDMA_USER_CM_QUERY_PATH,
+	RDMA_USER_CM_QUERY_GID
+};
+
+struct rdma_ucm_query {
 	__u64 response;
 	__u32 id;
-	__u32 reserved;
+	__u32 option;
 };
 
 struct rdma_ucm_query_route_resp {
@@ -129,9 +156,26 @@ struct rdma_ucm_query_route_resp {
 	__u8 reserved[3];
 };
 
+struct rdma_ucm_query_addr_resp {
+	__u64 node_guid;
+	__u8  port_num;
+	__u8  reserved;
+	__u16 pkey;
+	__u16 src_size;
+	__u16 dst_size;
+	struct sockaddr_storage src_addr;
+	struct sockaddr_storage dst_addr;
+};
+
+struct rdma_ucm_query_path_resp {
+	__u32 num_paths;
+	__u32 reserved;
+	struct ib_path_rec_data path_data[0];
+};
+
 struct rdma_ucm_conn_param {
 	__u32 qp_num;
-	__u32 reserved;
+	__u32 qkey;
 	__u8 private_data[RDMA_MAX_PRIVATE_DATA];
 	__u8 private_data_len;
 	__u8 srq;
@@ -192,13 +236,22 @@ struct rdma_ucm_notify {
 	__u32 event;
 };
 
-struct rdma_ucm_join_mcast {
+struct rdma_ucm_join_ip_mcast {
 	__u64 response;		/* rdma_ucm_create_id_resp */
 	__u64 uid;
 	struct sockaddr_in6 addr;
 	__u32 id;
 };
 
+struct rdma_ucm_join_mcast {
+	__u64 response;		/* rdma_ucma_create_id_resp */
+	__u64 uid;
+	__u32 id;
+	__u16 addr_size;
+	__u16 reserved;
+	struct sockaddr_storage addr;
+};
+
 struct rdma_ucm_get_event {
 	__u64 response;
 };