about summary refs log tree commit diff stats
path: root/include
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2016-01-23 21:45:06 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2016-01-23 21:45:06 -0500
commit048ccca8c1c8f583deec3367d7df521bb1f542ae (patch)
treeefa882c88f658f711d63581a3063203c63682338 /include
parentb3e27d5d4a29bcc8e057b496d5ef5194addaaac0 (diff)
parent34356f64ac0df2326fa50e2d4bca6f7c03ed16c1 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma
Pull rdma updates from Doug Ledford: "Initial roundup of 4.5 merge window patches - Remove usage of ib_query_device and instead store attributes in ib_device struct - Move iopoll out of block and into lib, rename to irqpoll, and use in several places in the rdma stack as our new completion queue polling library mechanism. Update the other block drivers that already used iopoll to use the new mechanism too. - Replace the per-entry GID table locks with a single GID table lock - IPoIB multicast cleanup - Cleanups to the IB MR facility - Add support for 64bit extended IB counters - Fix for netlink oops while parsing RDMA nl messages - RoCEv2 support for the core IB code - mlx4 RoCEv2 support - mlx5 RoCEv2 support - Cross Channel support for mlx5 - Timestamp support for mlx5 - Atomic support for mlx5 - Raw QP support for mlx5 - MAINTAINERS update for mlx4/mlx5 - Misc ocrdma, qib, nes, usNIC, cxgb3, cxgb4, mlx4, mlx5 updates - Add support for remote invalidate to the iSER driver (pushed through the RDMA tree due to dependencies, acknowledged by nab) - Update to NFSoRDMA (pushed through the RDMA tree due to dependencies, acknowledged by Bruce)" * tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (169 commits) IB/mlx5: Unify CQ create flags check IB/mlx5: Expose Raw Packet QP to user space consumers {IB, net}/mlx5: Move the modify QP operation table to mlx5_ib IB/mlx5: Support setting Ethernet priority for Raw Packet QPs IB/mlx5: Add Raw Packet QP query functionality IB/mlx5: Add create and destroy functionality for Raw Packet QP IB/mlx5: Refactor mlx5_ib_qp to accommodate other QP types IB/mlx5: Allocate a Transport Domain for each ucontext net/mlx5_core: Warn on unsupported events of QP/RQ/SQ net/mlx5_core: Add RQ and SQ event handling net/mlx5_core: Export transport objects IB/mlx5: Expose CQE version to user-space IB/mlx5: Add CQE version 1 support to user QPs and SRQs IB/mlx5: Fix data validation in mlx5_ib_alloc_ucontext IB/sa: Fix 
netlink local service GFP crash IB/srpt: Remove redundant wc array IB/qib: Improve ipoib UD performance IB/mlx4: Advertise RoCE v2 support IB/mlx4: Create and use another QP1 for RoCEv2 IB/mlx4: Enable send of RoCE QP1 packets with IP/UDP headers ...
Diffstat (limited to 'include')
-rw-r--r--include/linux/blk-iopoll.h46
-rw-r--r--include/linux/interrupt.h2
-rw-r--r--include/linux/irq_poll.h25
-rw-r--r--include/linux/mlx4/cmd.h3
-rw-r--r--include/linux/mlx4/device.h15
-rw-r--r--include/linux/mlx4/qp.h15
-rw-r--r--include/linux/mlx5/device.h40
-rw-r--r--include/linux/mlx5/driver.h20
-rw-r--r--include/linux/mlx5/mlx5_ifc.h48
-rw-r--r--include/linux/mlx5/qp.h46
-rw-r--r--include/linux/mlx5/transobj.h78
-rw-r--r--include/linux/mlx5/vport.h8
-rw-r--r--include/linux/sunrpc/svc_rdma.h39
-rw-r--r--include/rdma/ib_addr.h16
-rw-r--r--include/rdma/ib_cache.h4
-rw-r--r--include/rdma/ib_mad.h2
-rw-r--r--include/rdma/ib_pack.h45
-rw-r--r--include/rdma/ib_pma.h1
-rw-r--r--include/rdma/ib_sa.h3
-rw-r--r--include/rdma/ib_verbs.h356
-rw-r--r--include/scsi/iser.h78
-rw-r--r--include/trace/events/irq.h2
22 files changed, 624 insertions, 268 deletions
diff --git a/include/linux/blk-iopoll.h b/include/linux/blk-iopoll.h
deleted file mode 100644
index 77ae77c0b704..000000000000
--- a/include/linux/blk-iopoll.h
+++ /dev/null
@@ -1,46 +0,0 @@
1#ifndef BLK_IOPOLL_H
2#define BLK_IOPOLL_H
3
4struct blk_iopoll;
5typedef int (blk_iopoll_fn)(struct blk_iopoll *, int);
6
7struct blk_iopoll {
8 struct list_head list;
9 unsigned long state;
10 unsigned long data;
11 int weight;
12 int max;
13 blk_iopoll_fn *poll;
14};
15
16enum {
17 IOPOLL_F_SCHED = 0,
18 IOPOLL_F_DISABLE = 1,
19};
20
21/*
22 * Returns 0 if we successfully set the IOPOLL_F_SCHED bit, indicating
23 * that we were the first to acquire this iop for scheduling. If this iop
24 * is currently disabled, return "failure".
25 */
26static inline int blk_iopoll_sched_prep(struct blk_iopoll *iop)
27{
28 if (!test_bit(IOPOLL_F_DISABLE, &iop->state))
29 return test_and_set_bit(IOPOLL_F_SCHED, &iop->state);
30
31 return 1;
32}
33
34static inline int blk_iopoll_disable_pending(struct blk_iopoll *iop)
35{
36 return test_bit(IOPOLL_F_DISABLE, &iop->state);
37}
38
39extern void blk_iopoll_sched(struct blk_iopoll *);
40extern void blk_iopoll_init(struct blk_iopoll *, int, blk_iopoll_fn *);
41extern void blk_iopoll_complete(struct blk_iopoll *);
42extern void __blk_iopoll_complete(struct blk_iopoll *);
43extern void blk_iopoll_enable(struct blk_iopoll *);
44extern void blk_iopoll_disable(struct blk_iopoll *);
45
46#endif
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index cb30edbfe9fc..0e95fcc75b2a 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -413,7 +413,7 @@ enum
413 NET_TX_SOFTIRQ, 413 NET_TX_SOFTIRQ,
414 NET_RX_SOFTIRQ, 414 NET_RX_SOFTIRQ,
415 BLOCK_SOFTIRQ, 415 BLOCK_SOFTIRQ,
416 BLOCK_IOPOLL_SOFTIRQ, 416 IRQ_POLL_SOFTIRQ,
417 TASKLET_SOFTIRQ, 417 TASKLET_SOFTIRQ,
418 SCHED_SOFTIRQ, 418 SCHED_SOFTIRQ,
419 HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the 419 HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the
diff --git a/include/linux/irq_poll.h b/include/linux/irq_poll.h
new file mode 100644
index 000000000000..3e8c1b8fb9be
--- /dev/null
+++ b/include/linux/irq_poll.h
@@ -0,0 +1,25 @@
1#ifndef IRQ_POLL_H
2#define IRQ_POLL_H
3
4struct irq_poll;
5typedef int (irq_poll_fn)(struct irq_poll *, int);
6
7struct irq_poll {
8 struct list_head list;
9 unsigned long state;
10 int weight;
11 irq_poll_fn *poll;
12};
13
14enum {
15 IRQ_POLL_F_SCHED = 0,
16 IRQ_POLL_F_DISABLE = 1,
17};
18
19extern void irq_poll_sched(struct irq_poll *);
20extern void irq_poll_init(struct irq_poll *, int, irq_poll_fn *);
21extern void irq_poll_complete(struct irq_poll *);
22extern void irq_poll_enable(struct irq_poll *);
23extern void irq_poll_disable(struct irq_poll *);
24
25#endif
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 58391f2e0414..116b284bc4ce 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -206,7 +206,8 @@ enum {
206 MLX4_SET_PORT_GID_TABLE = 0x5, 206 MLX4_SET_PORT_GID_TABLE = 0x5,
207 MLX4_SET_PORT_PRIO2TC = 0x8, 207 MLX4_SET_PORT_PRIO2TC = 0x8,
208 MLX4_SET_PORT_SCHEDULER = 0x9, 208 MLX4_SET_PORT_SCHEDULER = 0x9,
209 MLX4_SET_PORT_VXLAN = 0xB 209 MLX4_SET_PORT_VXLAN = 0xB,
210 MLX4_SET_PORT_ROCE_ADDR = 0xD
210}; 211};
211 212
212enum { 213enum {
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index d3133be12d92..430a929f048b 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -216,6 +216,7 @@ enum {
216 MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN = 1LL << 30, 216 MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN = 1LL << 30,
217 MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB = 1ULL << 31, 217 MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB = 1ULL << 31,
218 MLX4_DEV_CAP_FLAG2_LB_SRC_CHK = 1ULL << 32, 218 MLX4_DEV_CAP_FLAG2_LB_SRC_CHK = 1ULL << 32,
219 MLX4_DEV_CAP_FLAG2_ROCE_V1_V2 = 1ULL << 33,
219}; 220};
220 221
221enum { 222enum {
@@ -267,12 +268,14 @@ enum {
267 MLX4_BMME_FLAG_TYPE_2_WIN = 1 << 9, 268 MLX4_BMME_FLAG_TYPE_2_WIN = 1 << 9,
268 MLX4_BMME_FLAG_RESERVED_LKEY = 1 << 10, 269 MLX4_BMME_FLAG_RESERVED_LKEY = 1 << 10,
269 MLX4_BMME_FLAG_FAST_REG_WR = 1 << 11, 270 MLX4_BMME_FLAG_FAST_REG_WR = 1 << 11,
271 MLX4_BMME_FLAG_ROCE_V1_V2 = 1 << 19,
270 MLX4_BMME_FLAG_PORT_REMAP = 1 << 24, 272 MLX4_BMME_FLAG_PORT_REMAP = 1 << 24,
271 MLX4_BMME_FLAG_VSD_INIT2RTR = 1 << 28, 273 MLX4_BMME_FLAG_VSD_INIT2RTR = 1 << 28,
272}; 274};
273 275
274enum { 276enum {
275 MLX4_FLAG_PORT_REMAP = MLX4_BMME_FLAG_PORT_REMAP 277 MLX4_FLAG_PORT_REMAP = MLX4_BMME_FLAG_PORT_REMAP,
278 MLX4_FLAG_ROCE_V1_V2 = MLX4_BMME_FLAG_ROCE_V1_V2
276}; 279};
277 280
278enum mlx4_event { 281enum mlx4_event {
@@ -979,14 +982,11 @@ struct mlx4_mad_ifc {
979 for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ 982 for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
980 if ((type) == (dev)->caps.port_mask[(port)]) 983 if ((type) == (dev)->caps.port_mask[(port)])
981 984
982#define mlx4_foreach_non_ib_transport_port(port, dev) \
983 for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
984 if (((dev)->caps.port_mask[port] != MLX4_PORT_TYPE_IB))
985
986#define mlx4_foreach_ib_transport_port(port, dev) \ 985#define mlx4_foreach_ib_transport_port(port, dev) \
987 for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ 986 for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
988 if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \ 987 if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \
989 ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) 988 ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE) || \
989 ((dev)->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2))
990 990
991#define MLX4_INVALID_SLAVE_ID 0xFF 991#define MLX4_INVALID_SLAVE_ID 0xFF
992#define MLX4_SINK_COUNTER_INDEX(dev) (dev->caps.max_counters - 1) 992#define MLX4_SINK_COUNTER_INDEX(dev) (dev->caps.max_counters - 1)
@@ -1457,6 +1457,7 @@ int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port);
1457 1457
1458int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port); 1458int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port);
1459int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis); 1459int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis);
1460int mlx4_config_roce_v2_port(struct mlx4_dev *dev, u16 udp_port);
1460int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2); 1461int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2);
1461int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port); 1462int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port);
1462int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port); 1463int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port);
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index fe052e234906..587cdf943b52 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -194,7 +194,7 @@ struct mlx4_qp_context {
194 u8 mtu_msgmax; 194 u8 mtu_msgmax;
195 u8 rq_size_stride; 195 u8 rq_size_stride;
196 u8 sq_size_stride; 196 u8 sq_size_stride;
197 u8 rlkey; 197 u8 rlkey_roce_mode;
198 __be32 usr_page; 198 __be32 usr_page;
199 __be32 local_qpn; 199 __be32 local_qpn;
200 __be32 remote_qpn; 200 __be32 remote_qpn;
@@ -204,7 +204,8 @@ struct mlx4_qp_context {
204 u32 reserved1; 204 u32 reserved1;
205 __be32 next_send_psn; 205 __be32 next_send_psn;
206 __be32 cqn_send; 206 __be32 cqn_send;
207 u32 reserved2[2]; 207 __be16 roce_entropy;
208 __be16 reserved2[3];
208 __be32 last_acked_psn; 209 __be32 last_acked_psn;
209 __be32 ssn; 210 __be32 ssn;
210 __be32 params2; 211 __be32 params2;
@@ -487,4 +488,14 @@ static inline struct mlx4_qp *__mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
487 488
488void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp); 489void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp);
489 490
491static inline u16 folded_qp(u32 q)
492{
493 u16 res;
494
495 res = ((q & 0xff) ^ ((q & 0xff0000) >> 16)) | (q & 0xff00);
496 return res;
497}
498
499u16 mlx4_qp_roce_entropy(struct mlx4_dev *dev, u32 qpn);
500
490#endif /* MLX4_QP_H */ 501#endif /* MLX4_QP_H */
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 7be845e30689..987764afa65c 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -223,6 +223,14 @@ enum {
223#define MLX5_UMR_MTT_MASK (MLX5_UMR_MTT_ALIGNMENT - 1) 223#define MLX5_UMR_MTT_MASK (MLX5_UMR_MTT_ALIGNMENT - 1)
224#define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT 224#define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT
225 225
226#define MLX5_USER_INDEX_LEN (MLX5_FLD_SZ_BYTES(qpc, user_index) * 8)
227
228enum {
229 MLX5_EVENT_QUEUE_TYPE_QP = 0,
230 MLX5_EVENT_QUEUE_TYPE_RQ = 1,
231 MLX5_EVENT_QUEUE_TYPE_SQ = 2,
232};
233
226enum mlx5_event { 234enum mlx5_event {
227 MLX5_EVENT_TYPE_COMP = 0x0, 235 MLX5_EVENT_TYPE_COMP = 0x0,
228 236
@@ -280,6 +288,26 @@ enum {
280}; 288};
281 289
282enum { 290enum {
291 MLX5_ROCE_VERSION_1 = 0,
292 MLX5_ROCE_VERSION_2 = 2,
293};
294
295enum {
296 MLX5_ROCE_VERSION_1_CAP = 1 << MLX5_ROCE_VERSION_1,
297 MLX5_ROCE_VERSION_2_CAP = 1 << MLX5_ROCE_VERSION_2,
298};
299
300enum {
301 MLX5_ROCE_L3_TYPE_IPV4 = 0,
302 MLX5_ROCE_L3_TYPE_IPV6 = 1,
303};
304
305enum {
306 MLX5_ROCE_L3_TYPE_IPV4_CAP = 1 << 1,
307 MLX5_ROCE_L3_TYPE_IPV6_CAP = 1 << 2,
308};
309
310enum {
283 MLX5_OPCODE_NOP = 0x00, 311 MLX5_OPCODE_NOP = 0x00,
284 MLX5_OPCODE_SEND_INVAL = 0x01, 312 MLX5_OPCODE_SEND_INVAL = 0x01,
285 MLX5_OPCODE_RDMA_WRITE = 0x08, 313 MLX5_OPCODE_RDMA_WRITE = 0x08,
@@ -446,7 +474,7 @@ struct mlx5_init_seg {
446 __be32 rsvd2[880]; 474 __be32 rsvd2[880];
447 __be32 internal_timer_h; 475 __be32 internal_timer_h;
448 __be32 internal_timer_l; 476 __be32 internal_timer_l;
449 __be32 rsrv3[2]; 477 __be32 rsvd3[2];
450 __be32 health_counter; 478 __be32 health_counter;
451 __be32 rsvd4[1019]; 479 __be32 rsvd4[1019];
452 __be64 ieee1588_clk; 480 __be64 ieee1588_clk;
@@ -460,7 +488,9 @@ struct mlx5_eqe_comp {
460}; 488};
461 489
462struct mlx5_eqe_qp_srq { 490struct mlx5_eqe_qp_srq {
463 __be32 reserved[6]; 491 __be32 reserved1[5];
492 u8 type;
493 u8 reserved2[3];
464 __be32 qp_srq_n; 494 __be32 qp_srq_n;
465}; 495};
466 496
@@ -651,6 +681,12 @@ enum {
651}; 681};
652 682
653enum { 683enum {
684 MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH = 0x0,
685 MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6 = 0x1,
686 MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4 = 0x2,
687};
688
689enum {
654 CQE_L2_OK = 1 << 0, 690 CQE_L2_OK = 1 << 0,
655 CQE_L3_OK = 1 << 1, 691 CQE_L3_OK = 1 << 1,
656 CQE_L4_OK = 1 << 2, 692 CQE_L4_OK = 1 << 2,
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 5162f3533042..1e3006dcf35d 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -115,6 +115,11 @@ enum {
115 MLX5_REG_HOST_ENDIANNESS = 0x7004, 115 MLX5_REG_HOST_ENDIANNESS = 0x7004,
116}; 116};
117 117
118enum {
119 MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0,
120 MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1,
121};
122
118enum mlx5_page_fault_resume_flags { 123enum mlx5_page_fault_resume_flags {
119 MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0, 124 MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0,
120 MLX5_PAGE_FAULT_RESUME_WRITE = 1 << 1, 125 MLX5_PAGE_FAULT_RESUME_WRITE = 1 << 1,
@@ -341,9 +346,11 @@ struct mlx5_core_mr {
341}; 346};
342 347
343enum mlx5_res_type { 348enum mlx5_res_type {
344 MLX5_RES_QP, 349 MLX5_RES_QP = MLX5_EVENT_QUEUE_TYPE_QP,
345 MLX5_RES_SRQ, 350 MLX5_RES_RQ = MLX5_EVENT_QUEUE_TYPE_RQ,
346 MLX5_RES_XSRQ, 351 MLX5_RES_SQ = MLX5_EVENT_QUEUE_TYPE_SQ,
352 MLX5_RES_SRQ = 3,
353 MLX5_RES_XSRQ = 4,
347}; 354};
348 355
349struct mlx5_core_rsc_common { 356struct mlx5_core_rsc_common {
@@ -651,13 +658,6 @@ extern struct workqueue_struct *mlx5_core_wq;
651 .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \ 658 .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
652 .struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field 659 .struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field
653 660
654struct ib_field {
655 size_t struct_offset_bytes;
656 size_t struct_size_bytes;
657 int offset_bits;
658 int size_bits;
659};
660
661static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev) 661static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
662{ 662{
663 return pci_get_drvdata(pdev); 663 return pci_get_drvdata(pdev);
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 68d73f82e009..231ab6bcea76 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -67,6 +67,11 @@ enum {
67}; 67};
68 68
69enum { 69enum {
70 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE = 0x0,
71 MLX5_SET_HCA_CAP_OP_MOD_ATOMIC = 0x3,
72};
73
74enum {
70 MLX5_CMD_OP_QUERY_HCA_CAP = 0x100, 75 MLX5_CMD_OP_QUERY_HCA_CAP = 0x100,
71 MLX5_CMD_OP_QUERY_ADAPTER = 0x101, 76 MLX5_CMD_OP_QUERY_ADAPTER = 0x101,
72 MLX5_CMD_OP_INIT_HCA = 0x102, 77 MLX5_CMD_OP_INIT_HCA = 0x102,
@@ -573,21 +578,24 @@ enum {
573struct mlx5_ifc_atomic_caps_bits { 578struct mlx5_ifc_atomic_caps_bits {
574 u8 reserved_0[0x40]; 579 u8 reserved_0[0x40];
575 580
576 u8 atomic_req_endianness[0x1]; 581 u8 atomic_req_8B_endianess_mode[0x2];
577 u8 reserved_1[0x1f]; 582 u8 reserved_1[0x4];
583 u8 supported_atomic_req_8B_endianess_mode_1[0x1];
578 584
579 u8 reserved_2[0x20]; 585 u8 reserved_2[0x19];
580 586
581 u8 reserved_3[0x10]; 587 u8 reserved_3[0x20];
582 u8 atomic_operations[0x10];
583 588
584 u8 reserved_4[0x10]; 589 u8 reserved_4[0x10];
585 u8 atomic_size_qp[0x10]; 590 u8 atomic_operations[0x10];
586 591
587 u8 reserved_5[0x10]; 592 u8 reserved_5[0x10];
593 u8 atomic_size_qp[0x10];
594
595 u8 reserved_6[0x10];
588 u8 atomic_size_dc[0x10]; 596 u8 atomic_size_dc[0x10];
589 597
590 u8 reserved_6[0x720]; 598 u8 reserved_7[0x720];
591}; 599};
592 600
593struct mlx5_ifc_odp_cap_bits { 601struct mlx5_ifc_odp_cap_bits {
@@ -850,7 +858,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
850 u8 reserved_66[0x8]; 858 u8 reserved_66[0x8];
851 u8 log_uar_page_sz[0x10]; 859 u8 log_uar_page_sz[0x10];
852 860
853 u8 reserved_67[0x40]; 861 u8 reserved_67[0x20];
862 u8 device_frequency_mhz[0x20];
854 u8 device_frequency_khz[0x20]; 863 u8 device_frequency_khz[0x20];
855 u8 reserved_68[0x5f]; 864 u8 reserved_68[0x5f];
856 u8 cqe_zip[0x1]; 865 u8 cqe_zip[0x1];
@@ -2215,19 +2224,25 @@ struct mlx5_ifc_nic_vport_context_bits {
2215 2224
2216 u8 mtu[0x10]; 2225 u8 mtu[0x10];
2217 2226
2218 u8 reserved_3[0x640]; 2227 u8 system_image_guid[0x40];
2228 u8 port_guid[0x40];
2229 u8 node_guid[0x40];
2230
2231 u8 reserved_3[0x140];
2232 u8 qkey_violation_counter[0x10];
2233 u8 reserved_4[0x430];
2219 2234
2220 u8 promisc_uc[0x1]; 2235 u8 promisc_uc[0x1];
2221 u8 promisc_mc[0x1]; 2236 u8 promisc_mc[0x1];
2222 u8 promisc_all[0x1]; 2237 u8 promisc_all[0x1];
2223 u8 reserved_4[0x2]; 2238 u8 reserved_5[0x2];
2224 u8 allowed_list_type[0x3]; 2239 u8 allowed_list_type[0x3];
2225 u8 reserved_5[0xc]; 2240 u8 reserved_6[0xc];
2226 u8 allowed_list_size[0xc]; 2241 u8 allowed_list_size[0xc];
2227 2242
2228 struct mlx5_ifc_mac_address_layout_bits permanent_address; 2243 struct mlx5_ifc_mac_address_layout_bits permanent_address;
2229 2244
2230 u8 reserved_6[0x20]; 2245 u8 reserved_7[0x20];
2231 2246
2232 u8 current_uc_mac_address[0][0x40]; 2247 u8 current_uc_mac_address[0][0x40];
2233}; 2248};
@@ -4199,6 +4214,13 @@ struct mlx5_ifc_modify_tis_out_bits {
4199 u8 reserved_1[0x40]; 4214 u8 reserved_1[0x40];
4200}; 4215};
4201 4216
4217struct mlx5_ifc_modify_tis_bitmask_bits {
4218 u8 reserved_0[0x20];
4219
4220 u8 reserved_1[0x1f];
4221 u8 prio[0x1];
4222};
4223
4202struct mlx5_ifc_modify_tis_in_bits { 4224struct mlx5_ifc_modify_tis_in_bits {
4203 u8 opcode[0x10]; 4225 u8 opcode[0x10];
4204 u8 reserved_0[0x10]; 4226 u8 reserved_0[0x10];
@@ -4211,7 +4233,7 @@ struct mlx5_ifc_modify_tis_in_bits {
4211 4233
4212 u8 reserved_3[0x20]; 4234 u8 reserved_3[0x20];
4213 4235
4214 u8 modify_bitmask[0x40]; 4236 struct mlx5_ifc_modify_tis_bitmask_bits bitmask;
4215 4237
4216 u8 reserved_4[0x40]; 4238 u8 reserved_4[0x40];
4217 4239
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index f079fb1a31f7..5b8c89ffaa58 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -85,7 +85,16 @@ enum mlx5_qp_state {
85 MLX5_QP_STATE_ERR = 6, 85 MLX5_QP_STATE_ERR = 6,
86 MLX5_QP_STATE_SQ_DRAINING = 7, 86 MLX5_QP_STATE_SQ_DRAINING = 7,
87 MLX5_QP_STATE_SUSPENDED = 9, 87 MLX5_QP_STATE_SUSPENDED = 9,
88 MLX5_QP_NUM_STATE 88 MLX5_QP_NUM_STATE,
89 MLX5_QP_STATE,
90 MLX5_QP_STATE_BAD,
91};
92
93enum {
94 MLX5_SQ_STATE_NA = MLX5_SQC_STATE_ERR + 1,
95 MLX5_SQ_NUM_STATE = MLX5_SQ_STATE_NA + 1,
96 MLX5_RQ_STATE_NA = MLX5_RQC_STATE_ERR + 1,
97 MLX5_RQ_NUM_STATE = MLX5_RQ_STATE_NA + 1,
89}; 98};
90 99
91enum { 100enum {
@@ -130,6 +139,9 @@ enum {
130 MLX5_QP_BIT_RWE = 1 << 14, 139 MLX5_QP_BIT_RWE = 1 << 14,
131 MLX5_QP_BIT_RAE = 1 << 13, 140 MLX5_QP_BIT_RAE = 1 << 13,
132 MLX5_QP_BIT_RIC = 1 << 4, 141 MLX5_QP_BIT_RIC = 1 << 4,
142 MLX5_QP_BIT_CC_SLAVE_RECV = 1 << 2,
143 MLX5_QP_BIT_CC_SLAVE_SEND = 1 << 1,
144 MLX5_QP_BIT_CC_MASTER = 1 << 0
133}; 145};
134 146
135enum { 147enum {
@@ -248,8 +260,12 @@ struct mlx5_av {
248 __be32 dqp_dct; 260 __be32 dqp_dct;
249 u8 stat_rate_sl; 261 u8 stat_rate_sl;
250 u8 fl_mlid; 262 u8 fl_mlid;
251 __be16 rlid; 263 union {
252 u8 reserved0[10]; 264 __be16 rlid;
265 __be16 udp_sport;
266 };
267 u8 reserved0[4];
268 u8 rmac[6];
253 u8 tclass; 269 u8 tclass;
254 u8 hop_limit; 270 u8 hop_limit;
255 __be32 grh_gid_fl; 271 __be32 grh_gid_fl;
@@ -456,11 +472,16 @@ struct mlx5_qp_path {
456 u8 static_rate; 472 u8 static_rate;
457 u8 hop_limit; 473 u8 hop_limit;
458 __be32 tclass_flowlabel; 474 __be32 tclass_flowlabel;
459 u8 rgid[16]; 475 union {
460 u8 rsvd1[4]; 476 u8 rgid[16];
461 u8 sl; 477 u8 rip[16];
478 };
479 u8 f_dscp_ecn_prio;
480 u8 ecn_dscp;
481 __be16 udp_sport;
482 u8 dci_cfi_prio_sl;
462 u8 port; 483 u8 port;
463 u8 rsvd2[6]; 484 u8 rmac[6];
464}; 485};
465 486
466struct mlx5_qp_context { 487struct mlx5_qp_context {
@@ -620,8 +641,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
620 struct mlx5_core_qp *qp, 641 struct mlx5_core_qp *qp,
621 struct mlx5_create_qp_mbox_in *in, 642 struct mlx5_create_qp_mbox_in *in,
622 int inlen); 643 int inlen);
623int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state, 644int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation,
624 enum mlx5_qp_state new_state,
625 struct mlx5_modify_qp_mbox_in *in, int sqd_event, 645 struct mlx5_modify_qp_mbox_in *in, int sqd_event,
626 struct mlx5_core_qp *qp); 646 struct mlx5_core_qp *qp);
627int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, 647int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
@@ -639,6 +659,14 @@ void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
639int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn, 659int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
640 u8 context, int error); 660 u8 context, int error);
641#endif 661#endif
662int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
663 struct mlx5_core_qp *rq);
664void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
665 struct mlx5_core_qp *rq);
666int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
667 struct mlx5_core_qp *sq);
668void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
669 struct mlx5_core_qp *sq);
642 670
643static inline const char *mlx5_qp_type_str(int type) 671static inline const char *mlx5_qp_type_str(int type)
644{ 672{
diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h
new file mode 100644
index 000000000000..88441f5ece25
--- /dev/null
+++ b/include/linux/mlx5/transobj.h
@@ -0,0 +1,78 @@
1/*
2 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef __TRANSOBJ_H__
34#define __TRANSOBJ_H__
35
36#include <linux/mlx5/driver.h>
37
38int mlx5_core_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn);
39void mlx5_core_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn);
40int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen,
41 u32 *rqn);
42int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen);
43void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn);
44int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out);
45int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen,
46 u32 *sqn);
47int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen);
48void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);
49int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out);
50int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
51 u32 *tirn);
52int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
53 int inlen);
54void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn);
55int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
56 u32 *tisn);
57int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in,
58 int inlen);
59void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn);
60int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
61 u32 *rmpn);
62int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen);
63int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn);
64int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
65int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
66int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
67 u32 *rmpn);
68int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn);
69int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
70int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
71
72int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
73 u32 *rqtn);
74int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
75 int inlen);
76void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn);
77
78#endif /* __TRANSOBJ_H__ */
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index 638f2ca7a527..123771003e68 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -45,6 +45,11 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
45 u16 vport, u8 *addr); 45 u16 vport, u8 *addr);
46int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev, 46int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
47 u16 vport, u8 *addr); 47 u16 vport, u8 *addr);
48int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
49 u64 *system_image_guid);
50int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
51int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
52 u16 *qkey_viol_cntr);
48int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport, 53int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
49 u8 port_num, u16 vf_num, u16 gid_index, 54 u8 port_num, u16 vf_num, u16 gid_index,
50 union ib_gid *gid); 55 union ib_gid *gid);
@@ -85,4 +90,7 @@ int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
85 u16 vlans[], 90 u16 vlans[],
86 int list_size); 91 int list_size);
87 92
93int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev);
94int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev);
95
88#endif /* __MLX5_VPORT_H__ */ 96#endif /* __MLX5_VPORT_H__ */
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index f869807a0d0e..5322fea6fe4c 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -51,6 +51,7 @@
51/* RPC/RDMA parameters and stats */ 51/* RPC/RDMA parameters and stats */
52extern unsigned int svcrdma_ord; 52extern unsigned int svcrdma_ord;
53extern unsigned int svcrdma_max_requests; 53extern unsigned int svcrdma_max_requests;
54extern unsigned int svcrdma_max_bc_requests;
54extern unsigned int svcrdma_max_req_size; 55extern unsigned int svcrdma_max_req_size;
55 56
56extern atomic_t rdma_stat_recv; 57extern atomic_t rdma_stat_recv;
@@ -69,6 +70,7 @@ extern atomic_t rdma_stat_sq_prod;
69 * completes. 70 * completes.
70 */ 71 */
71struct svc_rdma_op_ctxt { 72struct svc_rdma_op_ctxt {
73 struct list_head free;
72 struct svc_rdma_op_ctxt *read_hdr; 74 struct svc_rdma_op_ctxt *read_hdr;
73 struct svc_rdma_fastreg_mr *frmr; 75 struct svc_rdma_fastreg_mr *frmr;
74 int hdr_count; 76 int hdr_count;
@@ -112,6 +114,7 @@ struct svc_rdma_fastreg_mr {
112 struct list_head frmr_list; 114 struct list_head frmr_list;
113}; 115};
114struct svc_rdma_req_map { 116struct svc_rdma_req_map {
117 struct list_head free;
115 unsigned long count; 118 unsigned long count;
116 union { 119 union {
117 struct kvec sge[RPCSVC_MAXPAGES]; 120 struct kvec sge[RPCSVC_MAXPAGES];
@@ -132,28 +135,32 @@ struct svcxprt_rdma {
132 int sc_max_sge; 135 int sc_max_sge;
133 int sc_max_sge_rd; /* max sge for read target */ 136 int sc_max_sge_rd; /* max sge for read target */
134 137
135 int sc_sq_depth; /* Depth of SQ */
136 atomic_t sc_sq_count; /* Number of SQ WR on queue */ 138 atomic_t sc_sq_count; /* Number of SQ WR on queue */
137 139 unsigned int sc_sq_depth; /* Depth of SQ */
138 int sc_max_requests; /* Depth of RQ */ 140 unsigned int sc_rq_depth; /* Depth of RQ */
141 u32 sc_max_requests; /* Forward credits */
142 u32 sc_max_bc_requests;/* Backward credits */
139 int sc_max_req_size; /* Size of each RQ WR buf */ 143 int sc_max_req_size; /* Size of each RQ WR buf */
140 144
141 struct ib_pd *sc_pd; 145 struct ib_pd *sc_pd;
142 146
143 atomic_t sc_dma_used; 147 atomic_t sc_dma_used;
144 atomic_t sc_ctxt_used; 148 spinlock_t sc_ctxt_lock;
149 struct list_head sc_ctxts;
150 int sc_ctxt_used;
151 spinlock_t sc_map_lock;
152 struct list_head sc_maps;
153
145 struct list_head sc_rq_dto_q; 154 struct list_head sc_rq_dto_q;
146 spinlock_t sc_rq_dto_lock; 155 spinlock_t sc_rq_dto_lock;
147 struct ib_qp *sc_qp; 156 struct ib_qp *sc_qp;
148 struct ib_cq *sc_rq_cq; 157 struct ib_cq *sc_rq_cq;
149 struct ib_cq *sc_sq_cq; 158 struct ib_cq *sc_sq_cq;
150 struct ib_mr *sc_phys_mr; /* MR for server memory */
151 int (*sc_reader)(struct svcxprt_rdma *, 159 int (*sc_reader)(struct svcxprt_rdma *,
152 struct svc_rqst *, 160 struct svc_rqst *,
153 struct svc_rdma_op_ctxt *, 161 struct svc_rdma_op_ctxt *,
154 int *, u32 *, u32, u32, u64, bool); 162 int *, u32 *, u32, u32, u64, bool);
155 u32 sc_dev_caps; /* distilled device caps */ 163 u32 sc_dev_caps; /* distilled device caps */
156 u32 sc_dma_lkey; /* local dma key */
157 unsigned int sc_frmr_pg_list_len; 164 unsigned int sc_frmr_pg_list_len;
158 struct list_head sc_frmr_q; 165 struct list_head sc_frmr_q;
159 spinlock_t sc_frmr_q_lock; 166 spinlock_t sc_frmr_q_lock;
@@ -179,8 +186,18 @@ struct svcxprt_rdma {
179#define RPCRDMA_MAX_REQUESTS 32 186#define RPCRDMA_MAX_REQUESTS 32
180#define RPCRDMA_MAX_REQ_SIZE 4096 187#define RPCRDMA_MAX_REQ_SIZE 4096
181 188
189/* Typical ULP usage of BC requests is NFSv4.1 backchannel. Our
190 * current NFSv4.1 implementation supports one backchannel slot.
191 */
192#define RPCRDMA_MAX_BC_REQUESTS 2
193
182#define RPCSVC_MAXPAYLOAD_RDMA RPCSVC_MAXPAYLOAD 194#define RPCSVC_MAXPAYLOAD_RDMA RPCSVC_MAXPAYLOAD
183 195
196/* svc_rdma_backchannel.c */
197extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt,
198 struct rpcrdma_msg *rmsgp,
199 struct xdr_buf *rcvbuf);
200
184/* svc_rdma_marshal.c */ 201/* svc_rdma_marshal.c */
185extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg **, struct svc_rqst *); 202extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg **, struct svc_rqst *);
186extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *, 203extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *,
@@ -206,6 +223,8 @@ extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *,
206 u32, u32, u64, bool); 223 u32, u32, u64, bool);
207 224
208/* svc_rdma_sendto.c */ 225/* svc_rdma_sendto.c */
226extern int svc_rdma_map_xdr(struct svcxprt_rdma *, struct xdr_buf *,
227 struct svc_rdma_req_map *);
209extern int svc_rdma_sendto(struct svc_rqst *); 228extern int svc_rdma_sendto(struct svc_rqst *);
210extern struct rpcrdma_read_chunk * 229extern struct rpcrdma_read_chunk *
211 svc_rdma_get_read_chunk(struct rpcrdma_msg *); 230 svc_rdma_get_read_chunk(struct rpcrdma_msg *);
@@ -214,13 +233,14 @@ extern struct rpcrdma_read_chunk *
214extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *); 233extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *);
215extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *, 234extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *,
216 enum rpcrdma_errcode); 235 enum rpcrdma_errcode);
217extern int svc_rdma_post_recv(struct svcxprt_rdma *); 236extern int svc_rdma_post_recv(struct svcxprt_rdma *, gfp_t);
218extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *); 237extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
219extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *); 238extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *);
220extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int); 239extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int);
221extern void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt); 240extern void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt);
222extern struct svc_rdma_req_map *svc_rdma_get_req_map(void); 241extern struct svc_rdma_req_map *svc_rdma_get_req_map(struct svcxprt_rdma *);
223extern void svc_rdma_put_req_map(struct svc_rdma_req_map *); 242extern void svc_rdma_put_req_map(struct svcxprt_rdma *,
243 struct svc_rdma_req_map *);
224extern struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *); 244extern struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *);
225extern void svc_rdma_put_frmr(struct svcxprt_rdma *, 245extern void svc_rdma_put_frmr(struct svcxprt_rdma *,
226 struct svc_rdma_fastreg_mr *); 246 struct svc_rdma_fastreg_mr *);
@@ -234,6 +254,7 @@ extern struct svc_xprt_class svc_rdma_bc_class;
234#endif 254#endif
235 255
236/* svc_rdma.c */ 256/* svc_rdma.c */
257extern struct workqueue_struct *svc_rdma_wq;
237extern int svc_rdma_init(void); 258extern int svc_rdma_init(void);
238extern void svc_rdma_cleanup(void); 259extern void svc_rdma_cleanup(void);
239 260
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index 11528591d0d7..c34c9002460c 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -83,6 +83,8 @@ struct rdma_dev_addr {
83 int bound_dev_if; 83 int bound_dev_if;
84 enum rdma_transport_type transport; 84 enum rdma_transport_type transport;
85 struct net *net; 85 struct net *net;
86 enum rdma_network_type network;
87 int hoplimit;
86}; 88};
87 89
88/** 90/**
@@ -91,8 +93,8 @@ struct rdma_dev_addr {
91 * 93 *
92 * The dev_addr->net field must be initialized. 94 * The dev_addr->net field must be initialized.
93 */ 95 */
94int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr, 96int rdma_translate_ip(const struct sockaddr *addr,
95 u16 *vlan_id); 97 struct rdma_dev_addr *dev_addr, u16 *vlan_id);
96 98
97/** 99/**
98 * rdma_resolve_ip - Resolve source and destination IP addresses to 100 * rdma_resolve_ip - Resolve source and destination IP addresses to
@@ -117,6 +119,10 @@ int rdma_resolve_ip(struct rdma_addr_client *client,
117 struct rdma_dev_addr *addr, void *context), 119 struct rdma_dev_addr *addr, void *context),
118 void *context); 120 void *context);
119 121
122int rdma_resolve_ip_route(struct sockaddr *src_addr,
123 const struct sockaddr *dst_addr,
124 struct rdma_dev_addr *addr);
125
120void rdma_addr_cancel(struct rdma_dev_addr *addr); 126void rdma_addr_cancel(struct rdma_dev_addr *addr);
121 127
122int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev, 128int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
@@ -125,8 +131,10 @@ int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
125int rdma_addr_size(struct sockaddr *addr); 131int rdma_addr_size(struct sockaddr *addr);
126 132
127int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id); 133int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id);
128int rdma_addr_find_dmac_by_grh(const union ib_gid *sgid, const union ib_gid *dgid, 134int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
129 u8 *smac, u16 *vlan_id, int if_index); 135 const union ib_gid *dgid,
136 u8 *smac, u16 *vlan_id, int *if_index,
137 int *hoplimit);
130 138
131static inline u16 ib_addr_get_pkey(struct rdma_dev_addr *dev_addr) 139static inline u16 ib_addr_get_pkey(struct rdma_dev_addr *dev_addr)
132{ 140{
diff --git a/include/rdma/ib_cache.h b/include/rdma/ib_cache.h
index 269a27cf0a46..e30f19bd4a41 100644
--- a/include/rdma/ib_cache.h
+++ b/include/rdma/ib_cache.h
@@ -60,6 +60,7 @@ int ib_get_cached_gid(struct ib_device *device,
60 * a specified GID value occurs. 60 * a specified GID value occurs.
61 * @device: The device to query. 61 * @device: The device to query.
62 * @gid: The GID value to search for. 62 * @gid: The GID value to search for.
63 * @gid_type: The GID type to search for.
63 * @ndev: In RoCE, the net device of the device. NULL means ignore. 64 * @ndev: In RoCE, the net device of the device. NULL means ignore.
64 * @port_num: The port number of the device where the GID value was found. 65 * @port_num: The port number of the device where the GID value was found.
65 * @index: The index into the cached GID table where the GID was found. This 66 * @index: The index into the cached GID table where the GID was found. This
@@ -70,6 +71,7 @@ int ib_get_cached_gid(struct ib_device *device,
70 */ 71 */
71int ib_find_cached_gid(struct ib_device *device, 72int ib_find_cached_gid(struct ib_device *device,
72 const union ib_gid *gid, 73 const union ib_gid *gid,
74 enum ib_gid_type gid_type,
73 struct net_device *ndev, 75 struct net_device *ndev,
74 u8 *port_num, 76 u8 *port_num,
75 u16 *index); 77 u16 *index);
@@ -79,6 +81,7 @@ int ib_find_cached_gid(struct ib_device *device,
79 * GID value occurs 81 * GID value occurs
80 * @device: The device to query. 82 * @device: The device to query.
81 * @gid: The GID value to search for. 83 * @gid: The GID value to search for.
84 * @gid_type: The GID type to search for.
82 * @port_num: The port number of the device where the GID value sould be 85 * @port_num: The port number of the device where the GID value sould be
83 * searched. 86 * searched.
84 * @ndev: In RoCE, the net device of the device. Null means ignore. 87 * @ndev: In RoCE, the net device of the device. Null means ignore.
@@ -90,6 +93,7 @@ int ib_find_cached_gid(struct ib_device *device,
90 */ 93 */
91int ib_find_cached_gid_by_port(struct ib_device *device, 94int ib_find_cached_gid_by_port(struct ib_device *device,
92 const union ib_gid *gid, 95 const union ib_gid *gid,
96 enum ib_gid_type gid_type,
93 u8 port_num, 97 u8 port_num,
94 struct net_device *ndev, 98 struct net_device *ndev,
95 u16 *index); 99 u16 *index);
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index ec9b44dd3d80..0ff049bd9ad4 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -438,6 +438,7 @@ typedef void (*ib_mad_snoop_handler)(struct ib_mad_agent *mad_agent,
438/** 438/**
439 * ib_mad_recv_handler - callback handler for a received MAD. 439 * ib_mad_recv_handler - callback handler for a received MAD.
440 * @mad_agent: MAD agent requesting the received MAD. 440 * @mad_agent: MAD agent requesting the received MAD.
441 * @send_buf: Send buffer if found, else NULL
441 * @mad_recv_wc: Received work completion information on the received MAD. 442 * @mad_recv_wc: Received work completion information on the received MAD.
442 * 443 *
443 * MADs received in response to a send request operation will be handed to 444 * MADs received in response to a send request operation will be handed to
@@ -447,6 +448,7 @@ typedef void (*ib_mad_snoop_handler)(struct ib_mad_agent *mad_agent,
447 * modify the data referenced by @mad_recv_wc. 448 * modify the data referenced by @mad_recv_wc.
448 */ 449 */
449typedef void (*ib_mad_recv_handler)(struct ib_mad_agent *mad_agent, 450typedef void (*ib_mad_recv_handler)(struct ib_mad_agent *mad_agent,
451 struct ib_mad_send_buf *send_buf,
450 struct ib_mad_recv_wc *mad_recv_wc); 452 struct ib_mad_recv_wc *mad_recv_wc);
451 453
452/** 454/**
diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h
index e99d8f9a4551..0f3daae44bf9 100644
--- a/include/rdma/ib_pack.h
+++ b/include/rdma/ib_pack.h
@@ -41,6 +41,8 @@ enum {
41 IB_ETH_BYTES = 14, 41 IB_ETH_BYTES = 14,
42 IB_VLAN_BYTES = 4, 42 IB_VLAN_BYTES = 4,
43 IB_GRH_BYTES = 40, 43 IB_GRH_BYTES = 40,
44 IB_IP4_BYTES = 20,
45 IB_UDP_BYTES = 8,
44 IB_BTH_BYTES = 12, 46 IB_BTH_BYTES = 12,
45 IB_DETH_BYTES = 8 47 IB_DETH_BYTES = 8
46}; 48};
@@ -223,6 +225,27 @@ struct ib_unpacked_eth {
223 __be16 type; 225 __be16 type;
224}; 226};
225 227
228struct ib_unpacked_ip4 {
229 u8 ver;
230 u8 hdr_len;
231 u8 tos;
232 __be16 tot_len;
233 __be16 id;
234 __be16 frag_off;
235 u8 ttl;
236 u8 protocol;
237 __sum16 check;
238 __be32 saddr;
239 __be32 daddr;
240};
241
242struct ib_unpacked_udp {
243 __be16 sport;
244 __be16 dport;
245 __be16 length;
246 __be16 csum;
247};
248
226struct ib_unpacked_vlan { 249struct ib_unpacked_vlan {
227 __be16 tag; 250 __be16 tag;
228 __be16 type; 251 __be16 type;
@@ -237,6 +260,10 @@ struct ib_ud_header {
237 struct ib_unpacked_vlan vlan; 260 struct ib_unpacked_vlan vlan;
238 int grh_present; 261 int grh_present;
239 struct ib_unpacked_grh grh; 262 struct ib_unpacked_grh grh;
263 int ipv4_present;
264 struct ib_unpacked_ip4 ip4;
265 int udp_present;
266 struct ib_unpacked_udp udp;
240 struct ib_unpacked_bth bth; 267 struct ib_unpacked_bth bth;
241 struct ib_unpacked_deth deth; 268 struct ib_unpacked_deth deth;
242 int immediate_present; 269 int immediate_present;
@@ -253,13 +280,17 @@ void ib_unpack(const struct ib_field *desc,
253 void *buf, 280 void *buf,
254 void *structure); 281 void *structure);
255 282
256void ib_ud_header_init(int payload_bytes, 283__sum16 ib_ud_ip4_csum(struct ib_ud_header *header);
257 int lrh_present, 284
258 int eth_present, 285int ib_ud_header_init(int payload_bytes,
259 int vlan_present, 286 int lrh_present,
260 int grh_present, 287 int eth_present,
261 int immediate_present, 288 int vlan_present,
262 struct ib_ud_header *header); 289 int grh_present,
290 int ip_version,
291 int udp_present,
292 int immediate_present,
293 struct ib_ud_header *header);
263 294
264int ib_ud_header_pack(struct ib_ud_header *header, 295int ib_ud_header_pack(struct ib_ud_header *header,
265 void *buf); 296 void *buf);
diff --git a/include/rdma/ib_pma.h b/include/rdma/ib_pma.h
index a5889f18807b..2f8a65c1fca7 100644
--- a/include/rdma/ib_pma.h
+++ b/include/rdma/ib_pma.h
@@ -42,6 +42,7 @@
42 */ 42 */
43#define IB_PMA_CLASS_CAP_ALLPORTSELECT cpu_to_be16(1 << 8) 43#define IB_PMA_CLASS_CAP_ALLPORTSELECT cpu_to_be16(1 << 8)
44#define IB_PMA_CLASS_CAP_EXT_WIDTH cpu_to_be16(1 << 9) 44#define IB_PMA_CLASS_CAP_EXT_WIDTH cpu_to_be16(1 << 9)
45#define IB_PMA_CLASS_CAP_EXT_WIDTH_NOIETF cpu_to_be16(1 << 10)
45#define IB_PMA_CLASS_CAP_XMIT_WAIT cpu_to_be16(1 << 12) 46#define IB_PMA_CLASS_CAP_XMIT_WAIT cpu_to_be16(1 << 12)
46 47
47#define IB_PMA_CLASS_PORT_INFO cpu_to_be16(0x0001) 48#define IB_PMA_CLASS_PORT_INFO cpu_to_be16(0x0001)
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index 301969552d0a..cdc1c81aa275 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -160,6 +160,7 @@ struct ib_sa_path_rec {
160 int ifindex; 160 int ifindex;
161 /* ignored in IB */ 161 /* ignored in IB */
162 struct net *net; 162 struct net *net;
163 enum ib_gid_type gid_type;
163}; 164};
164 165
165static inline struct net_device *ib_get_ndev_from_path(struct ib_sa_path_rec *rec) 166static inline struct net_device *ib_get_ndev_from_path(struct ib_sa_path_rec *rec)
@@ -402,6 +403,8 @@ int ib_sa_get_mcmember_rec(struct ib_device *device, u8 port_num,
402 */ 403 */
403int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num, 404int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
404 struct ib_sa_mcmember_rec *rec, 405 struct ib_sa_mcmember_rec *rec,
406 struct net_device *ndev,
407 enum ib_gid_type gid_type,
405 struct ib_ah_attr *ah_attr); 408 struct ib_ah_attr *ah_attr);
406 409
407/** 410/**
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 120da1d7f57e..284b00c8fea4 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -49,13 +49,19 @@
49#include <linux/scatterlist.h> 49#include <linux/scatterlist.h>
50#include <linux/workqueue.h> 50#include <linux/workqueue.h>
51#include <linux/socket.h> 51#include <linux/socket.h>
52#include <linux/irq_poll.h>
52#include <uapi/linux/if_ether.h> 53#include <uapi/linux/if_ether.h>
54#include <net/ipv6.h>
55#include <net/ip.h>
56#include <linux/string.h>
57#include <linux/slab.h>
53 58
54#include <linux/atomic.h> 59#include <linux/atomic.h>
55#include <linux/mmu_notifier.h> 60#include <linux/mmu_notifier.h>
56#include <asm/uaccess.h> 61#include <asm/uaccess.h>
57 62
58extern struct workqueue_struct *ib_wq; 63extern struct workqueue_struct *ib_wq;
64extern struct workqueue_struct *ib_comp_wq;
59 65
60union ib_gid { 66union ib_gid {
61 u8 raw[16]; 67 u8 raw[16];
@@ -67,7 +73,17 @@ union ib_gid {
67 73
68extern union ib_gid zgid; 74extern union ib_gid zgid;
69 75
76enum ib_gid_type {
77 /* If link layer is Ethernet, this is RoCE V1 */
78 IB_GID_TYPE_IB = 0,
79 IB_GID_TYPE_ROCE = 0,
80 IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
81 IB_GID_TYPE_SIZE
82};
83
84#define ROCE_V2_UDP_DPORT 4791
70struct ib_gid_attr { 85struct ib_gid_attr {
86 enum ib_gid_type gid_type;
71 struct net_device *ndev; 87 struct net_device *ndev;
72}; 88};
73 89
@@ -98,6 +114,35 @@ enum rdma_protocol_type {
98__attribute_const__ enum rdma_transport_type 114__attribute_const__ enum rdma_transport_type
99rdma_node_get_transport(enum rdma_node_type node_type); 115rdma_node_get_transport(enum rdma_node_type node_type);
100 116
117enum rdma_network_type {
118 RDMA_NETWORK_IB,
119 RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
120 RDMA_NETWORK_IPV4,
121 RDMA_NETWORK_IPV6
122};
123
124static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
125{
126 if (network_type == RDMA_NETWORK_IPV4 ||
127 network_type == RDMA_NETWORK_IPV6)
128 return IB_GID_TYPE_ROCE_UDP_ENCAP;
129
130 /* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
131 return IB_GID_TYPE_IB;
132}
133
134static inline enum rdma_network_type ib_gid_to_network_type(enum ib_gid_type gid_type,
135 union ib_gid *gid)
136{
137 if (gid_type == IB_GID_TYPE_IB)
138 return RDMA_NETWORK_IB;
139
140 if (ipv6_addr_v4mapped((struct in6_addr *)gid))
141 return RDMA_NETWORK_IPV4;
142 else
143 return RDMA_NETWORK_IPV6;
144}
145
101enum rdma_link_layer { 146enum rdma_link_layer {
102 IB_LINK_LAYER_UNSPECIFIED, 147 IB_LINK_LAYER_UNSPECIFIED,
103 IB_LINK_LAYER_INFINIBAND, 148 IB_LINK_LAYER_INFINIBAND,
@@ -105,24 +150,32 @@ enum rdma_link_layer {
105}; 150};
106 151
107enum ib_device_cap_flags { 152enum ib_device_cap_flags {
108 IB_DEVICE_RESIZE_MAX_WR = 1, 153 IB_DEVICE_RESIZE_MAX_WR = (1 << 0),
109 IB_DEVICE_BAD_PKEY_CNTR = (1<<1), 154 IB_DEVICE_BAD_PKEY_CNTR = (1 << 1),
110 IB_DEVICE_BAD_QKEY_CNTR = (1<<2), 155 IB_DEVICE_BAD_QKEY_CNTR = (1 << 2),
111 IB_DEVICE_RAW_MULTI = (1<<3), 156 IB_DEVICE_RAW_MULTI = (1 << 3),
112 IB_DEVICE_AUTO_PATH_MIG = (1<<4), 157 IB_DEVICE_AUTO_PATH_MIG = (1 << 4),
113 IB_DEVICE_CHANGE_PHY_PORT = (1<<5), 158 IB_DEVICE_CHANGE_PHY_PORT = (1 << 5),
114 IB_DEVICE_UD_AV_PORT_ENFORCE = (1<<6), 159 IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6),
115 IB_DEVICE_CURR_QP_STATE_MOD = (1<<7), 160 IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7),
116 IB_DEVICE_SHUTDOWN_PORT = (1<<8), 161 IB_DEVICE_SHUTDOWN_PORT = (1 << 8),
117 IB_DEVICE_INIT_TYPE = (1<<9), 162 IB_DEVICE_INIT_TYPE = (1 << 9),
118 IB_DEVICE_PORT_ACTIVE_EVENT = (1<<10), 163 IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10),
119 IB_DEVICE_SYS_IMAGE_GUID = (1<<11), 164 IB_DEVICE_SYS_IMAGE_GUID = (1 << 11),
120 IB_DEVICE_RC_RNR_NAK_GEN = (1<<12), 165 IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12),
121 IB_DEVICE_SRQ_RESIZE = (1<<13), 166 IB_DEVICE_SRQ_RESIZE = (1 << 13),
122 IB_DEVICE_N_NOTIFY_CQ = (1<<14), 167 IB_DEVICE_N_NOTIFY_CQ = (1 << 14),
123 IB_DEVICE_LOCAL_DMA_LKEY = (1<<15), 168
124 IB_DEVICE_RESERVED = (1<<16), /* old SEND_W_INV */ 169 /*
125 IB_DEVICE_MEM_WINDOW = (1<<17), 170 * This device supports a per-device lkey or stag that can be
171 * used without performing a memory registration for the local
172 * memory. Note that ULPs should never check this flag, but
173 * instead of use the local_dma_lkey flag in the ib_pd structure,
174 * which will always contain a usable lkey.
175 */
176 IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15),
177 IB_DEVICE_RESERVED /* old SEND_W_INV */ = (1 << 16),
178 IB_DEVICE_MEM_WINDOW = (1 << 17),
126 /* 179 /*
127 * Devices should set IB_DEVICE_UD_IP_SUM if they support 180 * Devices should set IB_DEVICE_UD_IP_SUM if they support
128 * insertion of UDP and TCP checksum on outgoing UD IPoIB 181 * insertion of UDP and TCP checksum on outgoing UD IPoIB
@@ -130,18 +183,35 @@ enum ib_device_cap_flags {
130 * incoming messages. Setting this flag implies that the 183 * incoming messages. Setting this flag implies that the
131 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode. 184 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
132 */ 185 */
133 IB_DEVICE_UD_IP_CSUM = (1<<18), 186 IB_DEVICE_UD_IP_CSUM = (1 << 18),
134 IB_DEVICE_UD_TSO = (1<<19), 187 IB_DEVICE_UD_TSO = (1 << 19),
135 IB_DEVICE_XRC = (1<<20), 188 IB_DEVICE_XRC = (1 << 20),
136 IB_DEVICE_MEM_MGT_EXTENSIONS = (1<<21), 189
137 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22), 190 /*
138 IB_DEVICE_MEM_WINDOW_TYPE_2A = (1<<23), 191 * This device supports the IB "base memory management extension",
139 IB_DEVICE_MEM_WINDOW_TYPE_2B = (1<<24), 192 * which includes support for fast registrations (IB_WR_REG_MR,
140 IB_DEVICE_RC_IP_CSUM = (1<<25), 193 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs). This flag should
141 IB_DEVICE_RAW_IP_CSUM = (1<<26), 194 * also be set by any iWarp device which must support FRs to comply
142 IB_DEVICE_MANAGED_FLOW_STEERING = (1<<29), 195 * to the iWarp verbs spec. iWarp devices also support the
143 IB_DEVICE_SIGNATURE_HANDOVER = (1<<30), 196 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
144 IB_DEVICE_ON_DEMAND_PAGING = (1<<31), 197 * stag.
198 */
199 IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21),
200 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22),
201 IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23),
202 IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24),
203 IB_DEVICE_RC_IP_CSUM = (1 << 25),
204 IB_DEVICE_RAW_IP_CSUM = (1 << 26),
205 /*
206 * Devices should set IB_DEVICE_CROSS_CHANNEL if they
207 * support execution of WQEs that involve synchronization
208 * of I/O operations with single completion queue managed
209 * by hardware.
210 */
211 IB_DEVICE_CROSS_CHANNEL = (1 << 27),
212 IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29),
213 IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30),
214 IB_DEVICE_ON_DEMAND_PAGING = (1 << 31),
145}; 215};
146 216
147enum ib_signature_prot_cap { 217enum ib_signature_prot_cap {
@@ -184,6 +254,7 @@ struct ib_odp_caps {
184 254
185enum ib_cq_creation_flags { 255enum ib_cq_creation_flags {
186 IB_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0, 256 IB_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0,
257 IB_CQ_FLAGS_IGNORE_OVERRUN = 1 << 1,
187}; 258};
188 259
189struct ib_cq_init_attr { 260struct ib_cq_init_attr {
@@ -393,6 +464,7 @@ union rdma_protocol_stats {
393#define RDMA_CORE_CAP_PROT_IB 0x00100000 464#define RDMA_CORE_CAP_PROT_IB 0x00100000
394#define RDMA_CORE_CAP_PROT_ROCE 0x00200000 465#define RDMA_CORE_CAP_PROT_ROCE 0x00200000
395#define RDMA_CORE_CAP_PROT_IWARP 0x00400000 466#define RDMA_CORE_CAP_PROT_IWARP 0x00400000
467#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
396 468
397#define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \ 469#define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \
398 | RDMA_CORE_CAP_IB_MAD \ 470 | RDMA_CORE_CAP_IB_MAD \
@@ -405,6 +477,12 @@ union rdma_protocol_stats {
405 | RDMA_CORE_CAP_IB_CM \ 477 | RDMA_CORE_CAP_IB_CM \
406 | RDMA_CORE_CAP_AF_IB \ 478 | RDMA_CORE_CAP_AF_IB \
407 | RDMA_CORE_CAP_ETH_AH) 479 | RDMA_CORE_CAP_ETH_AH)
480#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP \
481 (RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
482 | RDMA_CORE_CAP_IB_MAD \
483 | RDMA_CORE_CAP_IB_CM \
484 | RDMA_CORE_CAP_AF_IB \
485 | RDMA_CORE_CAP_ETH_AH)
408#define RDMA_CORE_PORT_IWARP (RDMA_CORE_CAP_PROT_IWARP \ 486#define RDMA_CORE_PORT_IWARP (RDMA_CORE_CAP_PROT_IWARP \
409 | RDMA_CORE_CAP_IW_CM) 487 | RDMA_CORE_CAP_IW_CM)
410#define RDMA_CORE_PORT_INTEL_OPA (RDMA_CORE_PORT_IBA_IB \ 488#define RDMA_CORE_PORT_INTEL_OPA (RDMA_CORE_PORT_IBA_IB \
@@ -519,6 +597,17 @@ struct ib_grh {
519 union ib_gid dgid; 597 union ib_gid dgid;
520}; 598};
521 599
600union rdma_network_hdr {
601 struct ib_grh ibgrh;
602 struct {
603 /* The IB spec states that if it's IPv4, the header
604 * is located in the last 20 bytes of the header.
605 */
606 u8 reserved[20];
607 struct iphdr roce4grh;
608 };
609};
610
522enum { 611enum {
523 IB_MULTICAST_QPN = 0xffffff 612 IB_MULTICAST_QPN = 0xffffff
524}; 613};
@@ -734,7 +823,6 @@ enum ib_wc_opcode {
734 IB_WC_RDMA_READ, 823 IB_WC_RDMA_READ,
735 IB_WC_COMP_SWAP, 824 IB_WC_COMP_SWAP,
736 IB_WC_FETCH_ADD, 825 IB_WC_FETCH_ADD,
737 IB_WC_BIND_MW,
738 IB_WC_LSO, 826 IB_WC_LSO,
739 IB_WC_LOCAL_INV, 827 IB_WC_LOCAL_INV,
740 IB_WC_REG_MR, 828 IB_WC_REG_MR,
@@ -755,10 +843,14 @@ enum ib_wc_flags {
755 IB_WC_IP_CSUM_OK = (1<<3), 843 IB_WC_IP_CSUM_OK = (1<<3),
756 IB_WC_WITH_SMAC = (1<<4), 844 IB_WC_WITH_SMAC = (1<<4),
757 IB_WC_WITH_VLAN = (1<<5), 845 IB_WC_WITH_VLAN = (1<<5),
846 IB_WC_WITH_NETWORK_HDR_TYPE = (1<<6),
758}; 847};
759 848
760struct ib_wc { 849struct ib_wc {
761 u64 wr_id; 850 union {
851 u64 wr_id;
852 struct ib_cqe *wr_cqe;
853 };
762 enum ib_wc_status status; 854 enum ib_wc_status status;
763 enum ib_wc_opcode opcode; 855 enum ib_wc_opcode opcode;
764 u32 vendor_err; 856 u32 vendor_err;
@@ -777,6 +869,7 @@ struct ib_wc {
777 u8 port_num; /* valid only for DR SMPs on switches */ 869 u8 port_num; /* valid only for DR SMPs on switches */
778 u8 smac[ETH_ALEN]; 870 u8 smac[ETH_ALEN];
779 u16 vlan_id; 871 u16 vlan_id;
872 u8 network_hdr_type;
780}; 873};
781 874
782enum ib_cq_notify_flags { 875enum ib_cq_notify_flags {
@@ -866,6 +959,9 @@ enum ib_qp_type {
866enum ib_qp_create_flags { 959enum ib_qp_create_flags {
867 IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0, 960 IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
868 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1, 961 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
962 IB_QP_CREATE_CROSS_CHANNEL = 1 << 2,
963 IB_QP_CREATE_MANAGED_SEND = 1 << 3,
964 IB_QP_CREATE_MANAGED_RECV = 1 << 4,
869 IB_QP_CREATE_NETIF_QP = 1 << 5, 965 IB_QP_CREATE_NETIF_QP = 1 << 5,
870 IB_QP_CREATE_SIGNATURE_EN = 1 << 6, 966 IB_QP_CREATE_SIGNATURE_EN = 1 << 6,
871 IB_QP_CREATE_USE_GFP_NOIO = 1 << 7, 967 IB_QP_CREATE_USE_GFP_NOIO = 1 << 7,
@@ -1027,7 +1123,6 @@ enum ib_wr_opcode {
1027 IB_WR_REG_MR, 1123 IB_WR_REG_MR,
1028 IB_WR_MASKED_ATOMIC_CMP_AND_SWP, 1124 IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
1029 IB_WR_MASKED_ATOMIC_FETCH_AND_ADD, 1125 IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
1030 IB_WR_BIND_MW,
1031 IB_WR_REG_SIG_MR, 1126 IB_WR_REG_SIG_MR,
1032 /* reserve values for low level drivers' internal use. 1127 /* reserve values for low level drivers' internal use.
1033 * These values will not be used at all in the ib core layer. 1128 * These values will not be used at all in the ib core layer.
@@ -1062,26 +1157,16 @@ struct ib_sge {
1062 u32 lkey; 1157 u32 lkey;
1063}; 1158};
1064 1159
1065/** 1160struct ib_cqe {
1066 * struct ib_mw_bind_info - Parameters for a memory window bind operation. 1161 void (*done)(struct ib_cq *cq, struct ib_wc *wc);
1067 * @mr: A memory region to bind the memory window to.
1068 * @addr: The address where the memory window should begin.
1069 * @length: The length of the memory window, in bytes.
1070 * @mw_access_flags: Access flags from enum ib_access_flags for the window.
1071 *
1072 * This struct contains the shared parameters for type 1 and type 2
1073 * memory window bind operations.
1074 */
1075struct ib_mw_bind_info {
1076 struct ib_mr *mr;
1077 u64 addr;
1078 u64 length;
1079 int mw_access_flags;
1080}; 1162};
1081 1163
1082struct ib_send_wr { 1164struct ib_send_wr {
1083 struct ib_send_wr *next; 1165 struct ib_send_wr *next;
1084 u64 wr_id; 1166 union {
1167 u64 wr_id;
1168 struct ib_cqe *wr_cqe;
1169 };
1085 struct ib_sge *sg_list; 1170 struct ib_sge *sg_list;
1086 int num_sge; 1171 int num_sge;
1087 enum ib_wr_opcode opcode; 1172 enum ib_wr_opcode opcode;
@@ -1147,19 +1232,6 @@ static inline struct ib_reg_wr *reg_wr(struct ib_send_wr *wr)
1147 return container_of(wr, struct ib_reg_wr, wr); 1232 return container_of(wr, struct ib_reg_wr, wr);
1148} 1233}
1149 1234
1150struct ib_bind_mw_wr {
1151 struct ib_send_wr wr;
1152 struct ib_mw *mw;
1153 /* The new rkey for the memory window. */
1154 u32 rkey;
1155 struct ib_mw_bind_info bind_info;
1156};
1157
1158static inline struct ib_bind_mw_wr *bind_mw_wr(struct ib_send_wr *wr)
1159{
1160 return container_of(wr, struct ib_bind_mw_wr, wr);
1161}
1162
1163struct ib_sig_handover_wr { 1235struct ib_sig_handover_wr {
1164 struct ib_send_wr wr; 1236 struct ib_send_wr wr;
1165 struct ib_sig_attrs *sig_attrs; 1237 struct ib_sig_attrs *sig_attrs;
@@ -1175,7 +1247,10 @@ static inline struct ib_sig_handover_wr *sig_handover_wr(struct ib_send_wr *wr)
1175 1247
1176struct ib_recv_wr { 1248struct ib_recv_wr {
1177 struct ib_recv_wr *next; 1249 struct ib_recv_wr *next;
1178 u64 wr_id; 1250 union {
1251 u64 wr_id;
1252 struct ib_cqe *wr_cqe;
1253 };
1179 struct ib_sge *sg_list; 1254 struct ib_sge *sg_list;
1180 int num_sge; 1255 int num_sge;
1181}; 1256};
@@ -1190,20 +1265,10 @@ enum ib_access_flags {
1190 IB_ACCESS_ON_DEMAND = (1<<6), 1265 IB_ACCESS_ON_DEMAND = (1<<6),
1191}; 1266};
1192 1267
1193struct ib_phys_buf { 1268/*
1194 u64 addr; 1269 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
1195 u64 size; 1270 * are hidden here instead of a uapi header!
1196}; 1271 */
1197
1198struct ib_mr_attr {
1199 struct ib_pd *pd;
1200 u64 device_virt_addr;
1201 u64 size;
1202 int mr_access_flags;
1203 u32 lkey;
1204 u32 rkey;
1205};
1206
1207enum ib_mr_rereg_flags { 1272enum ib_mr_rereg_flags {
1208 IB_MR_REREG_TRANS = 1, 1273 IB_MR_REREG_TRANS = 1,
1209 IB_MR_REREG_PD = (1<<1), 1274 IB_MR_REREG_PD = (1<<1),
@@ -1211,18 +1276,6 @@ enum ib_mr_rereg_flags {
1211 IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1) 1276 IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1)
1212}; 1277};
1213 1278
1214/**
1215 * struct ib_mw_bind - Parameters for a type 1 memory window bind operation.
1216 * @wr_id: Work request id.
1217 * @send_flags: Flags from ib_send_flags enum.
1218 * @bind_info: More parameters of the bind operation.
1219 */
1220struct ib_mw_bind {
1221 u64 wr_id;
1222 int send_flags;
1223 struct ib_mw_bind_info bind_info;
1224};
1225
1226struct ib_fmr_attr { 1279struct ib_fmr_attr {
1227 int max_pages; 1280 int max_pages;
1228 int max_maps; 1281 int max_maps;
@@ -1307,6 +1360,12 @@ struct ib_ah {
1307 1360
1308typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context); 1361typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1309 1362
1363enum ib_poll_context {
1364 IB_POLL_DIRECT, /* caller context, no hw completions */
1365 IB_POLL_SOFTIRQ, /* poll from softirq context */
1366 IB_POLL_WORKQUEUE, /* poll from workqueue */
1367};
1368
1310struct ib_cq { 1369struct ib_cq {
1311 struct ib_device *device; 1370 struct ib_device *device;
1312 struct ib_uobject *uobject; 1371 struct ib_uobject *uobject;
@@ -1315,6 +1374,12 @@ struct ib_cq {
1315 void *cq_context; 1374 void *cq_context;
1316 int cqe; 1375 int cqe;
1317 atomic_t usecnt; /* count number of work queues */ 1376 atomic_t usecnt; /* count number of work queues */
1377 enum ib_poll_context poll_ctx;
1378 struct ib_wc *wc;
1379 union {
1380 struct irq_poll iop;
1381 struct work_struct work;
1382 };
1318}; 1383};
1319 1384
1320struct ib_srq { 1385struct ib_srq {
@@ -1363,7 +1428,6 @@ struct ib_mr {
1363 u64 iova; 1428 u64 iova;
1364 u32 length; 1429 u32 length;
1365 unsigned int page_size; 1430 unsigned int page_size;
1366 atomic_t usecnt; /* count number of MWs */
1367}; 1431};
1368 1432
1369struct ib_mw { 1433struct ib_mw {
@@ -1724,11 +1788,6 @@ struct ib_device {
1724 int wc_cnt); 1788 int wc_cnt);
1725 struct ib_mr * (*get_dma_mr)(struct ib_pd *pd, 1789 struct ib_mr * (*get_dma_mr)(struct ib_pd *pd,
1726 int mr_access_flags); 1790 int mr_access_flags);
1727 struct ib_mr * (*reg_phys_mr)(struct ib_pd *pd,
1728 struct ib_phys_buf *phys_buf_array,
1729 int num_phys_buf,
1730 int mr_access_flags,
1731 u64 *iova_start);
1732 struct ib_mr * (*reg_user_mr)(struct ib_pd *pd, 1791 struct ib_mr * (*reg_user_mr)(struct ib_pd *pd,
1733 u64 start, u64 length, 1792 u64 start, u64 length,
1734 u64 virt_addr, 1793 u64 virt_addr,
@@ -1741,8 +1800,6 @@ struct ib_device {
1741 int mr_access_flags, 1800 int mr_access_flags,
1742 struct ib_pd *pd, 1801 struct ib_pd *pd,
1743 struct ib_udata *udata); 1802 struct ib_udata *udata);
1744 int (*query_mr)(struct ib_mr *mr,
1745 struct ib_mr_attr *mr_attr);
1746 int (*dereg_mr)(struct ib_mr *mr); 1803 int (*dereg_mr)(struct ib_mr *mr);
1747 struct ib_mr * (*alloc_mr)(struct ib_pd *pd, 1804 struct ib_mr * (*alloc_mr)(struct ib_pd *pd,
1748 enum ib_mr_type mr_type, 1805 enum ib_mr_type mr_type,
@@ -1750,18 +1807,8 @@ struct ib_device {
1750 int (*map_mr_sg)(struct ib_mr *mr, 1807 int (*map_mr_sg)(struct ib_mr *mr,
1751 struct scatterlist *sg, 1808 struct scatterlist *sg,
1752 int sg_nents); 1809 int sg_nents);
1753 int (*rereg_phys_mr)(struct ib_mr *mr,
1754 int mr_rereg_mask,
1755 struct ib_pd *pd,
1756 struct ib_phys_buf *phys_buf_array,
1757 int num_phys_buf,
1758 int mr_access_flags,
1759 u64 *iova_start);
1760 struct ib_mw * (*alloc_mw)(struct ib_pd *pd, 1810 struct ib_mw * (*alloc_mw)(struct ib_pd *pd,
1761 enum ib_mw_type type); 1811 enum ib_mw_type type);
1762 int (*bind_mw)(struct ib_qp *qp,
1763 struct ib_mw *mw,
1764 struct ib_mw_bind *mw_bind);
1765 int (*dealloc_mw)(struct ib_mw *mw); 1812 int (*dealloc_mw)(struct ib_mw *mw);
1766 struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd, 1813 struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd,
1767 int mr_access_flags, 1814 int mr_access_flags,
@@ -1823,6 +1870,7 @@ struct ib_device {
1823 u16 is_switch:1; 1870 u16 is_switch:1;
1824 u8 node_type; 1871 u8 node_type;
1825 u8 phys_port_cnt; 1872 u8 phys_port_cnt;
1873 struct ib_device_attr attrs;
1826 1874
1827 /** 1875 /**
1828 * The following mandatory functions are used only at device 1876 * The following mandatory functions are used only at device
@@ -1888,6 +1936,31 @@ static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len
1888 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0; 1936 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
1889} 1937}
1890 1938
1939static inline bool ib_is_udata_cleared(struct ib_udata *udata,
1940 size_t offset,
1941 size_t len)
1942{
1943 const void __user *p = udata->inbuf + offset;
1944 bool ret = false;
1945 u8 *buf;
1946
1947 if (len > USHRT_MAX)
1948 return false;
1949
1950 buf = kmalloc(len, GFP_KERNEL);
1951 if (!buf)
1952 return false;
1953
1954 if (copy_from_user(buf, p, len))
1955 goto free;
1956
1957 ret = !memchr_inv(buf, 0, len);
1958
1959free:
1960 kfree(buf);
1961 return ret;
1962}
1963
1891/** 1964/**
1892 * ib_modify_qp_is_ok - Check that the supplied attribute mask 1965 * ib_modify_qp_is_ok - Check that the supplied attribute mask
1893 * contains all required attributes and no attributes not allowed for 1966 * contains all required attributes and no attributes not allowed for
@@ -1912,9 +1985,6 @@ int ib_register_event_handler (struct ib_event_handler *event_handler);
1912int ib_unregister_event_handler(struct ib_event_handler *event_handler); 1985int ib_unregister_event_handler(struct ib_event_handler *event_handler);
1913void ib_dispatch_event(struct ib_event *event); 1986void ib_dispatch_event(struct ib_event *event);
1914 1987
1915int ib_query_device(struct ib_device *device,
1916 struct ib_device_attr *device_attr);
1917
1918int ib_query_port(struct ib_device *device, 1988int ib_query_port(struct ib_device *device,
1919 u8 port_num, struct ib_port_attr *port_attr); 1989 u8 port_num, struct ib_port_attr *port_attr);
1920 1990
@@ -1968,6 +2038,17 @@ static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
1968 2038
1969static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num) 2039static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
1970{ 2040{
2041 return device->port_immutable[port_num].core_cap_flags &
2042 (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
2043}
2044
2045static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
2046{
2047 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
2048}
2049
2050static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
2051{
1971 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE; 2052 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
1972} 2053}
1973 2054
@@ -1978,8 +2059,8 @@ static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_n
1978 2059
1979static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num) 2060static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
1980{ 2061{
1981 return device->port_immutable[port_num].core_cap_flags & 2062 return rdma_protocol_ib(device, port_num) ||
1982 (RDMA_CORE_CAP_PROT_IB | RDMA_CORE_CAP_PROT_ROCE); 2063 rdma_protocol_roce(device, port_num);
1983} 2064}
1984 2065
1985/** 2066/**
@@ -2220,7 +2301,8 @@ int ib_modify_port(struct ib_device *device,
2220 struct ib_port_modify *port_modify); 2301 struct ib_port_modify *port_modify);
2221 2302
2222int ib_find_gid(struct ib_device *device, union ib_gid *gid, 2303int ib_find_gid(struct ib_device *device, union ib_gid *gid,
2223 struct net_device *ndev, u8 *port_num, u16 *index); 2304 enum ib_gid_type gid_type, struct net_device *ndev,
2305 u8 *port_num, u16 *index);
2224 2306
2225int ib_find_pkey(struct ib_device *device, 2307int ib_find_pkey(struct ib_device *device,
2226 u8 port_num, u16 pkey, u16 *index); 2308 u8 port_num, u16 pkey, u16 *index);
@@ -2454,6 +2536,11 @@ static inline int ib_post_recv(struct ib_qp *qp,
2454 return qp->device->post_recv(qp, recv_wr, bad_recv_wr); 2536 return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
2455} 2537}
2456 2538
2539struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
2540 int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx);
2541void ib_free_cq(struct ib_cq *cq);
2542int ib_process_cq_direct(struct ib_cq *cq, int budget);
2543
2457/** 2544/**
2458 * ib_create_cq - Creates a CQ on the specified device. 2545 * ib_create_cq - Creates a CQ on the specified device.
2459 * @device: The device on which to create the CQ. 2546 * @device: The device on which to create the CQ.
@@ -2839,13 +2926,6 @@ static inline void ib_dma_free_coherent(struct ib_device *dev,
2839} 2926}
2840 2927
2841/** 2928/**
2842 * ib_query_mr - Retrieves information about a specific memory region.
2843 * @mr: The memory region to retrieve information about.
2844 * @mr_attr: The attributes of the specified memory region.
2845 */
2846int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
2847
2848/**
2849 * ib_dereg_mr - Deregisters a memory region and removes it from the 2929 * ib_dereg_mr - Deregisters a memory region and removes it from the
2850 * HCA translation table. 2930 * HCA translation table.
2851 * @mr: The memory region to deregister. 2931 * @mr: The memory region to deregister.
@@ -2882,42 +2962,6 @@ static inline u32 ib_inc_rkey(u32 rkey)
2882} 2962}
2883 2963
2884/** 2964/**
2885 * ib_alloc_mw - Allocates a memory window.
2886 * @pd: The protection domain associated with the memory window.
2887 * @type: The type of the memory window (1 or 2).
2888 */
2889struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
2890
2891/**
2892 * ib_bind_mw - Posts a work request to the send queue of the specified
2893 * QP, which binds the memory window to the given address range and
2894 * remote access attributes.
2895 * @qp: QP to post the bind work request on.
2896 * @mw: The memory window to bind.
2897 * @mw_bind: Specifies information about the memory window, including
2898 * its address range, remote access rights, and associated memory region.
2899 *
2900 * If there is no immediate error, the function will update the rkey member
2901 * of the mw parameter to its new value. The bind operation can still fail
2902 * asynchronously.
2903 */
2904static inline int ib_bind_mw(struct ib_qp *qp,
2905 struct ib_mw *mw,
2906 struct ib_mw_bind *mw_bind)
2907{
2908 /* XXX reference counting in corresponding MR? */
2909 return mw->device->bind_mw ?
2910 mw->device->bind_mw(qp, mw, mw_bind) :
2911 -ENOSYS;
2912}
2913
2914/**
2915 * ib_dealloc_mw - Deallocates a memory window.
2916 * @mw: The memory window to deallocate.
2917 */
2918int ib_dealloc_mw(struct ib_mw *mw);
2919
2920/**
 2921 * ib_alloc_fmr - Allocates an unmapped fast memory region. 2965
2922 * @pd: The protection domain associated with the unmapped region. 2966 * @pd: The protection domain associated with the unmapped region.
2923 * @mr_access_flags: Specifies the memory access rights. 2967 * @mr_access_flags: Specifies the memory access rights.
diff --git a/include/scsi/iser.h b/include/scsi/iser.h
new file mode 100644
index 000000000000..2e678fa74eca
--- /dev/null
+++ b/include/scsi/iser.h
@@ -0,0 +1,78 @@
1/*
2 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef ISCSI_ISER_H
33#define ISCSI_ISER_H
34
35#define ISER_ZBVA_NOT_SUP 0x80
36#define ISER_SEND_W_INV_NOT_SUP 0x40
37#define ISERT_ZBVA_NOT_USED 0x80
38#define ISERT_SEND_W_INV_NOT_USED 0x40
39
40#define ISCSI_CTRL 0x10
41#define ISER_HELLO 0x20
42#define ISER_HELLORPLY 0x30
43
44#define ISER_VER 0x10
45#define ISER_WSV 0x08
46#define ISER_RSV 0x04
47
48/**
49 * struct iser_cm_hdr - iSER CM header (from iSER Annex A12)
50 *
51 * @flags: flags support (zbva, send_w_inv)
52 * @rsvd: reserved
53 */
54struct iser_cm_hdr {
55 u8 flags;
56 u8 rsvd[3];
57} __packed;
58
59/**
60 * struct iser_ctrl - iSER header of iSCSI control PDU
61 *
62 * @flags: opcode and read/write valid bits
63 * @rsvd: reserved
64 * @write_stag: write rkey
65 * @write_va: write virtual address
 66 * @read_stag: read rkey
67 * @read_va: read virtual address
68 */
69struct iser_ctrl {
70 u8 flags;
71 u8 rsvd[3];
72 __be32 write_stag;
73 __be64 write_va;
74 __be32 read_stag;
75 __be64 read_va;
76} __packed;
77
78#endif /* ISCSI_ISER_H */
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
index ff8f6c091a15..f95f25e786ef 100644
--- a/include/trace/events/irq.h
+++ b/include/trace/events/irq.h
@@ -15,7 +15,7 @@ struct softirq_action;
15 softirq_name(NET_TX) \ 15 softirq_name(NET_TX) \
16 softirq_name(NET_RX) \ 16 softirq_name(NET_RX) \
17 softirq_name(BLOCK) \ 17 softirq_name(BLOCK) \
18 softirq_name(BLOCK_IOPOLL) \ 18 softirq_name(IRQ_POLL) \
19 softirq_name(TASKLET) \ 19 softirq_name(TASKLET) \
20 softirq_name(SCHED) \ 20 softirq_name(SCHED) \
21 softirq_name(HRTIMER) \ 21 softirq_name(HRTIMER) \