Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/blk-iopoll.h	 46
-rw-r--r--	include/linux/interrupt.h	  2
-rw-r--r--	include/linux/irq_poll.h	 25
-rw-r--r--	include/linux/mlx4/cmd.h	  3
-rw-r--r--	include/linux/mlx4/device.h	 15
-rw-r--r--	include/linux/mlx4/qp.h		 15
-rw-r--r--	include/linux/mlx5/device.h	 40
-rw-r--r--	include/linux/mlx5/driver.h	 20
-rw-r--r--	include/linux/mlx5/mlx5_ifc.h	 48
-rw-r--r--	include/linux/mlx5/qp.h		 46
-rw-r--r--	include/linux/mlx5/transobj.h	 78
-rw-r--r--	include/linux/mlx5/vport.h	  8
-rw-r--r--	include/linux/sunrpc/svc_rdma.h	 39
13 files changed, 285 insertions(+), 100 deletions(-)
diff --git a/include/linux/blk-iopoll.h b/include/linux/blk-iopoll.h
deleted file mode 100644
index 77ae77c0b704..000000000000
--- a/include/linux/blk-iopoll.h
+++ /dev/null
@@ -1,46 +0,0 @@
-#ifndef BLK_IOPOLL_H
-#define BLK_IOPOLL_H
-
-struct blk_iopoll;
-typedef int (blk_iopoll_fn)(struct blk_iopoll *, int);
-
-struct blk_iopoll {
-	struct list_head list;
-	unsigned long state;
-	unsigned long data;
-	int weight;
-	int max;
-	blk_iopoll_fn *poll;
-};
-
-enum {
-	IOPOLL_F_SCHED		= 0,
-	IOPOLL_F_DISABLE	= 1,
-};
-
-/*
- * Returns 0 if we successfully set the IOPOLL_F_SCHED bit, indicating
- * that we were the first to acquire this iop for scheduling. If this iop
- * is currently disabled, return "failure".
- */
-static inline int blk_iopoll_sched_prep(struct blk_iopoll *iop)
-{
-	if (!test_bit(IOPOLL_F_DISABLE, &iop->state))
-		return test_and_set_bit(IOPOLL_F_SCHED, &iop->state);
-
-	return 1;
-}
-
-static inline int blk_iopoll_disable_pending(struct blk_iopoll *iop)
-{
-	return test_bit(IOPOLL_F_DISABLE, &iop->state);
-}
-
-extern void blk_iopoll_sched(struct blk_iopoll *);
-extern void blk_iopoll_init(struct blk_iopoll *, int, blk_iopoll_fn *);
-extern void blk_iopoll_complete(struct blk_iopoll *);
-extern void __blk_iopoll_complete(struct blk_iopoll *);
-extern void blk_iopoll_enable(struct blk_iopoll *);
-extern void blk_iopoll_disable(struct blk_iopoll *);
-
-#endif
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index cb30edbfe9fc..0e95fcc75b2a 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -413,7 +413,7 @@ enum
 	NET_TX_SOFTIRQ,
 	NET_RX_SOFTIRQ,
 	BLOCK_SOFTIRQ,
-	BLOCK_IOPOLL_SOFTIRQ,
+	IRQ_POLL_SOFTIRQ,
 	TASKLET_SOFTIRQ,
 	SCHED_SOFTIRQ,
 	HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the
diff --git a/include/linux/irq_poll.h b/include/linux/irq_poll.h
new file mode 100644
index 000000000000..3e8c1b8fb9be
--- /dev/null
+++ b/include/linux/irq_poll.h
@@ -0,0 +1,25 @@
+#ifndef IRQ_POLL_H
+#define IRQ_POLL_H
+
+struct irq_poll;
+typedef int (irq_poll_fn)(struct irq_poll *, int);
+
+struct irq_poll {
+	struct list_head list;
+	unsigned long state;
+	int weight;
+	irq_poll_fn *poll;
+};
+
+enum {
+	IRQ_POLL_F_SCHED	= 0,
+	IRQ_POLL_F_DISABLE	= 1,
+};
+
+extern void irq_poll_sched(struct irq_poll *);
+extern void irq_poll_init(struct irq_poll *, int, irq_poll_fn *);
+extern void irq_poll_complete(struct irq_poll *);
+extern void irq_poll_enable(struct irq_poll *);
+extern void irq_poll_disable(struct irq_poll *);
+
+#endif
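
The new interface is essentially a rename of the blk-iopoll API deleted above, minus the data/max fields and the sched_prep/disable_pending helpers, which suggests irq_poll_sched() now performs the IRQ_POLL_F_SCHED test itself. A hedged sketch of how a driver would plug into it, following the usual softirq polling contract (the mydev_* names, the helper mydev_drain_cq(), and the budget value are illustrative, not from this diff):

/* Hypothetical driver glue. Assumes the blk-iopoll-style contract
 * carried over: the poll callback returns the number of completions
 * handled and calls irq_poll_complete() once it finishes under budget.
 */
#include <linux/interrupt.h>
#include <linux/irq_poll.h>

#define MYDEV_POLL_WEIGHT 32			/* illustrative budget */

struct mydev_queue {
	struct irq_poll iop;
	/* ... completion-queue state ... */
};

static int mydev_poll(struct irq_poll *iop, int budget)
{
	struct mydev_queue *q = container_of(iop, struct mydev_queue, iop);
	int done = mydev_drain_cq(q, budget);	/* hypothetical helper */

	if (done < budget)			/* drained: stop polling */
		irq_poll_complete(iop);
	return done;
}

static irqreturn_t mydev_irq(int irq, void *data)
{
	struct mydev_queue *q = data;

	irq_poll_sched(&q->iop);		/* defer work to IRQ_POLL_SOFTIRQ */
	return IRQ_HANDLED;
}

/* At setup time: irq_poll_init(&q->iop, MYDEV_POLL_WEIGHT, mydev_poll); */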
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 58391f2e0414..116b284bc4ce 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -206,7 +206,8 @@ enum {
 	MLX4_SET_PORT_GID_TABLE = 0x5,
 	MLX4_SET_PORT_PRIO2TC	= 0x8,
 	MLX4_SET_PORT_SCHEDULER = 0x9,
-	MLX4_SET_PORT_VXLAN	= 0xB
+	MLX4_SET_PORT_VXLAN	= 0xB,
+	MLX4_SET_PORT_ROCE_ADDR = 0xD
 };
 
 enum {
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index d3133be12d92..430a929f048b 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -216,6 +216,7 @@ enum {
 	MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN	= 1LL << 30,
 	MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB = 1ULL << 31,
 	MLX4_DEV_CAP_FLAG2_LB_SRC_CHK		= 1ULL << 32,
+	MLX4_DEV_CAP_FLAG2_ROCE_V1_V2		= 1ULL << 33,
 };
 
 enum {
@@ -267,12 +268,14 @@ enum {
 	MLX4_BMME_FLAG_TYPE_2_WIN	= 1 << 9,
 	MLX4_BMME_FLAG_RESERVED_LKEY	= 1 << 10,
 	MLX4_BMME_FLAG_FAST_REG_WR	= 1 << 11,
+	MLX4_BMME_FLAG_ROCE_V1_V2	= 1 << 19,
 	MLX4_BMME_FLAG_PORT_REMAP	= 1 << 24,
 	MLX4_BMME_FLAG_VSD_INIT2RTR	= 1 << 28,
 };
 
 enum {
-	MLX4_FLAG_PORT_REMAP		= MLX4_BMME_FLAG_PORT_REMAP
+	MLX4_FLAG_PORT_REMAP		= MLX4_BMME_FLAG_PORT_REMAP,
+	MLX4_FLAG_ROCE_V1_V2		= MLX4_BMME_FLAG_ROCE_V1_V2
 };
 
 enum mlx4_event {
@@ -979,14 +982,11 @@ struct mlx4_mad_ifc {
 	for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
 		if ((type) == (dev)->caps.port_mask[(port)])
 
-#define mlx4_foreach_non_ib_transport_port(port, dev) \
-	for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
-		if (((dev)->caps.port_mask[port] != MLX4_PORT_TYPE_IB))
-
 #define mlx4_foreach_ib_transport_port(port, dev) \
 	for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
 		if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \
-		    ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
+		    ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE) || \
+		    ((dev)->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2))
 
 #define MLX4_INVALID_SLAVE_ID	0xFF
 #define MLX4_SINK_COUNTER_INDEX(dev)	(dev->caps.max_counters - 1)
@@ -1457,6 +1457,7 @@ int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port);
 
 int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port);
 int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis);
+int mlx4_config_roce_v2_port(struct mlx4_dev *dev, u16 udp_port);
 int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2);
 int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port);
 int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port);
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index fe052e234906..587cdf943b52 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -194,7 +194,7 @@ struct mlx4_qp_context {
 	u8			mtu_msgmax;
 	u8			rq_size_stride;
 	u8			sq_size_stride;
-	u8			rlkey;
+	u8			rlkey_roce_mode;
 	__be32			usr_page;
 	__be32			local_qpn;
 	__be32			remote_qpn;
@@ -204,7 +204,8 @@ struct mlx4_qp_context {
 	u32			reserved1;
 	__be32			next_send_psn;
 	__be32			cqn_send;
-	u32			reserved2[2];
+	__be16			roce_entropy;
+	__be16			reserved2[3];
 	__be32			last_acked_psn;
 	__be32			ssn;
 	__be32			params2;
@@ -487,4 +488,14 @@ static inline struct mlx4_qp *__mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
 
 void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp);
 
+static inline u16 folded_qp(u32 q)
+{
+	u16 res;
+
+	res = ((q & 0xff) ^ ((q & 0xff0000) >> 16)) | (q & 0xff00);
+	return res;
+}
+
+u16 mlx4_qp_roce_entropy(struct mlx4_dev *dev, u32 qpn);
+
 #endif /* MLX4_QP_H */
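
The fold above compresses a 24-bit QPN into 16 bits by XOR-ing the top byte into the bottom byte while keeping the middle byte, presumably so the new __be16 roce_entropy field still varies across QPNs. A standalone check of the arithmetic (the sample QPN is arbitrary):

#include <stdint.h>
#include <stdio.h>

static inline uint16_t folded_qp(uint32_t q)
{
	return (uint16_t)(((q & 0xff) ^ ((q & 0xff0000) >> 16)) | (q & 0xff00));
}

int main(void)
{
	/* 0x123456: low byte 0x56 ^ top byte 0x12 = 0x44, OR middle 0x3400 -> 0x3444 */
	printf("0x%04x\n", folded_qp(0x123456));
	return 0;
}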
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 7be845e30689..987764afa65c 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -223,6 +223,14 @@ enum {
 #define MLX5_UMR_MTT_MASK	(MLX5_UMR_MTT_ALIGNMENT - 1)
 #define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT
 
+#define MLX5_USER_INDEX_LEN (MLX5_FLD_SZ_BYTES(qpc, user_index) * 8)
+
+enum {
+	MLX5_EVENT_QUEUE_TYPE_QP = 0,
+	MLX5_EVENT_QUEUE_TYPE_RQ = 1,
+	MLX5_EVENT_QUEUE_TYPE_SQ = 2,
+};
+
 enum mlx5_event {
 	MLX5_EVENT_TYPE_COMP		= 0x0,
 
@@ -280,6 +288,26 @@ enum {
 };
 
 enum {
+	MLX5_ROCE_VERSION_1		= 0,
+	MLX5_ROCE_VERSION_2		= 2,
+};
+
+enum {
+	MLX5_ROCE_VERSION_1_CAP		= 1 << MLX5_ROCE_VERSION_1,
+	MLX5_ROCE_VERSION_2_CAP		= 1 << MLX5_ROCE_VERSION_2,
+};
+
+enum {
+	MLX5_ROCE_L3_TYPE_IPV4		= 0,
+	MLX5_ROCE_L3_TYPE_IPV6		= 1,
+};
+
+enum {
+	MLX5_ROCE_L3_TYPE_IPV4_CAP	= 1 << 1,
+	MLX5_ROCE_L3_TYPE_IPV6_CAP	= 1 << 2,
+};
+
+enum {
 	MLX5_OPCODE_NOP			= 0x00,
 	MLX5_OPCODE_SEND_INVAL		= 0x01,
 	MLX5_OPCODE_RDMA_WRITE		= 0x08,
@@ -446,7 +474,7 @@ struct mlx5_init_seg {
 	__be32			rsvd2[880];
 	__be32			internal_timer_h;
 	__be32			internal_timer_l;
-	__be32			rsrv3[2];
+	__be32			rsvd3[2];
 	__be32			health_counter;
 	__be32			rsvd4[1019];
 	__be64			ieee1588_clk;
@@ -460,7 +488,9 @@ struct mlx5_eqe_comp {
 };
 
 struct mlx5_eqe_qp_srq {
-	__be32	reserved[6];
+	__be32	reserved1[5];
+	u8	type;
+	u8	reserved2[3];
 	__be32	qp_srq_n;
 };
 
@@ -651,6 +681,12 @@ enum {
 };
 
 enum {
+	MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH	= 0x0,
+	MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6	= 0x1,
+	MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4	= 0x2,
+};
+
+enum {
 	CQE_L2_OK	= 1 << 0,
 	CQE_L3_OK	= 1 << 1,
 	CQE_L4_OK	= 1 << 2,
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 5162f3533042..1e3006dcf35d 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -115,6 +115,11 @@ enum {
 	MLX5_REG_HOST_ENDIANNESS = 0x7004,
 };
 
+enum {
+	MLX5_ATOMIC_OPS_CMP_SWAP	= 1 << 0,
+	MLX5_ATOMIC_OPS_FETCH_ADD	= 1 << 1,
+};
+
 enum mlx5_page_fault_resume_flags {
 	MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0,
 	MLX5_PAGE_FAULT_RESUME_WRITE	 = 1 << 1,
@@ -341,9 +346,11 @@ struct mlx5_core_mr {
 };
 
 enum mlx5_res_type {
-	MLX5_RES_QP,
-	MLX5_RES_SRQ,
-	MLX5_RES_XSRQ,
+	MLX5_RES_QP	= MLX5_EVENT_QUEUE_TYPE_QP,
+	MLX5_RES_RQ	= MLX5_EVENT_QUEUE_TYPE_RQ,
+	MLX5_RES_SQ	= MLX5_EVENT_QUEUE_TYPE_SQ,
+	MLX5_RES_SRQ	= 3,
+	MLX5_RES_XSRQ	= 4,
 };
 
 struct mlx5_core_rsc_common {
@@ -651,13 +658,6 @@ extern struct workqueue_struct *mlx5_core_wq;
 	.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
 	.struct_size_bytes   = sizeof((struct ib_unpacked_ ## header *)0)->field
 
-struct ib_field {
-	size_t struct_offset_bytes;
-	size_t struct_size_bytes;
-	int    offset_bits;
-	int    size_bits;
-};
-
 static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
 {
 	return pci_get_drvdata(pdev);
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 68d73f82e009..231ab6bcea76 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -67,6 +67,11 @@ enum {
 };
 
 enum {
+	MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE	= 0x0,
+	MLX5_SET_HCA_CAP_OP_MOD_ATOMIC		= 0x3,
+};
+
+enum {
 	MLX5_CMD_OP_QUERY_HCA_CAP                 = 0x100,
 	MLX5_CMD_OP_QUERY_ADAPTER                 = 0x101,
 	MLX5_CMD_OP_INIT_HCA                      = 0x102,
@@ -573,21 +578,24 @@ enum {
 struct mlx5_ifc_atomic_caps_bits {
 	u8         reserved_0[0x40];
 
-	u8         atomic_req_endianness[0x1];
-	u8         reserved_1[0x1f];
+	u8         atomic_req_8B_endianess_mode[0x2];
+	u8         reserved_1[0x4];
+	u8         supported_atomic_req_8B_endianess_mode_1[0x1];
 
-	u8         reserved_2[0x20];
+	u8         reserved_2[0x19];
 
-	u8         reserved_3[0x10];
-	u8         atomic_operations[0x10];
+	u8         reserved_3[0x20];
 
 	u8         reserved_4[0x10];
-	u8         atomic_size_qp[0x10];
+	u8         atomic_operations[0x10];
 
 	u8         reserved_5[0x10];
+	u8         atomic_size_qp[0x10];
+
+	u8         reserved_6[0x10];
 	u8         atomic_size_dc[0x10];
 
-	u8         reserved_6[0x720];
+	u8         reserved_7[0x720];
 };
 
 struct mlx5_ifc_odp_cap_bits {
@@ -850,7 +858,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         reserved_66[0x8];
 	u8         log_uar_page_sz[0x10];
 
-	u8         reserved_67[0x40];
+	u8         reserved_67[0x20];
+	u8         device_frequency_mhz[0x20];
 	u8         device_frequency_khz[0x20];
 	u8         reserved_68[0x5f];
 	u8         cqe_zip[0x1];
@@ -2215,19 +2224,25 @@ struct mlx5_ifc_nic_vport_context_bits {
 
 	u8         mtu[0x10];
 
-	u8         reserved_3[0x640];
+	u8         system_image_guid[0x40];
+	u8         port_guid[0x40];
+	u8         node_guid[0x40];
+
+	u8         reserved_3[0x140];
+	u8         qkey_violation_counter[0x10];
+	u8         reserved_4[0x430];
 
 	u8         promisc_uc[0x1];
 	u8         promisc_mc[0x1];
 	u8         promisc_all[0x1];
-	u8         reserved_4[0x2];
+	u8         reserved_5[0x2];
 	u8         allowed_list_type[0x3];
-	u8         reserved_5[0xc];
+	u8         reserved_6[0xc];
 	u8         allowed_list_size[0xc];
 
 	struct mlx5_ifc_mac_address_layout_bits permanent_address;
 
-	u8         reserved_6[0x20];
+	u8         reserved_7[0x20];
 
 	u8         current_uc_mac_address[0][0x40];
 };
@@ -4199,6 +4214,13 @@ struct mlx5_ifc_modify_tis_out_bits {
 	u8         reserved_1[0x40];
 };
 
+struct mlx5_ifc_modify_tis_bitmask_bits {
+	u8         reserved_0[0x20];
+
+	u8         reserved_1[0x1f];
+	u8         prio[0x1];
+};
+
 struct mlx5_ifc_modify_tis_in_bits {
 	u8         opcode[0x10];
 	u8         reserved_0[0x10];
@@ -4211,7 +4233,7 @@ struct mlx5_ifc_modify_tis_in_bits {
 
 	u8         reserved_3[0x20];
 
-	u8         modify_bitmask[0x40];
+	struct mlx5_ifc_modify_tis_bitmask_bits bitmask;
 
 	u8         reserved_4[0x40];
 
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index f079fb1a31f7..5b8c89ffaa58 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -85,7 +85,16 @@ enum mlx5_qp_state {
 	MLX5_QP_STATE_ERR			= 6,
 	MLX5_QP_STATE_SQ_DRAINING		= 7,
 	MLX5_QP_STATE_SUSPENDED			= 9,
-	MLX5_QP_NUM_STATE
+	MLX5_QP_NUM_STATE,
+	MLX5_QP_STATE,
+	MLX5_QP_STATE_BAD,
+};
+
+enum {
+	MLX5_SQ_STATE_NA	= MLX5_SQC_STATE_ERR + 1,
+	MLX5_SQ_NUM_STATE	= MLX5_SQ_STATE_NA + 1,
+	MLX5_RQ_STATE_NA	= MLX5_RQC_STATE_ERR + 1,
+	MLX5_RQ_NUM_STATE	= MLX5_RQ_STATE_NA + 1,
 };
 
 enum {
@@ -130,6 +139,9 @@ enum {
 	MLX5_QP_BIT_RWE				= 1 << 14,
 	MLX5_QP_BIT_RAE				= 1 << 13,
 	MLX5_QP_BIT_RIC				= 1 << 4,
+	MLX5_QP_BIT_CC_SLAVE_RECV		= 1 << 2,
+	MLX5_QP_BIT_CC_SLAVE_SEND		= 1 << 1,
+	MLX5_QP_BIT_CC_MASTER			= 1 << 0
 };
 
 enum {
@@ -248,8 +260,12 @@ struct mlx5_av {
 	__be32			dqp_dct;
 	u8			stat_rate_sl;
 	u8			fl_mlid;
-	__be16			rlid;
-	u8			reserved0[10];
+	union {
+		__be16		rlid;
+		__be16		udp_sport;
+	};
+	u8			reserved0[4];
+	u8			rmac[6];
 	u8			tclass;
 	u8			hop_limit;
 	__be32			grh_gid_fl;
@@ -456,11 +472,16 @@ struct mlx5_qp_path {
 	u8			static_rate;
 	u8			hop_limit;
 	__be32			tclass_flowlabel;
-	u8			rgid[16];
-	u8			rsvd1[4];
-	u8			sl;
+	union {
+		u8		rgid[16];
+		u8		rip[16];
+	};
+	u8			f_dscp_ecn_prio;
+	u8			ecn_dscp;
+	__be16			udp_sport;
+	u8			dci_cfi_prio_sl;
 	u8			port;
-	u8			rsvd2[6];
+	u8			rmac[6];
 };
 
 struct mlx5_qp_context {
@@ -620,8 +641,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
 			struct mlx5_core_qp *qp,
 			struct mlx5_create_qp_mbox_in *in,
 			int inlen);
-int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state,
-			enum mlx5_qp_state new_state,
+int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation,
 			struct mlx5_modify_qp_mbox_in *in, int sqd_event,
 			struct mlx5_core_qp *qp);
 int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
@@ -639,6 +659,14 @@ void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
 int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
 				u8 context, int error);
 #endif
+int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
+				struct mlx5_core_qp *rq);
+void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
+				  struct mlx5_core_qp *rq);
+int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
+				struct mlx5_core_qp *sq);
+void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
+				  struct mlx5_core_qp *sq);
 
 static inline const char *mlx5_qp_type_str(int type)
 {
diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h
new file mode 100644
index 000000000000..88441f5ece25
--- /dev/null
+++ b/include/linux/mlx5/transobj.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __TRANSOBJ_H__
+#define __TRANSOBJ_H__
+
+#include <linux/mlx5/driver.h>
+
+int mlx5_core_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn);
+void mlx5_core_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn);
+int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+			u32 *rqn);
+int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen);
+void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn);
+int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out);
+int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+			u32 *sqn);
+int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen);
+void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);
+int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out);
+int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
+			 u32 *tirn);
+int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
+			 int inlen);
+void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn);
+int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
+			 u32 *tisn);
+int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in,
+			 int inlen);
+void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn);
+int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
+			 u32 *rmpn);
+int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen);
+int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn);
+int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
+int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
+int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+			  u32 *rmpn);
+int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn);
+int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
+int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
+
+int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
+			 u32 *rqtn);
+int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
+			 int inlen);
+void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn);
+
+#endif /* __TRANSOBJ_H__ */
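
All of these transport-object calls share one shape: the caller fills a command mailbox in `in` and gets back an object number. A hedged sketch of the RQ create/modify pair, assuming the MLX5_SET()/MLX5_ADDR_OF() mailbox macros and the create_rq_in/modify_rq_in layouts from mlx5_ifc.h; a real RQ needs further WQ parameters, so the rqc fields set here are illustrative only:

#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/transobj.h>

/* Create an RQ in RST state, attached to an existing CQ. */
static int mydrv_create_rq(struct mlx5_core_dev *mdev, u32 cqn, u32 *rqn)
{
	u32 in[MLX5_ST_SZ_DW(create_rq_in)] = {0};
	void *rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);

	MLX5_SET(rqc, rqc, cqn, cqn);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);

	return mlx5_core_create_rq(mdev, in, sizeof(in), rqn);
}

/* Move the RQ from RST to RDY before using it. */
static int mydrv_rq_rst2rdy(struct mlx5_core_dev *mdev, u32 rqn)
{
	u32 in[MLX5_ST_SZ_DW(modify_rq_in)] = {0};
	void *rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RST);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

	return mlx5_core_modify_rq(mdev, rqn, in, sizeof(in));
}

/* Teardown is symmetric: mlx5_core_destroy_rq(mdev, rqn); */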
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index 638f2ca7a527..123771003e68 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -45,6 +45,11 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
 				     u16 vport, u8 *addr);
 int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
 				      u16 vport, u8 *addr);
+int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
+					   u64 *system_image_guid);
+int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
+int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
+					u16 *qkey_viol_cntr);
 int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
 			     u8 port_num, u16 vf_num, u16 gid_index,
 			     union ib_gid *gid);
@@ -85,4 +90,7 @@ int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
 				  u16 vlans[],
 				  int list_size);
 
+int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev);
+int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev);
+
 #endif /* __MLX5_VPORT_H__ */
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index f869807a0d0e..5322fea6fe4c 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -51,6 +51,7 @@
 /* RPC/RDMA parameters and stats */
 extern unsigned int svcrdma_ord;
 extern unsigned int svcrdma_max_requests;
+extern unsigned int svcrdma_max_bc_requests;
 extern unsigned int svcrdma_max_req_size;
 
 extern atomic_t rdma_stat_recv;
@@ -69,6 +70,7 @@ extern atomic_t rdma_stat_sq_prod;
  * completes.
  */
 struct svc_rdma_op_ctxt {
+	struct list_head free;
 	struct svc_rdma_op_ctxt *read_hdr;
 	struct svc_rdma_fastreg_mr *frmr;
 	int hdr_count;
@@ -112,6 +114,7 @@ struct svc_rdma_fastreg_mr {
 	struct list_head frmr_list;
 };
 struct svc_rdma_req_map {
+	struct list_head free;
 	unsigned long count;
 	union {
 		struct kvec sge[RPCSVC_MAXPAGES];
@@ -132,28 +135,32 @@ struct svcxprt_rdma {
 	int		     sc_max_sge;
 	int		     sc_max_sge_rd;	/* max sge for read target */
 
-	int		     sc_sq_depth;	/* Depth of SQ */
 	atomic_t	     sc_sq_count;	/* Number of SQ WR on queue */
-
-	int		     sc_max_requests;	/* Depth of RQ */
+	unsigned int	     sc_sq_depth;	/* Depth of SQ */
+	unsigned int	     sc_rq_depth;	/* Depth of RQ */
+	u32		     sc_max_requests;	/* Forward credits */
+	u32		     sc_max_bc_requests;/* Backward credits */
 	int		     sc_max_req_size;	/* Size of each RQ WR buf */
 
 	struct ib_pd	     *sc_pd;
 
 	atomic_t	     sc_dma_used;
-	atomic_t	     sc_ctxt_used;
+	spinlock_t	     sc_ctxt_lock;
+	struct list_head     sc_ctxts;
+	int		     sc_ctxt_used;
+	spinlock_t	     sc_map_lock;
+	struct list_head     sc_maps;
+
 	struct list_head     sc_rq_dto_q;
 	spinlock_t	     sc_rq_dto_lock;
 	struct ib_qp	     *sc_qp;
 	struct ib_cq	     *sc_rq_cq;
 	struct ib_cq	     *sc_sq_cq;
-	struct ib_mr	     *sc_phys_mr;	/* MR for server memory */
 	int		     (*sc_reader)(struct svcxprt_rdma *,
 					  struct svc_rqst *,
 					  struct svc_rdma_op_ctxt *,
 					  int *, u32 *, u32, u32, u64, bool);
 	u32		     sc_dev_caps;	/* distilled device caps */
-	u32		     sc_dma_lkey;	/* local dma key */
 	unsigned int	     sc_frmr_pg_list_len;
 	struct list_head     sc_frmr_q;
 	spinlock_t	     sc_frmr_q_lock;
@@ -179,8 +186,18 @@ struct svcxprt_rdma {
 #define RPCRDMA_MAX_REQUESTS    32
 #define RPCRDMA_MAX_REQ_SIZE    4096
 
+/* Typical ULP usage of BC requests is NFSv4.1 backchannel. Our
+ * current NFSv4.1 implementation supports one backchannel slot.
+ */
+#define RPCRDMA_MAX_BC_REQUESTS	2
+
 #define RPCSVC_MAXPAYLOAD_RDMA	RPCSVC_MAXPAYLOAD
 
+/* svc_rdma_backchannel.c */
+extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt,
+				    struct rpcrdma_msg *rmsgp,
+				    struct xdr_buf *rcvbuf);
+
 /* svc_rdma_marshal.c */
 extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg **, struct svc_rqst *);
 extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *,
@@ -206,6 +223,8 @@ extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *,
 				u32, u32, u64, bool);
 
 /* svc_rdma_sendto.c */
+extern int svc_rdma_map_xdr(struct svcxprt_rdma *, struct xdr_buf *,
+			    struct svc_rdma_req_map *);
 extern int svc_rdma_sendto(struct svc_rqst *);
 extern struct rpcrdma_read_chunk *
 	svc_rdma_get_read_chunk(struct rpcrdma_msg *);
@@ -214,13 +233,14 @@ extern struct rpcrdma_read_chunk *
 extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *);
 extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *,
 				enum rpcrdma_errcode);
-extern int svc_rdma_post_recv(struct svcxprt_rdma *);
+extern int svc_rdma_post_recv(struct svcxprt_rdma *, gfp_t);
 extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
 extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *);
 extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int);
 extern void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt);
-extern struct svc_rdma_req_map *svc_rdma_get_req_map(void);
-extern void svc_rdma_put_req_map(struct svc_rdma_req_map *);
+extern struct svc_rdma_req_map *svc_rdma_get_req_map(struct svcxprt_rdma *);
+extern void svc_rdma_put_req_map(struct svcxprt_rdma *,
+				 struct svc_rdma_req_map *);
 extern struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *);
 extern void svc_rdma_put_frmr(struct svcxprt_rdma *,
 			      struct svc_rdma_fastreg_mr *);
@@ -234,6 +254,7 @@ extern struct svc_xprt_class svc_rdma_bc_class;
 #endif
 
 /* svc_rdma.c */
+extern struct workqueue_struct *svc_rdma_wq;
 extern int svc_rdma_init(void);
 extern void svc_rdma_cleanup(void);
 
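
Replacing the bare atomic_t sc_ctxt_used with sc_ctxts/sc_ctxt_lock (and adding sc_maps/sc_map_lock) points at per-transport free lists: contexts and request maps are recycled rather than allocated per call, which is also why svc_rdma_get_req_map()/svc_rdma_put_req_map() grew a transport argument. A hedged sketch of the recycling pattern these fields imply (the allocation fallback and GFP flags are assumptions, not taken from this header):

/* Hedged sketch: pull a context off the per-transport free list,
 * falling back to the slab allocator when the list is empty.
 */
struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt = NULL;

	spin_lock(&xprt->sc_ctxt_lock);
	xprt->sc_ctxt_used++;
	if (!list_empty(&xprt->sc_ctxts)) {
		ctxt = list_first_entry(&xprt->sc_ctxts,
					struct svc_rdma_op_ctxt, free);
		list_del_init(&ctxt->free);
	}
	spin_unlock(&xprt->sc_ctxt_lock);

	if (!ctxt)
		ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL);	/* assumed fallback */
	return ctxt;
}

/* Put is the mirror image: list_add(&ctxt->free, &xprt->sc_ctxts)
 * under sc_ctxt_lock, decrementing sc_ctxt_used.
 */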