aboutsummaryrefslogtreecommitdiffstats
path: root/include
diff options
context:
space:
mode:
Diffstat (limited to 'include')
-rw-r--r--include/linux/mlx5/device.h13
-rw-r--r--include/linux/mlx5/driver.h18
-rw-r--r--include/rdma/ib_verbs.h20
-rw-r--r--include/scsi/scsi_transport_srp.h83
-rw-r--r--include/uapi/rdma/ib_user_verbs.h95
5 files changed, 179 insertions, 50 deletions
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 5eb4e31af22b..da78875807fc 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -230,6 +230,15 @@ enum {
230 MLX5_MAX_PAGE_SHIFT = 31 230 MLX5_MAX_PAGE_SHIFT = 31
231}; 231};
232 232
233enum {
234 MLX5_ADAPTER_PAGE_SHIFT = 12
235};
236
237enum {
238 MLX5_CAP_OFF_DCT = 41,
239 MLX5_CAP_OFF_CMDIF_CSUM = 46,
240};
241
233struct mlx5_inbox_hdr { 242struct mlx5_inbox_hdr {
234 __be16 opcode; 243 __be16 opcode;
235 u8 rsvd[4]; 244 u8 rsvd[4];
@@ -319,9 +328,9 @@ struct mlx5_hca_cap {
319 u8 rsvd25[42]; 328 u8 rsvd25[42];
320 __be16 log_uar_page_sz; 329 __be16 log_uar_page_sz;
321 u8 rsvd26[28]; 330 u8 rsvd26[28];
322 u8 log_msx_atomic_size_qp; 331 u8 log_max_atomic_size_qp;
323 u8 rsvd27[2]; 332 u8 rsvd27[2];
324 u8 log_msx_atomic_size_dc; 333 u8 log_max_atomic_size_dc;
325 u8 rsvd28[76]; 334 u8 rsvd28[76];
326}; 335};
327 336
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 6b8c496572c8..554548cd3dd4 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -483,6 +483,7 @@ struct mlx5_priv {
483 struct rb_root page_root; 483 struct rb_root page_root;
484 int fw_pages; 484 int fw_pages;
485 int reg_pages; 485 int reg_pages;
486 struct list_head free_list;
486 487
487 struct mlx5_core_health health; 488 struct mlx5_core_health health;
488 489
@@ -557,9 +558,11 @@ typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
557struct mlx5_cmd_work_ent { 558struct mlx5_cmd_work_ent {
558 struct mlx5_cmd_msg *in; 559 struct mlx5_cmd_msg *in;
559 struct mlx5_cmd_msg *out; 560 struct mlx5_cmd_msg *out;
561 void *uout;
562 int uout_size;
560 mlx5_cmd_cbk_t callback; 563 mlx5_cmd_cbk_t callback;
561 void *context; 564 void *context;
562 int idx; 565 int idx;
563 struct completion done; 566 struct completion done;
564 struct mlx5_cmd *cmd; 567 struct mlx5_cmd *cmd;
565 struct work_struct work; 568 struct work_struct work;
@@ -570,6 +573,7 @@ struct mlx5_cmd_work_ent {
570 u8 token; 573 u8 token;
571 struct timespec ts1; 574 struct timespec ts1;
572 struct timespec ts2; 575 struct timespec ts2;
576 u16 op;
573}; 577};
574 578
575struct mlx5_pas { 579struct mlx5_pas {
@@ -653,6 +657,9 @@ void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
653int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr); 657int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
654int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, 658int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
655 int out_size); 659 int out_size);
660int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
661 void *out, int out_size, mlx5_cmd_cbk_t callback,
662 void *context);
656int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn); 663int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
657int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn); 664int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
658int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); 665int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
@@ -676,7 +683,9 @@ int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
676int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, 683int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
677 u16 lwm, int is_srq); 684 u16 lwm, int is_srq);
678int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, 685int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
679 struct mlx5_create_mkey_mbox_in *in, int inlen); 686 struct mlx5_create_mkey_mbox_in *in, int inlen,
687 mlx5_cmd_cbk_t callback, void *context,
688 struct mlx5_create_mkey_mbox_out *out);
680int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr); 689int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr);
681int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, 690int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
682 struct mlx5_query_mkey_mbox_out *out, int outlen); 691 struct mlx5_query_mkey_mbox_out *out, int outlen);
@@ -745,6 +754,11 @@ static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
745 return mkey_idx << 8; 754 return mkey_idx << 8;
746} 755}
747 756
757static inline u8 mlx5_mkey_variant(u32 mkey)
758{
759 return mkey & 0xff;
760}
761
748enum { 762enum {
749 MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0, 763 MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
750 MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1, 764 MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index e393171e2fac..979874c627ee 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -67,12 +67,14 @@ enum rdma_node_type {
67 RDMA_NODE_IB_CA = 1, 67 RDMA_NODE_IB_CA = 1,
68 RDMA_NODE_IB_SWITCH, 68 RDMA_NODE_IB_SWITCH,
69 RDMA_NODE_IB_ROUTER, 69 RDMA_NODE_IB_ROUTER,
70 RDMA_NODE_RNIC 70 RDMA_NODE_RNIC,
71 RDMA_NODE_USNIC,
71}; 72};
72 73
73enum rdma_transport_type { 74enum rdma_transport_type {
74 RDMA_TRANSPORT_IB, 75 RDMA_TRANSPORT_IB,
75 RDMA_TRANSPORT_IWARP 76 RDMA_TRANSPORT_IWARP,
77 RDMA_TRANSPORT_USNIC
76}; 78};
77 79
78enum rdma_transport_type 80enum rdma_transport_type
@@ -1436,6 +1438,7 @@ struct ib_device {
1436 1438
1437 int uverbs_abi_ver; 1439 int uverbs_abi_ver;
1438 u64 uverbs_cmd_mask; 1440 u64 uverbs_cmd_mask;
1441 u64 uverbs_ex_cmd_mask;
1439 1442
1440 char node_desc[64]; 1443 char node_desc[64];
1441 __be64 node_guid; 1444 __be64 node_guid;
@@ -2384,4 +2387,17 @@ struct ib_flow *ib_create_flow(struct ib_qp *qp,
2384 struct ib_flow_attr *flow_attr, int domain); 2387 struct ib_flow_attr *flow_attr, int domain);
2385int ib_destroy_flow(struct ib_flow *flow_id); 2388int ib_destroy_flow(struct ib_flow *flow_id);
2386 2389
2390static inline int ib_check_mr_access(int flags)
2391{
2392 /*
2393 * Local write permission is required if remote write or
2394 * remote atomic permission is also requested.
2395 */
2396 if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
2397 !(flags & IB_ACCESS_LOCAL_WRITE))
2398 return -EINVAL;
2399
2400 return 0;
2401}
2402
2387#endif /* IB_VERBS_H */ 2403#endif /* IB_VERBS_H */
diff --git a/include/scsi/scsi_transport_srp.h b/include/scsi/scsi_transport_srp.h
index ff0f04ac91aa..4ebf6913b7b2 100644
--- a/include/scsi/scsi_transport_srp.h
+++ b/include/scsi/scsi_transport_srp.h
@@ -13,6 +13,27 @@ struct srp_rport_identifiers {
13 u8 roles; 13 u8 roles;
14}; 14};
15 15
16/**
17 * enum srp_rport_state - SRP transport layer state
18 * @SRP_RPORT_RUNNING: Transport layer operational.
19 * @SRP_RPORT_BLOCKED: Transport layer not operational; fast I/O fail timer
20 * is running and I/O has been blocked.
21 * @SRP_RPORT_FAIL_FAST: Fast I/O fail timer has expired; fail I/O fast.
22 * @SRP_RPORT_LOST: Device loss timer has expired; port is being removed.
23 */
24enum srp_rport_state {
25 SRP_RPORT_RUNNING,
26 SRP_RPORT_BLOCKED,
27 SRP_RPORT_FAIL_FAST,
28 SRP_RPORT_LOST,
29};
30
31/**
32 * struct srp_rport
33 * @lld_data: LLD private data.
34 * @mutex: Protects against concurrent rport reconnect / fast_io_fail /
35 * dev_loss_tmo activity.
36 */
16struct srp_rport { 37struct srp_rport {
17 /* for initiator and target drivers */ 38 /* for initiator and target drivers */
18 39
@@ -23,11 +44,43 @@ struct srp_rport {
23 44
24 /* for initiator drivers */ 45 /* for initiator drivers */
25 46
26 void *lld_data; /* LLD private data */ 47 void *lld_data;
48
49 struct mutex mutex;
50 enum srp_rport_state state;
51 bool deleted;
52 int reconnect_delay;
53 int failed_reconnects;
54 struct delayed_work reconnect_work;
55 int fast_io_fail_tmo;
56 int dev_loss_tmo;
57 struct delayed_work fast_io_fail_work;
58 struct delayed_work dev_loss_work;
27}; 59};
28 60
61/**
62 * struct srp_function_template
63 * @has_rport_state: Whether or not to create the state, fast_io_fail_tmo and
64 * dev_loss_tmo sysfs attribute for an rport.
 65 * @reset_timer_if_blocked: Whether or not srp_timed_out() should reset the command
66 * timer if the device on which it has been queued is blocked.
67 * @reconnect_delay: If not NULL, points to the default reconnect_delay value.
68 * @fast_io_fail_tmo: If not NULL, points to the default fast_io_fail_tmo value.
69 * @dev_loss_tmo: If not NULL, points to the default dev_loss_tmo value.
70 * @reconnect: Callback function for reconnecting to the target. See also
71 * srp_reconnect_rport().
72 * @terminate_rport_io: Callback function for terminating all outstanding I/O
73 * requests for an rport.
74 */
29struct srp_function_template { 75struct srp_function_template {
30 /* for initiator drivers */ 76 /* for initiator drivers */
77 bool has_rport_state;
78 bool reset_timer_if_blocked;
79 int *reconnect_delay;
80 int *fast_io_fail_tmo;
81 int *dev_loss_tmo;
82 int (*reconnect)(struct srp_rport *rport);
83 void (*terminate_rport_io)(struct srp_rport *rport);
31 void (*rport_delete)(struct srp_rport *rport); 84 void (*rport_delete)(struct srp_rport *rport);
32 /* for target drivers */ 85 /* for target drivers */
33 int (* tsk_mgmt_response)(struct Scsi_Host *, u64, u64, int); 86 int (* tsk_mgmt_response)(struct Scsi_Host *, u64, u64, int);
@@ -38,10 +91,36 @@ extern struct scsi_transport_template *
38srp_attach_transport(struct srp_function_template *); 91srp_attach_transport(struct srp_function_template *);
39extern void srp_release_transport(struct scsi_transport_template *); 92extern void srp_release_transport(struct scsi_transport_template *);
40 93
94extern void srp_rport_get(struct srp_rport *rport);
95extern void srp_rport_put(struct srp_rport *rport);
41extern struct srp_rport *srp_rport_add(struct Scsi_Host *, 96extern struct srp_rport *srp_rport_add(struct Scsi_Host *,
42 struct srp_rport_identifiers *); 97 struct srp_rport_identifiers *);
43extern void srp_rport_del(struct srp_rport *); 98extern void srp_rport_del(struct srp_rport *);
44 99extern int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo,
100 int dev_loss_tmo);
101extern int srp_reconnect_rport(struct srp_rport *rport);
102extern void srp_start_tl_fail_timers(struct srp_rport *rport);
45extern void srp_remove_host(struct Scsi_Host *); 103extern void srp_remove_host(struct Scsi_Host *);
46 104
105/**
106 * srp_chkready() - evaluate the transport layer state before I/O
107 *
108 * Returns a SCSI result code that can be returned by the LLD queuecommand()
109 * implementation. The role of this function is similar to that of
110 * fc_remote_port_chkready().
111 */
112static inline int srp_chkready(struct srp_rport *rport)
113{
114 switch (rport->state) {
115 case SRP_RPORT_RUNNING:
116 case SRP_RPORT_BLOCKED:
117 default:
118 return 0;
119 case SRP_RPORT_FAIL_FAST:
120 return DID_TRANSPORT_FAILFAST << 16;
121 case SRP_RPORT_LOST:
122 return DID_NO_CONNECT << 16;
123 }
124}
125
47#endif 126#endif
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index e3ddd86c90a6..cbfdd4ca9510 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -87,10 +87,11 @@ enum {
87 IB_USER_VERBS_CMD_CLOSE_XRCD, 87 IB_USER_VERBS_CMD_CLOSE_XRCD,
88 IB_USER_VERBS_CMD_CREATE_XSRQ, 88 IB_USER_VERBS_CMD_CREATE_XSRQ,
89 IB_USER_VERBS_CMD_OPEN_QP, 89 IB_USER_VERBS_CMD_OPEN_QP,
90#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING 90};
91 IB_USER_VERBS_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD, 91
92 IB_USER_VERBS_CMD_DESTROY_FLOW 92enum {
93#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */ 93 IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
94 IB_USER_VERBS_EX_CMD_DESTROY_FLOW
94}; 95};
95 96
96/* 97/*
@@ -122,22 +123,24 @@ struct ib_uverbs_comp_event_desc {
122 * the rest of the command struct based on these values. 123 */
123 */ 124 */
124 125
126#define IB_USER_VERBS_CMD_COMMAND_MASK 0xff
127#define IB_USER_VERBS_CMD_FLAGS_MASK 0xff000000u
128#define IB_USER_VERBS_CMD_FLAGS_SHIFT 24
129
130#define IB_USER_VERBS_CMD_FLAG_EXTENDED 0x80
131
125struct ib_uverbs_cmd_hdr { 132struct ib_uverbs_cmd_hdr {
126 __u32 command; 133 __u32 command;
127 __u16 in_words; 134 __u16 in_words;
128 __u16 out_words; 135 __u16 out_words;
129}; 136};
130 137
131#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING 138struct ib_uverbs_ex_cmd_hdr {
132struct ib_uverbs_cmd_hdr_ex { 139 __u64 response;
133 __u32 command;
134 __u16 in_words;
135 __u16 out_words;
136 __u16 provider_in_words; 140 __u16 provider_in_words;
137 __u16 provider_out_words; 141 __u16 provider_out_words;
138 __u32 cmd_hdr_reserved; 142 __u32 cmd_hdr_reserved;
139}; 143};
140#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
141 144
142struct ib_uverbs_get_context { 145struct ib_uverbs_get_context {
143 __u64 response; 146 __u64 response;
@@ -700,62 +703,71 @@ struct ib_uverbs_detach_mcast {
700 __u64 driver_data[0]; 703 __u64 driver_data[0];
701}; 704};
702 705
703#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING 706struct ib_uverbs_flow_spec_hdr {
704struct ib_kern_eth_filter { 707 __u32 type;
708 __u16 size;
709 __u16 reserved;
710 /* followed by flow_spec */
711 __u64 flow_spec_data[0];
712};
713
714struct ib_uverbs_flow_eth_filter {
705 __u8 dst_mac[6]; 715 __u8 dst_mac[6];
706 __u8 src_mac[6]; 716 __u8 src_mac[6];
707 __be16 ether_type; 717 __be16 ether_type;
708 __be16 vlan_tag; 718 __be16 vlan_tag;
709}; 719};
710 720
711struct ib_kern_spec_eth { 721struct ib_uverbs_flow_spec_eth {
712 __u32 type; 722 union {
713 __u16 size; 723 struct ib_uverbs_flow_spec_hdr hdr;
714 __u16 reserved; 724 struct {
715 struct ib_kern_eth_filter val; 725 __u32 type;
716 struct ib_kern_eth_filter mask; 726 __u16 size;
727 __u16 reserved;
728 };
729 };
730 struct ib_uverbs_flow_eth_filter val;
731 struct ib_uverbs_flow_eth_filter mask;
717}; 732};
718 733
719struct ib_kern_ipv4_filter { 734struct ib_uverbs_flow_ipv4_filter {
720 __be32 src_ip; 735 __be32 src_ip;
721 __be32 dst_ip; 736 __be32 dst_ip;
722}; 737};
723 738
724struct ib_kern_spec_ipv4 { 739struct ib_uverbs_flow_spec_ipv4 {
725 __u32 type; 740 union {
726 __u16 size; 741 struct ib_uverbs_flow_spec_hdr hdr;
727 __u16 reserved; 742 struct {
728 struct ib_kern_ipv4_filter val; 743 __u32 type;
729 struct ib_kern_ipv4_filter mask; 744 __u16 size;
745 __u16 reserved;
746 };
747 };
748 struct ib_uverbs_flow_ipv4_filter val;
749 struct ib_uverbs_flow_ipv4_filter mask;
730}; 750};
731 751
732struct ib_kern_tcp_udp_filter { 752struct ib_uverbs_flow_tcp_udp_filter {
733 __be16 dst_port; 753 __be16 dst_port;
734 __be16 src_port; 754 __be16 src_port;
735}; 755};
736 756
737struct ib_kern_spec_tcp_udp { 757struct ib_uverbs_flow_spec_tcp_udp {
738 __u32 type;
739 __u16 size;
740 __u16 reserved;
741 struct ib_kern_tcp_udp_filter val;
742 struct ib_kern_tcp_udp_filter mask;
743};
744
745struct ib_kern_spec {
746 union { 758 union {
759 struct ib_uverbs_flow_spec_hdr hdr;
747 struct { 760 struct {
748 __u32 type; 761 __u32 type;
749 __u16 size; 762 __u16 size;
750 __u16 reserved; 763 __u16 reserved;
751 }; 764 };
752 struct ib_kern_spec_eth eth;
753 struct ib_kern_spec_ipv4 ipv4;
754 struct ib_kern_spec_tcp_udp tcp_udp;
755 }; 765 };
766 struct ib_uverbs_flow_tcp_udp_filter val;
767 struct ib_uverbs_flow_tcp_udp_filter mask;
756}; 768};
757 769
758struct ib_kern_flow_attr { 770struct ib_uverbs_flow_attr {
759 __u32 type; 771 __u32 type;
760 __u16 size; 772 __u16 size;
761 __u16 priority; 773 __u16 priority;
@@ -767,13 +779,13 @@ struct ib_kern_flow_attr {
767 * struct ib_flow_spec_xxx 779 * struct ib_flow_spec_xxx
768 * struct ib_flow_spec_yyy 780 * struct ib_flow_spec_yyy
769 */ 781 */
782 struct ib_uverbs_flow_spec_hdr flow_specs[0];
770}; 783};
771 784
772struct ib_uverbs_create_flow { 785struct ib_uverbs_create_flow {
773 __u32 comp_mask; 786 __u32 comp_mask;
774 __u64 response;
775 __u32 qp_handle; 787 __u32 qp_handle;
776 struct ib_kern_flow_attr flow_attr; 788 struct ib_uverbs_flow_attr flow_attr;
777}; 789};
778 790
779struct ib_uverbs_create_flow_resp { 791struct ib_uverbs_create_flow_resp {
@@ -785,7 +797,6 @@ struct ib_uverbs_destroy_flow {
785 __u32 comp_mask; 797 __u32 comp_mask;
786 __u32 flow_handle; 798 __u32 flow_handle;
787}; 799};
788#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
789 800
790struct ib_uverbs_create_srq { 801struct ib_uverbs_create_srq {
791 __u64 response; 802 __u64 response;