Diffstat (limited to 'include')

 -rw-r--r--  include/linux/mlx4/device.h   | 119
 -rw-r--r--  include/linux/mlx4/driver.h   |   3
 -rw-r--r--  include/rdma/ib_cm.h          |  12
 -rw-r--r--  include/rdma/ib_sa.h          |  33
 -rw-r--r--  include/rdma/rdma_cm.h        |  10
 -rw-r--r--  include/rdma/rdma_user_cm.h   |   1

 6 files changed, 176 insertions, 2 deletions
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 4d7761f8c3f6..bd6c9fcdf2dd 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -57,6 +57,13 @@ enum {
 	MLX4_MAX_PORTS		= 2
 };
 
+/* base qkey for use in sriov tunnel-qp/proxy-qp communication.
+ * These qkeys must not be allowed for general use. This is a 64k range,
+ * and to test for violation, we use the mask (protect against future chg).
+ */
+#define MLX4_RESERVED_QKEY_BASE  (0xFFFF0000)
+#define MLX4_RESERVED_QKEY_MASK  (0xFFFF0000)
+
 enum {
 	MLX4_BOARD_ID_LEN = 64
 };
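The reserved-qkey comment above hints at the intended check: masking keeps the test valid even if the reserved window is later enlarged. A minimal sketch of such a check (the helper name is illustrative, not part of this patch):

#include <linux/mlx4/device.h>

/* Hypothetical validation helper: any qkey whose high 16 bits match the
 * reserved base falls inside the SR-IOV tunnel/proxy-QP range and must
 * be refused for general use.
 */
static inline bool mlx4_qkey_is_reserved(u32 qkey)
{
	return (qkey & MLX4_RESERVED_QKEY_MASK) == MLX4_RESERVED_QKEY_BASE;
}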
@@ -127,7 +134,8 @@ enum {
 	MLX4_DEV_CAP_FLAG_VEP_UC_STEER	= 1LL << 41,
 	MLX4_DEV_CAP_FLAG_VEP_MC_STEER	= 1LL << 42,
 	MLX4_DEV_CAP_FLAG_COUNTERS	= 1LL << 48,
-	MLX4_DEV_CAP_FLAG_SENSE_SUPPORT	= 1LL << 55
+	MLX4_DEV_CAP_FLAG_SENSE_SUPPORT	= 1LL << 55,
+	MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV = 1LL << 59,
 };
 
 enum {
@@ -170,6 +178,7 @@ enum mlx4_event {
 	MLX4_EVENT_TYPE_COMM_CHANNEL	   = 0x18,
 	MLX4_EVENT_TYPE_FATAL_WARNING	   = 0x1b,
 	MLX4_EVENT_TYPE_FLR_EVENT	   = 0x1c,
+	MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT = 0x1d,
 	MLX4_EVENT_TYPE_NONE		   = 0xff,
 };
 
@@ -267,12 +276,32 @@ enum {
 	MLX4_MAX_FAST_REG_PAGES = 511,
 };
 
+enum {
+	MLX4_DEV_PMC_SUBTYPE_GUID_INFO	 = 0x14,
+	MLX4_DEV_PMC_SUBTYPE_PORT_INFO	 = 0x15,
+	MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE	 = 0x16,
+};
+
+/* Port mgmt change event handling */
+enum {
+	MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK	= 1 << 0,
+	MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK		= 1 << 1,
+	MLX4_EQ_PORT_INFO_LID_CHANGE_MASK		= 1 << 2,
+	MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK		= 1 << 3,
+	MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK	= 1 << 4,
+};
+
+#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
+			     MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK)
+
 static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
 {
 	return (major << 32) | (minor << 16) | subminor;
 }
 
 struct mlx4_phys_caps {
+	u32			gid_phys_table_len[MLX4_MAX_PORTS + 1];
+	u32			pkey_phys_table_len[MLX4_MAX_PORTS + 1];
 	u32			num_phys_eqs;
 };
 
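For reference, a short sketch of how a consumer might read the new per-port physical table lengths alongside the existing mlx4_fw_ver() helper. It assumes the dev->phys_caps and dev->caps.fw_ver members of struct mlx4_dev; the version threshold and printouts are purely illustrative:

#include <linux/printk.h>
#include <linux/mlx4/device.h>

static void demo_dump_phys_tables(struct mlx4_dev *dev)
{
	int port;

	/* mlx4_fw_ver() packs major<<32 | minor<<16 | subminor, so plain
	 * integer comparison orders firmware versions correctly.
	 */
	if (dev->caps.fw_ver < mlx4_fw_ver(2, 9, 1000))
		pr_info("firmware 0x%llx predates the assumed threshold\n",
			(unsigned long long) dev->caps.fw_ver);

	/* Physical (per-HCA) GID/P_Key table sizes, indexed from port 1. */
	for (port = 1; port <= MLX4_MAX_PORTS; ++port)
		pr_info("port %d: %u GIDs, %u P_Keys\n", port,
			dev->phys_caps.gid_phys_table_len[port],
			dev->phys_caps.pkey_phys_table_len[port]);
}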
@@ -305,6 +334,8 @@ struct mlx4_caps {
 	int			max_qp_init_rdma;
 	int			max_qp_dest_rdma;
 	int			sqp_start;
+	u32			base_sqpn;
+	u32			base_tunnel_sqpn;
 	int			num_srqs;
 	int			max_srq_wqes;
 	int			max_srq_sge;
@@ -547,6 +578,81 @@ struct mlx4_dev {
 	u64			regid_allmulti_array[MLX4_MAX_PORTS + 1];
 };
 
+struct mlx4_eqe {
+	u8			reserved1;
+	u8			type;
+	u8			reserved2;
+	u8			subtype;
+	union {
+		u32		raw[6];
+		struct {
+			__be32	cqn;
+		} __packed comp;
+		struct {
+			u16	reserved1;
+			__be16	token;
+			u32	reserved2;
+			u8	reserved3[3];
+			u8	status;
+			__be64	out_param;
+		} __packed cmd;
+		struct {
+			__be32	qpn;
+		} __packed qp;
+		struct {
+			__be32	srqn;
+		} __packed srq;
+		struct {
+			__be32	cqn;
+			u32	reserved1;
+			u8	reserved2[3];
+			u8	syndrome;
+		} __packed cq_err;
+		struct {
+			u32	reserved1[2];
+			__be32	port;
+		} __packed port_change;
+		struct {
+			#define COMM_CHANNEL_BIT_ARRAY_SIZE	4
+			u32 reserved;
+			u32 bit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE];
+		} __packed comm_channel_arm;
+		struct {
+			u8	port;
+			u8	reserved[3];
+			__be64	mac;
+		} __packed mac_update;
+		struct {
+			__be32	slave_id;
+		} __packed flr_event;
+		struct {
+			__be16	current_temperature;
+			__be16	warning_threshold;
+		} __packed warming;
+		struct {
+			u8 reserved[3];
+			u8 port;
+			union {
+				struct {
+					__be16 mstr_sm_lid;
+					__be16 port_lid;
+					__be32 changed_attr;
+					u8 reserved[3];
+					u8 mstr_sm_sl;
+					__be64 gid_prefix;
+				} __packed port_info;
+				struct {
+					__be32 block_ptr;
+					__be32 tbl_entries_mask;
+				} __packed tbl_change_info;
+			} params;
+		} __packed port_mgmt_change;
+	} event;
+	u8			slave_id;
+	u8			reserved3[2];
+	u8			owner;
+} __packed;
+
 struct mlx4_init_port_param {
 	int			set_guid0;
 	int			set_node_guid;
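Taken together, the subtype codes, the change masks, and the now-exported mlx4_eqe layout are enough to decode a port management change event. A hedged sketch of such a decoder; the function, its callers, and the debug prints are illustrative only:

#include <linux/mlx4/device.h>

/* Illustrative decoder for a PORT_MNG_CHG EQE laid out as above. */
static void demo_handle_pmc_eqe(struct mlx4_dev *dev, struct mlx4_eqe *eqe)
{
	u8 port = eqe->event.port_mgmt_change.port;

	switch (eqe->subtype) {
	case MLX4_DEV_PMC_SUBTYPE_PORT_INFO: {
		u32 changed = be32_to_cpu(
			eqe->event.port_mgmt_change.params.port_info.changed_attr);

		if (changed & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK)
			mlx4_dbg(dev, "port %d: LID changed\n", port);
		if (changed & MSTR_SM_CHANGE_MASK)
			mlx4_dbg(dev, "port %d: master SM LID/SL changed\n", port);
		if (changed & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK)
			mlx4_dbg(dev, "port %d: client reregister requested\n", port);
		break;
	}
	case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE:
	case MLX4_DEV_PMC_SUBTYPE_GUID_INFO:
		/* Table contents changed; params.tbl_change_info identifies
		 * the affected block.
		 */
		mlx4_dbg(dev, "port %d: table change, subtype 0x%x\n", port,
			 eqe->subtype);
		break;
	}
}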
@@ -570,6 +676,15 @@ struct mlx4_init_port_param {
 	if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \
 		((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
 
+#define MLX4_INVALID_SLAVE_ID	0xFF
+
+void handle_port_mgmt_change_event(struct work_struct *work);
+
+static inline int mlx4_master_func_num(struct mlx4_dev *dev)
+{
+	return dev->caps.function;
+}
+
 static inline int mlx4_is_master(struct mlx4_dev *dev)
 {
 	return dev->flags & MLX4_FLAG_MASTER;
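A small sketch of how SR-IOV-aware code might combine the new helpers; the function name and its use are an assumption for illustration:

#include <linux/mlx4/device.h>

/* Hypothetical check: is this slave id valid and does it belong to the
 * master (PF) function rather than a VF?
 */
static bool demo_slave_is_master(struct mlx4_dev *dev, u8 slave)
{
	if (slave == MLX4_INVALID_SLAVE_ID)
		return false;
	return slave == mlx4_master_func_num(dev);
}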
@@ -799,4 +914,6 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
 			   struct mlx4_net_trans_rule *rule, u64 *reg_id);
 int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id);
 
+int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey);
+
 #endif /* MLX4_DEVICE_H */
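The new mlx4_get_parav_qkey() export maps a proxy/tunnel QP number to a qkey inside the reserved range defined at the top of the file. A minimal caller sketch; the surrounding QP setup and the function name are assumptions, not part of this patch:

#include <linux/mlx4/device.h>

/* Hypothetical caller: fetch the paravirtualized qkey for a tunnel QP. */
static int demo_setup_tunnel_qkey(struct mlx4_dev *dev, u32 tunnel_qpn,
				  u32 *qkey)
{
	int err;

	err = mlx4_get_parav_qkey(dev, tunnel_qpn, qkey);
	if (err)
		return err;

	/* The result is expected to fall inside the 64K window starting at
	 * MLX4_RESERVED_QKEY_BASE, so it must never be handed out for
	 * general use.
	 */
	return 0;
}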
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index 8dc485febc6b..d813704b963b 100644
--- a/include/linux/mlx4/driver.h
+++ b/include/linux/mlx4/driver.h
@@ -44,13 +44,14 @@ enum mlx4_dev_event {
 	MLX4_DEV_EVENT_PORT_UP,
 	MLX4_DEV_EVENT_PORT_DOWN,
 	MLX4_DEV_EVENT_PORT_REINIT,
+	MLX4_DEV_EVENT_PORT_MGMT_CHANGE,
 };
 
 struct mlx4_interface {
 	void *			(*add)	 (struct mlx4_dev *dev);
 	void			(*remove)(struct mlx4_dev *dev, void *context);
 	void			(*event) (struct mlx4_dev *dev, void *context,
-					  enum mlx4_dev_event event, int port);
+					  enum mlx4_dev_event event, unsigned long param);
 	void *			(*get_dev)(struct mlx4_dev *dev, void *context, u8 port);
 	struct list_head	list;
 	enum mlx4_protocol	protocol;
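Widening the callback argument from int port to unsigned long lets the core keep passing the port number for port state events while handing the new MLX4_DEV_EVENT_PORT_MGMT_CHANGE something richer, such as a pointer cast to unsigned long. A hedged consumer sketch matching the new signature; the callback body and the EQE-pointer assumption are illustrative:

#include <linux/printk.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/driver.h>

static void demo_mlx4_event(struct mlx4_dev *dev, void *context,
			    enum mlx4_dev_event event, unsigned long param)
{
	switch (event) {
	case MLX4_DEV_EVENT_PORT_UP:
	case MLX4_DEV_EVENT_PORT_DOWN:
		/* For port state events, param still carries the port number. */
		pr_info("port %d state change\n", (int) param);
		break;
	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE: {
		/* Assumption: param carries a pointer to the triggering EQE. */
		struct mlx4_eqe *eqe = (struct mlx4_eqe *) param;

		pr_info("port mgmt change, subtype 0x%x\n", eqe->subtype);
		break;
	}
	default:
		break;
	}
}

static struct mlx4_interface demo_interface = {
	/* .add and .remove omitted in this sketch */
	.event    = demo_mlx4_event,
	.protocol = MLX4_PROT_IB_IPV6,
};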
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index 83f77ac33957..0e3ff30647d5 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -262,6 +262,18 @@ struct ib_cm_event {
262 void *private_data; 262 void *private_data;
263}; 263};
264 264
265#define CM_REQ_ATTR_ID cpu_to_be16(0x0010)
266#define CM_MRA_ATTR_ID cpu_to_be16(0x0011)
267#define CM_REJ_ATTR_ID cpu_to_be16(0x0012)
268#define CM_REP_ATTR_ID cpu_to_be16(0x0013)
269#define CM_RTU_ATTR_ID cpu_to_be16(0x0014)
270#define CM_DREQ_ATTR_ID cpu_to_be16(0x0015)
271#define CM_DREP_ATTR_ID cpu_to_be16(0x0016)
272#define CM_SIDR_REQ_ATTR_ID cpu_to_be16(0x0017)
273#define CM_SIDR_REP_ATTR_ID cpu_to_be16(0x0018)
274#define CM_LAP_ATTR_ID cpu_to_be16(0x0019)
275#define CM_APR_ATTR_ID cpu_to_be16(0x001A)
276
265/** 277/**
266 * ib_cm_handler - User-defined callback to process communication events. 278 * ib_cm_handler - User-defined callback to process communication events.
267 * @cm_id: Communication identifier associated with the reported event. 279 * @cm_id: Communication identifier associated with the reported event.
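These constants correspond to the attr_id values carried in the common MAD header of CM MADs (REQ, REP, and so on), so code that demultiplexes or snoops CM traffic can compare against them directly. A small hedged example; the classifier and its policy are illustrative:

#include <rdma/ib_mad.h>
#include <rdma/ib_cm.h>

/* Illustrative classifier: is this MAD part of connection establishment? */
static bool demo_is_cm_setup_mad(const struct ib_mad_hdr *hdr)
{
	switch (hdr->attr_id) {
	case CM_REQ_ATTR_ID:
	case CM_REP_ATTR_ID:
	case CM_RTU_ATTR_ID:
	case CM_SIDR_REQ_ATTR_ID:
	case CM_SIDR_REP_ATTR_ID:
		return true;
	default:
		return false;
	}
}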
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index d44a56388a3e..8275e539bace 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -251,6 +251,28 @@ struct ib_sa_service_rec {
 	u64		data64[2];
 };
 
+#define IB_SA_GUIDINFO_REC_LID		IB_SA_COMP_MASK(0)
+#define IB_SA_GUIDINFO_REC_BLOCK_NUM	IB_SA_COMP_MASK(1)
+#define IB_SA_GUIDINFO_REC_RES1		IB_SA_COMP_MASK(2)
+#define IB_SA_GUIDINFO_REC_RES2		IB_SA_COMP_MASK(3)
+#define IB_SA_GUIDINFO_REC_GID0		IB_SA_COMP_MASK(4)
+#define IB_SA_GUIDINFO_REC_GID1		IB_SA_COMP_MASK(5)
+#define IB_SA_GUIDINFO_REC_GID2		IB_SA_COMP_MASK(6)
+#define IB_SA_GUIDINFO_REC_GID3		IB_SA_COMP_MASK(7)
+#define IB_SA_GUIDINFO_REC_GID4		IB_SA_COMP_MASK(8)
+#define IB_SA_GUIDINFO_REC_GID5		IB_SA_COMP_MASK(9)
+#define IB_SA_GUIDINFO_REC_GID6		IB_SA_COMP_MASK(10)
+#define IB_SA_GUIDINFO_REC_GID7		IB_SA_COMP_MASK(11)
+
+struct ib_sa_guidinfo_rec {
+	__be16	lid;
+	u8	block_num;
+	/* reserved */
+	u8	res1;
+	__be32	res2;
+	u8	guid_info_list[64];
+};
+
 struct ib_sa_client {
 	atomic_t users;
 	struct completion comp;
@@ -385,4 +407,15 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
  */
 void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec);
 
+/* Support GuidInfoRecord */
+int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
+			      struct ib_device *device, u8 port_num,
+			      struct ib_sa_guidinfo_rec *rec,
+			      ib_sa_comp_mask comp_mask, u8 method,
+			      int timeout_ms, gfp_t gfp_mask,
+			      void (*callback)(int status,
+					       struct ib_sa_guidinfo_rec *resp,
+					       void *context),
+			      void *context,
+			      struct ib_sa_query **sa_query);
 #endif /* IB_SA_H */
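A hedged sketch of issuing a GuidInfoRecord query with the new API. The method value, timeout, and callback body are assumptions for illustration; real callers choose the SA method they need, register the ib_sa_client beforehand, and keep the returned ib_sa_query handle for cancellation:

#include <linux/gfp.h>
#include <linux/printk.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_sa.h>

static struct ib_sa_client demo_sa_client;

static void demo_guidinfo_cb(int status, struct ib_sa_guidinfo_rec *resp,
			     void *context)
{
	if (!status && resp)
		pr_info("GUID block %d for LID 0x%x received\n",
			resp->block_num, be16_to_cpu(resp->lid));
}

static int demo_query_guid_block(struct ib_device *device, u8 port_num,
				 u16 lid, u8 block_num)
{
	struct ib_sa_guidinfo_rec rec = {
		.lid	   = cpu_to_be16(lid),
		.block_num = block_num,
	};
	struct ib_sa_query *query;

	/* Assumes ib_sa_register_client(&demo_sa_client) ran at module init. */
	return ib_sa_guid_info_rec_query(&demo_sa_client, device, port_num,
					 &rec,
					 IB_SA_GUIDINFO_REC_LID |
					 IB_SA_GUIDINFO_REC_BLOCK_NUM,
					 IB_MGMT_METHOD_GET,
					 1000, GFP_KERNEL,
					 demo_guidinfo_cb, NULL, &query);
}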
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 51988f808181..ad3a3142383a 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -357,4 +357,14 @@ void rdma_set_service_type(struct rdma_cm_id *id, int tos);
  */
 int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse);
 
+/**
+ * rdma_set_afonly - Specify that listens are restricted to the
+ *    bound address family only.
+ * @id: Communication identifier to configure.
+ * @afonly: Value indicating if listens are restricted.
+ *
+ * Must be set before identifier is in the listening state.
+ */
+int rdma_set_afonly(struct rdma_cm_id *id, int afonly);
+
 #endif /* RDMA_CM_H */
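The ordering constraint in the comment matters: apply the flag after binding but before rdma_listen(). A minimal kernel-side sketch, assuming an already created rdma_cm_id and a sockaddr to bind:

#include <rdma/rdma_cm.h>

/* Illustrative listener setup restricted to the bound address family. */
static int demo_listen_af_only(struct rdma_cm_id *id, struct sockaddr *addr)
{
	int ret;

	ret = rdma_bind_addr(id, addr);
	if (ret)
		return ret;

	/* Must precede the transition to the listening state. */
	ret = rdma_set_afonly(id, 1);
	if (ret)
		return ret;

	return rdma_listen(id, 0);
}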
diff --git a/include/rdma/rdma_user_cm.h b/include/rdma/rdma_user_cm.h
index 5348a000c8f3..1ee9239ff8c2 100644
--- a/include/rdma/rdma_user_cm.h
+++ b/include/rdma/rdma_user_cm.h
@@ -224,6 +224,7 @@ enum {
 enum {
 	RDMA_OPTION_ID_TOS	 = 0,
 	RDMA_OPTION_ID_REUSEADDR = 1,
+	RDMA_OPTION_ID_AFONLY	 = 2,
 	RDMA_OPTION_IB_PATH	 = 1
 };
 
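RDMA_OPTION_ID_AFONLY is the value user space passes through the ucma set-option path; with librdmacm this typically surfaces via rdma_set_option(). The wrapper usage below is an assumption about the userspace library, not part of this header:

#include <rdma/rdma_cma.h>
#include <rdma/rdma_user_cm.h>

/* Userspace sketch: restrict a listening id to its bound address family. */
static int demo_set_afonly(struct rdma_cm_id *id)
{
	int afonly = 1;

	return rdma_set_option(id, RDMA_OPTION_ID, RDMA_OPTION_ID_AFONLY,
			       &afonly, sizeof(afonly));
}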