Diffstat (limited to 'drivers/infiniband/hw/mlx4/mlx4_ib.h')
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h  26
1 file changed, 25 insertions(+), 1 deletion(-)
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 6eb743f65f6f..f829fd935b79 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -110,6 +110,9 @@ struct mlx4_ib_cq {
 	struct mutex resize_mutex;
 	struct ib_umem *umem;
 	struct ib_umem *resize_umem;
+	/* List of qps that it serves.*/
+	struct list_head send_qp_list;
+	struct list_head recv_qp_list;
 };
 
 struct mlx4_ib_mr {
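The two list heads added to struct mlx4_ib_cq pair with the cq_send_list/cq_recv_list entries added to struct mlx4_ib_qp in a later hunk: each CQ keeps an intrusive list of the QPs that post work to it. Below is a minimal sketch of how a QP could be linked in and how the send-side list could be walked; the helper names and the choice of lock are assumptions for illustration, not the driver's actual code (which is not part of this header).

#include <linux/list.h>
#include <linux/printk.h>
#include <linux/spinlock.h>
#include "mlx4_ib.h"

/* Hypothetical sketch: link a QP onto the lists of its send and
 * receive CQs under a caller-supplied lock.
 */
static void sketch_attach_qp_to_cqs(struct mlx4_ib_qp *qp,
				    struct mlx4_ib_cq *send_cq,
				    struct mlx4_ib_cq *recv_cq,
				    spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail(&qp->cq_send_list, &send_cq->send_qp_list);
	list_add_tail(&qp->cq_recv_list, &recv_cq->recv_qp_list);
	spin_unlock_irqrestore(lock, flags);
}

/* Hypothetical sketch: visit every QP whose send queue uses this CQ. */
static void sketch_for_each_send_qp(struct mlx4_ib_cq *cq)
{
	struct mlx4_ib_qp *qp;

	list_for_each_entry(qp, &cq->send_qp_list, cq_send_list)
		pr_debug("QP %p posts sends to this CQ\n", qp);
}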
@@ -134,10 +137,17 @@ struct mlx4_ib_fmr {
 	struct mlx4_fmr mfmr;
 };
 
+#define MAX_REGS_PER_FLOW 2
+
+struct mlx4_flow_reg_id {
+	u64 id;
+	u64 mirror;
+};
+
 struct mlx4_ib_flow {
 	struct ib_flow ibflow;
 	/* translating DMFS verbs sniffer rule to FW API requires two reg IDs */
-	u64 reg_id[2];
+	struct mlx4_flow_reg_id reg_id[MAX_REGS_PER_FLOW];
 };
 
 struct mlx4_ib_wq {
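With reg_id now an array of struct mlx4_flow_reg_id, a flow can carry up to MAX_REGS_PER_FLOW firmware registrations, each with an optional mirror ID. A hedged sketch of the tear-down loop this structure implies follows; the detach callback and the "zero means unused slot" convention are assumptions for illustration, since the firmware call itself is not declared in this header.

#include <linux/types.h>
#include "mlx4_ib.h"

/* Hypothetical sketch: release the primary and mirror registration of
 * every populated slot.  'detach' stands in for whatever call frees a
 * registration ID.
 */
static void sketch_release_flow_regs(struct mlx4_ib_flow *mflow,
				     void (*detach)(u64 reg_id))
{
	int i;

	for (i = 0; i < MAX_REGS_PER_FLOW && mflow->reg_id[i].id; i++) {
		detach(mflow->reg_id[i].id);
		if (mflow->reg_id[i].mirror)
			detach(mflow->reg_id[i].mirror);
	}
}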
@@ -293,6 +303,9 @@ struct mlx4_ib_qp {
 	struct mlx4_roce_smac_vlan_info pri;
 	struct mlx4_roce_smac_vlan_info alt;
 	u64 reg_id;
+	struct list_head qps_list;
+	struct list_head cq_recv_list;
+	struct list_head cq_send_list;
 };
 
 struct mlx4_ib_srq {
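The three list_head members added to struct mlx4_ib_qp are the QP-side anchors: cq_send_list/cq_recv_list link the QP onto its CQs' lists above, and qps_list links it onto the per-device qp_list added in the next hunk. Like any intrusive list entry they need initialising before the QP is linked anywhere; a minimal sketch, with the function name being an assumption:

#include <linux/list.h>
#include "mlx4_ib.h"

/* Hypothetical sketch: initialise the new list heads at QP creation
 * time, before the QP is added to any CQ or device list.
 */
static void sketch_init_qp_list_heads(struct mlx4_ib_qp *qp)
{
	INIT_LIST_HEAD(&qp->qps_list);
	INIT_LIST_HEAD(&qp->cq_send_list);
	INIT_LIST_HEAD(&qp->cq_recv_list);
}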
@@ -527,6 +540,10 @@ struct mlx4_ib_dev {
 	struct mlx4_ib_qp *qp1_proxy[MLX4_MAX_PORTS];
 	/* lock when destroying qp1_proxy and getting netdev events */
 	struct mutex qp1_proxy_lock[MLX4_MAX_PORTS];
+	u8 bond_next_port;
+	/* protect resources needed as part of reset flow */
+	spinlock_t reset_flow_resource_lock;
+	struct list_head qp_list;
 };
 
 struct ib_event_work {
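struct mlx4_ib_dev now also carries a round-robin bond_next_port counter, a device-wide qp_list, and reset_flow_resource_lock, which its comment says protects resources used during a reset flow. The sketch below shows one way a QP could be added to the device list under that lock and how all tracked QPs could be visited when a reset occurs; the helper names and the exact work done per QP are assumptions for illustration.

#include <linux/list.h>
#include <linux/printk.h>
#include <linux/spinlock.h>
#include "mlx4_ib.h"

/* Hypothetical sketch: register a QP on the device-wide list. */
static void sketch_track_qp(struct mlx4_ib_dev *ibdev, struct mlx4_ib_qp *qp)
{
	unsigned long flags;

	spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
	list_add_tail(&qp->qps_list, &ibdev->qp_list);
	spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
}

/* Hypothetical sketch: walk every tracked QP while a reset flow holds
 * the protecting lock.
 */
static void sketch_on_reset(struct mlx4_ib_dev *ibdev)
{
	struct mlx4_ib_qp *qp;
	unsigned long flags;

	spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
	list_for_each_entry(qp, &ibdev->qp_list, qps_list)
		pr_debug("QP %p needs reset handling\n", qp);
	spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
}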
@@ -622,6 +639,13 @@ static inline struct mlx4_ib_ah *to_mah(struct ib_ah *ibah)
 	return container_of(ibah, struct mlx4_ib_ah, ibah);
 }
 
+static inline u8 mlx4_ib_bond_next_port(struct mlx4_ib_dev *dev)
+{
+	dev->bond_next_port = (dev->bond_next_port + 1) % dev->num_ports;
+
+	return dev->bond_next_port + 1;
+}
+
 int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev);
 void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev);
 