Diffstat (limited to 'drivers/infiniband/hw/mlx4')
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c    27
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c    5
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c     22
3 files changed, 34 insertions, 20 deletions
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 606f1e2ef284..19e68ab66168 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -147,7 +147,8 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
  * Snoop SM MADs for port info and P_Key table sets, so we can
  * synthesize LID change and P_Key change events.
  */
-static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad)
+static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
+		      u16 prev_lid)
 {
 	struct ib_event event;
 
@@ -157,6 +158,7 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad)
 	if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {
 		struct ib_port_info *pinfo =
 			(struct ib_port_info *) ((struct ib_smp *) mad)->data;
+		u16 lid = be16_to_cpu(pinfo->lid);
 
 		update_sm_ah(to_mdev(ibdev), port_num,
 			     be16_to_cpu(pinfo->sm_lid),
@@ -165,12 +167,15 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad)
 		event.device	       = ibdev;
 		event.element.port_num = port_num;
 
-		if (pinfo->clientrereg_resv_subnetto & 0x80)
+		if (pinfo->clientrereg_resv_subnetto & 0x80) {
 			event.event    = IB_EVENT_CLIENT_REREGISTER;
-		else
-			event.event    = IB_EVENT_LID_CHANGE;
+			ib_dispatch_event(&event);
+		}
 
-		ib_dispatch_event(&event);
+		if (prev_lid != lid) {
+			event.event    = IB_EVENT_LID_CHANGE;
+			ib_dispatch_event(&event);
+		}
 	}
 
 	if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) {
@@ -228,8 +233,9 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 			struct ib_wc *in_wc, struct ib_grh *in_grh,
 			struct ib_mad *in_mad, struct ib_mad *out_mad)
 {
-	u16 slid;
+	u16 slid, prev_lid = 0;
 	int err;
+	struct ib_port_attr pattr;
 
 	slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
 
@@ -263,6 +269,13 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 	} else
 		return IB_MAD_RESULT_SUCCESS;
 
+	if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
+	     in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
+	    in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
+	    in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
+	    !ib_query_port(ibdev, port_num, &pattr))
+		prev_lid = pattr.lid;
+
 	err = mlx4_MAD_IFC(to_mdev(ibdev),
 			   mad_flags & IB_MAD_IGNORE_MKEY,
 			   mad_flags & IB_MAD_IGNORE_BKEY,
@@ -271,7 +284,7 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 		return IB_MAD_RESULT_FAILURE;
 
 	if (!out_mad->mad_hdr.status) {
-		smp_snoop(ibdev, port_num, in_mad);
+		smp_snoop(ibdev, port_num, in_mad, prev_lid);
 		node_desc_override(ibdev, out_mad);
 	}
 
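
The mad.c hunks above read the port's current LID (via ib_query_port()) before a PortInfo SET is passed to the firmware, hand it to smp_snoop() as prev_lid, and then raise IB_EVENT_LID_CHANGE only when the LID actually changed; client-reregister requests are now reported independently rather than as an else-branch. As a rough sketch of the resulting dispatch logic (the helper name and its parameters are invented for illustration and are not part of the driver):

#include <rdma/ib_verbs.h>

/* Illustrative only: mirrors the post-patch smp_snoop() event dispatch. */
static void report_portinfo_events(struct ib_device *ibdev, u8 port_num,
				   u16 prev_lid, u16 new_lid, bool client_rereg)
{
	struct ib_event event = {
		.device           = ibdev,
		.element.port_num = port_num,
	};

	if (client_rereg) {
		/* Client-reregister requests from the SM are always reported. */
		event.event = IB_EVENT_CLIENT_REREGISTER;
		ib_dispatch_event(&event);
	}

	if (prev_lid != new_lid) {
		/* A LID change event is raised only on a real change. */
		event.event = IB_EVENT_LID_CHANGE;
		ib_dispatch_event(&event);
	}
}
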
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 61588bd273bd..2ccb9d31771f 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -699,11 +699,12 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
 	struct mlx4_ib_dev *ibdev = ibdev_ptr;
 	int p;
 
+	mlx4_ib_mad_cleanup(ibdev);
+	ib_unregister_device(&ibdev->ib_dev);
+
 	for (p = 1; p <= ibdev->num_ports; ++p)
 		mlx4_CLOSE_PORT(dev, p);
 
-	mlx4_ib_mad_cleanup(ibdev);
-	ib_unregister_device(&ibdev->ib_dev);
 	iounmap(ibdev->uar_map);
 	mlx4_uar_free(dev, &ibdev->priv_uar);
 	mlx4_pd_free(dev, ibdev->priv_pdn);
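
The main.c hunk only reorders the teardown path: MAD processing is stopped and the IB device is unregistered before the HCA ports are closed, so no consumer can issue MADs or verbs against a port that has already been closed. A minimal sketch of the resulting ordering, with the trailing resource-release calls abbreviated:

/* Sketch of the post-patch teardown order; the final cleanup is elided. */
static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
{
	struct mlx4_ib_dev *ibdev = ibdev_ptr;
	int p;

	/* Unhook from the IB core first ... */
	mlx4_ib_mad_cleanup(ibdev);
	ib_unregister_device(&ibdev->ib_dev);

	/* ... then it is safe to close the HCA ports. */
	for (p = 1; p <= ibdev->num_ports; ++p)
		mlx4_CLOSE_PORT(dev, p);

	/* ... remaining teardown (uar, pd, etc.) as in the hunk above. */
}
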
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index a91cb4c3fa5c..f385a24d31d2 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -71,17 +71,17 @@ enum {
 };
 
 static const __be32 mlx4_ib_opcode[] = {
-	[IB_WR_SEND]			= __constant_cpu_to_be32(MLX4_OPCODE_SEND),
-	[IB_WR_LSO]			= __constant_cpu_to_be32(MLX4_OPCODE_LSO),
-	[IB_WR_SEND_WITH_IMM]		= __constant_cpu_to_be32(MLX4_OPCODE_SEND_IMM),
-	[IB_WR_RDMA_WRITE]		= __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
-	[IB_WR_RDMA_WRITE_WITH_IMM]	= __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
-	[IB_WR_RDMA_READ]		= __constant_cpu_to_be32(MLX4_OPCODE_RDMA_READ),
-	[IB_WR_ATOMIC_CMP_AND_SWP]	= __constant_cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
-	[IB_WR_ATOMIC_FETCH_AND_ADD]	= __constant_cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
-	[IB_WR_SEND_WITH_INV]		= __constant_cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
-	[IB_WR_LOCAL_INV]		= __constant_cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
-	[IB_WR_FAST_REG_MR]		= __constant_cpu_to_be32(MLX4_OPCODE_FMR),
+	[IB_WR_SEND]			= cpu_to_be32(MLX4_OPCODE_SEND),
+	[IB_WR_LSO]			= cpu_to_be32(MLX4_OPCODE_LSO),
+	[IB_WR_SEND_WITH_IMM]		= cpu_to_be32(MLX4_OPCODE_SEND_IMM),
+	[IB_WR_RDMA_WRITE]		= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
+	[IB_WR_RDMA_WRITE_WITH_IMM]	= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
+	[IB_WR_RDMA_READ]		= cpu_to_be32(MLX4_OPCODE_RDMA_READ),
+	[IB_WR_ATOMIC_CMP_AND_SWP]	= cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
+	[IB_WR_ATOMIC_FETCH_AND_ADD]	= cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
+	[IB_WR_SEND_WITH_INV]		= cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
+	[IB_WR_LOCAL_INV]		= cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
+	[IB_WR_FAST_REG_MR]		= cpu_to_be32(MLX4_OPCODE_FMR),
 };
 
 static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
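
The qp.c hunk is a mechanical cleanup: with current kernels the byteorder helpers constant-fold when given a compile-time constant, so cpu_to_be32() can be used directly in static initializers and the __constant_cpu_to_be32() spelling is no longer needed. A small standalone example of the same pattern (the table name and values are invented for illustration):

#include <linux/types.h>
#include <asm/byteorder.h>

/* cpu_to_be32() on constant arguments is evaluated at compile time,
 * so no __constant_ variant is required in a static initializer.
 */
static const __be32 example_opcode_table[] = {
	[0] = cpu_to_be32(0x0a),
	[1] = cpu_to_be32(0x0b),
};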