 drivers/infiniband/hw/mlx4/mad.c             | 118
 drivers/infiniband/hw/mlx4/main.c            |  29
 drivers/infiniband/hw/mlx4/mlx4_ib.h         |   9
 drivers/net/ethernet/mellanox/mlx4/en_main.c |   5
 drivers/net/ethernet/mellanox/mlx4/eq.c      |  22
 drivers/net/ethernet/mellanox/mlx4/fw.c      |   1
 drivers/net/ethernet/mellanox/mlx4/intf.c    |   5
 drivers/net/ethernet/mellanox/mlx4/mlx4.h    |  63
 include/linux/mlx4/device.h                  |  99
 include/linux/mlx4/driver.h                  |   3
 10 files changed, 249 insertions(+), 105 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 84786a9fb64f..58c45fb5bd31 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -147,47 +147,49 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
 }
 
 /*
- * Snoop SM MADs for port info and P_Key table sets, so we can
- * synthesize LID change and P_Key change events.
+ * Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can
+ * synthesize LID change, Client-Rereg, GID change, and P_Key change events.
  */
 static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
 		      u16 prev_lid)
 {
-	struct ib_event event;
+	struct ib_port_info *pinfo;
+	u16 lid;
 
+	struct mlx4_ib_dev *dev = to_mdev(ibdev);
 	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
 	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
-	    mad->mad_hdr.method == IB_MGMT_METHOD_SET) {
-		if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {
-			struct ib_port_info *pinfo =
-				(struct ib_port_info *) ((struct ib_smp *) mad)->data;
-			u16 lid = be16_to_cpu(pinfo->lid);
+	    mad->mad_hdr.method == IB_MGMT_METHOD_SET)
+		switch (mad->mad_hdr.attr_id) {
+		case IB_SMP_ATTR_PORT_INFO:
+			pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data;
+			lid = be16_to_cpu(pinfo->lid);
 
-			update_sm_ah(to_mdev(ibdev), port_num,
+			update_sm_ah(dev, port_num,
 				     be16_to_cpu(pinfo->sm_lid),
 				     pinfo->neighbormtu_mastersmsl & 0xf);
 
-			event.device	       = ibdev;
-			event.element.port_num = port_num;
+			if (pinfo->clientrereg_resv_subnetto & 0x80)
+				mlx4_ib_dispatch_event(dev, port_num,
+						       IB_EVENT_CLIENT_REREGISTER);
 
-			if (pinfo->clientrereg_resv_subnetto & 0x80) {
-				event.event    = IB_EVENT_CLIENT_REREGISTER;
-				ib_dispatch_event(&event);
-			}
+			if (prev_lid != lid)
+				mlx4_ib_dispatch_event(dev, port_num,
+						       IB_EVENT_LID_CHANGE);
+			break;
 
-			if (prev_lid != lid) {
-				event.event    = IB_EVENT_LID_CHANGE;
-				ib_dispatch_event(&event);
-			}
-		}
+		case IB_SMP_ATTR_PKEY_TABLE:
+			mlx4_ib_dispatch_event(dev, port_num,
+					       IB_EVENT_PKEY_CHANGE);
+			break;
 
-		if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) {
-			event.device	       = ibdev;
-			event.event	       = IB_EVENT_PKEY_CHANGE;
-			event.element.port_num = port_num;
-			ib_dispatch_event(&event);
+		case IB_SMP_ATTR_GUID_INFO:
+			mlx4_ib_dispatch_event(dev, port_num,
+					       IB_EVENT_GID_CHANGE);
+			break;
+		default:
+			break;
 		}
-	}
 }
 
 static void node_desc_override(struct ib_device *dev,
@@ -305,7 +307,8 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 		return IB_MAD_RESULT_FAILURE;
 
 	if (!out_mad->mad_hdr.status) {
-		smp_snoop(ibdev, port_num, in_mad, prev_lid);
+		if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV))
+			smp_snoop(ibdev, port_num, in_mad, prev_lid);
 		node_desc_override(ibdev, out_mad);
 	}
 
@@ -446,3 +449,62 @@ void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
 			ib_destroy_ah(dev->sm_ah[p]);
 	}
 }
+
+void handle_port_mgmt_change_event(struct work_struct *work)
+{
+	struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
+	struct mlx4_ib_dev *dev = ew->ib_dev;
+	struct mlx4_eqe *eqe = &(ew->ib_eqe);
+	u8 port = eqe->event.port_mgmt_change.port;
+	u32 changed_attr;
+
+	switch (eqe->subtype) {
+	case MLX4_DEV_PMC_SUBTYPE_PORT_INFO:
+		changed_attr = be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr);
+
+		/* Update the SM ah - This should be done before handling
+		   the other changed attributes so that MADs can be sent to the SM */
+		if (changed_attr & MSTR_SM_CHANGE_MASK) {
+			u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid);
+			u8 sl = eqe->event.port_mgmt_change.params.port_info.mstr_sm_sl & 0xf;
+			update_sm_ah(dev, port, lid, sl);
+		}
+
+		/* Check if it is a lid change event */
+		if (changed_attr & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK)
+			mlx4_ib_dispatch_event(dev, port, IB_EVENT_LID_CHANGE);
+
+		/* Generate GUID changed event */
+		if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK)
+			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
+
+		if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK)
+			mlx4_ib_dispatch_event(dev, port,
+					       IB_EVENT_CLIENT_REREGISTER);
+		break;
+
+	case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE:
+		mlx4_ib_dispatch_event(dev, port, IB_EVENT_PKEY_CHANGE);
+		break;
+	case MLX4_DEV_PMC_SUBTYPE_GUID_INFO:
+		mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
+		break;
+	default:
+		pr_warn("Unsupported subtype 0x%x for "
+			"Port Management Change event\n", eqe->subtype);
+	}
+
+	kfree(ew);
+}
+
+void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
+			    enum ib_event_type type)
+{
+	struct ib_event event;
+
+	event.device		= &dev->ib_dev;
+	event.element.port_num	= port_num;
+	event.event		= type;
+
+	ib_dispatch_event(&event);
+}
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 5266b49c46ee..4f230c26622d 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -898,7 +898,6 @@ static void update_gids_task(struct work_struct *work)
 	union ib_gid *gids;
 	int err;
 	struct mlx4_dev	*dev = gw->dev->dev;
-	struct ib_event event;
 
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox)) {
@@ -916,10 +915,7 @@ static void update_gids_task(struct work_struct *work)
 		pr_warn("set port command failed\n");
 	else {
 		memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
-		event.device = &gw->dev->ib_dev;
-		event.element.port_num = gw->port;
-		event.event = IB_EVENT_GID_CHANGE;
-		ib_dispatch_event(&event);
+		mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE);
 	}
 
 	mlx4_free_cmd_mailbox(dev, mailbox);
@@ -1383,10 +1379,18 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
 }
 
 static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
-			  enum mlx4_dev_event event, int port)
+			  enum mlx4_dev_event event, unsigned long param)
 {
 	struct ib_event ibev;
 	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
+	struct mlx4_eqe *eqe = NULL;
+	struct ib_event_work *ew;
+	int port = 0;
+
+	if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
+		eqe = (struct mlx4_eqe *)param;
+	else
+		port = (u8)param;
 
 	if (port > ibdev->num_ports)
 		return;
@@ -1405,6 +1409,19 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
 		ibev.event = IB_EVENT_DEVICE_FATAL;
 		break;
 
+	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
+		ew = kmalloc(sizeof *ew, GFP_ATOMIC);
+		if (!ew) {
+			pr_err("failed to allocate memory for events work\n");
+			break;
+		}
+
+		INIT_WORK(&ew->work, handle_port_mgmt_change_event);
+		memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
+		ew->ib_dev = ibdev;
+		handle_port_mgmt_change_event(&ew->work);
+		return;
+
 	default:
 		return;
 	}
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 5f298afaa81f..23bfbf9ee0e0 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -224,6 +224,12 @@ struct mlx4_ib_dev {
 	int eq_added;
 };
 
+struct ib_event_work {
+	struct work_struct	work;
+	struct mlx4_ib_dev	*ib_dev;
+	struct mlx4_eqe		ib_eqe;
+};
+
 static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
 {
 	return container_of(ibdev, struct mlx4_ib_dev, ib_dev);
@@ -381,4 +387,7 @@ static inline int mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
 int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
 		   union ib_gid *gid);
 
+void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
+			    enum ib_event_type type);
+
 #endif /* MLX4_IB_H */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 69ba57270481..a52922ed85c1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -131,7 +131,7 @@ static void *mlx4_en_get_netdev(struct mlx4_dev *dev, void *ctx, u8 port)
 }
 
 static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
-			  enum mlx4_dev_event event, int port)
+			  enum mlx4_dev_event event, unsigned long port)
 {
 	struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr;
 	struct mlx4_en_priv *priv;
@@ -156,7 +156,8 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
 		if (port < 1 || port > dev->caps.num_ports ||
 		    !mdev->pndev[port])
 			return;
-		mlx4_warn(mdev, "Unhandled event %d for port %d\n", event, port);
+		mlx4_warn(mdev, "Unhandled event %d for port %d\n", event,
+			  (int) port);
 	}
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index bce98d9c0039..9b15d0219950 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -82,6 +82,15 @@ enum {
 			       (1ull << MLX4_EVENT_TYPE_FLR_EVENT)	    | \
 			       (1ull << MLX4_EVENT_TYPE_FATAL_WARNING))
 
+static u64 get_async_ev_mask(struct mlx4_dev *dev)
+{
+	u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK;
+	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
+		async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT);
+
+	return async_ev_mask;
+}
+
 static void eq_set_ci(struct mlx4_eq *eq, int req_not)
 {
 	__raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
@@ -473,6 +482,11 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 
 			break;
 
+		case MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT:
+			mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_MGMT_CHANGE,
+					    (unsigned long) eqe);
+			break;
+
 		case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
 		case MLX4_EVENT_TYPE_ECC_DETECT:
 		default:
@@ -956,7 +970,7 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
 		priv->eq_table.have_irq = 1;
 	}
 
-	err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
+	err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
 			  priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
 	if (err)
 		mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
@@ -996,7 +1010,7 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	int i;
 
-	mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
+	mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1,
 		    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
 
 	mlx4_free_irqs(dev);
@@ -1040,7 +1054,7 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
 	mlx4_cmd_use_polling(dev);
 
 	/* Map the new eq to handle all asyncronous events */
-	err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
+	err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
 			  priv->eq_table.eq[i].eqn);
 	if (err) {
 		mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
@@ -1054,7 +1068,7 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
 	}
 
 	/* Return to default */
-	mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
+	mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
 		    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
 	return err;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 4281ce09add8..ee9d6b0b4d20 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -109,6 +109,7 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
 		[41] = "Unicast VEP steering support",
 		[42] = "Multicast VEP steering support",
 		[48] = "Counters support",
+		[59] = "Port management change event support",
 	};
 	int i;
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index b4e9f6f5cc04..116895ac8b35 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -115,7 +115,8 @@ void mlx4_unregister_interface(struct mlx4_interface *intf)
 }
 EXPORT_SYMBOL_GPL(mlx4_unregister_interface);
 
-void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int port)
+void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type,
+			 unsigned long param)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_device_context *dev_ctx;
@@ -125,7 +126,7 @@ void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int por
 
 	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
 		if (dev_ctx->intf->event)
-			dev_ctx->intf->event(dev, dev_ctx->context, type, port);
+			dev_ctx->intf->event(dev, dev_ctx->context, type, param);
 
 	spin_unlock_irqrestore(&priv->ctx_lock, flags);
 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index e5d20220762c..4d11d12b9db4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -338,66 +338,6 @@ struct mlx4_srq_context {
 	__be64			db_rec_addr;
 };
 
-struct mlx4_eqe {
-	u8			reserved1;
-	u8			type;
-	u8			reserved2;
-	u8			subtype;
-	union {
-		u32		raw[6];
-		struct {
-			__be32	cqn;
-		} __packed comp;
-		struct {
-			u16	reserved1;
-			__be16	token;
-			u32	reserved2;
-			u8	reserved3[3];
-			u8	status;
-			__be64	out_param;
-		} __packed cmd;
-		struct {
-			__be32	qpn;
-		} __packed qp;
-		struct {
-			__be32	srqn;
-		} __packed srq;
-		struct {
-			__be32	cqn;
-			u32	reserved1;
-			u8	reserved2[3];
-			u8	syndrome;
-		} __packed cq_err;
-		struct {
-			u32	reserved1[2];
-			__be32	port;
-		} __packed port_change;
-		struct {
-			#define COMM_CHANNEL_BIT_ARRAY_SIZE	4
-			u32 reserved;
-			u32 bit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE];
-		} __packed comm_channel_arm;
-		struct {
-			u8	port;
-			u8	reserved[3];
-			__be64	mac;
-		} __packed mac_update;
-		struct {
-			u8	port;
-		} __packed sw_event;
-		struct {
-			__be32	slave_id;
-		} __packed flr_event;
-		struct {
-			__be16	current_temperature;
-			__be16	warning_threshold;
-		} __packed warming;
-	} event;
-	u8			slave_id;
-	u8			reserved3[2];
-	u8			owner;
-} __packed;
-
 struct mlx4_eq {
 	struct mlx4_dev	       *dev;
 	void __iomem	       *doorbell;
@@ -887,7 +827,8 @@ void mlx4_catas_init(void);
 int mlx4_restart_one(struct pci_dev *pdev);
 int mlx4_register_device(struct mlx4_dev *dev);
 void mlx4_unregister_device(struct mlx4_dev *dev);
-void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int port);
+void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type,
+			 unsigned long param);
 
 struct mlx4_dev_cap;
 struct mlx4_init_hca_param;
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 8eadf0f14cc5..560b2201519f 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -96,7 +96,8 @@ enum {
 	MLX4_DEV_CAP_FLAG_VEP_UC_STEER	= 1LL << 41,
 	MLX4_DEV_CAP_FLAG_VEP_MC_STEER	= 1LL << 42,
 	MLX4_DEV_CAP_FLAG_COUNTERS	= 1LL << 48,
-	MLX4_DEV_CAP_FLAG_SENSE_SUPPORT	= 1LL << 55
+	MLX4_DEV_CAP_FLAG_SENSE_SUPPORT	= 1LL << 55,
+	MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV = 1LL << 59,
 };
 
 enum {
@@ -138,6 +139,7 @@ enum mlx4_event {
 	MLX4_EVENT_TYPE_COMM_CHANNEL	   = 0x18,
 	MLX4_EVENT_TYPE_FATAL_WARNING	   = 0x1b,
 	MLX4_EVENT_TYPE_FLR_EVENT	   = 0x1c,
+	MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT = 0x1d,
 	MLX4_EVENT_TYPE_NONE		   = 0xff,
 };
 
@@ -235,6 +237,24 @@ enum {
 	MLX4_MAX_FAST_REG_PAGES = 511,
 };
 
+enum {
+	MLX4_DEV_PMC_SUBTYPE_GUID_INFO	= 0x14,
+	MLX4_DEV_PMC_SUBTYPE_PORT_INFO	= 0x15,
+	MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE	= 0x16,
+};
+
+/* Port mgmt change event handling */
+enum {
+	MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK	= 1 << 0,
+	MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK		= 1 << 1,
+	MLX4_EQ_PORT_INFO_LID_CHANGE_MASK		= 1 << 2,
+	MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK		= 1 << 3,
+	MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK	= 1 << 4,
+};
+
+#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
+			     MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK)
+
 static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
 {
 	return (major << 32) | (minor << 16) | subminor;
@@ -511,6 +531,81 @@ struct mlx4_dev {
 	int			num_vfs;
 };
 
+struct mlx4_eqe {
+	u8			reserved1;
+	u8			type;
+	u8			reserved2;
+	u8			subtype;
+	union {
+		u32		raw[6];
+		struct {
+			__be32	cqn;
+		} __packed comp;
+		struct {
+			u16	reserved1;
+			__be16	token;
+			u32	reserved2;
+			u8	reserved3[3];
+			u8	status;
+			__be64	out_param;
+		} __packed cmd;
+		struct {
+			__be32	qpn;
+		} __packed qp;
+		struct {
+			__be32	srqn;
+		} __packed srq;
+		struct {
+			__be32	cqn;
+			u32	reserved1;
+			u8	reserved2[3];
+			u8	syndrome;
+		} __packed cq_err;
+		struct {
+			u32	reserved1[2];
+			__be32	port;
+		} __packed port_change;
+		struct {
+			#define COMM_CHANNEL_BIT_ARRAY_SIZE	4
+			u32 reserved;
+			u32 bit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE];
+		} __packed comm_channel_arm;
+		struct {
+			u8	port;
+			u8	reserved[3];
+			__be64	mac;
+		} __packed mac_update;
+		struct {
+			__be32	slave_id;
+		} __packed flr_event;
+		struct {
+			__be16	current_temperature;
+			__be16	warning_threshold;
+		} __packed warming;
+		struct {
+			u8 reserved[3];
+			u8 port;
+			union {
+				struct {
+					__be16 mstr_sm_lid;
+					__be16 port_lid;
+					__be32 changed_attr;
+					u8 reserved[3];
+					u8 mstr_sm_sl;
+					__be64 gid_prefix;
+				} __packed port_info;
+				struct {
+					__be32 block_ptr;
+					__be32 tbl_entries_mask;
+				} __packed tbl_change_info;
+			} params;
+		} __packed port_mgmt_change;
+	} event;
+	u8			slave_id;
+	u8			reserved3[2];
+	u8			owner;
+} __packed;
+
 struct mlx4_init_port_param {
 	int			set_guid0;
 	int			set_node_guid;
@@ -536,6 +631,8 @@ struct mlx4_init_port_param {
 
 #define MLX4_INVALID_SLAVE_ID	0xFF
 
+void handle_port_mgmt_change_event(struct work_struct *work);
+
 static inline int mlx4_is_master(struct mlx4_dev *dev)
 {
 	return dev->flags & MLX4_FLAG_MASTER;
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index 5f1298b1b5ef..0f509229fb3d 100644
--- a/include/linux/mlx4/driver.h
+++ b/include/linux/mlx4/driver.h
@@ -42,13 +42,14 @@ enum mlx4_dev_event {
 	MLX4_DEV_EVENT_PORT_UP,
 	MLX4_DEV_EVENT_PORT_DOWN,
 	MLX4_DEV_EVENT_PORT_REINIT,
+	MLX4_DEV_EVENT_PORT_MGMT_CHANGE,
 };
 
 struct mlx4_interface {
 	void *			(*add)	 (struct mlx4_dev *dev);
 	void			(*remove)(struct mlx4_dev *dev, void *context);
 	void			(*event) (struct mlx4_dev *dev, void *context,
-					  enum mlx4_dev_event event, int port);
+					  enum mlx4_dev_event event, unsigned long param);
 	void *			(*get_dev)(struct mlx4_dev *dev, void *context, u8 port);
 	struct list_head	list;
 	enum mlx4_protocol	protocol;
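
Usage note (not part of the patch): after this change a mlx4_interface consumer's event callback receives an unsigned long param that is either a port number or, for MLX4_DEV_EVENT_PORT_MGMT_CHANGE, a pointer to the mlx4_eqe. A minimal sketch of such a callback follows the pattern of mlx4_ib_event() above; the handler name my_mlx4_event and the prints are illustrative only.

	/* Hypothetical consumer of the reworked mlx4_interface->event callback.
	 * MLX4_DEV_EVENT_PORT_MGMT_CHANGE passes the EQE pointer in 'param';
	 * all other events still pass the port number. */
	static void my_mlx4_event(struct mlx4_dev *dev, void *ctx,
				  enum mlx4_dev_event event, unsigned long param)
	{
		struct mlx4_eqe *eqe;
		int port;

		if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE) {
			eqe = (struct mlx4_eqe *) param;
			pr_info("port mgmt change on port %d, subtype 0x%x\n",
				eqe->event.port_mgmt_change.port, eqe->subtype);
			return;
		}

		port = (int) param;
		pr_info("event %d on port %d\n", event, port);
	}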