Diffstat (limited to 'drivers/infiniband/hw')
 -rw-r--r--  drivers/infiniband/hw/mlx4/mad.c  162
 1 file changed, 155 insertions(+), 7 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index b8cb25ebce5..591c2891159 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -54,6 +54,15 @@ enum {
 #define MLX4_TUN_IS_RECV(a)  (((a) >>  MLX4_TUN_SEND_WRID_SHIFT) & 0x1)
 #define MLX4_TUN_WRID_QPN(a) (((a) >> MLX4_TUN_QPN_SHIFT) & 0x3)
 
+ /* Port mgmt change event handling */
+
+#define GET_BLK_PTR_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.block_ptr)
+#define GET_MASK_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.tbl_entries_mask)
+#define NUM_IDX_IN_PKEY_TBL_BLK 32
+#define GUID_TBL_ENTRY_SIZE 8	   /* size in bytes */
+#define GUID_TBL_BLK_NUM_ENTRIES 8
+#define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)
+
 struct mlx4_mad_rcv_buf {
 	struct ib_grh grh;
 	u8 payload[256];
@@ -76,6 +85,9 @@ struct mlx4_rcv_tunnel_mad {
 } __packed;
 
 static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num);
+static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num);
+static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
+				int block, u32 change_bitmap);
 
 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
 {
@@ -220,8 +232,7 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
 			handle_client_rereg_event(dev, port_num);
 
 		if (prev_lid != lid)
-			mlx4_ib_dispatch_event(dev, port_num,
-					       IB_EVENT_LID_CHANGE);
+			handle_lid_change_event(dev, port_num);
 		break;
 
 	case IB_SMP_ATTR_PKEY_TABLE:
@@ -231,6 +242,9 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
 			break;
 		}
 
+		/* at this point, we are running in the master.
+		 * Slaves do not receive SMPs.
+		 */
 		bn  = be32_to_cpu(((struct ib_smp *)mad)->attr_mod) & 0xFFFF;
 		base = (__be16 *) &(((struct ib_smp *)mad)->data[0]);
 		pkey_change_bitmap = 0;
@@ -248,10 +262,13 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
248 "block=0x%x, change_bitmap=0x%x\n", 262 "block=0x%x, change_bitmap=0x%x\n",
249 port_num, bn, pkey_change_bitmap); 263 port_num, bn, pkey_change_bitmap);
250 264
251 if (pkey_change_bitmap) 265 if (pkey_change_bitmap) {
252 mlx4_ib_dispatch_event(dev, port_num, 266 mlx4_ib_dispatch_event(dev, port_num,
253 IB_EVENT_PKEY_CHANGE); 267 IB_EVENT_PKEY_CHANGE);
254 268 if (!dev->sriov.is_going_down)
269 __propagate_pkey_ev(dev, port_num, bn,
270 pkey_change_bitmap);
271 }
255 break; 272 break;
256 273
257 case IB_SMP_ATTR_GUID_INFO: 274 case IB_SMP_ATTR_GUID_INFO:
@@ -259,12 +276,56 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
 		if (!mlx4_is_master(dev->dev))
 			mlx4_ib_dispatch_event(dev, port_num,
 					       IB_EVENT_GID_CHANGE);
+		/*if master, notify relevant slaves*/
+		if (mlx4_is_master(dev->dev) &&
+		    !dev->sriov.is_going_down) {
+			bn = be32_to_cpu(((struct ib_smp *)mad)->attr_mod);
+			mlx4_ib_update_cache_on_guid_change(dev, bn, port_num,
+							    (u8 *)(&((struct ib_smp *)mad)->data));
+			mlx4_ib_notify_slaves_on_guid_change(dev, bn, port_num,
+							     (u8 *)(&((struct ib_smp *)mad)->data));
+		}
 		break;
+
 	default:
 		break;
 	}
 }
 
+static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
+				int block, u32 change_bitmap)
+{
+	int i, ix, slave, err;
+	int have_event = 0;
+
+	for (slave = 0; slave < dev->dev->caps.sqp_demux; slave++) {
+		if (slave == mlx4_master_func_num(dev->dev))
+			continue;
+		if (!mlx4_is_slave_active(dev->dev, slave))
+			continue;
+
+		have_event = 0;
+		for (i = 0; i < 32; i++) {
+			if (!(change_bitmap & (1 << i)))
+				continue;
+			for (ix = 0;
+			     ix < dev->dev->caps.pkey_table_len[port_num]; ix++) {
+				if (dev->pkeys.virt2phys_pkey[slave][port_num - 1]
+				    [ix] == i + 32 * block) {
+					err = mlx4_gen_pkey_eqe(dev->dev, slave, port_num);
+					pr_debug("propagate_pkey_ev: slave %d,"
+						 " port %d, ix %d (%d)\n",
+						 slave, port_num, ix, err);
+					have_event = 1;
+					break;
+				}
+			}
+			if (have_event)
+				break;
+		}
+	}
+}
+
 static void node_desc_override(struct ib_device *dev,
 			       struct ib_mad *mad)
 {
@@ -789,18 +850,90 @@ void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
 	}
 }
 
+static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num)
+{
+	mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_LID_CHANGE);
+
+	if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
+		mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
+					    MLX4_EQ_PORT_INFO_LID_CHANGE_MASK);
+}
+
 static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num)
 {
 	/* re-configure the alias-guid and mcg's */
 	if (mlx4_is_master(dev->dev)) {
 		mlx4_ib_invalidate_all_guid_record(dev, port_num);
 
-		if (!dev->sriov.is_going_down)
+		if (!dev->sriov.is_going_down) {
 			mlx4_ib_mcg_port_cleanup(&dev->sriov.demux[port_num - 1], 0);
+			mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
+						    MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK);
+		}
 	}
 	mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_CLIENT_REREGISTER);
 }
 
+static void propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
+			      struct mlx4_eqe *eqe)
+{
+	__propagate_pkey_ev(dev, port_num, GET_BLK_PTR_FROM_EQE(eqe),
+			    GET_MASK_FROM_EQE(eqe));
+}
+
+static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u8 port_num,
+				      u32 guid_tbl_blk_num, u32 change_bitmap)
+{
+	struct ib_smp *in_mad  = NULL;
+	struct ib_smp *out_mad = NULL;
+	u16 i;
+
+	if (!mlx4_is_mfunc(dev->dev) || !mlx4_is_master(dev->dev))
+		return;
+
+	in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
+	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+	if (!in_mad || !out_mad) {
+		mlx4_ib_warn(&dev->ib_dev, "failed to allocate memory for guid info mads\n");
+		goto out;
+	}
+
+	guid_tbl_blk_num *= 4;
+
+	for (i = 0; i < 4; i++) {
+		if (change_bitmap && (!((change_bitmap >> (8 * i)) & 0xff)))
+			continue;
+		memset(in_mad, 0, sizeof *in_mad);
+		memset(out_mad, 0, sizeof *out_mad);
+
+		in_mad->base_version  = 1;
+		in_mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
+		in_mad->class_version = 1;
+		in_mad->method        = IB_MGMT_METHOD_GET;
+		in_mad->attr_id       = IB_SMP_ATTR_GUID_INFO;
+		in_mad->attr_mod      = cpu_to_be32(guid_tbl_blk_num + i);
+
+		if (mlx4_MAD_IFC(dev,
+				 MLX4_MAD_IFC_IGNORE_KEYS | MLX4_MAD_IFC_NET_VIEW,
+				 port_num, NULL, NULL, in_mad, out_mad)) {
+			mlx4_ib_warn(&dev->ib_dev, "Failed in get GUID INFO MAD_IFC\n");
+			goto out;
+		}
+
+		mlx4_ib_update_cache_on_guid_change(dev, guid_tbl_blk_num + i,
+						    port_num,
+						    (u8 *)(&((struct ib_smp *)out_mad)->data));
+		mlx4_ib_notify_slaves_on_guid_change(dev, guid_tbl_blk_num + i,
+						     port_num,
+						     (u8 *)(&((struct ib_smp *)out_mad)->data));
+	}
+
+out:
+	kfree(in_mad);
+	kfree(out_mad);
+	return;
+}
+
 void handle_port_mgmt_change_event(struct work_struct *work)
 {
 	struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
@@ -808,6 +941,8 @@ void handle_port_mgmt_change_event(struct work_struct *work)
 	struct mlx4_eqe *eqe = &(ew->ib_eqe);
 	u8 port = eqe->event.port_mgmt_change.port;
 	u32 changed_attr;
+	u32 tbl_block;
+	u32 change_bitmap;
 
 	switch (eqe->subtype) {
 	case MLX4_DEV_PMC_SUBTYPE_PORT_INFO:
@@ -823,11 +958,16 @@ void handle_port_mgmt_change_event(struct work_struct *work)
 
 		/* Check if it is a lid change event */
 		if (changed_attr & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK)
-			mlx4_ib_dispatch_event(dev, port, IB_EVENT_LID_CHANGE);
+			handle_lid_change_event(dev, port);
 
 		/* Generate GUID changed event */
-		if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK)
+		if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) {
 			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
+			/*if master, notify all slaves*/
+			if (mlx4_is_master(dev->dev))
+				mlx4_gen_slaves_port_mgt_ev(dev->dev, port,
+					MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK);
+		}
 
 		if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK)
 			handle_client_rereg_event(dev, port);
@@ -835,11 +975,19 @@ void handle_port_mgmt_change_event(struct work_struct *work)
 
 	case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE:
 		mlx4_ib_dispatch_event(dev, port, IB_EVENT_PKEY_CHANGE);
+		if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
+			propagate_pkey_ev(dev, port, eqe);
 		break;
 	case MLX4_DEV_PMC_SUBTYPE_GUID_INFO:
 		/* paravirtualized master's guid is guid 0 -- does not change */
 		if (!mlx4_is_master(dev->dev))
 			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
+		/*if master, notify relevant slaves*/
+		else if (!dev->sriov.is_going_down) {
+			tbl_block = GET_BLK_PTR_FROM_EQE(eqe);
+			change_bitmap = GET_MASK_FROM_EQE(eqe);
+			handle_slaves_guid_change(dev, port, tbl_block, change_bitmap);
+		}
 		break;
 	default:
 		pr_warn("Unsupported subtype 0x%x for "