Diffstat (limited to 'drivers/infiniband/hw/mlx4/main.c')
-rw-r--r--	drivers/infiniband/hw/mlx4/main.c	104
1 file changed, 94 insertions, 10 deletions
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index b948b6dd5d5..ee1c577238f 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -789,7 +789,7 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 		list_del(&ge->list);
 		kfree(ge);
 	} else
-		printk(KERN_WARNING "could not find mgid entry\n");
+		pr_warn("could not find mgid entry\n");
 
 	mutex_unlock(&mqp->mutex);
 
@@ -902,7 +902,7 @@ static void update_gids_task(struct work_struct *work)
 
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox)) {
-		printk(KERN_WARNING "update gid table failed %ld\n", PTR_ERR(mailbox));
+		pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox));
 		return;
 	}
 
@@ -913,7 +913,7 @@ static void update_gids_task(struct work_struct *work)
 		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
 		       MLX4_CMD_NATIVE);
 	if (err)
-		printk(KERN_WARNING "set port command failed\n");
+		pr_warn("set port command failed\n");
 	else {
 		memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
 		event.device = &gw->dev->ib_dev;
@@ -1076,18 +1076,98 @@ static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event
 	return NOTIFY_DONE;
 }
 
+static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
+{
+	char name[32];
+	int eq_per_port = 0;
+	int added_eqs = 0;
+	int total_eqs = 0;
+	int i, j, eq;
+
+	/* Init eq table */
+	ibdev->eq_table = NULL;
+	ibdev->eq_added = 0;
+
+	/* Legacy mode? */
+	if (dev->caps.comp_pool == 0)
+		return;
+
+	eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/
+					dev->caps.num_ports);
+
+	/* Init eq table */
+	added_eqs = 0;
+	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+		added_eqs += eq_per_port;
+
+	total_eqs = dev->caps.num_comp_vectors + added_eqs;
+
+	ibdev->eq_table = kzalloc(total_eqs * sizeof(int), GFP_KERNEL);
+	if (!ibdev->eq_table)
+		return;
+
+	ibdev->eq_added = added_eqs;
+
+	eq = 0;
+	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
+		for (j = 0; j < eq_per_port; j++) {
+			sprintf(name, "mlx4-ib-%d-%d@%s",
+				i, j, dev->pdev->bus->name);
+			/* Set IRQ for specific name (per ring) */
+			if (mlx4_assign_eq(dev, name, &ibdev->eq_table[eq])) {
+				/* Use legacy (same as mlx4_en driver) */
+				pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq);
+				ibdev->eq_table[eq] =
+					(eq % dev->caps.num_comp_vectors);
+			}
+			eq++;
+		}
+	}
+
+	/* Fill the rest of the vector with legacy EQs */
+	for (i = 0, eq = added_eqs; i < dev->caps.num_comp_vectors; i++)
+		ibdev->eq_table[eq++] = i;
+
+	/* Advertise the new number of EQs to clients */
+	ibdev->ib_dev.num_comp_vectors = total_eqs;
+}
+
+static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
+{
+	int i;
+	int total_eqs;
+
+	/* Reset the advertised EQ number */
+	ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
+
+	/* Free only the added eqs */
+	for (i = 0; i < ibdev->eq_added; i++) {
+		/* Don't free legacy eqs if used */
+		if (ibdev->eq_table[i] <= dev->caps.num_comp_vectors)
+			continue;
+		mlx4_release_eq(dev, ibdev->eq_table[i]);
+	}
+
+	total_eqs = dev->caps.num_comp_vectors + ibdev->eq_added;
+	memset(ibdev->eq_table, 0, total_eqs * sizeof(int));
+	kfree(ibdev->eq_table);
+
+	ibdev->eq_table = NULL;
+	ibdev->eq_added = 0;
+}
+
 static void *mlx4_ib_add(struct mlx4_dev *dev)
 {
 	struct mlx4_ib_dev *ibdev;
 	int num_ports = 0;
-	int i;
+	int i, j;
 	int err;
 	struct mlx4_ib_iboe *iboe;
 
-	printk_once(KERN_INFO "%s", mlx4_ib_version);
+	pr_info_once("%s", mlx4_ib_version);
 
 	if (mlx4_is_mfunc(dev)) {
-		printk(KERN_WARNING "IB not yet supported in SRIOV\n");
+		pr_warn("IB not yet supported in SRIOV\n");
 		return NULL;
 	}
 
@@ -1210,6 +1290,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
 	}
 
+	mlx4_ib_alloc_eqs(dev, ibdev);
+
 	spin_lock_init(&iboe->lock);
 
 	if (init_node_data(ibdev))
@@ -1241,9 +1323,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 		goto err_reg;
 	}
 
-	for (i = 0; i < ARRAY_SIZE(mlx4_class_attributes); ++i) {
+	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
 		if (device_create_file(&ibdev->ib_dev.dev,
-				       mlx4_class_attributes[i]))
+				       mlx4_class_attributes[j]))
 			goto err_notif;
 	}
 
@@ -1253,7 +1335,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 
 err_notif:
 	if (unregister_netdevice_notifier(&ibdev->iboe.nb))
-		printk(KERN_WARNING "failure unregistering notifier\n");
+		pr_warn("failure unregistering notifier\n");
 	flush_workqueue(wq);
 
 err_reg:
@@ -1288,7 +1370,7 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
 	ib_unregister_device(&ibdev->ib_dev);
 	if (ibdev->iboe.nb.notifier_call) {
 		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
-			printk(KERN_WARNING "failure unregistering notifier\n");
+			pr_warn("failure unregistering notifier\n");
 		ibdev->iboe.nb.notifier_call = NULL;
 	}
 	iounmap(ibdev->uar_map);
@@ -1298,6 +1380,8 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
 	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
 		mlx4_CLOSE_PORT(dev, p);
 
+	mlx4_ib_free_eqs(dev, ibdev);
+
 	mlx4_uar_free(dev, &ibdev->priv_uar);
 	mlx4_pd_free(dev, ibdev->priv_pdn);
 	ib_dealloc_device(&ibdev->ib_dev);
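
The new mlx4_ib_alloc_eqs() ends by raising ib_dev.num_comp_vectors, which is the value upper-layer protocols see. As a rough illustration (not part of this patch), a ULP could spread its completion queues across the advertised vectors by round-robining the comp_vector argument of ib_create_cq(); the helper name below and the 3.x-era ib_create_cq() prototype are assumptions.

#include <rdma/ib_verbs.h>

/*
 * Hypothetical consumer-side sketch, not from this patch: pick a
 * completion vector per ring so CQs are spread over all EQs the
 * device advertises (legacy plus the ones added above).
 */
static struct ib_cq *example_create_cq_on_vector(struct ib_device *device,
						 ib_comp_handler handler,
						 void *context, int cqe,
						 int ring_index)
{
	/* Round-robin over the advertised completion vectors */
	int vector = ring_index % device->num_comp_vectors;

	return ib_create_cq(device, handler, NULL, context, cqe, vector);
}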