-rw-r--r--  drivers/infiniband/hw/mlx4/cm.c      | 36 +++++++++++++-----------------------
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h |  5 +++--
2 files changed, 16 insertions, 25 deletions
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index 8c79a480f2b7..ecd6cadd529a 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -168,20 +168,17 @@ static void id_map_ent_timeout(struct work_struct *work)
 {
         struct delayed_work *delay = to_delayed_work(work);
         struct id_map_entry *ent = container_of(delay, struct id_map_entry, timeout);
-        struct id_map_entry *db_ent, *found_ent;
+        struct id_map_entry *found_ent;
         struct mlx4_ib_dev *dev = ent->dev;
         struct mlx4_ib_sriov *sriov = &dev->sriov;
         struct rb_root *sl_id_map = &sriov->sl_id_map;
-        int pv_id = (int) ent->pv_cm_id;
 
         spin_lock(&sriov->id_map_lock);
-        db_ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_id);
-        if (!db_ent)
+        if (!xa_erase(&sriov->pv_id_table, ent->pv_cm_id))
                 goto out;
         found_ent = id_map_find_by_sl_id(&dev->ib_dev, ent->slave_id, ent->sl_cm_id);
         if (found_ent && found_ent == ent)
                 rb_erase(&found_ent->node, sl_id_map);
-        idr_remove(&sriov->pv_id_table, pv_id);
 
 out:
         list_del(&ent->list);
@@ -196,13 +193,12 @@ static void id_map_find_del(struct ib_device *ibdev, int pv_cm_id)
         struct id_map_entry *ent, *found_ent;
 
         spin_lock(&sriov->id_map_lock);
-        ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_cm_id);
+        ent = xa_erase(&sriov->pv_id_table, pv_cm_id);
         if (!ent)
                 goto out;
         found_ent = id_map_find_by_sl_id(ibdev, ent->slave_id, ent->sl_cm_id);
         if (found_ent && found_ent == ent)
                 rb_erase(&found_ent->node, sl_id_map);
-        idr_remove(&sriov->pv_id_table, pv_cm_id);
 out:
         spin_unlock(&sriov->id_map_lock);
 }
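Note (not part of the patch): in the two hunks above, the idr_find()-then-idr_remove() sequence collapses into a single xa_erase() call, because xa_erase() both removes the entry stored at an index and returns the pointer that was there, or NULL if the slot was empty. A minimal sketch of that lookup-and-remove pattern is below; the demo_* identifiers are hypothetical stand-ins, not the driver's types.

#include <linux/slab.h>
#include <linux/xarray.h>

/* Hypothetical stand-ins, not the driver's types. */
struct demo_entry {
        u32 id;
};

static DEFINE_XARRAY_ALLOC(demo_table);

/* Remove and free the entry stored at @id, if any. */
static void demo_find_del(u32 id)
{
        struct demo_entry *ent;

        /* xa_erase() returns the removed entry, so no separate lookup. */
        ent = xa_erase(&demo_table, id);
        if (!ent)
                return;
        kfree(ent);
}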
@@ -256,25 +252,19 @@ id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
         ent->dev = to_mdev(ibdev);
         INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);
 
-        idr_preload(GFP_KERNEL);
-        spin_lock(&to_mdev(ibdev)->sriov.id_map_lock);
-
-        ret = idr_alloc_cyclic(&sriov->pv_id_table, ent, 0, 0, GFP_NOWAIT);
+        ret = xa_alloc_cyclic(&sriov->pv_id_table, &ent->pv_cm_id, ent,
+                        xa_limit_32b, &sriov->pv_id_next, GFP_KERNEL);
         if (ret >= 0) {
-                ent->pv_cm_id = (u32)ret;
+                spin_lock(&sriov->id_map_lock);
                 sl_id_map_add(ibdev, ent);
                 list_add_tail(&ent->list, &sriov->cm_list);
-        }
-
-        spin_unlock(&sriov->id_map_lock);
-        idr_preload_end();
-
-        if (ret >= 0)
+                spin_unlock(&sriov->id_map_lock);
                 return ent;
+        }
 
         /*error flow*/
         kfree(ent);
-        mlx4_ib_warn(ibdev, "No more space in the idr (err:0x%x)\n", ret);
+        mlx4_ib_warn(ibdev, "Allocation failed (err:0x%x)\n", ret);
         return ERR_PTR(-ENOMEM);
 }
 
@@ -290,7 +280,7 @@ id_map_get(struct ib_device *ibdev, int *pv_cm_id, int slave_id, int sl_cm_id)
                 if (ent)
                         *pv_cm_id = (int) ent->pv_cm_id;
         } else
-                ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, *pv_cm_id);
+                ent = xa_load(&sriov->pv_id_table, *pv_cm_id);
         spin_unlock(&sriov->id_map_lock);
 
         return ent;
@@ -407,7 +397,7 @@ void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev)
         spin_lock_init(&dev->sriov.id_map_lock);
         INIT_LIST_HEAD(&dev->sriov.cm_list);
         dev->sriov.sl_id_map = RB_ROOT;
-        idr_init(&dev->sriov.pv_id_table);
+        xa_init_flags(&dev->sriov.pv_id_table, XA_FLAGS_ALLOC);
 }
 
 /* slave = -1 ==> all slaves */
@@ -444,7 +434,7 @@ void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
                                          struct id_map_entry, node);
 
                         rb_erase(&ent->node, sl_id_map);
-                        idr_remove(&sriov->pv_id_table, (int) ent->pv_cm_id);
+                        xa_erase(&sriov->pv_id_table, ent->pv_cm_id);
                 }
                 list_splice_init(&dev->sriov.cm_list, &lh);
         } else {
@@ -460,7 +450,7 @@ void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
         /* remove those nodes from databases */
         list_for_each_entry_safe(map, tmp_map, &lh, list) {
                 rb_erase(&map->node, sl_id_map);
-                idr_remove(&sriov->pv_id_table, (int) map->pv_cm_id);
+                xa_erase(&sriov->pv_id_table, map->pv_cm_id);
         }
 
         /* add remaining nodes from cm_list */
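Note (not part of the patch): the largest change is in id_map_alloc(). The old idr_preload()/idr_alloc_cyclic() pair had to run under the spinlock with GFP_NOWAIT; the new code makes a single xa_alloc_cyclic() call that may sleep with GFP_KERNEL and writes the new ID straight into ent->pv_cm_id, so the spinlock now covers only the rb-tree and list updates. Below is a minimal, self-contained sketch of that allocation pattern; the demo_* identifiers are hypothetical and only stand in for the driver's structures.

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/xarray.h>

/* Hypothetical stand-ins for the driver's id_map_entry and pv_id_table. */
struct demo_entry {
        u32 id;
};

static DEFINE_XARRAY_ALLOC(demo_table);
static u32 demo_next;   /* cyclic-allocation cursor, like pv_id_next */

static struct demo_entry *demo_alloc(void)
{
        struct demo_entry *ent;
        int ret;

        ent = kzalloc(sizeof(*ent), GFP_KERNEL);
        if (!ent)
                return ERR_PTR(-ENOMEM);

        /*
         * Allocate the next free 32-bit ID cyclically, store it in
         * ent->id and insert ent at that index in one call.  The XArray
         * takes its own internal lock, so no idr_preload() section or
         * external spinlock is needed around the allocation itself.
         */
        ret = xa_alloc_cyclic(&demo_table, &ent->id, ent, xa_limit_32b,
                              &demo_next, GFP_KERNEL);
        if (ret < 0) {
                kfree(ent);
                return ERR_PTR(ret);
        }
        return ent;
}

xa_alloc_cyclic() returns 0 on success, 1 if the ID counter wrapped, and a negative errno on failure, which is why the patch keeps its ret >= 0 success test. Plain lookups that used idr_find(), as in id_map_get() above, become xa_load().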
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 60dc1347c5ab..24633fc29a29 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -492,10 +492,11 @@ struct mlx4_ib_sriov {
         struct mlx4_sriov_alias_guid alias_guid;
 
         /* CM paravirtualization fields */
-        struct list_head cm_list;
+        struct xarray pv_id_table;
+        u32 pv_id_next;
         spinlock_t id_map_lock;
         struct rb_root sl_id_map;
-        struct idr pv_id_table;
+        struct list_head cm_list;
 };
 
 struct gid_cache_context {
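Note (not part of the patch): because pv_id_table is embedded in a dynamically allocated struct mlx4_ib_sriov rather than defined statically, it is initialized at runtime with xa_init_flags() and XA_FLAGS_ALLOC, which enables xa_alloc()/xa_alloc_cyclic() on the array, and the new u32 pv_id_next member is the cursor that xa_alloc_cyclic() advances. A small illustrative sketch of the same layout, using a hypothetical demo_ctx type rather than the driver's structures:

#include <linux/xarray.h>

/* Hypothetical context, mirroring the layout of struct mlx4_ib_sriov. */
struct demo_ctx {
        struct xarray id_table; /* replaces a struct idr member */
        u32 id_next;            /* cursor used by xa_alloc_cyclic() */
};

static void demo_ctx_init(struct demo_ctx *ctx)
{
        /*
         * XA_FLAGS_ALLOC enables xa_alloc()/xa_alloc_cyclic() on this
         * array; this mirrors the xa_init_flags() call added to
         * mlx4_ib_cm_paravirt_init() above.
         */
        xa_init_flags(&ctx->id_table, XA_FLAGS_ALLOC);
        ctx->id_next = 0;
}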