author		Matan Barak <matanb@mellanox.com>	2014-12-11 03:57:56 -0500
committer	David S. Miller <davem@davemloft.net>	2014-12-11 14:47:35 -0500
commit		7a89399ffad7b7c47b43afda010309b3b88538c0 (patch)
tree		789d8dd281b3589c555c73b5ec455cab646dad32
parent		ab256e5ad02b36951f01bf6b5cfda25f14820847 (diff)
net/mlx4: Add mlx4_bitmap zone allocator
The zone allocator is a mechanism which manages a set of mlx4_bitmaps. When
allocating a resource, the user indicates the desired zone from which the
resource should be allocated. If possible, the resource is allocated from that
zone. Otherwise, it is allocated from a lower-priority, equal-priority, or
higher-priority zone, according to the desired zone's properties, in that
respective allocation order.

Signed-off-by: Matan Barak <matanb@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/alloc.c	| 382
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/mlx4.h	|  69
2 files changed, 451 insertions(+), 0 deletions(-)
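A minimal usage sketch of the API this patch adds, shown here to make the intended flow concrete; it is not part of the commit. The two bitmaps, their priorities, offsets and the example_zone_usage() wrapper are hypothetical and error handling is trimmed, but the calls and flags are the ones declared in mlx4.h below.

/* Illustrative only -- not part of the patch. Assumes two mlx4_bitmaps that
 * were already set up with mlx4_bitmap_init(); sizes, priorities and offsets
 * are made up for the example.
 */
static int example_zone_usage(struct mlx4_bitmap *bitmap_lo,
			      struct mlx4_bitmap *bitmap_hi)
{
	struct mlx4_zone_allocator *zones;
	u32 uid_lo, uid_hi, alloc_uid;
	u32 obj;

	/* One allocator managing both bitmaps; object ranges must not overlap. */
	zones = mlx4_zone_allocator_create(MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP);
	if (!zones)
		return -ENOMEM;

	/* Attach the bitmaps as zones; the offsets place the two ranges
	 * back to back in one global object space.
	 */
	mlx4_zone_add_one(zones, bitmap_lo,
			  MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO |
			  MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO,
			  0 /* priority */, 0 /* offset */, &uid_lo);
	mlx4_zone_add_one(zones, bitmap_hi, MLX4_ZONE_USE_RR,
			  1 /* priority */, 1024 /* offset */, &uid_hi);

	/* Ask for 8 aligned objects from the low zone; the flags above let
	 * the allocator fall back to other zones, reporting the zone that
	 * was actually used through alloc_uid.
	 */
	obj = mlx4_zone_alloc_entries(zones, uid_lo, 8, 8, 0, &alloc_uid);
	if (obj != (u32)-1)
		mlx4_zone_free_entries(zones, alloc_uid, obj, 8);

	mlx4_zone_allocator_destroy(zones);
	return 0;
}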
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index 91a8acc191bb..963dd7e6d547 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -149,6 +149,11 @@ u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap)
 	return bitmap->avail;
 }
 
+static u32 mlx4_bitmap_masked_value(struct mlx4_bitmap *bitmap, u32 obj)
+{
+	return obj & (bitmap->max + bitmap->reserved_top - 1);
+}
+
 void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt,
 			    int use_rr)
 {
@@ -178,6 +183,7 @@ int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
 	bitmap->mask = mask;
 	bitmap->reserved_top = reserved_top;
 	bitmap->avail = num - reserved_top - reserved_bot;
+	bitmap->effective_len = bitmap->avail;
 	spin_lock_init(&bitmap->lock);
 	bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
 				sizeof (long), GFP_KERNEL);
@@ -194,6 +200,382 @@ void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap)
 	kfree(bitmap->table);
 }
 
+struct mlx4_zone_allocator {
+	struct list_head entries;
+	struct list_head prios;
+	u32 last_uid;
+	u32 mask;
+	/* protect the zone_allocator from concurrent accesses */
+	spinlock_t lock;
+	enum mlx4_zone_alloc_flags flags;
+};
+
+struct mlx4_zone_entry {
+	struct list_head list;
+	struct list_head prio_list;
+	u32 uid;
+	struct mlx4_zone_allocator *allocator;
+	struct mlx4_bitmap *bitmap;
+	int use_rr;
+	int priority;
+	int offset;
+	enum mlx4_zone_flags flags;
+};
+
+struct mlx4_zone_allocator *mlx4_zone_allocator_create(enum mlx4_zone_alloc_flags flags)
+{
+	struct mlx4_zone_allocator *zones = kmalloc(sizeof(*zones), GFP_KERNEL);
+
+	if (NULL == zones)
+		return NULL;
+
+	INIT_LIST_HEAD(&zones->entries);
+	INIT_LIST_HEAD(&zones->prios);
+	spin_lock_init(&zones->lock);
+	zones->last_uid = 0;
+	zones->mask = 0;
+	zones->flags = flags;
+
+	return zones;
+}
+
+int mlx4_zone_add_one(struct mlx4_zone_allocator *zone_alloc,
+		      struct mlx4_bitmap *bitmap,
+		      u32 flags,
+		      int priority,
+		      int offset,
+		      u32 *puid)
+{
+	u32 mask = mlx4_bitmap_masked_value(bitmap, (u32)-1);
+	struct mlx4_zone_entry *it;
+	struct mlx4_zone_entry *zone = kmalloc(sizeof(*zone), GFP_KERNEL);
+
+	if (NULL == zone)
+		return -ENOMEM;
+
+	zone->flags = flags;
+	zone->bitmap = bitmap;
+	zone->use_rr = (flags & MLX4_ZONE_USE_RR) ? MLX4_USE_RR : 0;
+	zone->priority = priority;
+	zone->offset = offset;
+
+	spin_lock(&zone_alloc->lock);
+
+	zone->uid = zone_alloc->last_uid++;
+	zone->allocator = zone_alloc;
+
+	if (zone_alloc->mask < mask)
+		zone_alloc->mask = mask;
+
+	list_for_each_entry(it, &zone_alloc->prios, prio_list)
+		if (it->priority >= priority)
+			break;
+
+	if (&it->prio_list == &zone_alloc->prios || it->priority > priority)
+		list_add_tail(&zone->prio_list, &it->prio_list);
+	list_add_tail(&zone->list, &it->list);
+
+	spin_unlock(&zone_alloc->lock);
+
+	*puid = zone->uid;
+
+	return 0;
+}
+
+/* Should be called under a lock */
+static int __mlx4_zone_remove_one_entry(struct mlx4_zone_entry *entry)
+{
+	struct mlx4_zone_allocator *zone_alloc = entry->allocator;
+
+	if (!list_empty(&entry->prio_list)) {
+		/* Check if we need to add an alternative node to the prio list */
+		if (!list_is_last(&entry->list, &zone_alloc->entries)) {
+			struct mlx4_zone_entry *next = list_first_entry(&entry->list,
+									typeof(*next),
+									list);
+
+			if (next->priority == entry->priority)
+				list_add_tail(&next->prio_list, &entry->prio_list);
+		}
+
+		list_del(&entry->prio_list);
+	}
+
+	list_del(&entry->list);
+
+	if (zone_alloc->flags & MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP) {
+		u32 mask = 0;
+		struct mlx4_zone_entry *it;
+
+		list_for_each_entry(it, &zone_alloc->prios, prio_list) {
+			u32 cur_mask = mlx4_bitmap_masked_value(it->bitmap, (u32)-1);
+
+			if (mask < cur_mask)
+				mask = cur_mask;
+		}
+		zone_alloc->mask = mask;
+	}
+
+	return 0;
+}
+
+void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc)
+{
+	struct mlx4_zone_entry *zone, *tmp;
+
+	spin_lock(&zone_alloc->lock);
+
+	list_for_each_entry_safe(zone, tmp, &zone_alloc->entries, list) {
+		list_del(&zone->list);
+		list_del(&zone->prio_list);
+		kfree(zone);
+	}
+
+	spin_unlock(&zone_alloc->lock);
+	kfree(zone_alloc);
+}
+
+/* Should be called under a lock */
+static u32 __mlx4_alloc_from_zone(struct mlx4_zone_entry *zone, int count,
+				  int align, u32 skip_mask, u32 *puid)
+{
+	u32 uid;
+	u32 res;
+	struct mlx4_zone_allocator *zone_alloc = zone->allocator;
+	struct mlx4_zone_entry *curr_node;
+
+	res = mlx4_bitmap_alloc_range(zone->bitmap, count,
+				      align, skip_mask);
+
+	if (res != (u32)-1) {
+		res += zone->offset;
+		uid = zone->uid;
+		goto out;
+	}
+
+	list_for_each_entry(curr_node, &zone_alloc->prios, prio_list) {
+		if (unlikely(curr_node->priority == zone->priority))
+			break;
+	}
+
+	if (zone->flags & MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO) {
+		struct mlx4_zone_entry *it = curr_node;
+
+		list_for_each_entry_continue_reverse(it, &zone_alloc->entries, list) {
+			res = mlx4_bitmap_alloc_range(it->bitmap, count,
+						      align, skip_mask);
+			if (res != (u32)-1) {
+				res += it->offset;
+				uid = it->uid;
+				goto out;
+			}
+		}
+	}
+
+	if (zone->flags & MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO) {
+		struct mlx4_zone_entry *it = curr_node;
+
+		list_for_each_entry_from(it, &zone_alloc->entries, list) {
+			if (unlikely(it == zone))
+				continue;
+
+			if (unlikely(it->priority != curr_node->priority))
+				break;
+
+			res = mlx4_bitmap_alloc_range(it->bitmap, count,
+						      align, skip_mask);
+			if (res != (u32)-1) {
+				res += it->offset;
+				uid = it->uid;
+				goto out;
+			}
+		}
+	}
+
+	if (zone->flags & MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO) {
+		if (list_is_last(&curr_node->prio_list, &zone_alloc->prios))
+			goto out;
+
+		curr_node = list_first_entry(&curr_node->prio_list,
+					     typeof(*curr_node),
+					     prio_list);
+
+		list_for_each_entry_from(curr_node, &zone_alloc->entries, list) {
+			res = mlx4_bitmap_alloc_range(curr_node->bitmap, count,
+						      align, skip_mask);
+			if (res != (u32)-1) {
+				res += curr_node->offset;
+				uid = curr_node->uid;
+				goto out;
+			}
+		}
+	}
+
+out:
+	if (NULL != puid && res != (u32)-1)
+		*puid = uid;
+	return res;
+}
+
+/* Should be called under a lock */
+static void __mlx4_free_from_zone(struct mlx4_zone_entry *zone, u32 obj,
+				  u32 count)
+{
+	mlx4_bitmap_free_range(zone->bitmap, obj - zone->offset, count, zone->use_rr);
+}
+
+/* Should be called under a lock */
+static struct mlx4_zone_entry *__mlx4_find_zone_by_uid(
+		struct mlx4_zone_allocator *zones, u32 uid)
+{
+	struct mlx4_zone_entry *zone;
+
+	list_for_each_entry(zone, &zones->entries, list) {
+		if (zone->uid == uid)
+			return zone;
+	}
+
+	return NULL;
+}
+
+struct mlx4_bitmap *mlx4_zone_get_bitmap(struct mlx4_zone_allocator *zones, u32 uid)
+{
+	struct mlx4_zone_entry *zone;
+	struct mlx4_bitmap *bitmap;
+
+	spin_lock(&zones->lock);
+
+	zone = __mlx4_find_zone_by_uid(zones, uid);
+
+	bitmap = zone == NULL ? NULL : zone->bitmap;
+
+	spin_unlock(&zones->lock);
+
+	return bitmap;
+}
+
+int mlx4_zone_remove_one(struct mlx4_zone_allocator *zones, u32 uid)
+{
+	struct mlx4_zone_entry *zone;
+	int res;
+
+	spin_lock(&zones->lock);
+
+	zone = __mlx4_find_zone_by_uid(zones, uid);
+
+	if (NULL == zone) {
+		res = -1;
+		goto out;
+	}
+
+	res = __mlx4_zone_remove_one_entry(zone);
+
+out:
+	spin_unlock(&zones->lock);
+	kfree(zone);
+
+	return res;
+}
+
+/* Should be called under a lock */
+static struct mlx4_zone_entry *__mlx4_find_zone_by_uid_unique(
+		struct mlx4_zone_allocator *zones, u32 obj)
+{
+	struct mlx4_zone_entry *zone, *zone_candidate = NULL;
+	u32 dist = (u32)-1;
+
+	/* Search for the smallest zone that this obj could be
+	 * allocated from. This is done in order to handle
+	 * situations when small bitmaps are allocated from bigger
+	 * bitmaps (and the allocated space is marked as reserved in
+	 * the bigger bitmap.
+	 */
+	list_for_each_entry(zone, &zones->entries, list) {
+		if (obj >= zone->offset) {
+			u32 mobj = (obj - zone->offset) & zones->mask;
+
+			if (mobj < zone->bitmap->max) {
+				u32 curr_dist = zone->bitmap->effective_len;
+
+				if (curr_dist < dist) {
+					dist = curr_dist;
+					zone_candidate = zone;
+				}
+			}
+		}
+	}
+
+	return zone_candidate;
+}
+
+u32 mlx4_zone_alloc_entries(struct mlx4_zone_allocator *zones, u32 uid, int count,
+			    int align, u32 skip_mask, u32 *puid)
+{
+	struct mlx4_zone_entry *zone;
+	int res = -1;
+
+	spin_lock(&zones->lock);
+
+	zone = __mlx4_find_zone_by_uid(zones, uid);
+
+	if (NULL == zone)
+		goto out;
+
+	res = __mlx4_alloc_from_zone(zone, count, align, skip_mask, puid);
+
+out:
+	spin_unlock(&zones->lock);
+
+	return res;
+}
+
+u32 mlx4_zone_free_entries(struct mlx4_zone_allocator *zones, u32 uid, u32 obj, u32 count)
+{
+	struct mlx4_zone_entry *zone;
+	int res = 0;
+
+	spin_lock(&zones->lock);
+
+	zone = __mlx4_find_zone_by_uid(zones, uid);
+
+	if (NULL == zone) {
+		res = -1;
+		goto out;
+	}
+
+	__mlx4_free_from_zone(zone, obj, count);
+
+out:
+	spin_unlock(&zones->lock);
+
+	return res;
+}
+
+u32 mlx4_zone_free_entries_unique(struct mlx4_zone_allocator *zones, u32 obj, u32 count)
+{
+	struct mlx4_zone_entry *zone;
+	int res;
+
+	if (!(zones->flags & MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP))
+		return -EFAULT;
+
+	spin_lock(&zones->lock);
+
+	zone = __mlx4_find_zone_by_uid_unique(zones, obj);
+
+	if (NULL == zone) {
+		res = -1;
+		goto out;
+	}
+
+	__mlx4_free_from_zone(zone, obj, count);
+	res = 0;
+
+out:
+	spin_unlock(&zones->lock);
+
+	return res;
+}
 /*
  * Handling for queue buffers -- we allocate a bunch of memory and
  * register it in a memory region at HCA virtual address 0. If the
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 6834da6c35ed..bc1505efa436 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -245,6 +245,7 @@ struct mlx4_bitmap {
 	u32 reserved_top;
 	u32 mask;
 	u32 avail;
+	u32 effective_len;
 	spinlock_t lock;
 	unsigned long *table;
 };
@@ -1345,4 +1346,72 @@ int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port);
 int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave);
 int mlx4_config_mad_demux(struct mlx4_dev *dev);
 
+enum mlx4_zone_flags {
+	MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO	= 1UL << 0,
+	MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO	= 1UL << 1,
+	MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO	= 1UL << 2,
+	MLX4_ZONE_USE_RR			= 1UL << 3,
+};
+
+enum mlx4_zone_alloc_flags {
+	/* No two objects could overlap between zones. UID
+	 * could be left unused. If this flag is given and
+	 * two overlapped zones are used, an object will be free'd
+	 * from the smallest possible matching zone.
+	 */
+	MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP	= 1UL << 0,
+};
+
+struct mlx4_zone_allocator;
+
+/* Create a new zone allocator */
+struct mlx4_zone_allocator *mlx4_zone_allocator_create(enum mlx4_zone_alloc_flags flags);
+
+/* Attach a mlx4_bitmap <bitmap> of priority <priority> to the zone allocator
+ * <zone_alloc>. Allocating an object from this zone adds an offset <offset>.
+ * Similarly, when searching for an object to free, this offset is taken into
+ * account. The use_rr mlx4_ib parameter for allocating objects from this <bitmap>
+ * is given through the MLX4_ZONE_USE_RR flag in <flags>.
+ * When an allocation fails, <zone_alloc> tries to allocate from other zones
+ * according to the policy set by <flags>. <puid> returns the unique identifier
+ * assigned to this zone.
+ */
+int mlx4_zone_add_one(struct mlx4_zone_allocator *zone_alloc,
+		      struct mlx4_bitmap *bitmap,
+		      u32 flags,
+		      int priority,
+		      int offset,
+		      u32 *puid);
+
+/* Remove bitmap indicated by <uid> from <zone_alloc> */
+int mlx4_zone_remove_one(struct mlx4_zone_allocator *zone_alloc, u32 uid);
+
+/* Delete the zone allocator <zone_alloc>. This function doesn't destroy
+ * the attached bitmaps.
+ */
+void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc);
+
+/* Allocate <count> objects with align <align> and skip_mask <skip_mask>
+ * from the mlx4_bitmap whose uid is <uid>. The uid of the bitmap we actually
+ * allocated from is returned in <puid>. If the allocation fails, a negative
+ * number is returned. Otherwise, the offset of the first object is returned.
+ */
+u32 mlx4_zone_alloc_entries(struct mlx4_zone_allocator *zones, u32 uid, int count,
+			    int align, u32 skip_mask, u32 *puid);
+
+/* Free <count> objects starting at <obj> of the zone identified by <uid>
+ * from zone allocator <zones>.
+ */
+u32 mlx4_zone_free_entries(struct mlx4_zone_allocator *zones,
+			   u32 uid, u32 obj, u32 count);
+
+/* If <zones> was allocated with MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP, instead of
+ * specifying the uid when freeing an object, the zone allocator can figure it
+ * out by itself. Other parameters are similar to mlx4_zone_free_entries.
+ */
+u32 mlx4_zone_free_entries_unique(struct mlx4_zone_allocator *zones, u32 obj, u32 count);
+
+/* Returns a pointer to mlx4_bitmap that was attached to <zones> with <uid> */
+struct mlx4_bitmap *mlx4_zone_get_bitmap(struct mlx4_zone_allocator *zones, u32 uid);
+
 #endif /* MLX4_H */
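The header comments above leave the relationship between the two free paths implicit. The small helper below (hypothetical, not part of the commit; the have_uid switch exists only for illustration) contrasts freeing by zone uid with freeing by object offset alone, which is valid only for allocators created with MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP.

/* Hypothetical helper contrasting the two free paths; a real caller would
 * use one or the other. "uid" is the value returned through <puid> at
 * allocation time, "obj" the global offset returned by
 * mlx4_zone_alloc_entries().
 */
static u32 example_free(struct mlx4_zone_allocator *zones, bool have_uid,
			u32 uid, u32 obj, u32 count)
{
	if (have_uid)
		/* Normal path: free from the zone identified by <uid>. */
		return mlx4_zone_free_entries(zones, uid, obj, count);

	/* NO_OVERLAP path: the allocator deduces the owning zone from <obj>
	 * alone, picking the smallest bitmap that could contain it; returns
	 * -EFAULT if the allocator was not created with
	 * MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP.
	 */
	return mlx4_zone_free_entries_unique(zones, obj, count);
}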