Diffstat (limited to 'kernel/futex.c')
-rw-r--r--  kernel/futex.c  224
1 file changed, 108 insertions(+), 116 deletions(-)
diff --git a/kernel/futex.c b/kernel/futex.c
index cf0c8e21d1ab..9d260e838cff 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -297,7 +297,7 @@ static int futex_handle_fault(unsigned long address, int attempt)
 	struct vm_area_struct * vma;
 	struct mm_struct *mm = current->mm;
 
-	if (attempt >= 2 || !(vma = find_vma(mm, address)) ||
+	if (attempt > 2 || !(vma = find_vma(mm, address)) ||
 	    vma->vm_start > address || !(vma->vm_flags & VM_WRITE))
 		return -EFAULT;
 
@@ -397,7 +397,7 @@ static struct task_struct * futex_find_get_task(pid_t pid)
 		p = NULL;
 		goto out_unlock;
 	}
-	if (p->state == EXIT_ZOMBIE || p->exit_state == EXIT_ZOMBIE) {
+	if (p->exit_state != 0) {
 		p = NULL;
 		goto out_unlock;
 	}
@@ -415,15 +415,15 @@ out_unlock:
  */
 void exit_pi_state_list(struct task_struct *curr)
 {
-	struct futex_hash_bucket *hb;
 	struct list_head *next, *head = &curr->pi_state_list;
 	struct futex_pi_state *pi_state;
+	struct futex_hash_bucket *hb;
 	union futex_key key;
 
 	/*
 	 * We are a ZOMBIE and nobody can enqueue itself on
 	 * pi_state_list anymore, but we have to be careful
-	 * versus waiters unqueueing themselfs
+	 * versus waiters unqueueing themselves:
 	 */
 	spin_lock_irq(&curr->pi_lock);
 	while (!list_empty(head)) {
@@ -431,21 +431,24 @@ void exit_pi_state_list(struct task_struct *curr)
 		next = head->next;
 		pi_state = list_entry(next, struct futex_pi_state, list);
 		key = pi_state->key;
+		hb = hash_futex(&key);
 		spin_unlock_irq(&curr->pi_lock);
 
-		hb = hash_futex(&key);
 		spin_lock(&hb->lock);
 
 		spin_lock_irq(&curr->pi_lock);
+		/*
+		 * We dropped the pi-lock, so re-check whether this
+		 * task still owns the PI-state:
+		 */
 		if (head->next != next) {
 			spin_unlock(&hb->lock);
 			continue;
 		}
 
-		list_del_init(&pi_state->list);
-
 		WARN_ON(pi_state->owner != curr);
-
+		WARN_ON(list_empty(&pi_state->list));
+		list_del_init(&pi_state->list);
 		pi_state->owner = NULL;
 		spin_unlock_irq(&curr->pi_lock);
 
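A note on the reordering in this hunk: hb->lock nests outside pi_lock, so the
bucket lock cannot be taken while pi_lock is held. The fix therefore computes
the hash bucket early, drops pi_lock, takes both locks in the correct order,
and re-checks head->next to catch waiters that unqueued themselves in the
window. Below is a minimal userspace sketch of the same pattern using
pthreads; all names (drain, inner, outer) are hypothetical stand-ins, not
kernel API:

    #include <pthread.h>
    #include <stddef.h>

    struct node { struct node *next; };

    static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER; /* plays hb->lock */
    static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER; /* plays pi_lock */
    static struct node head;    /* head.next is the list, guarded by inner */

    /* Empty the list. 'outer' must always be taken before 'inner', but the
     * scan starts under 'inner' alone, hence the drop/retake/re-check dance. */
    static void drain(void)
    {
        pthread_mutex_lock(&inner);
        while (head.next != NULL) {
            struct node *first = head.next;

            /* taking outer now would invert the lock order: drop inner */
            pthread_mutex_unlock(&inner);
            pthread_mutex_lock(&outer);
            pthread_mutex_lock(&inner);

            /* inner was dropped: someone may have unqueued 'first' */
            if (head.next != first) {
                pthread_mutex_unlock(&outer);
                continue;               /* still holding inner, rescan */
            }
            head.next = first->next;    /* unlink under both locks */
            pthread_mutex_unlock(&inner);

            /* ... handle 'first' under outer only ... */
            pthread_mutex_unlock(&outer);
            pthread_mutex_lock(&inner);
        }
        pthread_mutex_unlock(&inner);
    }

On a mismatch the sketch keeps the inner lock and rescans, mirroring the
`continue` in the hunk above.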
@@ -470,7 +473,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
 	head = &hb->chain;
 
 	list_for_each_entry_safe(this, next, head, list) {
-		if (match_futex (&this->key, &me->key)) {
+		if (match_futex(&this->key, &me->key)) {
 			/*
 			 * Another waiter already exists - bump up
 			 * the refcount and return its pi_state:
@@ -482,6 +485,8 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
 			if (unlikely(!pi_state))
 				return -EINVAL;
 
+			WARN_ON(!atomic_read(&pi_state->refcount));
+
 			atomic_inc(&pi_state->refcount);
 			me->pi_state = pi_state;
 
@@ -490,10 +495,13 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
 	}
 
 	/*
-	 * We are the first waiter - try to look up the real owner and
-	 * attach the new pi_state to it:
+	 * We are the first waiter - try to look up the real owner and attach
+	 * the new pi_state to it, but bail out when the owner died bit is set
+	 * and TID = 0:
 	 */
 	pid = uval & FUTEX_TID_MASK;
+	if (!pid && (uval & FUTEX_OWNER_DIED))
+		return -ESRCH;
 	p = futex_find_get_task(pid);
 	if (!p)
 		return -ESRCH;
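The added bail-out covers a futex whose robust owner died and whose TID
field was already cleared: there is no task left to attach the pi_state to,
so -ESRCH is the only sane answer. For orientation, a small userspace
illustration of the futex word layout; the constants are quoted from
linux/futex.h as I recall them, so treat them as an assumption:

    #include <stdio.h>
    #include <stdint.h>

    #define FUTEX_WAITERS       0x80000000u /* waiters queued in the kernel */
    #define FUTEX_OWNER_DIED    0x40000000u /* robust owner died holding it */
    #define FUTEX_TID_MASK      0x3fffffffu /* owner's TID */

    int main(void)
    {
        uint32_t uval = FUTEX_OWNER_DIED;   /* owner died, TID already 0 */
        uint32_t pid = uval & FUTEX_TID_MASK;

        /* the check added to lookup_pi_state(): nothing left to attach to */
        if (!pid && (uval & FUTEX_OWNER_DIED))
            printf("would return -ESRCH\n");
        return 0;
    }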
@@ -510,6 +518,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
 	pi_state->key = me->key;
 
 	spin_lock_irq(&p->pi_lock);
+	WARN_ON(!list_empty(&pi_state->list));
 	list_add(&pi_state->list, &p->pi_state_list);
 	pi_state->owner = p;
 	spin_unlock_irq(&p->pi_lock);
@@ -573,20 +582,29 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
 	 * kept enabled while there is PI state around. We must also
 	 * preserve the owner died bit.)
 	 */
-	newval = (uval & FUTEX_OWNER_DIED) | FUTEX_WAITERS | new_owner->pid;
-
-	inc_preempt_count();
-	curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
-	dec_preempt_count();
-
-	if (curval == -EFAULT)
-		return -EFAULT;
-	if (curval != uval)
-		return -EINVAL;
+	if (!(uval & FUTEX_OWNER_DIED)) {
+		newval = FUTEX_WAITERS | new_owner->pid;
+
+		inc_preempt_count();
+		curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
+		dec_preempt_count();
+		if (curval == -EFAULT)
+			return -EFAULT;
+		if (curval != uval)
+			return -EINVAL;
+	}
 
-	list_del_init(&pi_state->owner->pi_state_list);
+	spin_lock_irq(&pi_state->owner->pi_lock);
+	WARN_ON(list_empty(&pi_state->list));
+	list_del_init(&pi_state->list);
+	spin_unlock_irq(&pi_state->owner->pi_lock);
 
+	spin_lock_irq(&new_owner->pi_lock);
+	WARN_ON(!list_empty(&pi_state->list));
 	list_add(&pi_state->list, &new_owner->pi_state_list);
 	pi_state->owner = new_owner;
+	spin_unlock_irq(&new_owner->pi_lock);
+
 	rt_mutex_unlock(&pi_state->pi_mutex);
 
 	return 0;
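The rewritten path above performs the TID handover via cmpxchg only when the
owner-died bit is clear, and treats an unexpected word value as -EINVAL
(userspace modified the futex behind the kernel's back). A compact userspace
sketch of such a compare-and-swap handover, using the GCC __sync builtin; the
helper name and layout are illustrative, not the kernel primitive:

    #include <stdint.h>

    #define FUTEX_WAITERS   0x80000000u /* assumed bit, see the note above */

    /* Try to publish new_tid as owner while keeping the waiters bit set;
     * returns 0 on success, -1 if the word changed concurrently. */
    static int handover(uint32_t *uaddr, uint32_t uval, uint32_t new_tid)
    {
        uint32_t newval = FUTEX_WAITERS | new_tid;
        uint32_t curval = __sync_val_compare_and_swap(uaddr, uval, newval);

        return curval == uval ? 0 : -1;
    }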
@@ -729,8 +747,10 @@ retry:
 		 */
 		if (attempt++) {
 			if (futex_handle_fault((unsigned long)uaddr2,
-						attempt))
+					       attempt)) {
+				ret = -EFAULT;
 				goto out;
+			}
 			goto retry;
 		}
 
@@ -930,6 +950,7 @@ static int unqueue_me(struct futex_q *q)
 	/* In the common case we don't take the spinlock, which is nice. */
  retry:
 	lock_ptr = q->lock_ptr;
+	barrier();
 	if (lock_ptr != 0) {
 		spin_lock(lock_ptr);
 		/*
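The barrier() added here is a compiler barrier. Without it the compiler may
re-read q->lock_ptr after the NULL test, and a concurrent waker setting
lock_ptr to NULL between the test and the spin_lock() would then be missed.
A userspace rendering of the idiom; the asm statement is the classic
compiler-barrier definition and the struct is a stand-in:

    #define barrier() __asm__ __volatile__("" : : : "memory")

    struct q_like { void *lock_ptr; }; /* stand-in for struct futex_q */

    static void *read_lock_ptr_once(struct q_like *q)
    {
        void *lock_ptr = q->lock_ptr;  /* one load ... */

        barrier();  /* ... which the compiler may not repeat below */
        return lock_ptr;
    }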
@@ -1099,9 +1120,10 @@ static int futex_wait(u32 __user *uaddr, u32 val, unsigned long time)
  * if there are waiters then it will block, it does PI, etc. (Due to
  * races the kernel might see a 0 value of the futex too.)
  */
-static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
-			    struct hrtimer_sleeper *to)
+static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
+			 long nsec, int trylock)
 {
+	struct hrtimer_sleeper timeout, *to = NULL;
 	struct task_struct *curr = current;
 	struct futex_hash_bucket *hb;
 	u32 uval, newval, curval;
@@ -1111,6 +1133,13 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
 	if (refill_pi_state_cache())
 		return -ENOMEM;
 
+	if (sec != MAX_SCHEDULE_TIMEOUT) {
+		to = &timeout;
+		hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_ABS);
+		hrtimer_init_sleeper(to, current);
+		to->timer.expires = ktime_set(sec, nsec);
+	}
+
 	q.pi_state = NULL;
  retry:
 	down_read(&curr->mm->mmap_sem);
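With the restart-block wrapper removed (see the deletion further down),
futex_lock_pi() now arms its timeout inline: an absolute CLOCK_REALTIME
expiry built from the (sec, nsec) pair. A rough userspace analogue, assuming
clock_nanosleep() with TIMER_ABSTIME is a fair comparison for an
absolute-time wait:

    #include <time.h>

    /* Block until the absolute wall-clock time (sec, nsec), roughly what
     * the hrtimer_sleeper armed above does in-kernel. */
    static int sleep_until(time_t sec, long nsec)
    {
        struct timespec t = { .tv_sec = sec, .tv_nsec = nsec };

        return clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME, &t, NULL);
    }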
@@ -1236,6 +1265,7 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
 		/* Owner died? */
 		if (q.pi_state->owner != NULL) {
 			spin_lock_irq(&q.pi_state->owner->pi_lock);
+			WARN_ON(list_empty(&q.pi_state->list));
 			list_del_init(&q.pi_state->list);
 			spin_unlock_irq(&q.pi_state->owner->pi_lock);
 		} else
@@ -1244,6 +1274,7 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
 		q.pi_state->owner = current;
 
 		spin_lock_irq(&current->pi_lock);
+		WARN_ON(!list_empty(&q.pi_state->list));
 		list_add(&q.pi_state->list, &current->pi_state_list);
 		spin_unlock_irq(&current->pi_lock);
 
@@ -1284,7 +1315,7 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
 	if (!detect && ret == -EDEADLK && 0)
 		force_sig(SIGKILL, current);
 
-	return ret;
+	return ret != -EINTR ? ret : -ERESTARTNOINTR;
 
  out_unlock_release_sem:
 	queue_unlock(&q, hb);
@@ -1301,9 +1332,10 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
 	 * still holding the mmap_sem.
 	 */
 	if (attempt++) {
-		if (futex_handle_fault((unsigned long)uaddr, attempt))
+		if (futex_handle_fault((unsigned long)uaddr, attempt)) {
+			ret = -EFAULT;
 			goto out_unlock_release_sem;
-
+		}
 		goto retry_locked;
 	}
 
@@ -1318,76 +1350,6 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
 }
 
 /*
- * Restart handler
- */
-static long futex_lock_pi_restart(struct restart_block *restart)
-{
-	struct hrtimer_sleeper timeout, *to = NULL;
-	int ret;
-
-	restart->fn = do_no_restart_syscall;
-
-	if (restart->arg2 || restart->arg3) {
-		to = &timeout;
-		hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_ABS);
-		hrtimer_init_sleeper(to, current);
-		to->timer.expires.tv64 = ((u64)restart->arg1 << 32) |
-			(u64) restart->arg0;
-	}
-
-	pr_debug("lock_pi restart: %p, %d (%d)\n",
-		 (u32 __user *)restart->arg0, current->pid);
-
-	ret = do_futex_lock_pi((u32 __user *)restart->arg0, restart->arg1,
-			       0, to);
-
-	if (ret != -EINTR)
-		return ret;
-
-	restart->fn = futex_lock_pi_restart;
-
-	/* The other values are filled in */
-	return -ERESTART_RESTARTBLOCK;
-}
-
-/*
- * Called from the syscall entry below.
- */
-static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
-			 long nsec, int trylock)
-{
-	struct hrtimer_sleeper timeout, *to = NULL;
-	struct restart_block *restart;
-	int ret;
-
-	if (sec != MAX_SCHEDULE_TIMEOUT) {
-		to = &timeout;
-		hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_ABS);
-		hrtimer_init_sleeper(to, current);
-		to->timer.expires = ktime_set(sec, nsec);
-	}
-
-	ret = do_futex_lock_pi(uaddr, detect, trylock, to);
-
-	if (ret != -EINTR)
-		return ret;
-
-	pr_debug("lock_pi interrupted: %p, %d (%d)\n", uaddr, current->pid);
-
-	restart = &current_thread_info()->restart_block;
-	restart->fn = futex_lock_pi_restart;
-	restart->arg0 = (unsigned long) uaddr;
-	restart->arg1 = detect;
-	if (to) {
-		restart->arg2 = to->timer.expires.tv64 & 0xFFFFFFFF;
-		restart->arg3 = to->timer.expires.tv64 >> 32;
-	} else
-		restart->arg2 = restart->arg3 = 0;
-
-	return -ERESTART_RESTARTBLOCK;
-}
-
-/*
 * Userspace attempted a TID -> 0 atomic transition, and failed.
 * This is the in-kernel slowpath: we look up the PI state (if any),
 * and do the rt-mutex unlock.
@@ -1427,9 +1389,11 @@ retry_locked:
 	 * again. If it succeeds then we can return without waking
 	 * anyone else up:
 	 */
-	inc_preempt_count();
-	uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0);
-	dec_preempt_count();
+	if (!(uval & FUTEX_OWNER_DIED)) {
+		inc_preempt_count();
+		uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0);
+		dec_preempt_count();
+	}
 
 	if (unlikely(uval == -EFAULT))
 		goto pi_faulted;
@@ -1462,9 +1426,11 @@ retry_locked:
 	/*
 	 * No waiters - kernel unlocks the futex:
 	 */
-	ret = unlock_futex_pi(uaddr, uval);
-	if (ret == -EFAULT)
-		goto pi_faulted;
+	if (!(uval & FUTEX_OWNER_DIED)) {
+		ret = unlock_futex_pi(uaddr, uval);
+		if (ret == -EFAULT)
+			goto pi_faulted;
+	}
 
 out_unlock:
 	spin_unlock(&hb->lock);
@@ -1481,9 +1447,10 @@ pi_faulted:
 	 * still holding the mmap_sem.
 	 */
 	if (attempt++) {
-		if (futex_handle_fault((unsigned long)uaddr, attempt))
+		if (futex_handle_fault((unsigned long)uaddr, attempt)) {
+			ret = -EFAULT;
 			goto out_unlock;
-
+		}
 		goto retry_locked;
 	}
 
@@ -1683,9 +1650,9 @@ err_unlock:
 * Process a futex-list entry, check whether it's owned by the
 * dying task, and do notification if so:
 */
-int handle_futex_death(u32 __user *uaddr, struct task_struct *curr)
+int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
 {
-	u32 uval, nval;
+	u32 uval, nval, mval;
 
 retry:
 	if (get_user(uval, uaddr))
@@ -1702,21 +1669,45 @@ retry:
 		 * thread-death.) The rest of the cleanup is done in
 		 * userspace.
 		 */
-		nval = futex_atomic_cmpxchg_inatomic(uaddr, uval,
-						     uval | FUTEX_OWNER_DIED);
+		mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
+		nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval);
+
 		if (nval == -EFAULT)
 			return -1;
 
 		if (nval != uval)
 			goto retry;
 
-		if (uval & FUTEX_WAITERS)
-			futex_wake(uaddr, 1);
+		/*
+		 * Wake robust non-PI futexes here. The wakeup of
+		 * PI futexes happens in exit_pi_state():
+		 */
+		if (!pi) {
+			if (uval & FUTEX_WAITERS)
+				futex_wake(uaddr, 1);
+		}
 	}
 	return 0;
 }
 
 /*
+ * Fetch a robust-list pointer. Bit 0 signals PI futexes:
+ */
+static inline int fetch_robust_entry(struct robust_list __user **entry,
+				     struct robust_list __user **head, int *pi)
+{
+	unsigned long uentry;
+
+	if (get_user(uentry, (unsigned long *)head))
+		return -EFAULT;
+
+	*entry = (void *)(uentry & ~1UL);
+	*pi = uentry & 1;
+
+	return 0;
+}
+
+/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
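fetch_robust_entry() exploits the fact that robust-list entries are at least
word-aligned, so bit 0 of the user pointer is free to flag "this lock is a
PI futex". A self-contained userspace illustration of the encode/decode; the
struct is a stand-in for the user-visible robust_list:

    #include <assert.h>
    #include <stdint.h>

    struct entry_like { struct entry_like *next; };

    int main(void)
    {
        struct entry_like node;
        /* encode: set bit 0 to mark the entry as a PI futex */
        uintptr_t uentry = (uintptr_t)&node | 1UL;

        /* decode, exactly as fetch_robust_entry() does */
        struct entry_like *entry = (struct entry_like *)(uentry & ~1UL);
        int pi = (int)(uentry & 1);

        assert(entry == &node && pi == 1);
        return 0;
    }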
@@ -1726,14 +1717,14 @@ void exit_robust_list(struct task_struct *curr)
 {
 	struct robust_list_head __user *head = curr->robust_list;
 	struct robust_list __user *entry, *pending;
-	unsigned int limit = ROBUST_LIST_LIMIT;
+	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
 	unsigned long futex_offset;
 
 	/*
 	 * Fetch the list head (which was registered earlier, via
 	 * sys_set_robust_list()):
 	 */
-	if (get_user(entry, &head->list.next))
+	if (fetch_robust_entry(&entry, &head->list.next, &pi))
 		return;
 	/*
 	 * Fetch the relative futex offset:
@@ -1744,10 +1735,11 @@ void exit_robust_list(struct task_struct *curr)
 	 * Fetch any possibly pending lock-add first, and handle it
 	 * if it exists:
 	 */
-	if (get_user(pending, &head->list_op_pending))
+	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
 		return;
+
 	if (pending)
-		handle_futex_death((void *)pending + futex_offset, curr);
+		handle_futex_death((void *)pending + futex_offset, curr, pip);
 
 	while (entry != &head->list) {
 		/*
@@ -1756,12 +1748,12 @@ void exit_robust_list(struct task_struct *curr)
 		 */
 		if (entry != pending)
 			if (handle_futex_death((void *)entry + futex_offset,
-						curr))
+						curr, pi))
 				return;
 		/*
 		 * Fetch the next entry in the list:
 		 */
-		if (get_user(entry, &entry->next))
+		if (fetch_robust_entry(&entry, &entry->next, &pi))
 			return;
 		/*
 		 * Avoid excessively long or circular lists: