author		Paul Jackson <pj@sgi.com>		2006-01-08 04:01:54 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-01-08 23:13:43 -0500
commit		cf2a473c4089aa41c26f653200673f5a4cc25047
tree		0bce21f4684a382b13e93ba5b85409cf5eab1c2c /kernel
parent		b4b2641843db124637fa3d2cb2101982035dcc82
[PATCH] cpuset: combine refresh_mems and update_mems
The important code paths through alloc_pages_current() and alloc_page_vma(), by which most kernel page allocations go, both called cpuset_update_current_mems_allowed(), which in turn called refresh_mems().

-Both- of these latter two routines took task_lock(), got the task's cpuset pointer, and checked for an out-of-date cpuset->mems_generation.  That was a silly duplication of code and a waste of CPU cycles on an important code path.

Consolidated those two routines into a single routine, called cpuset_update_task_memory_state(), since it updates more than just mems_allowed.  Changed all callers of either routine to call the new consolidated routine.

Signed-off-by: Paul Jackson <pj@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
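For illustration only, a caller-side sketch of the consolidation described above.  The mm/ call sites are outside the kernel/-limited diffstat below, and the surrounding alloc_pages_current() code here is assumed from the era's mm/mempolicy.c rather than quoted from this patch; only the renamed call is taken from the log:

	/*
	 * Sketch, not part of this diff: a hot allocation path now does the
	 * task_lock(), mems_generation check and mempolicy rebind through a
	 * single call, where it previously went through
	 * cpuset_update_current_mems_allowed() -> refresh_mems().
	 */
	struct page *alloc_pages_current(gfp_t gfp, unsigned order)
	{
		struct mempolicy *pol = current->mempolicy;

		if ((gfp & __GFP_WAIT) && !in_interrupt())
			cpuset_update_task_memory_state();	/* was cpuset_update_current_mems_allowed() */
		if (!pol || in_interrupt())
			pol = &default_policy;
		/* ... policy-directed allocation continues as before ... */
		return __alloc_pages(gfp, order, zonelist_policy(gfp, pol));
	}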
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/cpuset.c	95
1 file changed, 41 insertions(+), 54 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index d9349cc48b95..e9917d71628a 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -584,13 +584,26 @@ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
 	BUG_ON(!nodes_intersects(*pmask, node_online_map));
 }
 
-/*
- * Refresh current tasks mems_allowed and mems_generation from current
- * tasks cpuset.
+/**
+ * cpuset_update_task_memory_state - update task memory placement
+ *
+ * If the current tasks cpusets mems_allowed changed behind our
+ * backs, update current->mems_allowed, mems_generation and task NUMA
+ * mempolicy to the new value.
  *
- * Call without callback_sem or task_lock() held. May be called with
- * or without manage_sem held. Will acquire task_lock() and might
- * acquire callback_sem during call.
+ * Task mempolicy is updated by rebinding it relative to the
+ * current->cpuset if a task has its memory placement changed.
+ * Do not call this routine if in_interrupt().
+ *
+ * Call without callback_sem or task_lock() held. May be called
+ * with or without manage_sem held. Except in early boot or
+ * an exiting task, when tsk->cpuset is NULL, this routine will
+ * acquire task_lock(). We don't need to use task_lock to guard
+ * against another task changing a non-NULL cpuset pointer to NULL,
+ * as that is only done by a task on itself, and if the current task
+ * is here, it is not simultaneously in the exit code NULL'ing its
+ * cpuset pointer. This routine also might acquire callback_sem and
+ * current->mm->mmap_sem during call.
  *
  * The task_lock() is required to dereference current->cpuset safely.
  * Without it, we could pick up the pointer value of current->cpuset
@@ -605,32 +618,36 @@ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
  * task has been modifying its cpuset.
  */
 
-static void refresh_mems(void)
+void cpuset_update_task_memory_state()
 {
 	int my_cpusets_mem_gen;
+	struct task_struct *tsk = current;
+	struct cpuset *cs = tsk->cpuset;
 
-	task_lock(current);
-	my_cpusets_mem_gen = current->cpuset->mems_generation;
-	task_unlock(current);
+	if (unlikely(!cs))
+		return;
+
+	task_lock(tsk);
+	my_cpusets_mem_gen = cs->mems_generation;
+	task_unlock(tsk);
 
-	if (current->cpuset_mems_generation != my_cpusets_mem_gen) {
-		struct cpuset *cs;
-		nodemask_t oldmem = current->mems_allowed;
+	if (my_cpusets_mem_gen != tsk->cpuset_mems_generation) {
+		nodemask_t oldmem = tsk->mems_allowed;
 		int migrate;
 
 		down(&callback_sem);
-		task_lock(current);
-		cs = current->cpuset;
+		task_lock(tsk);
+		cs = tsk->cpuset;	/* Maybe changed when task not locked */
 		migrate = is_memory_migrate(cs);
-		guarantee_online_mems(cs, &current->mems_allowed);
-		current->cpuset_mems_generation = cs->mems_generation;
-		task_unlock(current);
+		guarantee_online_mems(cs, &tsk->mems_allowed);
+		tsk->cpuset_mems_generation = cs->mems_generation;
+		task_unlock(tsk);
 		up(&callback_sem);
-		if (!nodes_equal(oldmem, current->mems_allowed)) {
-			numa_policy_rebind(&oldmem, &current->mems_allowed);
+		numa_policy_rebind(&oldmem, &tsk->mems_allowed);
+		if (!nodes_equal(oldmem, tsk->mems_allowed)) {
 			if (migrate) {
-				do_migrate_pages(current->mm, &oldmem,
-					&current->mems_allowed,
+				do_migrate_pages(tsk->mm, &oldmem,
+					&tsk->mems_allowed,
 					MPOL_MF_MOVE_ALL);
 			}
 		}
@@ -1630,7 +1647,7 @@ static long cpuset_create(struct cpuset *parent, const char *name, int mode)
 		return -ENOMEM;
 
 	down(&manage_sem);
-	refresh_mems();
+	cpuset_update_task_memory_state();
 	cs->flags = 0;
 	if (notify_on_release(parent))
 		set_bit(CS_NOTIFY_ON_RELEASE, &cs->flags);
@@ -1688,7 +1705,7 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
 	/* the vfs holds both inode->i_sem already */
 
 	down(&manage_sem);
-	refresh_mems();
+	cpuset_update_task_memory_state();
 	if (atomic_read(&cs->count) > 0) {
 		up(&manage_sem);
 		return -EBUSY;
@@ -1873,36 +1890,6 @@ void cpuset_init_current_mems_allowed(void)
 }
 
 /**
- * cpuset_update_current_mems_allowed - update mems parameters to new values
- *
- * If the current tasks cpusets mems_allowed changed behind our backs,
- * update current->mems_allowed and mems_generation to the new value.
- * Do not call this routine if in_interrupt().
- *
- * Call without callback_sem or task_lock() held. May be called
- * with or without manage_sem held. Unless exiting, it will acquire
- * task_lock(). Also might acquire callback_sem during call to
- * refresh_mems().
- */
-
-void cpuset_update_current_mems_allowed(void)
-{
-	struct cpuset *cs;
-	int need_to_refresh = 0;
-
-	task_lock(current);
-	cs = current->cpuset;
-	if (!cs)
-		goto done;
-	if (current->cpuset_mems_generation != cs->mems_generation)
-		need_to_refresh = 1;
-done:
-	task_unlock(current);
-	if (need_to_refresh)
-		refresh_mems();
-}
-
-/**
  * cpuset_zonelist_valid_mems_allowed - check zonelist vs. curremt mems_allowed
  * @zl: the zonelist to be checked
  *