path: root/mm/mempolicy.c
author		David Rientjes <rientjes@google.com>	2014-04-07 18:37:30 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-07 19:35:54 -0400
commit		f0432d159601f96839f514f286eaa5b75c4112dc (patch)
tree		654f94de69e9b7465480172b09e6838f08e81989 /mm/mempolicy.c
parent		2a389610a7331d22344698f23ef2e8c55b2cde7b (diff)
mm, mempolicy: remove per-process flag
PF_MEMPOLICY is an unnecessary optimization for CONFIG_SLAB users.  There's
no significant performance degradation to checking current->mempolicy rather
than current->flags & PF_MEMPOLICY in the allocation path, especially since
this is considered unlikely().

Running TCP_RR with netperf-2.4.5 through localhost on a 16-cpu machine with
64GB of memory and without a mempolicy:

	threads		before		after
	16		1249409		1244487
	32		1281786		1246783
	48		1239175		1239138
	64		1244642		1241841
	80		1244346		1248918
	96		1266436		1254316
	112		1307398		1312135
	128		1327607		1326502

Per-process flags are a scarce resource, so we should free them up whenever
possible and make them available.  We'll be using one shortly for memcg oom
reserves.

Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Jianguo Wu <wujianguo@huawei.com>
Cc: Tim Hockin <thockin@google.com>
Cc: Christoph Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
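For context, a minimal sketch of the fast-path test the message compares; the helper names below are hypothetical and this is not the actual mm/slab.c change (that file is outside this mm/mempolicy.c view), only an illustration of the flag check versus the pointer check:

/* Illustrative sketch only -- hypothetical helpers, not kernel API. */

/* Before: the allocation fast path tested a cached per-process flag. */
static inline bool wants_alternate_node_alloc_old(void)
{
	return unlikely(current->flags & PF_MEMPOLICY);
}

/*
 * After: test the task's mempolicy pointer directly.  The branch is
 * still annotated unlikely(), so the common no-mempolicy case pays a
 * single load of current->mempolicy instead of a flag test.
 */
static inline bool wants_alternate_node_alloc_new(void)
{
	return unlikely(current->mempolicy != NULL);
}

The netperf TCP_RR numbers above are the evidence that the pointer check is performance-neutral, which is what lets PF_MEMPOLICY be reclaimed for memcg oom reserves.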
Diffstat (limited to 'mm/mempolicy.c')
-rw-r--r--	mm/mempolicy.c	31
1 files changed, 0 insertions, 31 deletions
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 0ad0ba31979f..78e1472933ea 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -795,36 +795,6 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
 	return err;
 }
 
-/*
- * Update task->flags PF_MEMPOLICY bit: set iff non-default
- * mempolicy.  Allows more rapid checking of this (combined perhaps
- * with other PF_* flag bits) on memory allocation hot code paths.
- *
- * If called from outside this file, the task 'p' should -only- be
- * a newly forked child not yet visible on the task list, because
- * manipulating the task flags of a visible task is not safe.
- *
- * The above limitation is why this routine has the funny name
- * mpol_fix_fork_child_flag().
- *
- * It is also safe to call this with a task pointer of current,
- * which the static wrapper mpol_set_task_struct_flag() does,
- * for use within this file.
- */
-
-void mpol_fix_fork_child_flag(struct task_struct *p)
-{
-	if (p->mempolicy)
-		p->flags |= PF_MEMPOLICY;
-	else
-		p->flags &= ~PF_MEMPOLICY;
-}
-
-static void mpol_set_task_struct_flag(void)
-{
-	mpol_fix_fork_child_flag(current);
-}
-
 /* Set the process memory policy */
 static long do_set_mempolicy(unsigned short mode, unsigned short flags,
 			     nodemask_t *nodes)
@@ -861,7 +831,6 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags,
 	}
 	old = current->mempolicy;
 	current->mempolicy = new;
-	mpol_set_task_struct_flag();
 	if (new && new->mode == MPOL_INTERLEAVE &&
 	    nodes_weight(new->v.nodes))
 		current->il_next = first_node(new->v.nodes);