author     Andrew Morton <akpm@linux-foundation.org>        2011-01-13 18:47:32 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2011-01-13 20:32:49 -0500
commit     c06b1fca18c3ad868bfcaca230146e3038583422
tree       3f9efb869931a6c6055579524bff8ad6505d074b   /mm/page_alloc.c
parent     fd4a4663db293bfd5dc20fb4113977f62895e550
mm/page_alloc.c: don't cache `current' in a local
It's old-fashioned and unneeded.
akpm:/usr/src/25> size mm/page_alloc.o
   text    data     bss     dec     hex filename
  39884 1241317   18808 1300009  13d629 mm/page_alloc.o (before)
  39838 1241317   18808 1299963  13d5fb mm/page_alloc.o (after)
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 24 ++++++++++--------------
 1 file changed, 10 insertions(+), 14 deletions(-)
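
The change itself is mechanical: each function stops caching `current' in a local pointer and dereferences `current' directly. As a minimal illustration of the pattern (the helper functions below are made up for the example and are not in the kernel tree; only the PF_MEMALLOC handling mirrors the patch):

	#include <linux/sched.h>	/* current, struct task_struct, PF_MEMALLOC */

	/* Old style: cache current in a local before toggling a task flag. */
	static void memalloc_section_old(void)
	{
		struct task_struct *p = current;	/* the local the patch removes */

		p->flags |= PF_MEMALLOC;
		/* ... reclaim/compaction work runs here ... */
		p->flags &= ~PF_MEMALLOC;
	}

	/* New style: use current directly; it is already a cheap per-cpu
	 * access, so the extra pointer variable buys nothing. */
	static void memalloc_section_new(void)
	{
		current->flags |= PF_MEMALLOC;
		/* ... reclaim/compaction work runs here ... */
		current->flags &= ~PF_MEMALLOC;
	}
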
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bda1db301d44..90c1439549fd 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1809,15 +1809,14 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	bool sync_migration)
 {
 	struct page *page;
-	struct task_struct *tsk = current;
 
 	if (!order || compaction_deferred(preferred_zone))
 		return NULL;
 
-	tsk->flags |= PF_MEMALLOC;
+	current->flags |= PF_MEMALLOC;
 	*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
 						nodemask, sync_migration);
-	tsk->flags &= ~PF_MEMALLOC;
+	current->flags &= ~PF_MEMALLOC;
 	if (*did_some_progress != COMPACT_SKIPPED) {
 
 		/* Page migration frees to the PCP lists but we want merging */
@@ -1869,23 +1868,22 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 {
 	struct page *page = NULL;
 	struct reclaim_state reclaim_state;
-	struct task_struct *p = current;
 	bool drained = false;
 
 	cond_resched();
 
 	/* We now go into synchronous reclaim */
 	cpuset_memory_pressure_bump();
-	p->flags |= PF_MEMALLOC;
+	current->flags |= PF_MEMALLOC;
 	lockdep_set_current_reclaim_state(gfp_mask);
 	reclaim_state.reclaimed_slab = 0;
-	p->reclaim_state = &reclaim_state;
+	current->reclaim_state = &reclaim_state;
 
 	*did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
 
-	p->reclaim_state = NULL;
+	current->reclaim_state = NULL;
 	lockdep_clear_current_reclaim_state();
-	p->flags &= ~PF_MEMALLOC;
+	current->flags &= ~PF_MEMALLOC;
 
 	cond_resched();
 
@@ -1950,7 +1948,6 @@ void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
 static inline int
 gfp_to_alloc_flags(gfp_t gfp_mask)
 {
-	struct task_struct *p = current;
 	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
 	const gfp_t wait = gfp_mask & __GFP_WAIT;
 
@@ -1977,12 +1974,12 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 		 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
 		 */
 		alloc_flags &= ~ALLOC_CPUSET;
-	} else if (unlikely(rt_task(p)) && !in_interrupt())
+	} else if (unlikely(rt_task(current)) && !in_interrupt())
 		alloc_flags |= ALLOC_HARDER;
 
 	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
 		if (!in_interrupt() &&
-		    ((p->flags & PF_MEMALLOC) ||
+		    ((current->flags & PF_MEMALLOC) ||
 		     unlikely(test_thread_flag(TIF_MEMDIE))))
 			alloc_flags |= ALLOC_NO_WATERMARKS;
 	}
@@ -2001,7 +1998,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	int alloc_flags;
 	unsigned long pages_reclaimed = 0;
 	unsigned long did_some_progress;
-	struct task_struct *p = current;
 	bool sync_migration = false;
 
 	/*
@@ -2060,7 +2056,7 @@ rebalance:
 		goto nopage;
 
 	/* Avoid recursion of direct reclaim */
-	if (p->flags & PF_MEMALLOC)
+	if (current->flags & PF_MEMALLOC)
 		goto nopage;
 
 	/* Avoid allocations with no watermarks from looping endlessly */
@@ -2153,7 +2149,7 @@ nopage:
 	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
 		printk(KERN_WARNING "%s: page allocation failure."
 			" order:%d, mode:0x%x\n",
-			p->comm, order, gfp_mask);
+			current->comm, order, gfp_mask);
 		dump_stack();
 		show_mem();
 	}
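
The direct dereferences cost nothing because `current' is not a heavyweight lookup: on x86 in this era it expands to an inline read of a per-cpu variable. Roughly (a simplified sketch of arch/x86/include/asm/current.h from around this kernel version, not a verbatim copy; exact helper names may differ between releases):

	/* Sketch only: how `current' resolved on x86 at the time. */
	DECLARE_PER_CPU(struct task_struct *, current_task);

	static __always_inline struct task_struct *get_current(void)
	{
		return percpu_read_stable(current_task);	/* a single per-cpu load */
	}

	#define current get_current()

Each `current->field' therefore compiles down to one per-cpu load plus the field access, which is why dropping the cached locals trims 46 bytes of text (39884 -> 39838) without changing behaviour.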