author	Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>	2012-10-08 19:29:51 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-09 03:22:27 -0400
commit	26234f36ef3ec7efcfa9acb181427849c1f9db7c (patch)
tree	00c84ab6794fe54c75586eb211d75d80bb038646 /mm
parent	420256ef02660af0acf28c12fe4b7d514ca88a4d (diff)
thp: introduce khugepaged_prealloc_page and khugepaged_alloc_page
They are used to abstract the difference between NUMA-enabled and
NUMA-disabled builds and to make the code more readable.
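The shape of the refactoring can be seen in a minimal userspace C sketch of
the same pattern: two compile-time variants behind one helper name, so the
caller carries no #ifdef. The names here (prealloc_page, the malloc-backed
buffer) are illustrative stand-ins for khugepaged_prealloc_page and the
huge-page allocator, not the kernel API.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#ifdef CONFIG_NUMA
static bool prealloc_page(void **page, bool *wait)
{
	(void)wait;	/* the kernel variant also sleeps/retries via *wait */
	if (*page) {	/* drop the page left over from the previous round */
		free(*page);
		*page = NULL;
	}
	return true;	/* real allocation happens later, near the target node */
}
#else
static bool prealloc_page(void **page, bool *wait)
{
	(void)wait;	/* the kernel variant sleeps and retries via *wait */
	if (!*page)
		*page = malloc(4096);	/* allocate once up front, then reuse */
	return *page != NULL;	/* stop the scan loop if allocation failed */
}
#endif

int main(void)
{
	void *page = NULL;
	bool wait = true;

	/* The scan loop is now identical for both configurations. */
	for (int pass = 0; pass < 3; pass++) {
		if (!prealloc_page(&page, &wait))
			break;
		printf("pass %d: page %s\n", pass, page ? "ready" : "pending");
	}
	free(page);
	return 0;
}

Compiling with and without -DCONFIG_NUMA leaves main() untouched, which is
the readability win described above.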
Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/huge_memory.c	166
1 file changed, 98 insertions(+), 68 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9c4390f60c3e..f0e999379dd7 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1827,28 +1827,34 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
 	}
 }
 
-static void collapse_huge_page(struct mm_struct *mm,
-			       unsigned long address,
-			       struct page **hpage,
-			       struct vm_area_struct *vma,
-			       int node)
+static void khugepaged_alloc_sleep(void)
 {
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd, _pmd;
-	pte_t *pte;
-	pgtable_t pgtable;
-	struct page *new_page;
-	spinlock_t *ptl;
-	int isolated;
-	unsigned long hstart, hend;
+	wait_event_freezable_timeout(khugepaged_wait, false,
+			msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
+}
 
-	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
-#ifndef CONFIG_NUMA
-	up_read(&mm->mmap_sem);
-	VM_BUG_ON(!*hpage);
-	new_page = *hpage;
-#else
+#ifdef CONFIG_NUMA
+static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
+{
+	if (IS_ERR(*hpage)) {
+		if (!*wait)
+			return false;
+
+		*wait = false;
+		khugepaged_alloc_sleep();
+	} else if (*hpage) {
+		put_page(*hpage);
+		*hpage = NULL;
+	}
+
+	return true;
+}
+
+static struct page
+*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
+		       struct vm_area_struct *vma, unsigned long address,
+		       int node)
+{
 	VM_BUG_ON(*hpage);
 	/*
 	 * Allocate the page while the vma is still valid and under
@@ -1860,7 +1866,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	 * mmap_sem in read mode is good idea also to allow greater
 	 * scalability.
 	 */
-	new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
+	*hpage = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
 				      node, __GFP_OTHER_NODE);
 
 	/*
@@ -1868,15 +1874,81 @@ static void collapse_huge_page(struct mm_struct *mm,
 	 * preparation for taking it in write mode.
 	 */
 	up_read(&mm->mmap_sem);
-	if (unlikely(!new_page)) {
+	if (unlikely(!*hpage)) {
 		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 		*hpage = ERR_PTR(-ENOMEM);
-		return;
+		return NULL;
 	}
-	*hpage = new_page;
+
 	count_vm_event(THP_COLLAPSE_ALLOC);
+	return *hpage;
+}
+#else
+static struct page *khugepaged_alloc_hugepage(bool *wait)
+{
+	struct page *hpage;
+
+	do {
+		hpage = alloc_hugepage(khugepaged_defrag());
+		if (!hpage) {
+			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
+			if (!*wait)
+				return NULL;
+
+			*wait = false;
+			khugepaged_alloc_sleep();
+		} else
+			count_vm_event(THP_COLLAPSE_ALLOC);
+	} while (unlikely(!hpage) && likely(khugepaged_enabled()));
+
+	return hpage;
+}
+
+static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
+{
+	if (!*hpage)
+		*hpage = khugepaged_alloc_hugepage(wait);
+
+	if (unlikely(!*hpage))
+		return false;
+
+	return true;
+}
+
+static struct page
+*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
+		       struct vm_area_struct *vma, unsigned long address,
+		       int node)
+{
+	up_read(&mm->mmap_sem);
+	VM_BUG_ON(!*hpage);
+	return *hpage;
+}
 #endif
 
+static void collapse_huge_page(struct mm_struct *mm,
+			       unsigned long address,
+			       struct page **hpage,
+			       struct vm_area_struct *vma,
+			       int node)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd, _pmd;
+	pte_t *pte;
+	pgtable_t pgtable;
+	struct page *new_page;
+	spinlock_t *ptl;
+	int isolated;
+	unsigned long hstart, hend;
+
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+
+	/* release the mmap_sem read lock. */
+	new_page = khugepaged_alloc_page(hpage, mm, vma, address, node);
+	if (!new_page)
+		return;
+
 	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)))
 		return;
 
@@ -2215,34 +2287,6 @@ static int khugepaged_wait_event(void)
 		kthread_should_stop();
 }
 
-static void khugepaged_alloc_sleep(void)
-{
-	wait_event_freezable_timeout(khugepaged_wait, false,
-			msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
-}
-
-#ifndef CONFIG_NUMA
-static struct page *khugepaged_alloc_hugepage(bool *wait)
-{
-	struct page *hpage;
-
-	do {
-		hpage = alloc_hugepage(khugepaged_defrag());
-		if (!hpage) {
-			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
-			if (!*wait)
-				return NULL;
-
-			*wait = false;
-			khugepaged_alloc_sleep();
-		} else
-			count_vm_event(THP_COLLAPSE_ALLOC);
-	} while (unlikely(!hpage) && likely(khugepaged_enabled()));
-
-	return hpage;
-}
-#endif
-
 static void khugepaged_do_scan(void)
 {
 	struct page *hpage = NULL;
@@ -2253,23 +2297,9 @@ static void khugepaged_do_scan(void)
 	barrier(); /* write khugepaged_pages_to_scan to local stack */
 
 	while (progress < pages) {
-#ifndef CONFIG_NUMA
-		if (!hpage)
-			hpage = khugepaged_alloc_hugepage(&wait);
-
-		if (unlikely(!hpage))
+		if (!khugepaged_prealloc_page(&hpage, &wait))
 			break;
-#else
-		if (IS_ERR(hpage)) {
-			if (!wait)
-				break;
-			wait = false;
-			khugepaged_alloc_sleep();
-		} else if (hpage) {
-			put_page(hpage);
-			hpage = NULL;
-		}
-#endif
+
 		cond_resched();
 
 		if (unlikely(kthread_should_stop() || freezing(current)))