about summary refs log tree commit diff stats
path: root/mm/migrate.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/migrate.c')
-rw-r--r--  mm/migrate.c  89
1 files changed, 44 insertions, 45 deletions
diff --git a/mm/migrate.c b/mm/migrate.c
index 21631ab8c08b..55373983c9c6 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -300,12 +300,10 @@ static int migrate_page_move_mapping(struct address_space *mapping,
300 * Now we know that no one else is looking at the page. 300 * Now we know that no one else is looking at the page.
301 */ 301 */
302 get_page(newpage); /* add cache reference */ 302 get_page(newpage); /* add cache reference */
303#ifdef CONFIG_SWAP
304 if (PageSwapCache(page)) { 303 if (PageSwapCache(page)) {
305 SetPageSwapCache(newpage); 304 SetPageSwapCache(newpage);
306 set_page_private(newpage, page_private(page)); 305 set_page_private(newpage, page_private(page));
307 } 306 }
308#endif
309 307
310 radix_tree_replace_slot(pslot, newpage); 308 radix_tree_replace_slot(pslot, newpage);
311 309
@@ -373,9 +371,7 @@ static void migrate_page_copy(struct page *newpage, struct page *page)
373 371
374 mlock_migrate_page(newpage, page); 372 mlock_migrate_page(newpage, page);
375 373
376#ifdef CONFIG_SWAP
377 ClearPageSwapCache(page); 374 ClearPageSwapCache(page);
378#endif
379 ClearPagePrivate(page); 375 ClearPagePrivate(page);
380 set_page_private(page, 0); 376 set_page_private(page, 0);
381 /* page->mapping contains a flag for PageAnon() */ 377 /* page->mapping contains a flag for PageAnon() */
@@ -848,12 +844,6 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
848 struct vm_area_struct *vma; 844 struct vm_area_struct *vma;
849 struct page *page; 845 struct page *page;
850 846
851 /*
852 * A valid page pointer that will not match any of the
853 * pages that will be moved.
854 */
855 pp->page = ZERO_PAGE(0);
856
857 err = -EFAULT; 847 err = -EFAULT;
858 vma = find_vma(mm, pp->addr); 848 vma = find_vma(mm, pp->addr);
859 if (!vma || !vma_migratable(vma)) 849 if (!vma || !vma_migratable(vma))
@@ -919,41 +909,43 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
919 const int __user *nodes, 909 const int __user *nodes,
920 int __user *status, int flags) 910 int __user *status, int flags)
921{ 911{
922 struct page_to_node *pm = NULL; 912 struct page_to_node *pm;
923 nodemask_t task_nodes; 913 nodemask_t task_nodes;
924 int err = 0; 914 unsigned long chunk_nr_pages;
925 int i; 915 unsigned long chunk_start;
916 int err;
926 917
927 task_nodes = cpuset_mems_allowed(task); 918 task_nodes = cpuset_mems_allowed(task);
928 919
929 /* Limit nr_pages so that the multiplication may not overflow */ 920 err = -ENOMEM;
930 if (nr_pages >= ULONG_MAX / sizeof(struct page_to_node) - 1) { 921 pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
931 err = -E2BIG; 922 if (!pm)
932 goto out;
933 }
934
935 pm = vmalloc((nr_pages + 1) * sizeof(struct page_to_node));
936 if (!pm) {
937 err = -ENOMEM;
938 goto out; 923 goto out;
939 }
940
941 /* 924 /*
942 * Get parameters from user space and initialize the pm 925 * Store a chunk of page_to_node array in a page,
943 * array. Return various errors if the user did something wrong. 926 * but keep the last one as a marker
944 */ 927 */
945 for (i = 0; i < nr_pages; i++) { 928 chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1;
946 const void __user *p;
947 929
948 err = -EFAULT; 930 for (chunk_start = 0;
949 if (get_user(p, pages + i)) 931 chunk_start < nr_pages;
950 goto out_pm; 932 chunk_start += chunk_nr_pages) {
933 int j;
951 934
952 pm[i].addr = (unsigned long)p; 935 if (chunk_start + chunk_nr_pages > nr_pages)
953 if (nodes) { 936 chunk_nr_pages = nr_pages - chunk_start;
937
938 /* fill the chunk pm with addrs and nodes from user-space */
939 for (j = 0; j < chunk_nr_pages; j++) {
940 const void __user *p;
954 int node; 941 int node;
955 942
956 if (get_user(node, nodes + i)) 943 err = -EFAULT;
944 if (get_user(p, pages + j + chunk_start))
945 goto out_pm;
946 pm[j].addr = (unsigned long) p;
947
948 if (get_user(node, nodes + j + chunk_start))
957 goto out_pm; 949 goto out_pm;
958 950
959 err = -ENODEV; 951 err = -ENODEV;
@@ -964,22 +956,29 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
964 if (!node_isset(node, task_nodes)) 956 if (!node_isset(node, task_nodes))
965 goto out_pm; 957 goto out_pm;
966 958
967 pm[i].node = node; 959 pm[j].node = node;
968 } else 960 }
969 pm[i].node = 0; /* anything to not match MAX_NUMNODES */ 961
970 } 962 /* End marker for this chunk */
971 /* End marker */ 963 pm[chunk_nr_pages].node = MAX_NUMNODES;
972 pm[nr_pages].node = MAX_NUMNODES; 964
965 /* Migrate this chunk */
966 err = do_move_page_to_node_array(mm, pm,
967 flags & MPOL_MF_MOVE_ALL);
968 if (err < 0)
969 goto out_pm;
973 970
974 err = do_move_page_to_node_array(mm, pm, flags & MPOL_MF_MOVE_ALL);
975 if (err >= 0)
976 /* Return status information */ 971 /* Return status information */
977 for (i = 0; i < nr_pages; i++) 972 for (j = 0; j < chunk_nr_pages; j++)
978 if (put_user(pm[i].status, status + i)) 973 if (put_user(pm[j].status, status + j + chunk_start)) {
979 err = -EFAULT; 974 err = -EFAULT;
975 goto out_pm;
976 }
977 }
978 err = 0;
980 979
981out_pm: 980out_pm:
982 vfree(pm); 981 free_page((unsigned long)pm);
983out: 982out:
984 return err; 983 return err;
985} 984}