diff options
author | Brice Goglin <Brice.Goglin@inria.fr> | 2009-01-06 17:38:57 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-01-06 18:58:58 -0500 |
commit | 3140a2273009c01c27d316f35ab76a37e105fdd8 (patch) | |
tree | 25d4f805dbf72a9491bc146bb77418ef507d21bf /mm | |
parent | 390722baa7fc447b0a4f0c3c3f537ed056dbc944 (diff) |
mm: rework do_pages_move() to work on page_sized chunks
Rework do_pages_move() to work by page-sized chunks of struct page_to_node
that are passed to do_move_page_to_node_array(). We now only have to
allocate a single page instead of a possibly very large vmalloc area to store
all page_to_node entries.
As a result, new_page_node() will now have a very small lookup, hiding
much of the overall sys_move_pages() overhead.
Signed-off-by: Brice Goglin <Brice.Goglin@inria.fr>
Signed-off-by: Nathalie Furmento <Nathalie.Furmento@labri.fr>
Acked-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/migrate.c | 79 |
1 files changed, 44 insertions, 35 deletions
diff --git a/mm/migrate.c b/mm/migrate.c index 21631ab8c08b..0a75716cb736 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
@@ -919,41 +919,43 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task, | |||
919 | const int __user *nodes, | 919 | const int __user *nodes, |
920 | int __user *status, int flags) | 920 | int __user *status, int flags) |
921 | { | 921 | { |
922 | struct page_to_node *pm = NULL; | 922 | struct page_to_node *pm; |
923 | nodemask_t task_nodes; | 923 | nodemask_t task_nodes; |
924 | int err = 0; | 924 | unsigned long chunk_nr_pages; |
925 | int i; | 925 | unsigned long chunk_start; |
926 | int err; | ||
926 | 927 | ||
927 | task_nodes = cpuset_mems_allowed(task); | 928 | task_nodes = cpuset_mems_allowed(task); |
928 | 929 | ||
929 | /* Limit nr_pages so that the multiplication may not overflow */ | 930 | err = -ENOMEM; |
930 | if (nr_pages >= ULONG_MAX / sizeof(struct page_to_node) - 1) { | 931 | pm = (struct page_to_node *)__get_free_page(GFP_KERNEL); |
931 | err = -E2BIG; | 932 | if (!pm) |
932 | goto out; | ||
933 | } | ||
934 | |||
935 | pm = vmalloc((nr_pages + 1) * sizeof(struct page_to_node)); | ||
936 | if (!pm) { | ||
937 | err = -ENOMEM; | ||
938 | goto out; | 933 | goto out; |
939 | } | ||
940 | |||
941 | /* | 934 | /* |
942 | * Get parameters from user space and initialize the pm | 935 | * Store a chunk of page_to_node array in a page, |
943 | * array. Return various errors if the user did something wrong. | 936 | * but keep the last one as a marker |
944 | */ | 937 | */ |
945 | for (i = 0; i < nr_pages; i++) { | 938 | chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1; |
946 | const void __user *p; | ||
947 | 939 | ||
948 | err = -EFAULT; | 940 | for (chunk_start = 0; |
949 | if (get_user(p, pages + i)) | 941 | chunk_start < nr_pages; |
950 | goto out_pm; | 942 | chunk_start += chunk_nr_pages) { |
943 | int j; | ||
951 | 944 | ||
952 | pm[i].addr = (unsigned long)p; | 945 | if (chunk_start + chunk_nr_pages > nr_pages) |
953 | if (nodes) { | 946 | chunk_nr_pages = nr_pages - chunk_start; |
947 | |||
948 | /* fill the chunk pm with addrs and nodes from user-space */ | ||
949 | for (j = 0; j < chunk_nr_pages; j++) { | ||
950 | const void __user *p; | ||
954 | int node; | 951 | int node; |
955 | 952 | ||
956 | if (get_user(node, nodes + i)) | 953 | err = -EFAULT; |
954 | if (get_user(p, pages + j + chunk_start)) | ||
955 | goto out_pm; | ||
956 | pm[j].addr = (unsigned long) p; | ||
957 | |||
958 | if (get_user(node, nodes + j + chunk_start)) | ||
957 | goto out_pm; | 959 | goto out_pm; |
958 | 960 | ||
959 | err = -ENODEV; | 961 | err = -ENODEV; |
@@ -964,22 +966,29 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task, | |||
964 | if (!node_isset(node, task_nodes)) | 966 | if (!node_isset(node, task_nodes)) |
965 | goto out_pm; | 967 | goto out_pm; |
966 | 968 | ||
967 | pm[i].node = node; | 969 | pm[j].node = node; |
968 | } else | 970 | } |
969 | pm[i].node = 0; /* anything to not match MAX_NUMNODES */ | 971 | |
970 | } | 972 | /* End marker for this chunk */ |
971 | /* End marker */ | 973 | pm[chunk_nr_pages].node = MAX_NUMNODES; |
972 | pm[nr_pages].node = MAX_NUMNODES; | 974 | |
975 | /* Migrate this chunk */ | ||
976 | err = do_move_page_to_node_array(mm, pm, | ||
977 | flags & MPOL_MF_MOVE_ALL); | ||
978 | if (err < 0) | ||
979 | goto out_pm; | ||
973 | 980 | ||
974 | err = do_move_page_to_node_array(mm, pm, flags & MPOL_MF_MOVE_ALL); | ||
975 | if (err >= 0) | ||
976 | /* Return status information */ | 981 | /* Return status information */ |
977 | for (i = 0; i < nr_pages; i++) | 982 | for (j = 0; j < chunk_nr_pages; j++) |
978 | if (put_user(pm[i].status, status + i)) | 983 | if (put_user(pm[j].status, status + j + chunk_start)) { |
979 | err = -EFAULT; | 984 | err = -EFAULT; |
985 | goto out_pm; | ||
986 | } | ||
987 | } | ||
988 | err = 0; | ||
980 | 989 | ||
981 | out_pm: | 990 | out_pm: |
982 | vfree(pm); | 991 | free_page((unsigned long)pm); |
983 | out: | 992 | out: |
984 | return err; | 993 | return err; |
985 | } | 994 | } |