| author | Hugh Dickins <hugh.dickins@tiscali.co.uk> | 2009-09-21 20:03:24 -0400 |
| --- | --- | --- |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-09-22 10:17:40 -0400 |
| commit | 1c3aff1ceec2cc86810e2690e67873ff0c505862 | |
| tree | bf2f1badfd3f8859299f00c8a95c0a11e5cfa778 | |
| parent | 408e82b78bcc9f1b47c76e833c3df97f675947de | |
mm: remove unused GUP flags
GUP_FLAGS_IGNORE_VMA_PERMISSIONS and GUP_FLAGS_IGNORE_SIGKILL were
flags added solely to prevent __get_user_pages() from doing some of
what it usually does, in the munlock case: we can now remove them.
Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
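With the two IGNORE flags gone, the only GUP flags left are GUP_FLAGS_WRITE and GUP_FLAGS_FORCE, which the public wrappers derive from their write/force arguments. A minimal sketch of that call path, assuming the wrapper shape of this era (abbreviated and illustrative, not quoted verbatim from the tree):

    int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                       unsigned long start, int nr_pages, int write, int force,
                       struct page **pages, struct vm_area_struct **vmas)
    {
            int flags = 0;

            if (write)
                    flags |= GUP_FLAGS_WRITE;  /* caller wants the pages writable */
            if (force)
                    flags |= GUP_FLAGS_FORCE;  /* only require the VM_MAY* permissions */

            return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
    }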
 -rw-r--r--  mm/internal.h |  6 ++----
 -rw-r--r--  mm/memory.c   | 14 ++++----------
 -rw-r--r--  mm/nommu.c    |  6 ++----
 3 files changed, 8 insertions(+), 18 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index f290c4db528b..166765cd58d6 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -250,10 +250,8 @@ static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
 }
 #endif /* CONFIG_SPARSEMEM */
 
-#define GUP_FLAGS_WRITE                  0x1
-#define GUP_FLAGS_FORCE                  0x2
-#define GUP_FLAGS_IGNORE_VMA_PERMISSIONS 0x4
-#define GUP_FLAGS_IGNORE_SIGKILL         0x8
+#define GUP_FLAGS_WRITE         0x01
+#define GUP_FLAGS_FORCE         0x02
 
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                      unsigned long start, int len, int flags,
diff --git a/mm/memory.c b/mm/memory.c
index 3cbeaaba5642..4b5200f5f35a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1217,8 +1217,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
         unsigned int vm_flags = 0;
         int write = !!(flags & GUP_FLAGS_WRITE);
         int force = !!(flags & GUP_FLAGS_FORCE);
-        int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
-        int ignore_sigkill = !!(flags & GUP_FLAGS_IGNORE_SIGKILL);
 
         if (nr_pages <= 0)
                 return 0;
@@ -1244,7 +1242,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                         pte_t *pte;
 
                         /* user gate pages are read-only */
-                        if (!ignore && write)
+                        if (write)
                                 return i ? : -EFAULT;
                         if (pg > TASK_SIZE)
                                 pgd = pgd_offset_k(pg);
@@ -1278,7 +1276,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 
                 if (!vma ||
                     (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
-                    (!ignore && !(vm_flags & vma->vm_flags)))
+                    !(vm_flags & vma->vm_flags))
                         return i ? : -EFAULT;
 
                 if (is_vm_hugetlb_page(vma)) {
@@ -1298,13 +1296,9 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 
                         /*
                          * If we have a pending SIGKILL, don't keep faulting
-                         * pages and potentially allocating memory, unless
-                         * current is handling munlock--e.g., on exit. In
-                         * that case, we are not allocating memory.  Rather,
-                         * we're only unlocking already resident/mapped pages.
+                         * pages and potentially allocating memory.
                          */
-                        if (unlikely(!ignore_sigkill &&
-                                        fatal_signal_pending(current)))
+                        if (unlikely(fatal_signal_pending(current)))
                                 return i ? i : -ERESTARTSYS;
 
                         if (write)
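One idiom worth noting in the hunks above: "return i ? : -EFAULT;" uses GCC's conditional expression with an omitted middle operand, so it is equivalent to:

    return i ? i : -EFAULT;  /* pages already handled, or -EFAULT if none were */

In other words, the caller gets the count of pages processed so far, and an error only when nothing was done.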
diff --git a/mm/nommu.c b/mm/nommu.c
index 3b90086e85a2..386443e9d2c6 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -176,7 +176,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
         int i;
         int write = !!(flags & GUP_FLAGS_WRITE);
         int force = !!(flags & GUP_FLAGS_FORCE);
-        int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
 
         /* calculate required read or write permissions.
          * - if 'force' is set, we only require the "MAY" flags.
@@ -190,8 +189,8 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                         goto finish_or_fault;
 
                 /* protect what we can, including chardevs */
-                if (vma->vm_flags & (VM_IO | VM_PFNMAP) ||
-                    (!ignore && !(vm_flags & vma->vm_flags)))
+                if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
+                    !(vm_flags & vma->vm_flags))
                         goto finish_or_fault;
 
                 if (pages) {
@@ -210,7 +209,6 @@ finish_or_fault:
         return i ? : -EFAULT;
 }
 
-
 /*
  * get a list of pages in an address range belonging to the specified process
  * and indicate the VMA that covers each page
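The !(vm_flags & vma->vm_flags) test that the removed ignore flag used to bypass compares the VMA's permissions against a required-permission mask computed near the top of __get_user_pages(), per the "calculate required read or write permissions" comment in the nommu.c hunk above. A sketch of the shape of that computation (illustrative, assuming the usual write/force encoding rather than quoting the tree):

    /* require read or write access, depending on what the caller asked for ... */
    vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
    /* ... but if 'force' is set, only the VM_MAY* bits need to be present */
    vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

With the ignore flag gone, every caller is subject to this check; per the commit message, the munlock path no longer needs the exemption because it no longer relies on __get_user_pages() for this (that change is in the parent commit).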