Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c  19
-rw-r--r--  mm/slob.c      2
-rw-r--r--  mm/swap.c     32
3 files changed, 26 insertions, 27 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ceb3ebb3c399..67f29516662a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -107,7 +107,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr)
 	set_page_count(page, 1);
 	page[1].mapping = (void *)free_huge_page;
 	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i)
-		clear_highpage(&page[i]);
+		clear_user_highpage(&page[i], addr);
 	return page;
 }
 
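The switch from clear_highpage() to clear_user_highpage() matters on architectures with virtually indexed caches: passing the user virtual address lets the arch clear each page through an alias of the matching cache colour, so the user mapping sees the zeroed data without extra flushing. On most architectures the generic fallback simply wraps clear_user_page(); a sketch along the lines of the include/linux/highmem.h helper of this era:

	static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
	{
		void *kaddr = kmap_atomic(page, KM_USER0);	/* temporary kernel mapping */
		clear_user_page(kaddr, vaddr, page);		/* arch hook: may pick a colour-matched alias */
		kunmap_atomic(kaddr, KM_USER0);
	}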
@@ -391,12 +391,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	if (!new_page) {
 		page_cache_release(old_page);
-
-		/* Logically this is OOM, not a SIGBUS, but an OOM
-		 * could cause the kernel to go killing other
-		 * processes which won't help the hugepage situation
-		 * at all (?) */
-		return VM_FAULT_SIGBUS;
+		return VM_FAULT_OOM;
 	}
 
 	spin_unlock(&mm->page_table_lock);
@@ -444,15 +439,7 @@ retry:
 	page = alloc_huge_page(vma, address);
 	if (!page) {
 		hugetlb_put_quota(mapping);
-		/*
-		 * No huge pages available. So this is an OOM
-		 * condition but we do not want to trigger the OOM
-		 * killer, so we return VM_FAULT_SIGBUS.
-		 *
-		 * A program using hugepages may fault with Bus Error
-		 * because no huge pages are available in the cpuset, per
-		 * memory policy or because all are in use!
-		 */
+		ret = VM_FAULT_OOM;
 		goto out;
 	}
 
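Both hunks replace VM_FAULT_SIGBUS with VM_FAULT_OOM when no huge page can be allocated, deleting the old comments that argued for SIGBUS. The practical difference shows up in the arch fault handlers, which dispatch on handle_mm_fault()'s return value: VM_FAULT_OOM sends the task down the out-of-memory path instead of raising a bus error. Roughly, as a sketch modeled on the i386 fault handler of this era:

	switch (handle_mm_fault(mm, vma, address, write)) {
	case VM_FAULT_MINOR:
		tsk->min_flt++;		/* page was resolved without I/O */
		break;
	case VM_FAULT_MAJOR:
		tsk->maj_flt++;		/* real work was needed (I/O, allocation) */
		break;
	case VM_FAULT_SIGBUS:
		goto do_sigbus;		/* deliver SIGBUS to the task */
	case VM_FAULT_OOM:
		goto out_of_memory;	/* invoke the OOM machinery */
	default:
		BUG();
	}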
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -336,7 +336,7 @@ EXPORT_SYMBOL(slab_reclaim_pages);
 
 #ifdef CONFIG_SMP
 
-void *__alloc_percpu(size_t size, size_t align)
+void *__alloc_percpu(size_t size)
 {
 	int i;
 	struct percpu_data *pdata = kmalloc(sizeof (*pdata), GFP_KERNEL);
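The mm/slob.c hunk mirrors an interface change: __alloc_percpu() loses its align argument, which the slab and slob implementations did not use (the underlying kmalloc() decides alignment). Callers normally go through the alloc_percpu() macro rather than calling __alloc_percpu() directly; a hypothetical use, assuming the include/linux/percpu.h wrapper of this era:

	struct my_stats {	/* hypothetical example type */
		unsigned long events;
	};

	/* one object per possible CPU; NULL on allocation failure */
	struct my_stats *stats = alloc_percpu(struct my_stats);

	if (stats)
		per_cpu_ptr(stats, smp_processor_id())->events++;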
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -34,19 +34,22 @@
 /* How many pages do we try to swap or page in/out together? */
 int page_cluster;
 
-void put_page(struct page *page)
+static void put_compound_page(struct page *page)
 {
-	if (unlikely(PageCompound(page))) {
-		page = (struct page *)page_private(page);
-		if (put_page_testzero(page)) {
-			void (*dtor)(struct page *page);
+	page = (struct page *)page_private(page);
+	if (put_page_testzero(page)) {
+		void (*dtor)(struct page *page);
 
-			dtor = (void (*)(struct page *))page[1].mapping;
-			(*dtor)(page);
-		}
-		return;
+		dtor = (void (*)(struct page *))page[1].mapping;
+		(*dtor)(page);
 	}
-	if (put_page_testzero(page))
+}
+
+void put_page(struct page *page)
+{
+	if (unlikely(PageCompound(page)))
+		put_compound_page(page);
+	else if (put_page_testzero(page))
 		__page_cache_release(page);
 }
 EXPORT_SYMBOL(put_page);
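put_page() is refactored so the compound-page slow path lives in its own function, keeping the common path short. The bookkeeping it relies on is established when a compound page is allocated: every constituent page's page_private() points back at the head page, and page[1].mapping is overloaded to hold a destructor pointer, which is exactly what alloc_huge_page() sets to free_huge_page in the first hunk above. A rough sketch of the setup side, along the lines of prep_compound_page() in mm/page_alloc.c of this era:

	static void prep_compound_page(struct page *page, unsigned long order)
	{
		int i;
		int nr_pages = 1 << order;

		page[1].mapping = NULL;		/* may later hold a destructor, cast to a pointer */
		page[1].index = order;		/* remember the allocation order */
		for (i = 0; i < nr_pages; i++) {
			struct page *p = page + i;

			SetPageCompound(p);
			/* every page, tail or head, points back at the head page */
			set_page_private(p, (unsigned long)page);
		}
	}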
@@ -244,6 +247,15 @@ void release_pages(struct page **pages, int nr, int cold)
 		struct page *page = pages[i];
 		struct zone *pagezone;
 
+		if (unlikely(PageCompound(page))) {
+			if (zone) {
+				spin_unlock_irq(&zone->lru_lock);
+				zone = NULL;
+			}
+			put_compound_page(page);
+			continue;
+		}
+
 		if (!put_page_testzero(page))
 			continue;
 
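release_pages() batches pages under zone->lru_lock with interrupts disabled, so the new branch drops that lock before handing a compound page to put_compound_page(): compound pages are never on the zone LRU, and the destructor is an arbitrary callback that should not run under the LRU lock. For hugetlb, for instance, the destructor takes its own lock; roughly, a sketch of free_huge_page() from mm/hugetlb.c of this era:

	void free_huge_page(struct page *page)
	{
		BUG_ON(page_count(page));

		INIT_LIST_HEAD(&page->lru);
		page[1].mapping = NULL;		/* clear the destructor overload */

		spin_lock(&hugetlb_lock);
		enqueue_huge_page(page);	/* return it to the free huge page pool */
		spin_unlock(&hugetlb_lock);
	}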