Diffstat (limited to 'mm')
-rw-r--r--   mm/filemap.c    |  23
-rw-r--r--   mm/hugetlb.c    |  13
-rw-r--r--   mm/mempolicy.c  |   7
-rw-r--r--   mm/slub.c       |  12
4 files changed, 35 insertions, 20 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 5c74b68935ac..df343d1e6345 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -28,7 +28,6 @@
 #include <linux/backing-dev.h>
 #include <linux/pagevec.h>
 #include <linux/blkdev.h>
-#include <linux/backing-dev.h>
 #include <linux/security.h>
 #include <linux/syscalls.h>
 #include <linux/cpuset.h>
@@ -1743,21 +1742,27 @@ size_t iov_iter_copy_from_user(struct page *page,
 }
 EXPORT_SYMBOL(iov_iter_copy_from_user);
 
-static void __iov_iter_advance_iov(struct iov_iter *i, size_t bytes)
+void iov_iter_advance(struct iov_iter *i, size_t bytes)
 {
+        BUG_ON(i->count < bytes);
+
         if (likely(i->nr_segs == 1)) {
                 i->iov_offset += bytes;
+                i->count -= bytes;
         } else {
                 const struct iovec *iov = i->iov;
                 size_t base = i->iov_offset;
 
                 /*
                  * The !iov->iov_len check ensures we skip over unlikely
-                 * zero-length segments.
+                 * zero-length segments (without overrunning the iovec).
                  */
-                while (bytes || !iov->iov_len) {
-                        int copy = min(bytes, iov->iov_len - base);
+                while (bytes || unlikely(!iov->iov_len && i->count)) {
+                        int copy;
 
+                        copy = min(bytes, iov->iov_len - base);
+                        BUG_ON(!i->count || i->count < copy);
+                        i->count -= copy;
                         bytes -= copy;
                         base += copy;
                         if (iov->iov_len == base) {
@@ -1769,14 +1774,6 @@ static void __iov_iter_advance_iov(struct iov_iter *i, size_t bytes)
                 i->iov_offset = base;
         }
 }
-
-void iov_iter_advance(struct iov_iter *i, size_t bytes)
-{
-        BUG_ON(i->count < bytes);
-
-        __iov_iter_advance_iov(i, bytes);
-        i->count -= bytes;
-}
 EXPORT_SYMBOL(iov_iter_advance);
 
 /*
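The filemap change merges the old two-step advance (walk the segments in __iov_iter_advance_iov(), then adjust i->count separately) into one function that decrements i->count in lockstep with the walk, and uses the remaining count to bound the zero-length-segment skip loop so it cannot run past the end of the iovec array. A minimal userspace model of the new loop (stand-in structs and assert() in place of the kernel's struct iov_iter and BUG_ON; not kernel code):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct iovec_m { size_t iov_len; };      /* stand-in for struct iovec */

struct iter_m {                          /* stand-in for struct iov_iter */
        const struct iovec_m *iov;
        unsigned long nr_segs;
        size_t iov_offset;
        size_t count;
};

/*
 * Model of the merged iov_iter_advance(): count is decremented in step
 * with the segment walk, and zero-length segments are only skipped while
 * data remains (the "&& i->count" guard keeps the walk inside the array).
 */
static void advance(struct iter_m *i, size_t bytes)
{
        assert(i->count >= bytes);       /* BUG_ON(i->count < bytes) */

        if (i->nr_segs == 1) {
                i->iov_offset += bytes;
                i->count -= bytes;
                return;
        }

        const struct iovec_m *iov = i->iov;
        size_t base = i->iov_offset;

        while (bytes || (!iov->iov_len && i->count)) {
                size_t copy = bytes < iov->iov_len - base ?
                              bytes : iov->iov_len - base;

                assert(i->count && i->count >= copy);
                i->count -= copy;
                bytes -= copy;
                base += copy;
                if (iov->iov_len == base) {
                        iov++;
                        base = 0;
                }
        }
        i->iov = iov;
        i->iov_offset = base;
}

int main(void)
{
        const struct iovec_m segs[] = { {4}, {0}, {8} };
        struct iter_m it = { segs, 3, 0, 12 };

        advance(&it, 6);   /* crosses segment 0 and the empty segment 1 */
        printf("offset=%zu count=%zu\n", it.iov_offset, it.count);
        return 0;
}

Running it prints offset=2 count=6: the walk crossed the 4-byte segment and the empty one in a single call while keeping count in lockstep with the position.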
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index dcacc811e70e..74c1b6b0b37b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -286,6 +286,12 @@ static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
 
         spin_lock(&hugetlb_lock);
         if (page) {
+                /*
+                 * This page is now managed by the hugetlb allocator and has
+                 * no users -- drop the buddy allocator's reference.
+                 */
+                put_page_testzero(page);
+                VM_BUG_ON(page_count(page));
                 nid = page_to_nid(page);
                 set_compound_page_dtor(page, free_huge_page);
                 /*
@@ -369,13 +375,14 @@ free:
                         enqueue_huge_page(page);
                 else {
                         /*
-                         * Decrement the refcount and free the page using its
-                         * destructor. This must be done with hugetlb_lock
+                         * The page has a reference count of zero already, so
+                         * call free_huge_page directly instead of using
+                         * put_page. This must be done with hugetlb_lock
                          * unlocked which is safe because free_huge_page takes
                          * hugetlb_lock before deciding how to free the page.
                          */
                         spin_unlock(&hugetlb_lock);
-                        put_page(page);
+                        free_huge_page(page);
                         spin_lock(&hugetlb_lock);
                 }
         }
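The two hugetlb hunks are two halves of one ownership rule: a fresh compound page arrives from the buddy allocator with a reference count of one; once the hugetlb pool adopts it, that reference is dropped to zero, so pool pages must later be freed by calling free_huge_page() directly rather than via put_page(), which would underflow the count. A toy userspace sketch of that handoff (illustrative names and C11 atomics; not the kernel API):

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

struct toy_page { atomic_int refcount; };   /* stand-in for struct page */

/* The buddy allocator hands pages out with one reference held. */
static void buddy_alloc(struct toy_page *p)
{
        atomic_store(&p->refcount, 1);
}

/*
 * Pool adoption, mirroring the first hunk: drop the allocator's
 * reference so the pool owns the page outright, and insist the count
 * is now zero (put_page_testzero + VM_BUG_ON(page_count(page))).
 */
static void pool_adopt(struct toy_page *p)
{
        int newcount = atomic_fetch_sub(&p->refcount, 1) - 1;
        assert(newcount == 0);
}

/*
 * Freeing a pool page, mirroring the second hunk: the count is already
 * zero, so the destructor is called directly; a put_page()-style
 * decrement-then-free would underflow the count.
 */
static void pool_free(struct toy_page *p)
{
        assert(atomic_load(&p->refcount) == 0);
        printf("page handed back to the buddy allocator\n");
}

int main(void)
{
        struct toy_page page;

        buddy_alloc(&page);
        pool_adopt(&page);   /* as in alloc_buddy_huge_page() */
        pool_free(&page);    /* as in calling free_huge_page() directly */
        return 0;
}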
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 6c7ba1a63d23..3c3601121509 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1296,7 +1296,9 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
                 unsigned nid;
 
                 nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
-                __mpol_free(pol);                /* finished with pol */
+                if (unlikely(pol != &default_policy &&
+                                pol != current->mempolicy))
+                        __mpol_free(pol);        /* finished with pol */
                 return NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_flags);
         }
 
@@ -1360,6 +1362,9 @@ alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
                 unsigned nid;
 
                 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
+                if (unlikely(pol != &default_policy &&
+                                pol != current->mempolicy))
+                        __mpol_free(pol);        /* finished with pol */
                 return alloc_page_interleave(gfp, 0, nid);
         }
         zl = zonelist_policy(gfp, pol);
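Both mempolicy hunks apply one pattern in two places: the policy handed back by the lookup may be the static default_policy or the task's own mempolicy, both of which are merely borrowed, or it may come from a shared (VMA) policy lookup that took a reference which must now be dropped. Freeing the borrowed pointers unconditionally was the bug. A toy sketch of that conditional-release rule (hypothetical names; not the kernel's struct mempolicy API):

#include <assert.h>
#include <stdio.h>

struct toy_pol { int ref_held; };   /* stand-in for struct mempolicy */

static struct toy_pol default_pol = { 0 };  /* static object, never freed */
static struct toy_pol task_pol    = { 0 };  /* owned by the task, borrowed here */

static void toy_free(struct toy_pol *p)
{
        assert(p->ref_held);   /* freeing a borrowed policy is the bug fixed above */
        printf("policy reference dropped\n");
}

/*
 * Mirrors the guard added in huge_zonelist() and alloc_page_vma():
 * release the policy only if the lookup took its own reference on it.
 */
static void put_policy(struct toy_pol *pol)
{
        if (pol != &default_pol && pol != &task_pol)
                toy_free(pol);   /* finished with pol */
}

int main(void)
{
        struct toy_pol shared = { 1 };  /* e.g. found via a shared/VMA policy */

        put_policy(&default_pol);  /* no-op: borrowed */
        put_policy(&task_pol);     /* no-op: borrowed */
        put_policy(&shared);       /* dropped: the lookup owned this reference */
        return 0;
}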
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1536,9 +1536,15 @@ new_slab:
         * That is only possible if certain conditions are met that are being
         * checked when a slab is created.
         */
-        if (!(gfpflags & __GFP_NORETRY) && (s->flags & __PAGE_ALLOC_FALLBACK))
-                return kmalloc_large(s->objsize, gfpflags);
-
+        if (!(gfpflags & __GFP_NORETRY) &&
+                        (s->flags & __PAGE_ALLOC_FALLBACK)) {
+                if (gfpflags & __GFP_WAIT)
+                        local_irq_enable();
+                object = kmalloc_large(s->objsize, gfpflags);
+                if (gfpflags & __GFP_WAIT)
+                        local_irq_disable();
+                return object;
+        }
         return NULL;
 debug:
         if (!alloc_debug_processing(s, c->page, object, addr))
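The slub hunk fixes a sleep-with-interrupts-off hazard: the allocation slow path runs with local interrupts disabled, but kmalloc_large() goes to the page allocator and may sleep when __GFP_WAIT is set, so interrupts are re-enabled around the call and disabled again before returning. A toy userspace model of that bracket (a plain flag stands in for the CPU interrupt state; illustrative names):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the state toggled by local_irq_enable()/local_irq_disable(). */
static bool irqs_on;
static void irq_enable(void)  { irqs_on = true; }
static void irq_disable(void) { irqs_on = false; }

/* Stand-in for kmalloc_large(): sleeping is only legal with irqs enabled. */
static void *large_alloc(size_t size, bool may_wait)
{
        if (may_wait && !irqs_on) {
                fprintf(stderr, "bug: sleeping allocation with irqs disabled\n");
                abort();
        }
        return malloc(size);
}

/*
 * Mirrors the fixed fallback path: entered with interrupts disabled,
 * it re-enables them around the potentially sleeping allocation when
 * the caller allows waiting, then restores the disabled state.
 */
static void *fallback_alloc(size_t size, bool may_wait)
{
        void *object;

        if (may_wait)
                irq_enable();
        object = large_alloc(size, may_wait);
        if (may_wait)
                irq_disable();
        return object;
}

int main(void)
{
        irq_disable();   /* the real slow path runs with irqs off */

        void *p = fallback_alloc(1 << 20, true);
        printf("fallback allocation %s\n", p ? "succeeded" : "failed");
        free(p);
        return 0;
}

The __GFP_WAIT test keeps atomic callers on the irqs-off path: only allocations that are allowed to sleep pay for the enable/disable bracket.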