author	David S. Miller <davem@davemloft.net>	2011-03-04 00:27:42 -0500
committer	David S. Miller <davem@davemloft.net>	2011-03-04 00:27:42 -0500
commit	0a0e9ae1bd788bc19adc4d4ae08c98b233697402 (patch)
tree	13825eeb5bbeae27d66e95f12168eff4b60701ab /mm
parent	01a16b21d6adf992aa863186c3c4e561a57c1714 (diff)
parent	b65a0e0c84cf489bfa00d6aa6c48abc5a237100f (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
drivers/net/bnx2x/bnx2x.h
Diffstat (limited to 'mm')
-rw-r--r--	mm/memory.c	2
-rw-r--r--	mm/mempolicy.c	2
-rw-r--r--	mm/migrate.c	6
-rw-r--r--	mm/mremap.c	4
-rw-r--r--	mm/page_alloc.c	5
-rw-r--r--	mm/swapfile.c	2
-rw-r--r--	mm/truncate.c	2
-rw-r--r--	mm/vmscan.c	32
8 files changed, 34 insertions, 21 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 8e8c18324863..5823698c2b71 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2648,6 +2648,7 @@ void unmap_mapping_range(struct address_space *mapping,
 	details.last_index = ULONG_MAX;
 	details.i_mmap_lock = &mapping->i_mmap_lock;
 
+	mutex_lock(&mapping->unmap_mutex);
 	spin_lock(&mapping->i_mmap_lock);
 
 	/* Protect against endless unmapping loops */
@@ -2664,6 +2665,7 @@ void unmap_mapping_range(struct address_space *mapping,
 	if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
 		unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
 	spin_unlock(&mapping->i_mmap_lock);
+	mutex_unlock(&mapping->unmap_mutex);
 }
 EXPORT_SYMBOL(unmap_mapping_range);
 
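The two hunks above take a new per-mapping mutex around the whole of unmap_mapping_range(), so only one unmapper at a time walks the mapping. A loosely analogous userspace sketch of the pattern, an outer mutex wrapping an inner spinlock-protected walk (pthread names and the toy struct are illustrative, not the kernel API):

#include <pthread.h>

/* Toy stand-in for the pattern the hunks introduce: an outer mutex
 * that admits one unmapper at a time, wrapping an inner spinlock
 * that protects the mapping's data structures. */
struct toy_mapping {
	pthread_mutex_t unmap_mutex;
	pthread_spinlock_t i_mmap_lock;
};

static void toy_unmap_range(struct toy_mapping *m)
{
	pthread_mutex_lock(&m->unmap_mutex);   /* serialize unmappers */
	pthread_spin_lock(&m->i_mmap_lock);
	/* ... walk the mapping; the walk may drop and retake the
	 * spinlock, so without the outer mutex two unmappers could
	 * keep restarting each other's walk ... */
	pthread_spin_unlock(&m->i_mmap_lock);
	pthread_mutex_unlock(&m->unmap_mutex);
}

int main(void)
{
	struct toy_mapping m;
	pthread_mutex_init(&m.unmap_mutex, NULL);
	pthread_spin_init(&m.i_mmap_lock, PTHREAD_PROCESS_PRIVATE);
	toy_unmap_range(&m);
	return 0;
}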
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 368fc9d23610..49355a970be2 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1830,7 +1830,7 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 	if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
 		unsigned nid;
 
-		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
+		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
 		mpol_cond_put(pol);
 		page = alloc_page_interleave(gfp, order, nid);
 		put_mems_allowed();
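The one-line change above makes interleaved allocations of order > 0 pick a node from the order-aligned region rather than the base 4 KiB page, so the whole higher-order page hashes to a single node. A toy model of just the arithmetic (simplified: the real interleave_nid() works on vma page offsets and a nodemask):

#include <stdio.h>

#define PAGE_SHIFT 12
#define NR_NODES 4

/* Pick a node by shifting the address; illustrative only. */
static int toy_nid(unsigned long addr, int shift)
{
	return (int)((addr >> shift) % NR_NODES);
}

int main(void)
{
	int order = 9; /* a 2 MiB hugepage on 4 KiB base pages */
	unsigned long base = 0x40000000UL;

	/* Old: offsets inside one hugepage hash to different nodes. */
	printf("old: %d %d\n", toy_nid(base, PAGE_SHIFT),
	       toy_nid(base + 4096, PAGE_SHIFT));
	/* New: the whole 2 MiB region hashes to one node. */
	printf("new: %d %d\n", toy_nid(base, PAGE_SHIFT + order),
	       toy_nid(base + 4096, PAGE_SHIFT + order));
	return 0;
}

Run, this prints "old: 0 1" but "new: 0 0", showing the node choice becoming stable across the region.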
diff --git a/mm/migrate.c b/mm/migrate.c
index 766115253807..352de555626c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1287,14 +1287,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
 		return -EPERM;
 
 	/* Find the mm_struct */
-	read_lock(&tasklist_lock);
+	rcu_read_lock();
 	task = pid ? find_task_by_vpid(pid) : current;
 	if (!task) {
-		read_unlock(&tasklist_lock);
+		rcu_read_unlock();
 		return -ESRCH;
 	}
 	mm = get_task_mm(task);
-	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
 
 	if (!mm)
 		return -EINVAL;
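The hunk above replaces the tasklist_lock read lock with an RCU read-side section for the task lookup. That is sufficient because get_task_mm() takes its own reference on the mm before the protection is dropped. A toy userspace sketch of that pattern, look up under protection, take a reference, then drop the protection (a plain mutex stands in for rcu_read_lock(); toy names only):

#include <pthread.h>
#include <stdio.h>

struct toy_mm { int refcount; };

static pthread_mutex_t lookup_lock = PTHREAD_MUTEX_INITIALIZER;
static struct toy_mm the_mm = { .refcount = 1 };
static struct toy_mm *table_slot = &the_mm;  /* pretend task table */

static struct toy_mm *toy_get_task_mm(void)
{
	struct toy_mm *mm;

	pthread_mutex_lock(&lookup_lock);    /* like rcu_read_lock()      */
	mm = table_slot;                     /* like find_task_by_vpid()  */
	if (mm)
		mm->refcount++;              /* like get_task_mm()        */
	pthread_mutex_unlock(&lookup_lock);  /* like rcu_read_unlock()    */
	return mm;                           /* safe: we hold our own ref */
}

int main(void)
{
	struct toy_mm *mm = toy_get_task_mm();
	printf("refcount after lookup: %d\n", mm ? mm->refcount : 0);
	return 0;
}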
diff --git a/mm/mremap.c b/mm/mremap.c
index 9925b6391b80..1de98d492ddc 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -94,9 +94,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 		 */
 		mapping = vma->vm_file->f_mapping;
 		spin_lock(&mapping->i_mmap_lock);
-		if (new_vma->vm_truncate_count &&
-		    new_vma->vm_truncate_count != vma->vm_truncate_count)
-			new_vma->vm_truncate_count = 0;
+		new_vma->vm_truncate_count = 0;
 	}
 
 	/*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a873e61e312e..cdef1d4b4e47 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5376,10 +5376,9 @@ __count_immobile_pages(struct zone *zone, struct page *page, int count)
 	for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
 		unsigned long check = pfn + iter;
 
-		if (!pfn_valid_within(check)) {
-			iter++;
+		if (!pfn_valid_within(check))
 			continue;
-		}
+
 		page = pfn_to_page(check);
 		if (!page_count(page)) {
 			if (PageBuddy(page))
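The hunk above removes a double advance of the loop counter: the for statement already increments iter, so the extra iter++ before continue also skipped the pfn after every invalid one. A minimal demo of that off-by-one:

#include <stdio.h>

int main(void)
{
	/* With the buggy extra increment, element 3 is never visited. */
	for (int iter = 0; iter < 6; iter++) {
		if (iter == 2) {
			iter++;      /* buggy extra increment */
			continue;    /* the for loop increments again */
		}
		printf("visited %d\n", iter);
	}
	return 0;
}

This prints 0, 1, 4, 5: element 2 is skipped intentionally, but 3 is lost to the double increment.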
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 07a458d72fa8..0341c5700e34 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1940,7 +1940,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 
 	error = -EINVAL;
 	if (S_ISBLK(inode->i_mode)) {
-		bdev = I_BDEV(inode);
+		bdev = bdgrab(I_BDEV(inode));
 		error = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL,
 				   sys_swapon);
 		if (error < 0) {
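The change above takes an extra reference with bdgrab() before handing the block device to blkdev_get(), which consumes a reference even on failure. A toy sketch of that convention, grab your own reference before a consuming call (toy names, not the kernel API):

#include <stdio.h>

struct toy_dev { int refcount; };

static void toy_put(struct toy_dev *d) { d->refcount--; }
static struct toy_dev *toy_grab(struct toy_dev *d) { d->refcount++; return d; }

/* Consuming open: drops one reference on error, like blkdev_get(). */
static int toy_open(struct toy_dev *d, int fail)
{
	if (fail) {
		toy_put(d);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct toy_dev dev = { .refcount = 1 };  /* the inode's reference */

	/* Like bdev = bdgrab(I_BDEV(inode)): take our own reference
	 * before the consuming call, so failure cannot drop the last one. */
	if (toy_open(toy_grab(&dev), 1) < 0)
		printf("open failed, original ref intact: %d\n", dev.refcount);
	return 0;
}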
diff --git a/mm/truncate.c b/mm/truncate.c
index 49feb46e77b8..d64296be00d3 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -225,6 +225,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 	next = start;
 	while (next <= end &&
 	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+		mem_cgroup_uncharge_start();
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			struct page *page = pvec.pages[i];
 			pgoff_t page_index = page->index;
@@ -247,6 +248,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 			unlock_page(page);
 		}
 		pagevec_release(&pvec);
+		mem_cgroup_uncharge_end();
 		cond_resched();
 	}
 
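The two added calls above bracket the per-pagevec loop so memcg uncharges accumulate and are flushed once per batch instead of once per page. A toy sketch of that begin/flush batching pattern (illustrative stand-ins, not the kernel API):

#include <stdio.h>

static int batching, pending;

static void batch_start(void) { batching = 1; }

static void uncharge_one(void)
{
	if (batching)
		pending++;           /* defer the expensive update */
	else
		printf("flush 1\n"); /* immediate, per-page cost */
}

static void batch_end(void)
{
	batching = 0;
	printf("flushed %d pages at once\n", pending);
	pending = 0;
}

int main(void)
{
	batch_start();
	for (int i = 0; i < 14; i++)  /* roughly one pagevec of pages */
		uncharge_one();
	batch_end();
	return 0;
}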
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 17497d0cd8b9..6771ea70bfe7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1841,16 +1841,28 @@ static inline bool should_continue_reclaim(struct zone *zone,
 	if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION))
 		return false;
 
-	/*
-	 * If we failed to reclaim and have scanned the full list, stop.
-	 * NOTE: Checking just nr_reclaimed would exit reclaim/compaction far
-	 * faster but obviously would be less likely to succeed
-	 * allocation. If this is desirable, use GFP_REPEAT to decide
-	 * if both reclaimed and scanned should be checked or just
-	 * reclaimed
-	 */
-	if (!nr_reclaimed && !nr_scanned)
-		return false;
+	/* Consider stopping depending on scan and reclaim activity */
+	if (sc->gfp_mask & __GFP_REPEAT) {
+		/*
+		 * For __GFP_REPEAT allocations, stop reclaiming if the
+		 * full LRU list has been scanned and we are still failing
+		 * to reclaim pages. This full LRU scan is potentially
+		 * expensive but a __GFP_REPEAT caller really wants to succeed
+		 */
+		if (!nr_reclaimed && !nr_scanned)
+			return false;
+	} else {
+		/*
+		 * For non-__GFP_REPEAT allocations which can presumably
+		 * fail without consequence, stop if we failed to reclaim
+		 * any pages from the last SWAP_CLUSTER_MAX number of
+		 * pages that were scanned. This will return to the
+		 * caller faster at the risk reclaim/compaction and
+		 * the resulting allocation attempt fails
+		 */
+		if (!nr_reclaimed)
+			return false;
+	}
 
 	/*
 	 * If we have not reclaimed enough pages for compaction and the
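The rewritten block above splits the stop condition by __GFP_REPEAT: repeat-capable callers only give up once a full scan reclaims nothing, while ordinary callers stop as soon as a scan batch reclaims nothing. A standalone model of just that decision (simplified; the real function also checks the reclaim mode and compaction readiness):

#include <stdbool.h>
#include <stdio.h>

static bool toy_should_continue(bool gfp_repeat,
				unsigned long nr_reclaimed,
				unsigned long nr_scanned)
{
	if (gfp_repeat) {
		/* Keep going unless a full scan reclaimed nothing. */
		if (!nr_reclaimed && !nr_scanned)
			return false;
	} else {
		/* Bail as soon as a scan batch reclaimed nothing. */
		if (!nr_reclaimed)
			return false;
	}
	return true;
}

int main(void)
{
	/* A non-repeat caller stops after one fruitless batch... */
	printf("%d\n", toy_should_continue(false, 0, 32)); /* 0: stop */
	/* ...while a __GFP_REPEAT caller keeps scanning the LRU. */
	printf("%d\n", toy_should_continue(true, 0, 32));  /* 1: continue */
	return 0;
}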