author	David S. Miller <davem@davemloft.net>	2008-04-03 17:33:42 -0400
committer	David S. Miller <davem@davemloft.net>	2008-04-03 17:33:42 -0400
commit	3bb5da3837cc1aa17736b05139c9a22c3794851a (patch)
tree	c92d5684a866542b1cb20641607ac1643ce03a47 /mm
parent	7feb49c82a74bc7c091b8ab2a3f96baa33d08ece (diff)
parent	9597362d354f8655ece324b01d0c640a0e99c077 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
Diffstat (limited to 'mm')
 mm/bootmem.c        | 25
 mm/filemap.c        | 20
 mm/fremap.c         |  2
 mm/highmem.c        | 30
 mm/hugetlb.c        | 17
 mm/memcontrol.c     |  2
 mm/oom_kill.c       |  9
 mm/pagewalk.c       | 10
 mm/readahead.c      |  5
 mm/rmap.c           | 13
 mm/shmem.c          | 25
 mm/slab.c           |  9
 mm/slub.c           | 17
 mm/sparse-vmemmap.c |  8
 mm/swap.c           |  5
 mm/swap_state.c     |  2
 mm/tiny-shmem.c     | 10
 mm/vmalloc.c        |  6
 mm/vmscan.c         | 27
 19 files changed, 154 insertions(+), 88 deletions(-)
diff --git a/mm/bootmem.c b/mm/bootmem.c
index f6ff4337b424..2ccea700968f 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -125,6 +125,7 @@ static int __init reserve_bootmem_core(bootmem_data_t *bdata,
 	BUG_ON(!size);
 	BUG_ON(PFN_DOWN(addr) >= bdata->node_low_pfn);
 	BUG_ON(PFN_UP(addr + size) > bdata->node_low_pfn);
+	BUG_ON(addr < bdata->node_boot_start);
 
 	sidx = PFN_DOWN(addr - bdata->node_boot_start);
 	eidx = PFN_UP(addr + size - bdata->node_boot_start);
@@ -156,21 +157,31 @@ static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr,
 	unsigned long sidx, eidx;
 	unsigned long i;
 
+	BUG_ON(!size);
+
+	/* out range */
+	if (addr + size < bdata->node_boot_start ||
+	    PFN_DOWN(addr) > bdata->node_low_pfn)
+		return;
 	/*
 	 * round down end of usable mem, partially free pages are
 	 * considered reserved.
 	 */
-	BUG_ON(!size);
-	BUG_ON(PFN_DOWN(addr + size) > bdata->node_low_pfn);
 
-	if (addr < bdata->last_success)
+	if (addr >= bdata->node_boot_start && addr < bdata->last_success)
 		bdata->last_success = addr;
 
 	/*
-	 * Round up the beginning of the address.
+	 * Round up to index to the range.
 	 */
-	sidx = PFN_UP(addr) - PFN_DOWN(bdata->node_boot_start);
+	if (PFN_UP(addr) > PFN_DOWN(bdata->node_boot_start))
+		sidx = PFN_UP(addr) - PFN_DOWN(bdata->node_boot_start);
+	else
+		sidx = 0;
+
 	eidx = PFN_DOWN(addr + size - bdata->node_boot_start);
+	if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
+		eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
 
 	for (i = sidx; i < eidx; i++) {
 		if (unlikely(!test_and_clear_bit(i, bdata->node_bootmem_map)))
@@ -421,7 +432,9 @@ int __init reserve_bootmem(unsigned long addr, unsigned long size,
 
 void __init free_bootmem(unsigned long addr, unsigned long size)
 {
-	free_bootmem_core(NODE_DATA(0)->bdata, addr, size);
+	bootmem_data_t *bdata;
+	list_for_each_entry(bdata, &bdata_list, list)
+		free_bootmem_core(bdata, addr, size);
 }
 
 unsigned long __init free_all_bootmem(void)
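The clamping added to free_bootmem_core() is easiest to see in isolation. Below is a minimal userspace sketch of the new sidx/eidx computation; PAGE_SHIFT, the PFN macro definitions, and the sample values are assumptions for illustration, not part of the commit:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PFN_UP(x)   (((x) + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

int main(void)
{
	unsigned long node_boot_start = 0x100000; /* node begins at 1 MB */
	unsigned long node_low_pfn = 0x1000;      /* node ends at pfn 4096 */
	unsigned long addr = 0, size = 0x300000;  /* range starts below the node */
	unsigned long sidx, eidx;

	/* Round the start up; clamp to 0 when it precedes the node. */
	if (PFN_UP(addr) > PFN_DOWN(node_boot_start))
		sidx = PFN_UP(addr) - PFN_DOWN(node_boot_start);
	else
		sidx = 0;

	/* Round the end down; clamp to the node's last usable pfn. */
	eidx = PFN_DOWN(addr + size - node_boot_start);
	if (eidx > node_low_pfn - PFN_DOWN(node_boot_start))
		eidx = node_low_pfn - PFN_DOWN(node_boot_start);

	printf("clear bootmem map bits [%lu, %lu)\n", sidx, eidx);
	return 0;
}

With a range that starts below a node, sidx clamps to 0 instead of underflowing, which is what lets free_bootmem() now hand the same range to every node's bitmap.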
diff --git a/mm/filemap.c b/mm/filemap.c
index df343d1e6345..07e9d9258b48 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -343,7 +343,7 @@ int sync_page_range(struct inode *inode, struct address_space *mapping,
 EXPORT_SYMBOL(sync_page_range);
 
 /**
- * sync_page_range_nolock
+ * sync_page_range_nolock - write & wait on all pages in the passed range without locking
  * @inode: target inode
  * @mapping: target address_space
  * @pos: beginning offset in pages to write
@@ -611,7 +611,10 @@ int __lock_page_killable(struct page *page)
 					sync_page_killable, TASK_KILLABLE);
 }
 
-/*
+/**
+ * __lock_page_nosync - get a lock on the page, without calling sync_page()
+ * @page: the page to lock
+ *
  * Variant of lock_page that does not require the caller to hold a reference
  * on the page's mapping.
  */
@@ -1538,9 +1541,20 @@ repeat:
 	return page;
 }
 
-/*
+/**
+ * read_cache_page_async - read into page cache, fill it if needed
+ * @mapping: the page's address_space
+ * @index: the page index
+ * @filler: function to perform the read
+ * @data: destination for read data
+ *
  * Same as read_cache_page, but don't wait for page to become unlocked
  * after submitting it to the filler.
+ *
+ * Read into the page cache. If a page already exists, and PageUptodate() is
+ * not set, try to fill the page but don't wait for it to become unlocked.
+ *
+ * If the page does not get brought uptodate, return -EIO.
  */
 struct page *read_cache_page_async(struct address_space *mapping,
 				pgoff_t index,
diff --git a/mm/fremap.c b/mm/fremap.c
index 69a37c2bdf81..07a9c82ce1a3 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -113,7 +113,7 @@ static int populate_range(struct mm_struct *mm, struct vm_area_struct *vma,
 * mmap()/mremap() it does not create any new vmas. The new mappings are
 * also safe across swapout.
 *
- * NOTE: the 'prot' parameter right now is ignored (but must be zero),
+ * NOTE: the @prot parameter right now is ignored (but must be zero),
 * and the vma's default protection is used. Arbitrary protections
 * might be implemented in the future.
 */
diff --git a/mm/highmem.c b/mm/highmem.c
index 35d47733cde4..7da4a7b6af11 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -104,8 +104,9 @@ static void flush_all_zero_pkmaps(void)
 	flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
 }
 
-/* Flush all unused kmap mappings in order to remove stray
-   mappings. */
+/**
+ * kmap_flush_unused - flush all unused kmap mappings in order to remove stray mappings
+ */
 void kmap_flush_unused(void)
 {
 	spin_lock(&kmap_lock);
@@ -163,6 +164,14 @@ start:
 	return vaddr;
 }
 
+/**
+ * kmap_high - map a highmem page into memory
+ * @page: &struct page to map
+ *
+ * Returns the page's virtual memory address.
+ *
+ * We cannot call this from interrupts, as it may block.
+ */
 void *kmap_high(struct page *page)
 {
 	unsigned long vaddr;
@@ -170,8 +179,6 @@ void *kmap_high(struct page *page)
 	/*
 	 * For highmem pages, we can't trust "virtual" until
 	 * after we have the lock.
-	 *
-	 * We cannot call this from interrupts, as it may block
 	 */
 	spin_lock(&kmap_lock);
 	vaddr = (unsigned long)page_address(page);
@@ -185,6 +192,10 @@ void *kmap_high(struct page *page)
 
 EXPORT_SYMBOL(kmap_high);
 
+/**
+ * kunmap_high - unmap a highmem page
+ * @page: &struct page to unmap
+ */
 void kunmap_high(struct page *page)
 {
 	unsigned long vaddr;
@@ -259,6 +270,12 @@ static struct page_address_slot *page_slot(struct page *page)
 	return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
 }
 
+/**
+ * page_address - get the mapped virtual address of a page
+ * @page: &struct page to get the virtual address of
+ *
+ * Returns the page's virtual address.
+ */
 void *page_address(struct page *page)
 {
 	unsigned long flags;
@@ -288,6 +305,11 @@ done:
 
 EXPORT_SYMBOL(page_address);
 
+/**
+ * set_page_address - set a page's virtual address
+ * @page: &struct page to set
+ * @virtual: virtual address to use
+ */
 void set_page_address(struct page *page, void *virtual)
 {
 	unsigned long flags;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 74c1b6b0b37b..51c9e2c01640 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -401,12 +401,20 @@ static void return_unused_surplus_pages(unsigned long unused_resv_pages)
 	struct page *page;
 	unsigned long nr_pages;
 
+	/*
+	 * We want to release as many surplus pages as possible, spread
+	 * evenly across all nodes. Iterate across all nodes until we
+	 * can no longer free unreserved surplus pages. This occurs when
+	 * the nodes with surplus pages have no free pages.
+	 */
+	unsigned long remaining_iterations = num_online_nodes();
+
 	/* Uncommit the reservation */
 	resv_huge_pages -= unused_resv_pages;
 
 	nr_pages = min(unused_resv_pages, surplus_huge_pages);
 
-	while (nr_pages) {
+	while (remaining_iterations-- && nr_pages) {
 		nid = next_node(nid, node_online_map);
 		if (nid == MAX_NUMNODES)
 			nid = first_node(node_online_map);
@@ -424,6 +432,7 @@ static void return_unused_surplus_pages(unsigned long unused_resv_pages)
 			surplus_huge_pages--;
 			surplus_huge_pages_node[nid]--;
 			nr_pages--;
+			remaining_iterations = num_online_nodes();
 		}
 	}
 }
@@ -671,9 +680,11 @@ int hugetlb_report_node_meminfo(int nid, char *buf)
 {
 	return sprintf(buf,
 		"Node %d HugePages_Total: %5u\n"
-		"Node %d HugePages_Free: %5u\n",
+		"Node %d HugePages_Free: %5u\n"
+		"Node %d HugePages_Surp: %5u\n",
 		nid, nr_huge_pages_node[nid],
-		nid, free_huge_pages_node[nid]);
+		nid, free_huge_pages_node[nid],
+		nid, surplus_huge_pages_node[nid]);
 }
 
 /* Return the number pages of memory we physically have, in PAGE_SIZE units. */
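The interesting part of the hugetlb change is the termination guard: the loop round-robins across online nodes and stops only after a full pass frees nothing. A small userspace model of that rule (node count and per-node figures are hypothetical):

#include <stdio.h>

#define NR_NODES 4

static int surplus_free[NR_NODES] = { 2, 0, 1, 0 }; /* freeable surplus per node */

int main(void)
{
	unsigned long nr_pages = 5;         /* pages we would like to release */
	unsigned long remaining = NR_NODES; /* a full fruitless pass ends the loop */
	int nid = 0;

	while (remaining-- && nr_pages) {
		nid = (nid + 1) % NR_NODES; /* round-robin, like next_node() */
		if (surplus_free[nid] > 0) {
			surplus_free[nid]--;
			nr_pages--;
			remaining = NR_NODES; /* progress: re-arm the guard */
			printf("freed one surplus page on node %d\n", nid);
		}
	}
	printf("%lu page(s) had no free surplus anywhere\n", nr_pages);
	return 0;
}

Without the guard, a request larger than the freeable surplus would spin forever; with it, each successful free gives every node one more chance, and then the loop stops.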
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 8b9f6cae938e..9b648bd63451 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1079,7 +1079,7 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
 	 * Only thread group leaders are allowed to migrate, the mm_struct is
 	 * in effect owned by the leader
 	 */
-	if (p->tgid != p->pid)
+	if (!thread_group_leader(p))
 		goto out;
 
 	css_get(&mem->css);
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 44b2da11bf43..f255eda693b0 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -37,6 +37,7 @@ static DEFINE_SPINLOCK(zone_scan_mutex);
 * badness - calculate a numeric value for how bad this task has been
 * @p: task struct of which task we should calculate
 * @uptime: current uptime in seconds
+ * @mem: target memory controller
 *
 * The formula used is relatively simple and documented inline in the
 * function. The main rationale is that we want to select a good task
@@ -264,6 +265,9 @@ static struct task_struct *select_bad_process(unsigned long *ppoints,
 }
 
 /**
+ * dump_tasks - dump current memory state of all system tasks
+ * @mem: target memory controller
+ *
 * Dumps the current memory state of all system tasks, excluding kernel threads.
 * State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj
 * score, and name.
@@ -298,7 +302,7 @@ static void dump_tasks(const struct mem_cgroup *mem)
 	} while_each_thread(g, p);
 }
 
-/**
+/*
 * Send SIGKILL to the selected process irrespective of CAP_SYS_RAW_IO
 * flag though it's unlikely that we select a process with CAP_SYS_RAW_IO
 * set.
@@ -504,6 +508,9 @@ void clear_zonelist_oom(struct zonelist *zonelist)
 
 /**
 * out_of_memory - kill the "best" process when we run out of memory
+ * @zonelist: zonelist pointer
+ * @gfp_mask: memory allocation flags
+ * @order: amount of memory being requested as a power of 2
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index b4f27d22da91..1cf1417ef8b7 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -77,11 +77,11 @@ static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
 
 /**
 * walk_page_range - walk a memory map's page tables with a callback
- * @mm - memory map to walk
- * @addr - starting address
- * @end - ending address
- * @walk - set of callbacks to invoke for each level of the tree
- * @private - private data passed to the callback function
+ * @mm: memory map to walk
+ * @addr: starting address
+ * @end: ending address
+ * @walk: set of callbacks to invoke for each level of the tree
+ * @private: private data passed to the callback function
 *
 * Recursively walk the page table for the memory area in a VMA,
 * calling supplied callbacks. Callbacks are called in-order (first
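Most hunks in this merge are kernel-doc repairs like the one above: promoting plain openers to the double-star form and rewriting "@param - text" as "@param: text". For reference, a minimal comment of the shape the kernel-doc tool expects (illustrative only, not taken from the commit):

/**
 * example_fn - short one-line summary joined with " - "
 * @arg: parameters use "name: text", not "name - text"
 *
 * A longer description follows a bare " *" line. Only real kernel-doc
 * blocks start with the double-star opener; ordinary comments keep the
 * single-star opener.
 */
static int example_fn(int arg)
{
	return arg;
}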
diff --git a/mm/readahead.c b/mm/readahead.c
index c9c50ca1ec38..8762e8988972 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -443,9 +443,10 @@ EXPORT_SYMBOL_GPL(page_cache_sync_readahead);
 *            pagecache pages
 *
 * page_cache_async_ondemand() should be called when a page is used which
- * has the PG_readahead flag: this is a marker to suggest that the application
+ * has the PG_readahead flag; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
- * more pages. */
+ * more pages.
+ */
 void
 page_cache_async_readahead(struct address_space *mapping,
 			   struct file_ra_state *ra, struct file *filp,
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -335,6 +335,7 @@ static int page_referenced_anon(struct page *page,
 /**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
+ * @mem_cont: target memory controller
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag. This is done by following the page->mapping
@@ -402,6 +403,7 @@ static int page_referenced_file(struct page *page,
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
+ * @mem_cont: target memory controller
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
@@ -506,7 +508,7 @@ int page_mkclean(struct page *page)
 EXPORT_SYMBOL_GPL(page_mkclean);
 
 /**
- * page_set_anon_rmap - setup new anonymous rmap
+ * __page_set_anon_rmap - setup new anonymous rmap
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
@@ -530,7 +532,7 @@ static void __page_set_anon_rmap(struct page *page,
 }
 
 /**
- * page_set_anon_rmap - sanity check anonymous rmap addition
+ * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
@@ -583,7 +585,7 @@ void page_add_anon_rmap(struct page *page,
 	}
 }
 
-/*
+/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
@@ -623,6 +625,8 @@ void page_add_file_rmap(struct page *page)
 /**
 * page_dup_rmap - duplicate pte mapping to a page
 * @page: the page to add the mapping to
+ * @vma: the vm area being duplicated
+ * @address: the user virtual address mapped
 *
 * For copy_page_range only: minimal extract from page_add_file_rmap /
 * page_add_anon_rmap, avoiding unnecessary tests (already checked) so it's
@@ -642,6 +646,7 @@ void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long
 /**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
+ * @vma: the vm area in which the mapping is removed
 *
 * The caller needs to hold the pte lock.
 */
@@ -890,6 +895,7 @@ static int try_to_unmap_anon(struct page *page, int migration)
 /**
 * try_to_unmap_file - unmap file page using the object-based rmap method
 * @page: the page to unmap
+ * @migration: migration flag
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
@@ -986,6 +992,7 @@ out:
 /**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
+ * @migration: migration flag
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path. Caller must hold the page lock.
diff --git a/mm/shmem.c b/mm/shmem.c
index 3372bc579e89..f514dd392cd9 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -244,9 +244,8 @@ static void shmem_free_inode(struct super_block *sb)
 	}
 }
 
-/*
+/**
 * shmem_recalc_inode - recalculate the size of an inode
- *
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
@@ -270,9 +269,8 @@ static void shmem_recalc_inode(struct inode *inode)
 	}
 }
 
-/*
+/**
 * shmem_swp_entry - find the swap vector position in the info structure
- *
 * @info: info structure for the inode
 * @index: index of the page to find
 * @page: optional page to add to the structure. Has to be preset to
@@ -374,13 +372,13 @@ static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, uns
 	}
 }
 
-/*
+/**
 * shmem_swp_alloc - get the position of the swap entry for the page.
- * If it does not exist allocate the entry.
- *
 * @info: info structure for the inode
 * @index: index of the page to find
 * @sgp: check and recheck i_size? skip allocation?
+ *
+ * If the entry does not exist, allocate it.
 */
 static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
 {
@@ -440,9 +438,8 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long
 	return entry;
 }
 
-/*
+/**
 * shmem_free_swp - free some swap entries in a directory
- *
 * @dir: pointer to the directory
 * @edir: pointer after last entry of the directory
 * @punch_lock: pointer to spinlock when needed for the holepunch case
@@ -2022,7 +2019,7 @@ static const struct inode_operations shmem_symlink_inode_operations = {
 };
 
 #ifdef CONFIG_TMPFS_POSIX_ACL
-/**
+/*
 * Superblocks without xattr inode operations will get security.* xattr
 * support from the VFS "for free". As soon as we have any other xattrs
 * like ACLs, we also need to implement the security.* handlers at
@@ -2561,12 +2558,11 @@ out4:
 }
 module_init(init_tmpfs)
 
-/*
+/**
 * shmem_file_setup - get an unlinked file living in tmpfs
- *
 * @name: name for dentry (to be seen in /proc/<pid>/maps
 * @size: size to be set for the file
- *
+ * @flags: vm_flags
 */
 struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
 {
@@ -2621,9 +2617,8 @@ put_memory:
 	return ERR_PTR(error);
 }
 
-/*
+/**
 * shmem_zero_setup - setup a shared anonymous mapping
- *
 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
 */
 int shmem_zero_setup(struct vm_area_struct *vma)
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1481,7 +1481,7 @@ void __init kmem_cache_init(void)
 	list_add(&cache_cache.next, &cache_chain);
 	cache_cache.colour_off = cache_line_size();
 	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
-	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE];
+	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
 
 	/*
 	 * struct kmem_cache size depends on nr_node_ids, which
@@ -1602,7 +1602,7 @@ void __init kmem_cache_init(void)
 		int nid;
 
 		for_each_online_node(nid) {
-			init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], nid);
+			init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
 
 			init_list(malloc_sizes[INDEX_AC].cs_cachep,
 				  &initkmem_list3[SIZE_AC + nid], nid);
@@ -3624,12 +3624,11 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 /**
- * kmem_ptr_validate - check if an untrusted pointer might
- *	be a slab entry.
+ * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
 * @cachep: the cache we're checking against
 * @ptr: pointer to validate
 *
- * This verifies that the untrusted pointer looks sane:
+ * This verifies that the untrusted pointer looks sane;
 * it is _not_ a guarantee that the pointer is actually
 * part of the slab cache in question, but it at least
 * validates that the pointer can be dereferenced and
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1470,6 +1470,9 @@ static void *__slab_alloc(struct kmem_cache *s,
 	void **object;
 	struct page *new;
 
+	/* We handle __GFP_ZERO in the caller */
+	gfpflags &= ~__GFP_ZERO;
+
 	if (!c->page)
 		goto new_slab;
 
@@ -1536,9 +1539,15 @@ new_slab:
 	 * That is only possible if certain conditions are met that are being
 	 * checked when a slab is created.
 	 */
-	if (!(gfpflags & __GFP_NORETRY) && (s->flags & __PAGE_ALLOC_FALLBACK))
-		return kmalloc_large(s->objsize, gfpflags);
-
+	if (!(gfpflags & __GFP_NORETRY) &&
+				(s->flags & __PAGE_ALLOC_FALLBACK)) {
+		if (gfpflags & __GFP_WAIT)
+			local_irq_enable();
+		object = kmalloc_large(s->objsize, gfpflags);
+		if (gfpflags & __GFP_WAIT)
+			local_irq_disable();
+		return object;
+	}
 	return NULL;
 debug:
 	if (!alloc_debug_processing(s, c->page, object, addr))
@@ -2679,6 +2688,7 @@ void kfree(const void *x)
 }
 EXPORT_SYMBOL(kfree);
 
+#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SLABINFO)
 static unsigned long count_partial(struct kmem_cache_node *n)
 {
 	unsigned long flags;
@@ -2691,6 +2701,7 @@ static unsigned long count_partial(struct kmem_cache_node *n)
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	return x;
 }
+#endif
 
 /*
 * kmem_cache_shrink removes empty slabs from the partial lists and sorts
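The __PAGE_ALLOC_FALLBACK change above re-enables interrupts around a fallback allocation that may sleep. A compilable userspace sketch of that pattern; the stub functions and the __GFP_WAIT value are stand-ins for the kernel primitives, not the real implementations:

#include <stdio.h>

#define __GFP_WAIT 0x10u

/* Stubs standing in for the kernel primitives (assumptions). */
static void local_irq_enable(void)  { puts("irqs on:  the allocation may sleep"); }
static void local_irq_disable(void) { puts("irqs off: back inside the allocator"); }
static void *kmalloc_large(unsigned long size, unsigned int gfpflags)
{
	(void)gfpflags;
	return (void *)size; /* placeholder "allocation" */
}

static void *fallback_alloc(unsigned long size, unsigned int gfpflags)
{
	void *object;

	/* __slab_alloc() runs with interrupts disabled; a __GFP_WAIT
	 * allocation may sleep, so interrupts must be enabled around
	 * the page-allocator fallback and disabled again afterwards. */
	if (gfpflags & __GFP_WAIT)
		local_irq_enable();
	object = kmalloc_large(size, gfpflags);
	if (gfpflags & __GFP_WAIT)
		local_irq_disable();
	return object;
}

int main(void)
{
	fallback_alloc(4096, __GFP_WAIT);
	return 0;
}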
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index cd75b21dd4c3..99c4f36eb8a3 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -76,7 +76,7 @@ pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
 		pte_t entry;
 		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
 		if (!p)
-			return 0;
+			return NULL;
 		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
 		set_pte_at(&init_mm, addr, pte, entry);
 	}
@@ -89,7 +89,7 @@ pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
 	if (pmd_none(*pmd)) {
 		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
 		if (!p)
-			return 0;
+			return NULL;
 		pmd_populate_kernel(&init_mm, pmd, p);
 	}
 	return pmd;
@@ -101,7 +101,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
 	if (pud_none(*pud)) {
 		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
 		if (!p)
-			return 0;
+			return NULL;
 		pud_populate(&init_mm, pud, p);
 	}
 	return pud;
@@ -113,7 +113,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
 	if (pgd_none(*pgd)) {
 		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
 		if (!p)
-			return 0;
+			return NULL;
 		pgd_populate(&init_mm, pgd, p);
 	}
 	return pgd;
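The sparse-vmemmap hunks are annotation hygiene: pointer-returning functions now return NULL instead of 0, which is what sparse's "Using plain integer as NULL pointer" check wants. A trivial, self-contained illustration (the function is hypothetical):

#include <stddef.h>

static int *find_slot(int want)
{
	static int slot = 42;

	if (!want)
		return NULL;	/* was "return 0;" -- same code, clearer intent */
	return &slot;
}

int main(void)
{
	return find_slot(1) == NULL;
}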
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -78,12 +78,11 @@ void put_page(struct page *page)
 EXPORT_SYMBOL(put_page);
 
 /**
- * put_pages_list(): release a list of pages
+ * put_pages_list() - release a list of pages
+ * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru. Currently
 * used by read_cache_pages() and related error recovery code.
- *
- * @pages: list of pages threaded on page->lru
 */
 void put_pages_list(struct list_head *pages)
 {
diff --git a/mm/swap_state.c b/mm/swap_state.c
index ec42f01a8d02..50757ee3f9f3 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -115,6 +115,7 @@ void __delete_from_swap_cache(struct page *page)
 /**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
+ * @gfp_mask: memory allocation flags
 *
 * Allocate swap space for the page and add the page to the
 * swap cache. Caller needs to hold the page lock.
@@ -315,6 +316,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 /**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
+ * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
diff --git a/mm/tiny-shmem.c b/mm/tiny-shmem.c
index 702083638c16..ae532f501943 100644
--- a/mm/tiny-shmem.c
+++ b/mm/tiny-shmem.c
@@ -39,12 +39,11 @@ static int __init init_tmpfs(void)
 }
 module_init(init_tmpfs)
 
-/*
+/**
 * shmem_file_setup - get an unlinked file living in tmpfs
- *
 * @name: name for dentry (to be seen in /proc/<pid>/maps
 * @size: size to be set for the file
- *
+ * @flags: vm_flags
 */
 struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
 {
@@ -89,15 +88,16 @@ struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
 
 close_file:
 	put_filp(file);
+	return ERR_PTR(error);
+
 put_dentry:
 	dput(dentry);
 put_memory:
 	return ERR_PTR(error);
 }
 
-/*
+/**
 * shmem_zero_setup - setup a shared anonymous mapping
- *
 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
 */
 int shmem_zero_setup(struct vm_area_struct *vma)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 950c0be9ca81..ecf91f8034bf 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -757,7 +757,8 @@ finished:
 * @vma: vma to cover (map full range of vma)
 * @addr: vmalloc memory
 * @pgoff: number of pages into addr before first page to map
- * @returns: 0 for success, -Exxx on failure
+ *
+ * Returns: 0 for success, -Exxx on failure
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma. Will return failure if
@@ -829,7 +830,8 @@ static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
 /**
 * alloc_vm_area - allocate a range of kernel address space
 * @size: size of the area
- * @returns: NULL on failure, vm_struct on success
+ *
+ * Returns: NULL on failure, vm_struct on success
 *
 * This function reserves a range of kernel address space, and
 * allocates pagetables to map that range. No actual mappings
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 45711585684e..4046434046e6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -70,13 +70,6 @@ struct scan_control {
 
 	int order;
 
-	/*
-	 * Pages that have (or should have) IO pending. If we run into
-	 * a lot of these, we're better off waiting a little for IO to
-	 * finish rather than scanning more pages in the VM.
-	 */
-	int nr_io_pages;
-
 	/* Which cgroup do we reclaim from */
 	struct mem_cgroup *mem_cgroup;
 
@@ -512,10 +505,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 */
 		if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
 			wait_on_page_writeback(page);
-		else {
-			sc->nr_io_pages++;
+		else
 			goto keep_locked;
-		}
 	}
 
 	referenced = page_referenced(page, 1, sc->mem_cgroup);
@@ -554,10 +545,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 	if (PageDirty(page)) {
 		if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
 			goto keep_locked;
-		if (!may_enter_fs) {
-			sc->nr_io_pages++;
+		if (!may_enter_fs)
 			goto keep_locked;
-		}
 		if (!sc->may_writepage)
 			goto keep_locked;
 
@@ -568,10 +557,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		case PAGE_ACTIVATE:
 			goto activate_locked;
 		case PAGE_SUCCESS:
-			if (PageWriteback(page) || PageDirty(page)) {
-				sc->nr_io_pages++;
+			if (PageWriteback(page) || PageDirty(page))
 				goto keep;
-			}
 			/*
 			 * A synchronous write - probably a ramdisk. Go
 			 * ahead and try to reclaim the page.
@@ -1344,7 +1331,6 @@ static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
 
 	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
 		sc->nr_scanned = 0;
-		sc->nr_io_pages = 0;
 		if (!priority)
 			disable_swap_token();
 		nr_reclaimed += shrink_zones(priority, zones, sc);
@@ -1379,8 +1365,7 @@ static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
 	}
 
 	/* Take a nap, wait for some writeback to complete */
-	if (sc->nr_scanned && priority < DEF_PRIORITY - 2 &&
-			sc->nr_io_pages > sc->swap_cluster_max)
+	if (sc->nr_scanned && priority < DEF_PRIORITY - 2)
 		congestion_wait(WRITE, HZ/10);
 	}
 	/* top priority shrink_caches still had more to do? don't OOM, then */
@@ -1514,7 +1499,6 @@ loop_again:
 		if (!priority)
 			disable_swap_token();
 
-		sc.nr_io_pages = 0;
 		all_zones_ok = 1;
 
 		/*
@@ -1607,8 +1591,7 @@ loop_again:
 		 * OK, kswapd is getting into trouble. Take a nap, then take
 		 * another pass across the zones.
 		 */
-		if (total_scanned && priority < DEF_PRIORITY - 2 &&
-				sc.nr_io_pages > sc.swap_cluster_max)
+		if (total_scanned && priority < DEF_PRIORITY - 2)
 			congestion_wait(WRITE, HZ/10);
 
 		/*
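With nr_io_pages gone, the nap before another reclaim pass depends only on whether anything was scanned and how far priority has dropped. A small model of the resulting control flow; the stub functions and numbers are made up for illustration, not the kernel's:

#include <stdio.h>

#define DEF_PRIORITY 12

static unsigned long shrink_zones(int priority) { return priority ? 0 : 8; }
static void congestion_wait(void) { puts("napping, waiting for writeback"); }

int main(void)
{
	unsigned long nr_reclaimed = 0, nr_scanned;
	int priority;

	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
		nr_scanned = 32;	/* assume each pass scans some pages */
		nr_reclaimed += shrink_zones(priority);
		if (nr_reclaimed >= 8)
			break;
		/* The extra test on sc->nr_io_pages is gone: nap whenever
		 * we scanned and have dropped two priority levels. */
		if (nr_scanned && priority < DEF_PRIORITY - 2)
			congestion_wait();
	}
	printf("reclaimed %lu page(s)\n", nr_reclaimed);
	return 0;
}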