Diffstat (limited to 'mm')
-rw-r--r--  mm/mempolicy.c  | 6 +++---
-rw-r--r--  mm/page_alloc.c | 2 +-
-rw-r--r--  mm/readahead.c  | 4 ++--
-rw-r--r--  mm/swap.c       | 3 +--
4 files changed, 7 insertions(+), 8 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 73e0f23b7f51..6b9740bbf4c0 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1821,7 +1821,7 @@ static inline void check_huge_range(struct vm_area_struct *vma,
 
 int show_numa_map(struct seq_file *m, void *v)
 {
-	struct task_struct *task = m->private;
+	struct proc_maps_private *priv = m->private;
 	struct vm_area_struct *vma = v;
 	struct numa_maps *md;
 	struct file *file = vma->vm_file;
@@ -1837,7 +1837,7 @@ int show_numa_map(struct seq_file *m, void *v)
 		return 0;
 
 	mpol_to_str(buffer, sizeof(buffer),
-			get_vma_policy(task, vma, vma->vm_start));
+			get_vma_policy(priv->task, vma, vma->vm_start));
 
 	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
 
@@ -1891,7 +1891,7 @@ out:
 	kfree(md);
 
 	if (m->count < m->size)
-		m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
+		m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
 	return 0;
 }
 
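The mempolicy.c hunks above retarget show_numa_map() from a bare task_struct at m->private to the proc_maps_private bookkeeping used by the /proc maps seq_file code. Inferring only from the two fields this diff dereferences (priv->task and priv->tail_vma), a minimal sketch of that structure would look like the following; the authoritative declaration lives in fs/proc and may carry additional members:

	/* Sketch reconstructed from the accesses visible in this diff alone;
	 * not the canonical declaration (see fs/proc/task_mmu.c). */
	struct proc_maps_private {
		struct task_struct *task;	 /* task whose maps are shown */
		struct vm_area_struct *tail_vma; /* last (gate) vma in the walk */
	};
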
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6c1174fcf52c..9f86191bb632 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -266,7 +266,7 @@ static inline void rmv_page_order(struct page *page)
  * satisfies the following equation:
  *     P = B & ~(1 << O)
  *
- * Assumption: *_mem_map is contigious at least up to MAX_ORDER
+ * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
  */
 static inline struct page *
 __page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
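The comment corrected above documents the buddy-allocator arithmetic: a block's buddy index is found by toggling the order bit, and the combined order-O+1 parent P of any buddy B satisfies P = B & ~(1 << O). A self-contained userspace sketch of that arithmetic (helper names are illustrative, not the kernel's):

	#include <stdio.h>

	/* B = I ^ (1 << O): toggle the order bit to find the buddy index. */
	static unsigned long buddy_idx(unsigned long page_idx, unsigned int order)
	{
		return page_idx ^ (1UL << order);
	}

	/* P = B & ~(1 << O): clear the order bit to find the parent index. */
	static unsigned long parent_idx(unsigned long buddy, unsigned int order)
	{
		return buddy & ~(1UL << order);
	}

	int main(void)
	{
		/* Order-2 block at index 8: buddy is 12, merged parent is 8. */
		unsigned long b = buddy_idx(8, 2);
		printf("buddy=%lu parent=%lu\n", b, parent_idx(b, 2));
		return 0;
	}
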
diff --git a/mm/readahead.c b/mm/readahead.c
index e39e416860d7..aa7ec424656a 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -390,8 +390,8 @@ int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
  * Read 'nr_to_read' pages starting at page 'offset'. If the flag 'block'
  * is set wait till the read completes. Otherwise attempt to read without
  * blocking.
- * Returns 1 meaning 'success' if read is succesfull without switching off
- * readhaead mode. Otherwise return failure.
+ * Returns 1 meaning 'success' if read is successful without switching off
+ * readahead mode. Otherwise return failure.
  */
 static int
 blockable_page_cache_readahead(struct address_space *mapping, struct file *filp,
diff --git a/mm/swap.c b/mm/swap.c
index 03ae2076f92f..990868afc1c6 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -86,8 +86,7 @@ int rotate_reclaimable_page(struct page *page)
 	zone = page_zone(page);
 	spin_lock_irqsave(&zone->lru_lock, flags);
 	if (PageLRU(page) && !PageActive(page)) {
-		list_del(&page->lru);
-		list_add_tail(&page->lru, &zone->inactive_list);
+		list_move_tail(&page->lru, &zone->inactive_list);
 		inc_page_state(pgrotated);
 	}
 	if (!test_clear_page_writeback(page))
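The swap.c hunk replaces an open-coded list_del()/list_add_tail() pair with list_move_tail(), which in <linux/list.h> is defined as exactly that pair, so the behavior (requeue the page at the tail of the inactive list) is unchanged. Paraphrased from list.h of this era; the exact body varies across kernel versions:

	static inline void list_move_tail(struct list_head *list,
					  struct list_head *head)
	{
		__list_del(list->prev, list->next);	/* unlink from current position */
		list_add_tail(list, head);		/* relink just before head, i.e. at the tail */
	}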