path: root/mm
author	Joe Perches <joe@perches.com>	2013-07-08 19:00:18 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-07-09 13:33:23 -0400
commit	64363aad5ff1b878230e91223038c26a2205bff3 (patch)
tree	8f8bb770645e634d55fc390ec4428b1818eccd10 /mm
parent	73b44ff43c4b3cf517826da03c51948593f88753 (diff)
mm: remove unused VM_<READfoo> macros and expand other in-place
These VM_<READfoo> macros aren't used very often and three of them aren't used at all.

Expand the ones that are used in-place, and remove all the now unused #define VM_<foo> macros.

VM_READHINTMASK, VM_NormalReadHint and VM_ClearReadHint were added just before 2.4 and appear to have never been used.

Signed-off-by: Joe Perches <joe@perches.com>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
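For context: the diffstat below is limited to mm/, so the companion removal from include/linux/mm.h does not appear in this view. The removed helpers were thin wrappers over vma->vm_flags, roughly of this shape (a sketch reproduced from memory, not the literal hunk from this commit):

	/* Sketch of the definitions removed from include/linux/mm.h:
	 * simple tests/updates of vma->vm_flags. Only the last two had
	 * callers, and those callers are expanded in-place below. */
	#define VM_READHINTMASK			(VM_SEQ_READ | VM_RAND_READ)
	#define VM_ClearReadHint(v)		(v)->vm_flags &= ~VM_READHINTMASK
	#define VM_NormalReadHint(v)		(!((v)->vm_flags & VM_READHINTMASK))
	#define VM_SequentialReadHint(v)	((v)->vm_flags & VM_SEQ_READ)
	#define VM_RandomReadHint(v)		((v)->vm_flags & VM_RAND_READ)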
Diffstat (limited to 'mm')
-rw-r--r--	mm/filemap.c	6
-rw-r--r--	mm/memory.c	2
-rw-r--r--	mm/rmap.c	2
3 files changed, 5 insertions, 5 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 7905fe721aa8..4b51ac1acae7 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1539,12 +1539,12 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma,
 	struct address_space *mapping = file->f_mapping;
 
 	/* If we don't want any read-ahead, don't bother */
-	if (VM_RandomReadHint(vma))
+	if (vma->vm_flags & VM_RAND_READ)
 		return;
 	if (!ra->ra_pages)
 		return;
 
-	if (VM_SequentialReadHint(vma)) {
+	if (vma->vm_flags & VM_SEQ_READ) {
 		page_cache_sync_readahead(mapping, ra, file, offset,
 					  ra->ra_pages);
 		return;
@@ -1584,7 +1584,7 @@ static void do_async_mmap_readahead(struct vm_area_struct *vma,
 	struct address_space *mapping = file->f_mapping;
 
 	/* If we don't want any read-ahead, don't bother */
-	if (VM_RandomReadHint(vma))
+	if (vma->vm_flags & VM_RAND_READ)
 		return;
 	if (ra->mmap_miss > 0)
 		ra->mmap_miss--;
diff --git a/mm/memory.c b/mm/memory.c
index b68812d682b6..1ce2e2a734fc 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1150,7 +1150,7 @@ again:
 			if (pte_dirty(ptent))
 				set_page_dirty(page);
 			if (pte_young(ptent) &&
-			    likely(!VM_SequentialReadHint(vma)))
+			    likely(!(vma->vm_flags & VM_SEQ_READ)))
 				mark_page_accessed(page);
 			rss[MM_FILEPAGES]--;
 		}
diff --git a/mm/rmap.c b/mm/rmap.c
index e22ceeb6e5ec..cd356df4f71a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -720,7 +720,7 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma,
 		 * mapping is already gone, the unmap path will have
 		 * set PG_referenced or activated the page.
 		 */
-		if (likely(!VM_SequentialReadHint(vma)))
+		if (likely(!(vma->vm_flags & VM_SEQ_READ)))
 			referenced++;
 	}
 	pte_unmap_unlock(pte, ptl);
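The VM_SEQ_READ and VM_RAND_READ bits tested in the expanded code are set per-VMA from userspace via madvise(2); a minimal illustrative sketch, not part of this commit:

	/* Illustrative userspace sketch: madvise(2) is how an application sets
	 * the per-VMA read hints that the kernel code above tests as
	 * vma->vm_flags & VM_SEQ_READ / VM_RAND_READ. */
	#include <sys/mman.h>

	static void hint_readahead(void *addr, size_t len, int sequential)
	{
		/* MADV_SEQUENTIAL sets VM_SEQ_READ (aggressive readahead);
		 * MADV_RANDOM sets VM_RAND_READ (readahead disabled). */
		(void)madvise(addr, len, sequential ? MADV_SEQUENTIAL : MADV_RANDOM);
	}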