diff options
author | Hugh Dickins <hugh@veritas.com> | 2005-10-29 21:16:31 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2005-10-30 00:40:41 -0400 |
commit | 67b02f119df50ffad5a4e9e53ea4c896535862cd (patch) | |
tree | 9d651e9e62b6ead325fc6bd872f3a6232d85e8a3 /mm | |
parent | 508034a32b819a2d40aa7ac0dbc8cd2e044c2de6 (diff) |
[PATCH] mm: xip_unmap ZERO_PAGE fix
Small fix to the PageReserved patch: the mips ZERO_PAGE(address) depends on
address, so __xip_unmap is wrong to initialize page with that before address
is initialized; and in fact must re-evaluate it each iteration.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/filemap_xip.c | 3 |
1 file changed, 2 insertions, 1 deletion
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c index 9354ee279b13..4e74ad60339a 100644 --- a/mm/filemap_xip.c +++ b/mm/filemap_xip.c | |||
@@ -174,7 +174,7 @@ __xip_unmap (struct address_space * mapping, | |||
174 | unsigned long address; | 174 | unsigned long address; |
175 | pte_t *pte; | 175 | pte_t *pte; |
176 | pte_t pteval; | 176 | pte_t pteval; |
177 | struct page *page = ZERO_PAGE(address); | 177 | struct page *page; |
178 | 178 | ||
179 | spin_lock(&mapping->i_mmap_lock); | 179 | spin_lock(&mapping->i_mmap_lock); |
180 | vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { | 180 | vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { |
@@ -182,6 +182,7 @@ __xip_unmap (struct address_space * mapping, | |||
182 | address = vma->vm_start + | 182 | address = vma->vm_start + |
183 | ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); | 183 | ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); |
184 | BUG_ON(address < vma->vm_start || address >= vma->vm_end); | 184 | BUG_ON(address < vma->vm_start || address >= vma->vm_end); |
185 | page = ZERO_PAGE(address); | ||
185 | /* | 186 | /* |
186 | * We need the page_table_lock to protect us from page faults, | 187 | * We need the page_table_lock to protect us from page faults, |
187 | * munmap, fork, etc... | 188 | * munmap, fork, etc... |