diff options
| author | Ingo Molnar <mingo@elte.hu> | 2008-09-04 07:02:35 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-09-04 07:02:35 -0400 |
| commit | 42390cdec5f6e6e2ee54f308474a6ef7dd16aa5c (patch) | |
| tree | e9684c84f53272319a5acd4b9c86503f30274a51 /mm/filemap_xip.c | |
| parent | 11c231a962c740b3216eb6565149ae5a7944cba7 (diff) | |
| parent | d210baf53b699fc61aa891c177b71d7082d3b957 (diff) | |
Merge branch 'linus' into x86/x2apic
Conflicts:
arch/x86/kernel/cpu/cyrix.c
include/asm-x86/cpufeature.h
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'mm/filemap_xip.c')
| -rw-r--r-- | mm/filemap_xip.c | 65 |
1 file changed, 50 insertions, 15 deletions
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c index 380ab402d711..b5167dfb2f2d 100644 --- a/mm/filemap_xip.c +++ b/mm/filemap_xip.c | |||
| @@ -15,6 +15,8 @@ | |||
| 15 | #include <linux/rmap.h> | 15 | #include <linux/rmap.h> |
| 16 | #include <linux/mmu_notifier.h> | 16 | #include <linux/mmu_notifier.h> |
| 17 | #include <linux/sched.h> | 17 | #include <linux/sched.h> |
| 18 | #include <linux/seqlock.h> | ||
| 19 | #include <linux/mutex.h> | ||
| 18 | #include <asm/tlbflush.h> | 20 | #include <asm/tlbflush.h> |
| 19 | #include <asm/io.h> | 21 | #include <asm/io.h> |
| 20 | 22 | ||
| @@ -22,22 +24,18 @@ | |||
| 22 | * We do use our own empty page to avoid interference with other users | 24 | * We do use our own empty page to avoid interference with other users |
| 23 | * of ZERO_PAGE(), such as /dev/zero | 25 | * of ZERO_PAGE(), such as /dev/zero |
| 24 | */ | 26 | */ |
| 27 | static DEFINE_MUTEX(xip_sparse_mutex); | ||
| 28 | static seqcount_t xip_sparse_seq = SEQCNT_ZERO; | ||
| 25 | static struct page *__xip_sparse_page; | 29 | static struct page *__xip_sparse_page; |
| 26 | 30 | ||
| 31 | /* called under xip_sparse_mutex */ | ||
| 27 | static struct page *xip_sparse_page(void) | 32 | static struct page *xip_sparse_page(void) |
| 28 | { | 33 | { |
| 29 | if (!__xip_sparse_page) { | 34 | if (!__xip_sparse_page) { |
| 30 | struct page *page = alloc_page(GFP_HIGHUSER | __GFP_ZERO); | 35 | struct page *page = alloc_page(GFP_HIGHUSER | __GFP_ZERO); |
| 31 | 36 | ||
| 32 | if (page) { | 37 | if (page) |
| 33 | static DEFINE_SPINLOCK(xip_alloc_lock); | 38 | __xip_sparse_page = page; |
| 34 | spin_lock(&xip_alloc_lock); | ||
| 35 | if (!__xip_sparse_page) | ||
| 36 | __xip_sparse_page = page; | ||
| 37 | else | ||
| 38 | __free_page(page); | ||
| 39 | spin_unlock(&xip_alloc_lock); | ||
| 40 | } | ||
| 41 | } | 39 | } |
| 42 | return __xip_sparse_page; | 40 | return __xip_sparse_page; |
| 43 | } | 41 | } |
| @@ -174,18 +172,23 @@ __xip_unmap (struct address_space * mapping, | |||
| 174 | pte_t pteval; | 172 | pte_t pteval; |
| 175 | spinlock_t *ptl; | 173 | spinlock_t *ptl; |
| 176 | struct page *page; | 174 | struct page *page; |
| 175 | unsigned count; | ||
| 176 | int locked = 0; | ||
| 177 | |||
| 178 | count = read_seqcount_begin(&xip_sparse_seq); | ||
| 177 | 179 | ||
| 178 | page = __xip_sparse_page; | 180 | page = __xip_sparse_page; |
| 179 | if (!page) | 181 | if (!page) |
| 180 | return; | 182 | return; |
| 181 | 183 | ||
| 184 | retry: | ||
| 182 | spin_lock(&mapping->i_mmap_lock); | 185 | spin_lock(&mapping->i_mmap_lock); |
| 183 | vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { | 186 | vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { |
| 184 | mm = vma->vm_mm; | 187 | mm = vma->vm_mm; |
| 185 | address = vma->vm_start + | 188 | address = vma->vm_start + |
| 186 | ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); | 189 | ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); |
| 187 | BUG_ON(address < vma->vm_start || address >= vma->vm_end); | 190 | BUG_ON(address < vma->vm_start || address >= vma->vm_end); |
| 188 | pte = page_check_address(page, mm, address, &ptl); | 191 | pte = page_check_address(page, mm, address, &ptl, 1); |
| 189 | if (pte) { | 192 | if (pte) { |
| 190 | /* Nuke the page table entry. */ | 193 | /* Nuke the page table entry. */ |
| 191 | flush_cache_page(vma, address, pte_pfn(*pte)); | 194 | flush_cache_page(vma, address, pte_pfn(*pte)); |
| @@ -198,6 +201,14 @@ __xip_unmap (struct address_space * mapping, | |||
| 198 | } | 201 | } |
| 199 | } | 202 | } |
| 200 | spin_unlock(&mapping->i_mmap_lock); | 203 | spin_unlock(&mapping->i_mmap_lock); |
| 204 | |||
| 205 | if (locked) { | ||
| 206 | mutex_unlock(&xip_sparse_mutex); | ||
| 207 | } else if (read_seqcount_retry(&xip_sparse_seq, count)) { | ||
| 208 | mutex_lock(&xip_sparse_mutex); | ||
| 209 | locked = 1; | ||
| 210 | goto retry; | ||
| 211 | } | ||
| 201 | } | 212 | } |
| 202 | 213 | ||
| 203 | /* | 214 | /* |
| @@ -218,7 +229,7 @@ static int xip_file_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
| 218 | int error; | 229 | int error; |
| 219 | 230 | ||
| 220 | /* XXX: are VM_FAULT_ codes OK? */ | 231 | /* XXX: are VM_FAULT_ codes OK? */ |
| 221 | 232 | again: | |
| 222 | size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; | 233 | size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; |
| 223 | if (vmf->pgoff >= size) | 234 | if (vmf->pgoff >= size) |
| 224 | return VM_FAULT_SIGBUS; | 235 | return VM_FAULT_SIGBUS; |
| @@ -237,8 +248,10 @@ static int xip_file_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
| 237 | int err; | 248 | int err; |
| 238 | 249 | ||
| 239 | /* maybe shared writable, allocate new block */ | 250 | /* maybe shared writable, allocate new block */ |
| 251 | mutex_lock(&xip_sparse_mutex); | ||
| 240 | error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 1, | 252 | error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 1, |
| 241 | &xip_mem, &xip_pfn); | 253 | &xip_mem, &xip_pfn); |
| 254 | mutex_unlock(&xip_sparse_mutex); | ||
| 242 | if (error) | 255 | if (error) |
| 243 | return VM_FAULT_SIGBUS; | 256 | return VM_FAULT_SIGBUS; |
| 244 | /* unmap sparse mappings at pgoff from all other vmas */ | 257 | /* unmap sparse mappings at pgoff from all other vmas */ |
| @@ -252,14 +265,34 @@ found: | |||
| 252 | BUG_ON(err); | 265 | BUG_ON(err); |
| 253 | return VM_FAULT_NOPAGE; | 266 | return VM_FAULT_NOPAGE; |
| 254 | } else { | 267 | } else { |
| 268 | int err, ret = VM_FAULT_OOM; | ||
| 269 | |||
| 270 | mutex_lock(&xip_sparse_mutex); | ||
| 271 | write_seqcount_begin(&xip_sparse_seq); | ||
| 272 | error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0, | ||
| 273 | &xip_mem, &xip_pfn); | ||
| 274 | if (unlikely(!error)) { | ||
| 275 | write_seqcount_end(&xip_sparse_seq); | ||
| 276 | mutex_unlock(&xip_sparse_mutex); | ||
| 277 | goto again; | ||
| 278 | } | ||
| 279 | if (error != -ENODATA) | ||
| 280 | goto out; | ||
| 255 | /* not shared and writable, use xip_sparse_page() */ | 281 | /* not shared and writable, use xip_sparse_page() */ |
| 256 | page = xip_sparse_page(); | 282 | page = xip_sparse_page(); |
| 257 | if (!page) | 283 | if (!page) |
| 258 | return VM_FAULT_OOM; | 284 | goto out; |
| 285 | err = vm_insert_page(vma, (unsigned long)vmf->virtual_address, | ||
| 286 | page); | ||
| 287 | if (err == -ENOMEM) | ||
| 288 | goto out; | ||
| 259 | 289 | ||
| 260 | page_cache_get(page); | 290 | ret = VM_FAULT_NOPAGE; |
| 261 | vmf->page = page; | 291 | out: |
| 262 | return 0; | 292 | write_seqcount_end(&xip_sparse_seq); |
| 293 | mutex_unlock(&xip_sparse_mutex); | ||
| 294 | |||
| 295 | return ret; | ||
| 263 | } | 296 | } |
| 264 | } | 297 | } |
| 265 | 298 | ||
| @@ -308,8 +341,10 @@ __xip_file_write(struct file *filp, const char __user *buf, | |||
| 308 | &xip_mem, &xip_pfn); | 341 | &xip_mem, &xip_pfn); |
| 309 | if (status == -ENODATA) { | 342 | if (status == -ENODATA) { |
| 310 | /* we allocate a new page unmap it */ | 343 | /* we allocate a new page unmap it */ |
| 344 | mutex_lock(&xip_sparse_mutex); | ||
| 311 | status = a_ops->get_xip_mem(mapping, index, 1, | 345 | status = a_ops->get_xip_mem(mapping, index, 1, |
| 312 | &xip_mem, &xip_pfn); | 346 | &xip_mem, &xip_pfn); |
| 347 | mutex_unlock(&xip_sparse_mutex); | ||
| 313 | if (!status) | 348 | if (!status) |
| 314 | /* unmap page at pgoff from all other vmas */ | 349 | /* unmap page at pgoff from all other vmas */ |
| 315 | __xip_unmap(mapping, index); | 350 | __xip_unmap(mapping, index); |
