author    Linus Torvalds <torvalds@linux-foundation.org>  2015-10-16 14:42:37 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-10-16 14:42:37 -0400
commit    3d875182d7f4b27b7778c3ab6a39800d383968cb (patch)
tree      588a0f89219f4252d93a102b49f661b1bf435a84
parent    69984b64440729bf6b08d1ddc1b3ee8282a2c846 (diff)
parent    934ed25ea505859cec5236dcb1769be5f998dd25 (diff)

Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "6 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  sh: add copy_user_page() alias for __copy_user()
  lib/Kconfig: ZLIB_DEFLATE must select BITREVERSE
  mm, dax: fix DAX deadlocks
  memcg: convert threshold to bytes
  builddeb: remove debian/files before build
  mm, fs: obey gfp_mapping for add_to_page_cache()
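Most of the page-cache patches in this series share one idea: rather than passing a bare GFP_KERNEL into add_to_page_cache*(), the caller first masks it with the mapping's own allocation constraints from mapping_gfp_mask(), so a filesystem that must not recurse into itself under memory pressure is obeyed. A minimal userspace sketch of that masking pattern follows; the flag names and values are illustrative stand-ins, not the kernel's real definitions:

#include <stdio.h>

/* Illustrative stand-ins for kernel gfp bits; real values differ. */
typedef unsigned int gfp_t;
#define __GFP_IO   0x1u
#define __GFP_FS   0x2u
#define __GFP_WAIT 0x4u
#define GFP_KERNEL (__GFP_IO | __GFP_FS | __GFP_WAIT)

/* A mapping that forbids __GFP_FS, as a filesystem might, to keep
 * reclaim from re-entering the filesystem while it holds locks. */
static gfp_t mapping_gfp_mask_demo(void)
{
	return GFP_KERNEL & ~__GFP_FS;
}

int main(void)
{
	/* The pattern applied by these patches: honour the mapping's
	 * mask instead of allocating with an unrestricted GFP_KERNEL. */
	gfp_t gfp = GFP_KERNEL & mapping_gfp_mask_demo();

	printf("requested 0x%x, effective 0x%x\n", GFP_KERNEL, gfp);
	return 0;
}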
-rw-r--r--  arch/sh/include/asm/page.h                 |  1
-rw-r--r--  drivers/staging/lustre/lustre/llite/dir.c  |  2
-rw-r--r--  fs/cifs/file.c                             |  6
-rw-r--r--  fs/dax.c                                   | 70
-rw-r--r--  fs/ext4/readpage.c                         |  4
-rw-r--r--  fs/mpage.c                                 | 15
-rw-r--r--  fs/ramfs/file-nommu.c                      |  5
-rw-r--r--  lib/Kconfig                                |  1
-rw-r--r--  mm/memcontrol.c                            |  1
-rw-r--r--  mm/memory.c                                |  2
-rw-r--r--  mm/readahead.c                             |  8
-rwxr-xr-x  scripts/package/builddeb                   |  4

12 files changed, 58 insertions(+), 61 deletions(-)
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index fe20d14ae051..ceb5201a30ed 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -59,6 +59,7 @@ pages_do_alias(unsigned long addr1, unsigned long addr2)
 
 #define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
 extern void copy_page(void *to, void *from);
+#define copy_user_page(to, from, vaddr, pg)  __copy_user(to, from, PAGE_SIZE)
 
 struct page;
 struct vm_area_struct;
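For reference, the alias pattern used above: the generic interface takes (to, from, vaddr, pg), and the sh macro simply forwards to __copy_user() with a fixed PAGE_SIZE, ignoring the last two arguments. A self-contained illustration of that shape — all _demo names are invented for this sketch, and the page size is assumed:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE_DEMO 4096

/* Stand-in for the arch copy primitive. */
static void __copy_user_demo(void *to, void *from, unsigned long n)
{
	memcpy(to, from, n);
}

/* The alias pattern: the extra arguments (vaddr, pg) exist only to
 * satisfy the generic interface and are ignored here. */
#define copy_user_page_demo(to, from, vaddr, pg) \
	__copy_user_demo(to, from, PAGE_SIZE_DEMO)

int main(void)
{
	static char src[PAGE_SIZE_DEMO] = "hello", dst[PAGE_SIZE_DEMO];

	copy_user_page_demo(dst, src, 0UL, NULL);
	printf("%s\n", dst);
	return 0;
}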
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 769b61193d87..a9bc6e23fc25 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -224,7 +224,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
 
 	prefetchw(&page->flags);
 	ret = add_to_page_cache_lru(page, inode->i_mapping, offset,
-				    GFP_KERNEL);
+				    GFP_NOFS);
 	if (ret == 0) {
 		unlock_page(page);
 	} else {
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index e2a6af1508af..62203c387db4 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -3380,6 +3380,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
 	struct page *page, *tpage;
 	unsigned int expected_index;
 	int rc;
+	gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(mapping);
 
 	INIT_LIST_HEAD(tmplist);
 
@@ -3392,7 +3393,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
 	 */
 	__set_page_locked(page);
 	rc = add_to_page_cache_locked(page, mapping,
-				      page->index, GFP_KERNEL);
+				      page->index, gfp);
 
 	/* give up if we can't stick it in the cache */
 	if (rc) {
@@ -3418,8 +3419,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
 			break;
 
 		__set_page_locked(page);
-		if (add_to_page_cache_locked(page, mapping, page->index,
-						GFP_KERNEL)) {
+		if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
 			__clear_page_locked(page);
 			break;
 		}
diff --git a/fs/dax.c b/fs/dax.c
index bcfb14bfc1e4..a86d3cc2b389 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -285,6 +285,7 @@ static int copy_user_bh(struct page *to, struct buffer_head *bh,
 static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
 			struct vm_area_struct *vma, struct vm_fault *vmf)
 {
+	struct address_space *mapping = inode->i_mapping;
 	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
 	unsigned long vaddr = (unsigned long)vmf->virtual_address;
 	void __pmem *addr;
@@ -292,6 +293,8 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
 	pgoff_t size;
 	int error;
 
+	i_mmap_lock_read(mapping);
+
 	/*
 	 * Check truncate didn't happen while we were allocating a block.
 	 * If it did, this block may or may not be still allocated to the
@@ -321,6 +324,8 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
 	error = vm_insert_mixed(vma, vaddr, pfn);
 
  out:
+	i_mmap_unlock_read(mapping);
+
 	return error;
 }
 
@@ -382,17 +387,15 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 			 * from a read fault and we've raced with a truncate
 			 */
 			error = -EIO;
-			goto unlock;
+			goto unlock_page;
 		}
-	} else {
-		i_mmap_lock_write(mapping);
 	}
 
 	error = get_block(inode, block, &bh, 0);
 	if (!error && (bh.b_size < PAGE_SIZE))
 		error = -EIO;		/* fs corruption? */
 	if (error)
-		goto unlock;
+		goto unlock_page;
 
 	if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
 		if (vmf->flags & FAULT_FLAG_WRITE) {
@@ -403,9 +406,8 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 			if (!error && (bh.b_size < PAGE_SIZE))
 				error = -EIO;
 			if (error)
-				goto unlock;
+				goto unlock_page;
 		} else {
-			i_mmap_unlock_write(mapping);
 			return dax_load_hole(mapping, page, vmf);
 		}
 	}
@@ -417,15 +419,17 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		else
 			clear_user_highpage(new_page, vaddr);
 		if (error)
-			goto unlock;
+			goto unlock_page;
 		vmf->page = page;
 		if (!page) {
+			i_mmap_lock_read(mapping);
 			/* Check we didn't race with truncate */
 			size = (i_size_read(inode) + PAGE_SIZE - 1) >>
 								PAGE_SHIFT;
 			if (vmf->pgoff >= size) {
+				i_mmap_unlock_read(mapping);
 				error = -EIO;
-				goto unlock;
+				goto out;
 			}
 		}
 		return VM_FAULT_LOCKED;
@@ -461,8 +465,6 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
 	}
 
-	if (!page)
-		i_mmap_unlock_write(mapping);
  out:
 	if (error == -ENOMEM)
 		return VM_FAULT_OOM | major;
@@ -471,14 +473,11 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		return VM_FAULT_SIGBUS | major;
 	return VM_FAULT_NOPAGE | major;
 
- unlock:
+ unlock_page:
 	if (page) {
 		unlock_page(page);
 		page_cache_release(page);
-	} else {
-		i_mmap_unlock_write(mapping);
 	}
-
 	goto out;
 }
 EXPORT_SYMBOL(__dax_fault);
@@ -556,10 +555,10 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);
 
 	bh.b_size = PMD_SIZE;
-	i_mmap_lock_write(mapping);
 	length = get_block(inode, block, &bh, write);
 	if (length)
 		return VM_FAULT_SIGBUS;
+	i_mmap_lock_read(mapping);
 
 	/*
 	 * If the filesystem isn't willing to tell us the length of a hole,
@@ -569,36 +568,14 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE)
 		goto fallback;
 
-	sector = bh.b_blocknr << (blkbits - 9);
-
-	if (buffer_unwritten(&bh) || buffer_new(&bh)) {
-		int i;
-
-		length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn,
-						bh.b_size);
-		if (length < 0) {
-			result = VM_FAULT_SIGBUS;
-			goto out;
-		}
-		if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR))
-			goto fallback;
-
-		for (i = 0; i < PTRS_PER_PMD; i++)
-			clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE);
-		wmb_pmem();
-		count_vm_event(PGMAJFAULT);
-		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
-		result |= VM_FAULT_MAJOR;
-	}
-
 	/*
 	 * If we allocated new storage, make sure no process has any
 	 * zero pages covering this hole
 	 */
 	if (buffer_new(&bh)) {
-		i_mmap_unlock_write(mapping);
+		i_mmap_unlock_read(mapping);
 		unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0);
-		i_mmap_lock_write(mapping);
+		i_mmap_lock_read(mapping);
 	}
 
 	/*
@@ -635,6 +612,7 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 		result = VM_FAULT_NOPAGE;
 		spin_unlock(ptl);
 	} else {
+		sector = bh.b_blocknr << (blkbits - 9);
 		length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn,
 						bh.b_size);
 		if (length < 0) {
@@ -644,15 +622,25 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 		if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR))
 			goto fallback;
 
+		if (buffer_unwritten(&bh) || buffer_new(&bh)) {
+			int i;
+			for (i = 0; i < PTRS_PER_PMD; i++)
+				clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE);
+			wmb_pmem();
+			count_vm_event(PGMAJFAULT);
+			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
+			result |= VM_FAULT_MAJOR;
+		}
+
 		result |= vmf_insert_pfn_pmd(vma, address, pmd, pfn, write);
 	}
 
  out:
+	i_mmap_unlock_read(mapping);
+
 	if (buffer_unwritten(&bh))
 		complete_unwritten(&bh, !(result & VM_FAULT_ERROR));
 
-	i_mmap_unlock_write(mapping);
-
 	return result;
 
  fallback:
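The net effect of the fs/dax.c changes: the fault paths now hold i_mmap_lock_read() across the truncate check and the mapping insert, while truncate keeps the write side (see the mm/memory.c comment below), so concurrent faults can proceed in parallel but never interleave with an in-flight truncate. A rough userspace analogy using POSIX rwlocks — the names and structure here are illustrative only, not the kernel's i_mmap API:

#include <pthread.h>
#include <stdio.h>

/* Stand-in for the per-mapping i_mmap rwsem. */
static pthread_rwlock_t i_mmap_demo = PTHREAD_RWLOCK_INITIALIZER;

/* Fault path: many faults may hold the read side concurrently. */
static void fault_path_demo(void)
{
	pthread_rwlock_rdlock(&i_mmap_demo);
	/* ... check i_size, insert the pfn mapping ... */
	puts("fault: mapping inserted under read lock");
	pthread_rwlock_unlock(&i_mmap_demo);
}

/* Truncate path: takes the write side, excluding all faults. */
static void truncate_path_demo(void)
{
	pthread_rwlock_wrlock(&i_mmap_demo);
	/* ... unmap the range; no fault can race with us here ... */
	puts("truncate: range unmapped under write lock");
	pthread_rwlock_unlock(&i_mmap_demo);
}

int main(void)
{
	fault_path_demo();
	truncate_path_demo();
	return 0;
}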
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index e26803fb210d..560af0437704 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -165,8 +165,8 @@ int ext4_mpage_readpages(struct address_space *mapping,
 		if (pages) {
 			page = list_entry(pages->prev, struct page, lru);
 			list_del(&page->lru);
-			if (add_to_page_cache_lru(page, mapping,
-						  page->index, GFP_KERNEL))
+			if (add_to_page_cache_lru(page, mapping, page->index,
+					GFP_KERNEL & mapping_gfp_mask(mapping)))
 				goto next_page;
 		}
 
diff --git a/fs/mpage.c b/fs/mpage.c
index 778a4ddef77a..a7c34274f207 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -139,7 +139,8 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
 static struct bio *
 do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 		sector_t *last_block_in_bio, struct buffer_head *map_bh,
-		unsigned long *first_logical_block, get_block_t get_block)
+		unsigned long *first_logical_block, get_block_t get_block,
+		gfp_t gfp)
 {
 	struct inode *inode = page->mapping->host;
 	const unsigned blkbits = inode->i_blkbits;
@@ -277,8 +278,7 @@ alloc_new:
 			goto out;
 		}
 		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
-				min_t(int, nr_pages, BIO_MAX_PAGES),
-				GFP_KERNEL);
+				min_t(int, nr_pages, BIO_MAX_PAGES), gfp);
 		if (bio == NULL)
 			goto confused;
 	}
@@ -361,6 +361,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
 	sector_t last_block_in_bio = 0;
 	struct buffer_head map_bh;
 	unsigned long first_logical_block = 0;
+	gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(mapping);
 
 	map_bh.b_state = 0;
 	map_bh.b_size = 0;
@@ -370,12 +371,13 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
 		prefetchw(&page->flags);
 		list_del(&page->lru);
 		if (!add_to_page_cache_lru(page, mapping,
-					page->index, GFP_KERNEL)) {
+					page->index,
+					gfp)) {
 			bio = do_mpage_readpage(bio, page,
 					nr_pages - page_idx,
 					&last_block_in_bio, &map_bh,
 					&first_logical_block,
-					get_block);
+					get_block, gfp);
 		}
 		page_cache_release(page);
 	}
@@ -395,11 +397,12 @@ int mpage_readpage(struct page *page, get_block_t get_block)
 	sector_t last_block_in_bio = 0;
 	struct buffer_head map_bh;
 	unsigned long first_logical_block = 0;
+	gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(page->mapping);
 
 	map_bh.b_state = 0;
 	map_bh.b_size = 0;
 	bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
-			&map_bh, &first_logical_block, get_block);
+			&map_bh, &first_logical_block, get_block, gfp);
 	if (bio)
 		mpage_bio_submit(READ, bio);
 	return 0;
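The mpage changes are plumbing for the same rule: the gfp is computed once at each entry point and threaded down through do_mpage_readpage() to the spot that actually allocates the bio, so the helper no longer hard-codes GFP_KERNEL. A schematic of that shape — every name below is invented for illustration:

#include <stdio.h>

typedef unsigned int gfp_t;

/* Leaf helper: allocates with whatever mask the entry point decided
 * on, instead of hard-coding a policy of its own. */
static void alloc_bio_demo(int nr_pages, gfp_t gfp)
{
	printf("bio for %d page(s), gfp 0x%x\n", nr_pages, gfp);
}

/* Entry point: compute the effective mask once, pass it down. */
static void readpages_demo(gfp_t mapping_mask, int nr_pages)
{
	gfp_t gfp = 0x7u & mapping_mask;	/* GFP_KERNEL stand-in & mask */

	alloc_bio_demo(nr_pages, gfp);
}

int main(void)
{
	readpages_demo(0x5u, 16);	/* a mapping that forbids one bit */
	return 0;
}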
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index ba1323a94924..a586467f6ff6 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -70,6 +70,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
 	unsigned order;
 	void *data;
 	int ret;
+	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
 
 	/* make various checks */
 	order = get_order(newsize);
@@ -84,7 +85,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
 
 	/* allocate enough contiguous pages to be able to satisfy the
 	 * request */
-	pages = alloc_pages(mapping_gfp_mask(inode->i_mapping), order);
+	pages = alloc_pages(gfp, order);
 	if (!pages)
 		return -ENOMEM;
 
@@ -108,7 +109,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
 		struct page *page = pages + loop;
 
 		ret = add_to_page_cache_lru(page, inode->i_mapping, loop,
-					GFP_KERNEL);
+					gfp);
 		if (ret < 0)
 			goto add_error;
 
diff --git a/lib/Kconfig b/lib/Kconfig
index 2e491ac15622..f0df318104e7 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -220,6 +220,7 @@ config ZLIB_INFLATE
 
 config ZLIB_DEFLATE
 	tristate
+	select BITREVERSE
 
 config LZO_COMPRESS
 	tristate
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 1fedbde68f59..d9b5c817dce8 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3387,6 +3387,7 @@ static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
 	ret = page_counter_memparse(args, "-1", &threshold);
 	if (ret)
 		return ret;
+	threshold <<= PAGE_SHIFT;
 
 	mutex_lock(&memcg->thresholds_lock);
 
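The memcg fix is a unit conversion: page_counter_memparse() yields a value counted in pages, while the threshold is later compared against usage in bytes, so the parsed value must be shifted left by PAGE_SHIFT. A worked example of the conversion, assuming 4 KiB pages (PAGE_SHIFT = 12):

#include <stdio.h>

int main(void)
{
	const unsigned int page_shift = 12;	/* 4 KiB pages assumed */
	unsigned long threshold = 256;		/* parsed value, in pages */

	/* The fix: convert pages to bytes before comparing with usage. */
	threshold <<= page_shift;

	printf("threshold = %lu bytes (%lu KiB)\n", threshold, threshold >> 10);
	return 0;
}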
diff --git a/mm/memory.c b/mm/memory.c
index 9cb27470fee9..deb679c31f2a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2426,6 +2426,8 @@ void unmap_mapping_range(struct address_space *mapping,
 	if (details.last_index < details.first_index)
 		details.last_index = ULONG_MAX;
 
+
+	/* DAX uses i_mmap_lock to serialise file truncate vs page fault */
 	i_mmap_lock_write(mapping);
 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
 		unmap_mapping_range_tree(&mapping->i_mmap, &details);
diff --git a/mm/readahead.c b/mm/readahead.c
index 60cd846a9a44..24682f6f4cfd 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -89,8 +89,8 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
 	while (!list_empty(pages)) {
 		page = list_to_page(pages);
 		list_del(&page->lru);
-		if (add_to_page_cache_lru(page, mapping,
-				page->index, GFP_KERNEL)) {
+		if (add_to_page_cache_lru(page, mapping, page->index,
+				GFP_KERNEL & mapping_gfp_mask(mapping))) {
 			read_cache_pages_invalidate_page(mapping, page);
 			continue;
 		}
@@ -127,8 +127,8 @@ static int read_pages(struct address_space *mapping, struct file *filp,
 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
 		struct page *page = list_to_page(pages);
 		list_del(&page->lru);
-		if (!add_to_page_cache_lru(page, mapping,
-				page->index, GFP_KERNEL)) {
+		if (!add_to_page_cache_lru(page, mapping, page->index,
+				GFP_KERNEL & mapping_gfp_mask(mapping))) {
 			mapping->a_ops->readpage(filp, page);
 		}
 		page_cache_release(page);
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index 0cd46e129920..b967e4f9fed2 100755
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -115,7 +115,7 @@ esac
 BUILD_DEBUG="$(grep -s '^CONFIG_DEBUG_INFO=y' $KCONFIG_CONFIG || true)"
 
 # Setup the directory structure
-rm -rf "$tmpdir" "$fwdir" "$kernel_headers_dir" "$libc_headers_dir" "$dbg_dir"
+rm -rf "$tmpdir" "$fwdir" "$kernel_headers_dir" "$libc_headers_dir" "$dbg_dir" $objtree/debian/files
 mkdir -m 755 -p "$tmpdir/DEBIAN"
 mkdir -p "$tmpdir/lib" "$tmpdir/boot"
 mkdir -p "$fwdir/lib/firmware/$version/"
@@ -408,7 +408,7 @@ binary-arch:
 	\$(MAKE) KDEB_SOURCENAME=${sourcename} KDEB_PKGVERSION=${packageversion} bindeb-pkg
 
 clean:
-	rm -rf debian/*tmp
+	rm -rf debian/*tmp debian/files
 	mv debian/ debian.backup # debian/ might be cleaned away
 	\$(MAKE) clean
 	mv debian.backup debian