path: root/mm
author     David S. Miller <davem@davemloft.net>  2009-03-26 18:23:24 -0400
committer  David S. Miller <davem@davemloft.net>  2009-03-26 18:23:24 -0400
commit     08abe18af1f78ee80c3c3a5ac47c3e0ae0beadf6 (patch)
tree       2be39bf8942edca1bcec735145e144a682ca9cd3 /mm
parent     f0de70f8bb56952f6e016a65a8a8d006918f5bf6 (diff)
parent     0384e2959127a56d0640505d004d8dd92f9c29f5 (diff)
Merge branch 'master' of /home/davem/src/GIT/linux-2.6/
Conflicts: drivers/net/wimax/i2400m/usb-notif.c
Diffstat (limited to 'mm')
-rw-r--r--  mm/mmap.c            4
-rw-r--r--  mm/page-writeback.c  4
-rw-r--r--  mm/shmem.c           2
-rw-r--r--  mm/vmscan.c          4
4 files changed, 10 insertions, 4 deletions
diff --git a/mm/mmap.c b/mm/mmap.c
index 00ced3ee49a8..1abb9185a686 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -20,6 +20,7 @@
 #include <linux/fs.h>
 #include <linux/personality.h>
 #include <linux/security.h>
+#include <linux/ima.h>
 #include <linux/hugetlb.h>
 #include <linux/profile.h>
 #include <linux/module.h>
@@ -1049,6 +1050,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 	error = security_file_mmap(file, reqprot, prot, flags, addr, 0);
 	if (error)
 		return error;
+	error = ima_file_mmap(file, prot);
+	if (error)
+		return error;
 
 	return mmap_region(file, addr, len, flags, vm_flags, pgoff);
 }
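
Note: the rejected-mapping path added above depends on ima_file_mmap() from the newly included <linux/ima.h>. A minimal sketch of the usual CONFIG-gated stub pattern for such a hook, so kernels built without CONFIG_IMA see it compile to a no-op; the header contents shown here are an assumption inferred from the call site, not the actual header:

struct file;

#ifdef CONFIG_IMA
extern int ima_file_mmap(struct file *file, unsigned long prot);
#else
/* No IMA configured: never measure and never reject the mapping. */
static inline int ima_file_mmap(struct file *file, unsigned long prot)
{
	return 0;
}
#endif

With the stub always returning 0, the new error check folds away on non-IMA builds and do_mmap_pgoff() behaves as before.
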
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 74dc57c74349..40ca7cdb653e 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -66,7 +66,7 @@ static inline long sync_writeback_pages(void)
 /*
  * Start background writeback (via pdflush) at this percentage
  */
-int dirty_background_ratio = 5;
+int dirty_background_ratio = 10;
 
 /*
  * dirty_background_bytes starts at 0 (disabled) so that it is a function of
@@ -83,7 +83,7 @@ int vm_highmem_is_dirtyable;
 /*
  * The generator of dirty data starts writeback at this percentage
  */
-int vm_dirty_ratio = 10;
+int vm_dirty_ratio = 20;
 
 /*
  * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
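
Note: this file only changes defaults. Background writeback (via pdflush) now starts once 10% of dirtyable memory is dirty (previously 5%), and writers are throttled at 20% (previously 10%); both values remain tunable via the vm.dirty_background_ratio and vm.dirty_ratio sysctls or their *_bytes counterparts. A back-of-the-envelope sketch of the new thresholds, assuming a hypothetical 4 GiB of dirtyable memory (userspace illustration, not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned long dirtyable_kb  = 4UL * 1024 * 1024;        /* assumed: 4 GiB dirtyable */
	unsigned long background_kb = dirtyable_kb * 10 / 100;  /* new dirty_background_ratio */
	unsigned long throttle_kb   = dirtyable_kb * 20 / 100;  /* new vm_dirty_ratio */

	/* Prints ~409 MiB (was ~204 MiB) and ~819 MiB (was ~409 MiB). */
	printf("background writeback above ~%lu MiB dirty\n", background_kb / 1024);
	printf("writers throttled above    ~%lu MiB dirty\n", throttle_kb / 1024);
	return 0;
}
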
diff --git a/mm/shmem.c b/mm/shmem.c
index 4103a239ce84..7ec78e24a30d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -28,6 +28,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/swap.h>
+#include <linux/ima.h>
 
 static struct vfsmount *shm_mnt;
 
@@ -2665,6 +2666,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
 	if (IS_ERR(file))
 		return PTR_ERR(file);
 
+	ima_shm_check(file);
 	if (vma->vm_file)
 		fput(vma->vm_file);
 	vma->vm_file = file;
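
Note: ima_shm_check() is called unconditionally and its return value is ignored, which suggests a void-style hook. A minimal stub sketch analogous to the ima_file_mmap() one above; again an assumption inferred from the call site, not the actual header:

struct file;

#ifdef CONFIG_IMA
extern void ima_shm_check(struct file *file);
#else
/* No IMA configured: nothing to account for the shmem-backed file. */
static inline void ima_shm_check(struct file *file)
{
}
#endif
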
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 6177e3bcd66b..56ddf41149eb 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1262,7 +1262,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	 * Move the pages to the [file or anon] inactive list.
 	 */
 	pagevec_init(&pvec, 1);
-	pgmoved = 0;
 	lru = LRU_BASE + file * LRU_FILE;
 
 	spin_lock_irq(&zone->lru_lock);
@@ -1274,6 +1273,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	 */
 	reclaim_stat->recent_rotated[!!file] += pgmoved;
 
+	pgmoved = 0;
 	while (!list_empty(&l_inactive)) {
 		page = lru_to_page(&l_inactive);
 		prefetchw_prev_lru_page(page, &l_inactive, flags);
@@ -1469,7 +1469,7 @@ static void shrink_zone(int priority, struct zone *zone,
 		int file = is_file_lru(l);
 		int scan;
 
-		scan = zone_page_state(zone, NR_LRU_BASE + l);
+		scan = zone_nr_pages(zone, sc, l);
 		if (priority) {
 			scan >>= priority;
 			scan = (scan * percent[file]) / 100;
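
Note: the last hunk changes where shrink_zone() reads the LRU size from. zone_nr_pages() takes the scan control, presumably so that memcg-limited reclaim sizes its scan from the cgroup's LRU rather than the whole zone's, while the priority/percent scaling below it is unchanged. A small standalone sketch of that scaling with assumed inputs (LRU of 1,048,576 pages, priority 12, percent[file] = 60):

#include <stdio.h>

int main(void)
{
	unsigned long scan = 1048576;	/* assumed LRU size in pages */
	int priority       = 12;	/* DEF_PRIORITY */
	int percent_file   = 60;	/* assumed get_scan_ratio() output for file pages */

	if (priority) {
		scan >>= priority;			/* 1048576 >> 12 = 256 */
		scan = (scan * percent_file) / 100;	/* 256 * 60 / 100 = 153 */
	}
	printf("pages to scan this pass: %lu\n", scan);
	return 0;
}
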