Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c         |  9
-rw-r--r--  mm/memory.c          | 32
-rw-r--r--  mm/page-writeback.c  | 89
-rw-r--r--  mm/readahead.c       |  2
-rw-r--r--  mm/slab.c            |  8
-rw-r--r--  mm/truncate.c        |  4
6 files changed, 79 insertions, 65 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 606432f71b3a..8332c77b1bd1 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1181,8 +1181,6 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 		if (pos < size) {
 			retval = generic_file_direct_IO(READ, iocb,
 						iov, pos, nr_segs);
-			if (retval > 0 && !is_sync_kiocb(iocb))
-				retval = -EIOCBQUEUED;
 			if (retval > 0)
 				*ppos = pos + retval;
 		}
@@ -2047,15 +2045,14 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
 	 * Sync the fs metadata but not the minor inode changes and
 	 * of course not the data as we did direct DMA for the IO.
 	 * i_mutex is held, which protects generic_osync_inode() from
-	 * livelocking.
+	 * livelocking.  AIO O_DIRECT ops attempt to sync metadata here.
 	 */
-	if (written >= 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
+	if ((written >= 0 || written == -EIOCBQUEUED) &&
+	    ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
 		int err = generic_osync_inode(inode, mapping, OSYNC_METADATA);
 		if (err < 0)
 			written = err;
 	}
-	if (written == count && !is_sync_kiocb(iocb))
-		written = -EIOCBQUEUED;
 	return written;
 }
 EXPORT_SYMBOL(generic_file_direct_write);
diff --git a/mm/memory.c b/mm/memory.c
index 4198df0dff1c..bf6100236e62 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1110,23 +1110,29 @@ static int zeromap_pte_range(struct mm_struct *mm, pmd_t *pmd,
 {
 	pte_t *pte;
 	spinlock_t *ptl;
+	int err = 0;
 
 	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
 	if (!pte)
-		return -ENOMEM;
+		return -EAGAIN;
 	arch_enter_lazy_mmu_mode();
 	do {
 		struct page *page = ZERO_PAGE(addr);
 		pte_t zero_pte = pte_wrprotect(mk_pte(page, prot));
+
+		if (unlikely(!pte_none(*pte))) {
+			err = -EEXIST;
+			pte++;
+			break;
+		}
 		page_cache_get(page);
 		page_add_file_rmap(page);
 		inc_mm_counter(mm, file_rss);
-		BUG_ON(!pte_none(*pte));
 		set_pte_at(mm, addr, pte, zero_pte);
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 	arch_leave_lazy_mmu_mode();
 	pte_unmap_unlock(pte - 1, ptl);
-	return 0;
+	return err;
 }
 
 static inline int zeromap_pmd_range(struct mm_struct *mm, pud_t *pud,
@@ -1134,16 +1140,18 @@ static inline int zeromap_pmd_range(struct mm_struct *mm, pud_t *pud,
 {
 	pmd_t *pmd;
 	unsigned long next;
+	int err;
 
 	pmd = pmd_alloc(mm, pud, addr);
 	if (!pmd)
-		return -ENOMEM;
+		return -EAGAIN;
 	do {
 		next = pmd_addr_end(addr, end);
-		if (zeromap_pte_range(mm, pmd, addr, next, prot))
-			return -ENOMEM;
+		err = zeromap_pte_range(mm, pmd, addr, next, prot);
+		if (err)
+			break;
 	} while (pmd++, addr = next, addr != end);
-	return 0;
+	return err;
 }
 
 static inline int zeromap_pud_range(struct mm_struct *mm, pgd_t *pgd,
@@ -1151,16 +1159,18 @@ static inline int zeromap_pud_range(struct mm_struct *mm, pgd_t *pgd,
 {
 	pud_t *pud;
 	unsigned long next;
+	int err;
 
 	pud = pud_alloc(mm, pgd, addr);
 	if (!pud)
-		return -ENOMEM;
+		return -EAGAIN;
 	do {
 		next = pud_addr_end(addr, end);
-		if (zeromap_pmd_range(mm, pud, addr, next, prot))
-			return -ENOMEM;
+		err = zeromap_pmd_range(mm, pud, addr, next, prot);
+		if (err)
+			break;
 	} while (pud++, addr = next, addr != end);
-	return 0;
+	return err;
 }
 
 int zeromap_page_range(struct vm_area_struct *vma,
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 8d9b19f239c3..237107c1b084 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -21,6 +21,7 @@
 #include <linux/writeback.h>
 #include <linux/init.h>
 #include <linux/backing-dev.h>
+#include <linux/task_io_accounting_ops.h>
 #include <linux/blkdev.h>
 #include <linux/mpage.h>
 #include <linux/rmap.h>
@@ -761,23 +762,24 @@ int __set_page_dirty_nobuffers(struct page *page)
 		struct address_space *mapping = page_mapping(page);
 		struct address_space *mapping2;
 
-		if (mapping) {
-			write_lock_irq(&mapping->tree_lock);
-			mapping2 = page_mapping(page);
-			if (mapping2) { /* Race with truncate? */
-				BUG_ON(mapping2 != mapping);
-				if (mapping_cap_account_dirty(mapping))
-					__inc_zone_page_state(page,
-								NR_FILE_DIRTY);
-				radix_tree_tag_set(&mapping->page_tree,
-					page_index(page), PAGECACHE_TAG_DIRTY);
-			}
-			write_unlock_irq(&mapping->tree_lock);
-			if (mapping->host) {
-				/* !PageAnon && !swapper_space */
-				__mark_inode_dirty(mapping->host,
-							I_DIRTY_PAGES);
+		if (!mapping)
+			return 1;
+
+		write_lock_irq(&mapping->tree_lock);
+		mapping2 = page_mapping(page);
+		if (mapping2) { /* Race with truncate? */
+			BUG_ON(mapping2 != mapping);
+			if (mapping_cap_account_dirty(mapping)) {
+				__inc_zone_page_state(page, NR_FILE_DIRTY);
+				task_io_account_write(PAGE_CACHE_SIZE);
 			}
+			radix_tree_tag_set(&mapping->page_tree,
+				page_index(page), PAGECACHE_TAG_DIRTY);
+		}
+		write_unlock_irq(&mapping->tree_lock);
+		if (mapping->host) {
+			/* !PageAnon && !swapper_space */
+			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 		}
 		return 1;
 	}
@@ -851,27 +853,26 @@ int test_clear_page_dirty(struct page *page)
 	struct address_space *mapping = page_mapping(page);
 	unsigned long flags;
 
-	if (mapping) {
-		write_lock_irqsave(&mapping->tree_lock, flags);
-		if (TestClearPageDirty(page)) {
-			radix_tree_tag_clear(&mapping->page_tree,
-						page_index(page),
-						PAGECACHE_TAG_DIRTY);
-			write_unlock_irqrestore(&mapping->tree_lock, flags);
-			/*
-			 * We can continue to use `mapping' here because the
-			 * page is locked, which pins the address_space
-			 */
-			if (mapping_cap_account_dirty(mapping)) {
-				page_mkclean(page);
-				dec_zone_page_state(page, NR_FILE_DIRTY);
-			}
-			return 1;
-		}
+	if (!mapping)
+		return TestClearPageDirty(page);
+
+	write_lock_irqsave(&mapping->tree_lock, flags);
+	if (TestClearPageDirty(page)) {
+		radix_tree_tag_clear(&mapping->page_tree,
+				page_index(page), PAGECACHE_TAG_DIRTY);
 		write_unlock_irqrestore(&mapping->tree_lock, flags);
-		return 0;
+		/*
+		 * We can continue to use `mapping' here because the
+		 * page is locked, which pins the address_space
+		 */
+		if (mapping_cap_account_dirty(mapping)) {
+			page_mkclean(page);
+			dec_zone_page_state(page, NR_FILE_DIRTY);
+		}
+		return 1;
 	}
-	return TestClearPageDirty(page);
+	write_unlock_irqrestore(&mapping->tree_lock, flags);
+	return 0;
 }
 EXPORT_SYMBOL(test_clear_page_dirty);
 
@@ -893,17 +894,17 @@ int clear_page_dirty_for_io(struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);
 
-	if (mapping) {
-		if (TestClearPageDirty(page)) {
-			if (mapping_cap_account_dirty(mapping)) {
-				page_mkclean(page);
-				dec_zone_page_state(page, NR_FILE_DIRTY);
-			}
-			return 1;
-		}
-		return 0;
-	}
-	return TestClearPageDirty(page);
+	if (!mapping)
+		return TestClearPageDirty(page);
+
+	if (TestClearPageDirty(page)) {
+		if (mapping_cap_account_dirty(mapping)) {
+			page_mkclean(page);
+			dec_zone_page_state(page, NR_FILE_DIRTY);
+		}
+		return 1;
+	}
+	return 0;
 }
 EXPORT_SYMBOL(clear_page_dirty_for_io);
 
diff --git a/mm/readahead.c b/mm/readahead.c
index c0df5ed05f62..0f539e8e827a 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/blkdev.h>
 #include <linux/backing-dev.h>
+#include <linux/task_io_accounting_ops.h>
 #include <linux/pagevec.h>
 
 void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
@@ -151,6 +152,7 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
 			put_pages_list(pages);
 			break;
 		}
+		task_io_account_read(PAGE_CACHE_SIZE);
 	}
 	pagevec_lru_add(&lru_pvec);
 	return ret;
diff --git a/mm/slab.c b/mm/slab.c
index 56af694c9e6a..2c655532f5ef 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -946,7 +946,8 @@ static void __devinit start_cpu_timer(int cpu)
 	if (keventd_up() && reap_work->work.func == NULL) {
 		init_reap_node(cpu);
 		INIT_DELAYED_WORK(reap_work, cache_reap);
-		schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
+		schedule_delayed_work_on(cpu, reap_work,
+					__round_jiffies_relative(HZ, cpu));
 	}
 }
 
@@ -4006,7 +4007,7 @@ static void cache_reap(struct work_struct *unused)
 	if (!mutex_trylock(&cache_chain_mutex)) {
 		/* Give up. Setup the next iteration. */
 		schedule_delayed_work(&__get_cpu_var(reap_work),
-				      REAPTIMEOUT_CPUC);
+				      round_jiffies_relative(REAPTIMEOUT_CPUC));
 		return;
 	}
 
@@ -4052,7 +4053,8 @@ next:
 	next_reap_node();
 	refresh_cpu_vm_stats(smp_processor_id());
 	/* Set up the next iteration */
-	schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
+	schedule_delayed_work(&__get_cpu_var(reap_work),
+		round_jiffies_relative(REAPTIMEOUT_CPUC));
 }
 
 #ifdef CONFIG_PROC_FS
diff --git a/mm/truncate.c b/mm/truncate.c
index e07b1e682c38..9bfb8e853860 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/pagemap.h>
 #include <linux/pagevec.h>
+#include <linux/task_io_accounting_ops.h>
 #include <linux/buffer_head.h>	/* grr. try_to_release_page,
 				   do_invalidatepage */
 
@@ -69,7 +70,8 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
 	if (PagePrivate(page))
 		do_invalidatepage(page, 0);
 
-	clear_page_dirty(page);
+	if (test_clear_page_dirty(page))
+		task_io_account_cancelled_write(PAGE_CACHE_SIZE);
 	ClearPageUptodate(page);
 	ClearPageMappedToDisk(page);
 	remove_from_page_cache(page);