Diffstat (limited to 'mm')

 mm/backing-dev.c    |  2
 mm/fremap.c         | 11
 mm/huge_memory.c    |  4
 mm/hugetlb.c        |  2
 mm/memcontrol.c     |  8
 mm/memory.c         | 49
 mm/mempolicy.c      |  6
 mm/mmap.c           |  6
 mm/page-writeback.c |  4
 mm/rmap.c           | 14
 mm/shmem.c          | 11
 mm/slab.c           | 10
 mm/slub.c           |  7
 mm/swap.c           | 29
 mm/swapfile.c       | 19
 mm/vmpressure.c     | 28
 mm/vmstat.c         |  6
 mm/zbud.c           |  2

 18 files changed, 125 insertions(+), 93 deletions(-)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index d014ee5fcbbd..e04454cdb33f 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -232,8 +232,6 @@ static ssize_t stable_pages_required_show(struct device *dev,
 			bdi_cap_stable_pages_required(bdi) ? 1 : 0);
 }
 
-#define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)
-
 static struct device_attribute bdi_dev_attrs[] = {
 	__ATTR_RW(read_ahead_kb),
 	__ATTR_RW(min_ratio),
diff --git a/mm/fremap.c b/mm/fremap.c
index 87da3590c61e..5bff08147768 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -57,17 +57,22 @@ static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long addr, unsigned long pgoff, pgprot_t prot)
 {
 	int err = -ENOMEM;
-	pte_t *pte;
+	pte_t *pte, ptfile;
 	spinlock_t *ptl;
 
 	pte = get_locked_pte(mm, addr, &ptl);
 	if (!pte)
 		goto out;
 
-	if (!pte_none(*pte))
+	ptfile = pgoff_to_pte(pgoff);
+
+	if (!pte_none(*pte)) {
+		if (pte_present(*pte) && pte_soft_dirty(*pte))
+			pte_file_mksoft_dirty(ptfile);
 		zap_pte(mm, vma, addr, pte);
+	}
 
-	set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
+	set_pte_at(mm, addr, pte, ptfile);
 	/*
 	 * We don't need to run update_mmu_cache() here because the "file pte"
 	 * being installed by install_file_pte() is not a real pte - it's a
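Several hunks in this series (fremap.c, memory.c, rmap.c, swapfile.c) exist to keep the soft-dirty bit when a present pte is converted into a file pte or a swap pte. Soft-dirty is consumed from userspace through /proc/<pid>/clear_refs and /proc/<pid>/pagemap; the following is a minimal userspace sketch of that interface (illustrative only, not part of the patch; assumes CONFIG_MEM_SOFT_DIRTY and enough privilege to read pagemap):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Return 1 if the page containing "addr" is soft-dirty, 0 if not, -1 on
 * error. Bit 55 of a pagemap entry is the pte soft-dirty flag. */
static int page_soft_dirty(void *addr)
{
	uint64_t entry = 0;
	long psize = sysconf(_SC_PAGESIZE);
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0)
		return -1;
	if (pread(fd, &entry, sizeof(entry),
		  ((uintptr_t)addr / psize) * sizeof(entry)) != sizeof(entry))
		entry = 0;
	close(fd);
	return (entry >> 55) & 1;
}

int main(void)
{
	static char buf[4096];
	int fd = open("/proc/self/clear_refs", O_WRONLY);

	if (fd >= 0) {
		write(fd, "4", 1);	/* "4": clear this task's soft-dirty bits */
		close(fd);
	}
	buf[0] = 1;			/* write the page again */
	printf("soft-dirty: %d\n", page_soft_dirty(buf));
	return 0;
}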
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 243e710c6039..a92012a71702 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1620,7 +1620,9 @@ static void __split_huge_page_refcount(struct page *page,
 			 ((1L << PG_referenced) |
 			  (1L << PG_swapbacked) |
 			  (1L << PG_mlocked) |
-			  (1L << PG_uptodate)));
+			  (1L << PG_uptodate) |
+			  (1L << PG_active) |
+			  (1L << PG_unevictable)));
 	page_tail->flags |= (1L << PG_dirty);
 
 	/* clear PageTail before overwriting first_page */
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 83aff0a4d093..b60f33080a28 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2490,7 +2490,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 
 	mm = vma->vm_mm;
 
-	tlb_gather_mmu(&tlb, mm, 0);
+	tlb_gather_mmu(&tlb, mm, start, end);
 	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
 	tlb_finish_mmu(&tlb, start, end);
 }
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d12ca6f3c293..0878ff7c26a9 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2522,7 +2522,7 @@ static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
 	spin_unlock(&memcg->pcp_counter_lock);
 }
 
-static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
+static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
 					unsigned long action,
 					void *hcpu)
 {
@@ -3195,11 +3195,11 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
 	if (!s->memcg_params)
 		return -ENOMEM;
 
-	INIT_WORK(&s->memcg_params->destroy,
-			kmem_cache_destroy_work_func);
 	if (memcg) {
 		s->memcg_params->memcg = memcg;
 		s->memcg_params->root_cache = root_cache;
+		INIT_WORK(&s->memcg_params->destroy,
+				kmem_cache_destroy_work_func);
 	} else
 		s->memcg_params->is_root_cache = true;
 
@@ -6335,6 +6335,7 @@ static void mem_cgroup_css_offline(struct cgroup *cont)
 	mem_cgroup_invalidate_reclaim_iterators(memcg);
 	mem_cgroup_reparent_charges(memcg);
 	mem_cgroup_destroy_all_caches(memcg);
+	vmpressure_cleanup(&memcg->vmpressure);
 }
 
 static void mem_cgroup_css_free(struct cgroup *cont)
@@ -6968,7 +6969,6 @@ struct cgroup_subsys mem_cgroup_subsys = {
 #ifdef CONFIG_MEMCG_SWAP
 static int __init enable_swap_account(char *s)
 {
-	/* consider enabled if no parameter or 1 is given */
 	if (!strcmp(s, "1"))
 		really_do_swap_account = 1;
 	else if (!strcmp(s, "0"))
diff --git a/mm/memory.c b/mm/memory.c
index 1ce2e2a734fc..af84bc0ec17c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -209,14 +209,15 @@ static int tlb_next_batch(struct mmu_gather *tlb)
 * tear-down from @mm. The @fullmm argument is used when @mm is without
 * users and we're going to destroy the full address space (exit/execve).
 */
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
 
-	tlb->fullmm = fullmm;
+	/* Is it from 0 to ~0? */
+	tlb->fullmm = !(start | (end+1));
 	tlb->need_flush_all = 0;
-	tlb->start = -1UL;
-	tlb->end = 0;
+	tlb->start = start;
+	tlb->end = end;
 	tlb->need_flush = 0;
 	tlb->local.next = NULL;
 	tlb->local.nr = 0;
@@ -256,8 +257,6 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e
 {
 	struct mmu_gather_batch *batch, *next;
 
-	tlb->start = start;
-	tlb->end = end;
 	tlb_flush_mmu(tlb);
 
 	/* keep the page table cache within bounds */
@@ -1099,7 +1098,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	spinlock_t *ptl;
 	pte_t *start_pte;
 	pte_t *pte;
-	unsigned long range_start = addr;
 
 again:
 	init_rss_vec(rss);
@@ -1141,9 +1139,12 @@ again:
 				continue;
 			if (unlikely(details) && details->nonlinear_vma
 			    && linear_page_index(details->nonlinear_vma,
-						addr) != page->index)
-				set_pte_at(mm, addr, pte,
-					   pgoff_to_pte(page->index));
+						addr) != page->index) {
+				pte_t ptfile = pgoff_to_pte(page->index);
+				if (pte_soft_dirty(ptent))
+					pte_file_mksoft_dirty(ptfile);
+				set_pte_at(mm, addr, pte, ptfile);
+			}
 			if (PageAnon(page))
 				rss[MM_ANONPAGES]--;
 			else {
@@ -1202,17 +1203,25 @@ again:
 	 * and page-free while holding it.
 	 */
 	if (force_flush) {
+		unsigned long old_end;
+
 		force_flush = 0;
 
-#ifdef HAVE_GENERIC_MMU_GATHER
-		tlb->start = range_start;
+		/*
+		 * Flush the TLB just for the previous segment,
+		 * then update the range to be the remaining
+		 * TLB range.
+		 */
+		old_end = tlb->end;
 		tlb->end = addr;
-#endif
+
 		tlb_flush_mmu(tlb);
-		if (addr != end) {
-			range_start = addr;
+
+		tlb->start = addr;
+		tlb->end = old_end;
+
+		if (addr != end)
 			goto again;
-		}
 	}
 
 	return addr;
@@ -1397,7 +1406,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
 	unsigned long end = start + size;
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm, 0);
+	tlb_gather_mmu(&tlb, mm, start, end);
 	update_hiwater_rss(mm);
 	mmu_notifier_invalidate_range_start(mm, start, end);
 	for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
@@ -1423,7 +1432,7 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
 	unsigned long end = address + size;
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm, 0);
+	tlb_gather_mmu(&tlb, mm, address, end);
 	update_hiwater_rss(mm);
 	mmu_notifier_invalidate_range_start(mm, address, end);
 	unmap_single_vma(&tlb, vma, address, end, details);
@@ -3115,6 +3124,8 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		exclusive = 1;
 	}
 	flush_icache_page(vma, page);
+	if (pte_swp_soft_dirty(orig_pte))
+		pte = pte_mksoft_dirty(pte);
 	set_pte_at(mm, address, page_table, pte);
 	if (page == swapcache)
 		do_page_add_anon_rmap(page, vma, address, exclusive);
@@ -3408,6 +3419,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		entry = mk_pte(page, vma->vm_page_prot);
 		if (flags & FAULT_FLAG_WRITE)
 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+		else if (pte_file(orig_pte) && pte_file_soft_dirty(orig_pte))
+			pte_mksoft_dirty(entry);
 		if (anon) {
 			inc_mm_counter_fast(mm, MM_ANONPAGES);
 			page_add_new_anon_rmap(page, vma, address);
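With the new signature above, callers tell the mmu_gather up front which virtual range will be torn down, and exit_mmap()'s "0, -1" is recognised as a full-mm teardown. A standalone userspace check of that detection expression (illustrative only, not kernel code):

#include <stdio.h>

/* The fullmm test added above, !(start | (end + 1)), is true only for
 * start == 0 and end == ~0UL, i.e. the "0, -1" range exit_mmap() passes. */
static int is_fullmm(unsigned long start, unsigned long end)
{
	return !(start | (end + 1));
}

int main(void)
{
	printf("%d\n", is_fullmm(0UL, -1UL));			/* 1: whole address space */
	printf("%d\n", is_fullmm(0x400000UL, 0x500000UL));	/* 0: partial unmap */
	return 0;
}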
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 74310017296e..4baf12e534d1 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -732,7 +732,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
 		if (prev) {
 			vma = prev;
 			next = vma->vm_next;
-			continue;
+			if (mpol_equal(vma_policy(vma), new_pol))
+				continue;
+			/* vma_merge() joined vma && vma->next, case 8 */
+			goto replace;
 		}
 		if (vma->vm_start != vmstart) {
 			err = split_vma(vma->vm_mm, vma, vmstart, 1);
@@ -744,6 +747,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
 			if (err)
 				goto out;
 		}
+ replace:
 		err = vma_replace_policy(vma, new_pol);
 		if (err)
 			goto out;
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -865,7 +865,7 @@ again: remove_next = 1 + (end > next->vm_end);
 		if (next->anon_vma)
 			anon_vma_merge(vma, next);
 		mm->map_count--;
-		vma_set_policy(vma, vma_policy(next));
+		mpol_put(vma_policy(next));
 		kmem_cache_free(vm_area_cachep, next);
 		/*
 		 * In mprotect's case 6 (see comments on vma_merge),
@@ -2336,7 +2336,7 @@ static void unmap_region(struct mm_struct *mm,
 	struct mmu_gather tlb;
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm, 0);
+	tlb_gather_mmu(&tlb, mm, start, end);
 	update_hiwater_rss(mm);
 	unmap_vmas(&tlb, vma, start, end);
 	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
@@ -2709,7 +2709,7 @@ void exit_mmap(struct mm_struct *mm)
 
 	lru_add_drain();
 	flush_cache_mm(mm);
-	tlb_gather_mmu(&tlb, mm, 1);
+	tlb_gather_mmu(&tlb, mm, 0, -1);
 	/* update_hiwater_rss(mm) here? but nobody should be looking */
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
 	unmap_vmas(&tlb, vma, 0, -1);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 4514ad7415c3..3f0c895c71fe 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1619,7 +1619,7 @@ void writeback_set_ratelimit(void)
 		ratelimit_pages = 16;
 }
 
-static int __cpuinit
+static int
 ratelimit_handler(struct notifier_block *self, unsigned long action,
 		  void *hcpu)
 {
@@ -1634,7 +1634,7 @@ ratelimit_handler(struct notifier_block *self, unsigned long action,
 	}
 }
 
-static struct notifier_block __cpuinitdata ratelimit_nb = {
+static struct notifier_block ratelimit_nb = {
 	.notifier_call	= ratelimit_handler,
 	.next		= NULL,
 };
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1236,6 +1236,7 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			swp_entry_to_pte(make_hwpoison_entry(page)));
 	} else if (PageAnon(page)) {
 		swp_entry_t entry = { .val = page_private(page) };
+		pte_t swp_pte;
 
 		if (PageSwapCache(page)) {
 			/*
@@ -1264,7 +1265,10 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION);
 			entry = make_migration_entry(page, pte_write(pteval));
 		}
-		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
+		swp_pte = swp_entry_to_pte(entry);
+		if (pte_soft_dirty(pteval))
+			swp_pte = pte_swp_mksoft_dirty(swp_pte);
+		set_pte_at(mm, address, pte, swp_pte);
 		BUG_ON(pte_file(*pte));
 	} else if (IS_ENABLED(CONFIG_MIGRATION) &&
 		   (TTU_ACTION(flags) == TTU_MIGRATION)) {
@@ -1401,8 +1405,12 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
 		pteval = ptep_clear_flush(vma, address, pte);
 
 		/* If nonlinear, store the file page offset in the pte. */
-		if (page->index != linear_page_index(vma, address))
-			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));
+		if (page->index != linear_page_index(vma, address)) {
+			pte_t ptfile = pgoff_to_pte(page->index);
+			if (pte_soft_dirty(pteval))
+				pte_file_mksoft_dirty(ptfile);
+			set_pte_at(mm, address, pte, ptfile);
+		}
 
 		/* Move the dirty bit to the physical page now the pte is gone. */
 		if (pte_dirty(pteval))
diff --git a/mm/shmem.c b/mm/shmem.c
index a87990cf9f94..e43dc555069d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1798,7 +1798,8 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
 		}
 	}
 
-	offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
+	if (offset >= 0)
+		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
 	mutex_unlock(&inode->i_mutex);
 	return offset;
 }
@@ -2908,14 +2909,8 @@ EXPORT_SYMBOL_GPL(shmem_truncate_range);
 
 /* common code */
 
-static char *shmem_dname(struct dentry *dentry, char *buffer, int buflen)
-{
-	return dynamic_dname(dentry, buffer, buflen, "/%s (deleted)",
-				dentry->d_name.name);
-}
-
 static struct dentry_operations anon_ops = {
-	.d_dname = shmem_dname
+	.d_dname = simple_dname
 };
 
 /**
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -787,7 +787,7 @@ static void next_reap_node(void)
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
-static void __cpuinit start_cpu_timer(int cpu)
+static void start_cpu_timer(int cpu)
 {
 	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
 
@@ -1186,7 +1186,7 @@ static inline int slabs_tofree(struct kmem_cache *cachep,
 	return (n->free_objects + cachep->num - 1) / cachep->num;
 }
 
-static void __cpuinit cpuup_canceled(long cpu)
+static void cpuup_canceled(long cpu)
 {
 	struct kmem_cache *cachep;
 	struct kmem_cache_node *n = NULL;
@@ -1251,7 +1251,7 @@ free_array_cache:
 	}
 }
 
-static int __cpuinit cpuup_prepare(long cpu)
+static int cpuup_prepare(long cpu)
 {
 	struct kmem_cache *cachep;
 	struct kmem_cache_node *n = NULL;
@@ -1334,7 +1334,7 @@ bad:
 	return -ENOMEM;
 }
 
-static int __cpuinit cpuup_callback(struct notifier_block *nfb,
+static int cpuup_callback(struct notifier_block *nfb,
 				    unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
@@ -1390,7 +1390,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 	return notifier_from_errno(err);
 }
 
-static struct notifier_block __cpuinitdata cpucache_notifier = {
+static struct notifier_block cpucache_notifier = {
 	&cpuup_callback, NULL, 0
 };
 
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1968,9 +1968,6 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 	int pages;
 	int pobjects;
 
-	if (!s->cpu_partial)
-		return;
-
 	do {
 		pages = 0;
 		pobjects = 0;
@@ -3773,7 +3770,7 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
 * Use the cpu notifier to insure that the cpu slabs are flushed when
 * necessary.
 */
-static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
+static int slab_cpuup_callback(struct notifier_block *nfb,
 		unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
@@ -3799,7 +3796,7 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata slab_notifier = {
+static struct notifier_block slab_notifier = {
 	.notifier_call = slab_cpuup_callback
 };
 
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -512,12 +512,7 @@ EXPORT_SYMBOL(__lru_cache_add);
 */
 void lru_cache_add(struct page *page)
 {
-	if (PageActive(page)) {
-		VM_BUG_ON(PageUnevictable(page));
-	} else if (PageUnevictable(page)) {
-		VM_BUG_ON(PageActive(page));
-	}
-
+	VM_BUG_ON(PageActive(page) && PageUnevictable(page));
 	VM_BUG_ON(PageLRU(page));
 	__lru_cache_add(page);
 }
@@ -539,6 +534,7 @@ void add_page_to_unevictable_list(struct page *page)
 
 	spin_lock_irq(&zone->lru_lock);
 	lruvec = mem_cgroup_page_lruvec(page, zone);
+	ClearPageActive(page);
 	SetPageUnevictable(page);
 	SetPageLRU(page);
 	add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
@@ -774,8 +770,6 @@ EXPORT_SYMBOL(__pagevec_release);
 void lru_add_page_tail(struct page *page, struct page *page_tail,
 		       struct lruvec *lruvec, struct list_head *list)
 {
-	int uninitialized_var(active);
-	enum lru_list lru;
 	const int file = 0;
 
 	VM_BUG_ON(!PageHead(page));
@@ -787,20 +781,6 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
 	if (!list)
 		SetPageLRU(page_tail);
 
-	if (page_evictable(page_tail)) {
-		if (PageActive(page)) {
-			SetPageActive(page_tail);
-			active = 1;
-			lru = LRU_ACTIVE_ANON;
-		} else {
-			active = 0;
-			lru = LRU_INACTIVE_ANON;
-		}
-	} else {
-		SetPageUnevictable(page_tail);
-		lru = LRU_UNEVICTABLE;
-	}
-
 	if (likely(PageLRU(page)))
 		list_add_tail(&page_tail->lru, &page->lru);
 	else if (list) {
@@ -816,13 +796,13 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
 		 * Use the standard add function to put page_tail on the list,
 		 * but then correct its position so they all end up in order.
 		 */
-		add_page_to_lru_list(page_tail, lruvec, lru);
+		add_page_to_lru_list(page_tail, lruvec, page_lru(page_tail));
 		list_head = page_tail->lru.prev;
 		list_move_tail(&page_tail->lru, list_head);
 	}
 
 	if (!PageUnevictable(page))
-		update_page_reclaim_stat(lruvec, file, active);
+		update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
@@ -833,7 +813,6 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
 	int active = PageActive(page);
 	enum lru_list lru = page_lru(page);
 
-	VM_BUG_ON(PageUnevictable(page));
 	VM_BUG_ON(PageLRU(page));
 
 	SetPageLRU(page);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 36af6eeaa67e..6cf2e60983b7 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -866,6 +866,21 @@ unsigned int count_swap_pages(int type, int free)
 }
 #endif /* CONFIG_HIBERNATION */
 
+static inline int maybe_same_pte(pte_t pte, pte_t swp_pte)
+{
+#ifdef CONFIG_MEM_SOFT_DIRTY
+	/*
+	 * When pte keeps soft dirty bit the pte generated
+	 * from swap entry does not has it, still it's same
+	 * pte from logical point of view.
+	 */
+	pte_t swp_pte_dirty = pte_swp_mksoft_dirty(swp_pte);
+	return pte_same(pte, swp_pte) || pte_same(pte, swp_pte_dirty);
+#else
+	return pte_same(pte, swp_pte);
+#endif
+}
+
 /*
 * No need to decide whether this PTE shares the swap entry with others,
 * just let do_wp_page work it out if a write is requested later - to
@@ -892,7 +907,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	}
 
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
-	if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
+	if (unlikely(!maybe_same_pte(*pte, swp_entry_to_pte(entry)))) {
 		mem_cgroup_cancel_charge_swapin(memcg);
 		ret = 0;
 		goto out;
@@ -947,7 +962,7 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		 * swapoff spends a _lot_ of time in this loop!
 		 * Test inline before going to call unuse_pte.
 		 */
-		if (unlikely(pte_same(*pte, swp_pte))) {
+		if (unlikely(maybe_same_pte(*pte, swp_pte))) {
 			pte_unmap(pte);
 			ret = unuse_pte(vma, pmd, addr, entry, page);
 			if (ret)
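The maybe_same_pte() helper added above accepts a pte that differs from the expected swap pte only in the soft-dirty bit, because a pte rebuilt from a swp_entry_t never carries that bit. A userspace model of the comparison (illustrative only; the bit position is made up for the demo, not the real arch definition):

#include <stdio.h>

#define SOFT_DIRTY (1UL << 55)	/* illustrative bit position only */

/* Equal outright, or equal once the soft-dirty bit is added to the
 * freshly built swap pte. */
static int maybe_same(unsigned long pte, unsigned long swp_pte)
{
	return pte == swp_pte || pte == (swp_pte | SOFT_DIRTY);
}

int main(void)
{
	unsigned long swp = 0x1234;

	printf("%d\n", maybe_same(swp, swp));			/* 1 */
	printf("%d\n", maybe_same(swp | SOFT_DIRTY, swp));	/* 1 */
	printf("%d\n", maybe_same(swp ^ 1, swp));		/* 0 */
	return 0;
}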
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index 736a6011c2c8..0c1e37d829fa 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -180,12 +180,12 @@ static void vmpressure_work_fn(struct work_struct *work)
 	if (!vmpr->scanned)
 		return;
 
-	mutex_lock(&vmpr->sr_lock);
+	spin_lock(&vmpr->sr_lock);
 	scanned = vmpr->scanned;
 	reclaimed = vmpr->reclaimed;
 	vmpr->scanned = 0;
 	vmpr->reclaimed = 0;
-	mutex_unlock(&vmpr->sr_lock);
+	spin_unlock(&vmpr->sr_lock);
 
 	do {
 		if (vmpressure_event(vmpr, scanned, reclaimed))
@@ -240,13 +240,13 @@ void vmpressure(gfp_t gfp, struct mem_cgroup *memcg,
 	if (!scanned)
 		return;
 
-	mutex_lock(&vmpr->sr_lock);
+	spin_lock(&vmpr->sr_lock);
 	vmpr->scanned += scanned;
 	vmpr->reclaimed += reclaimed;
 	scanned = vmpr->scanned;
-	mutex_unlock(&vmpr->sr_lock);
+	spin_unlock(&vmpr->sr_lock);
 
-	if (scanned < vmpressure_win || work_pending(&vmpr->work))
+	if (scanned < vmpressure_win)
 		return;
 	schedule_work(&vmpr->work);
 }
@@ -367,8 +367,24 @@ void vmpressure_unregister_event(struct cgroup *cg, struct cftype *cft,
 */
 void vmpressure_init(struct vmpressure *vmpr)
 {
-	mutex_init(&vmpr->sr_lock);
+	spin_lock_init(&vmpr->sr_lock);
 	mutex_init(&vmpr->events_lock);
 	INIT_LIST_HEAD(&vmpr->events);
 	INIT_WORK(&vmpr->work, vmpressure_work_fn);
 }
+
+/**
+ * vmpressure_cleanup() - shuts down vmpressure control structure
+ * @vmpr:	Structure to be cleaned up
+ *
+ * This function should be called before the structure in which it is
+ * embedded is cleaned up.
+ */
+void vmpressure_cleanup(struct vmpressure *vmpr)
+{
+	/*
+	 * Make sure there is no pending work before eventfd infrastructure
+	 * goes away.
+	 */
+	flush_work(&vmpr->work);
+}
diff --git a/mm/vmstat.c b/mm/vmstat.c
index f42745e65780..20c2ef4458fa 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1182,7 +1182,7 @@ static void vmstat_update(struct work_struct *w)
 		round_jiffies_relative(sysctl_stat_interval));
 }
 
-static void __cpuinit start_cpu_timer(int cpu)
+static void start_cpu_timer(int cpu)
 {
 	struct delayed_work *work = &per_cpu(vmstat_work, cpu);
 
@@ -1194,7 +1194,7 @@ static void __cpuinit start_cpu_timer(int cpu)
 * Use the cpu notifier to insure that the thresholds are recalculated
 * when necessary.
 */
-static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
+static int vmstat_cpuup_callback(struct notifier_block *nfb,
 					   unsigned long action,
 					   void *hcpu)
 {
@@ -1226,7 +1226,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata vmstat_notifier =
+static struct notifier_block vmstat_notifier =
 	{ &vmstat_cpuup_callback, NULL, 0 };
 #endif
 
diff --git a/mm/zbud.c b/mm/zbud.c
--- a/mm/zbud.c
+++ b/mm/zbud.c
@@ -257,7 +257,7 @@ int zbud_alloc(struct zbud_pool *pool, int size, gfp_t gfp,
 
 	if (size <= 0 || gfp & __GFP_HIGHMEM)
 		return -EINVAL;
-	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED)
+	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
 		return -ENOSPC;
 	chunks = size_to_chunks(size);
 	spin_lock(&pool->lock);
