Diffstat (limited to 'mm')
-rw-r--r--   mm/allocpercpu.c |  2
-rw-r--r--   mm/memory.c      |  6
-rw-r--r--   mm/page_alloc.c  |  5
-rw-r--r--   mm/pdflush.c     |  2
-rw-r--r--   mm/slab.c        |  4
-rw-r--r--   mm/slob.c        |  2
-rw-r--r--   mm/slub.c        |  1
-rw-r--r--   mm/vmscan.c      |  2
-rw-r--r--   mm/vmstat.c      |  2
9 files changed, 21 insertions(+), 5 deletions(-)
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index 1882923bc706..139d5b7b6621 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -143,7 +143,7 @@ void free_percpu(void *__pdata)
 {
 	if (unlikely(!__pdata))
 		return;
-	__percpu_depopulate_mask(__pdata, &cpu_possible_map);
+	__percpu_depopulate_mask(__pdata, cpu_possible_mask);
 	kfree(__percpu_disguise(__pdata));
 }
 EXPORT_SYMBOL_GPL(free_percpu);
diff --git a/mm/memory.c b/mm/memory.c
index baa999e87cd2..2032ad2fc34b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1665,9 +1665,10 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 	 * behaviour that some programs depend on. We mark the "original"
 	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
 	 */
-	if (addr == vma->vm_start && end == vma->vm_end)
+	if (addr == vma->vm_start && end == vma->vm_end) {
 		vma->vm_pgoff = pfn;
-	else if (is_cow_mapping(vma->vm_flags))
+		vma->vm_flags |= VM_PFN_AT_MMAP;
+	} else if (is_cow_mapping(vma->vm_flags))
 		return -EINVAL;
 
 	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
@@ -1679,6 +1680,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 	 * needed from higher level routine calling unmap_vmas
 	 */
 	vma->vm_flags &= ~(VM_IO | VM_RESERVED | VM_PFNMAP);
+	vma->vm_flags &= ~VM_PFN_AT_MMAP;
 	return -EINVAL;
 }
 
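Note: the new VM_PFN_AT_MMAP flag records that the entire VMA was PFN-remapped in one call, which is the addr == vma->vm_start && end == vma->vm_end case above; the error path then clears it again together with the other PFN-map flags. As a hedged illustration only (the mydrv_* names and physical base are made up, not part of this patch), a typical driver mmap handler hits exactly that full-range case:

#include <linux/fs.h>
#include <linux/mm.h>

/* Assumed device physical base address, for illustration only. */
static unsigned long mydrv_phys_base;

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Remap the whole VMA, so remap_pfn_range() sees the full-range case. */
	return remap_pfn_range(vma, vma->vm_start,
			       mydrv_phys_base >> PAGE_SHIFT,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}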
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5c44ed49ca93..a3803ea8c27d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1479,6 +1479,8 @@ __alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
 	unsigned long did_some_progress;
 	unsigned long pages_reclaimed = 0;
 
+	lockdep_trace_alloc(gfp_mask);
+
 	might_sleep_if(wait);
 
 	if (should_fail_alloc_page(gfp_mask, order))
@@ -1578,12 +1580,15 @@ nofail_alloc:
 	 */
 	cpuset_update_task_memory_state();
 	p->flags |= PF_MEMALLOC;
+
+	lockdep_set_current_reclaim_state(gfp_mask);
 	reclaim_state.reclaimed_slab = 0;
 	p->reclaim_state = &reclaim_state;
 
 	did_some_progress = try_to_free_pages(zonelist, order, gfp_mask);
 
 	p->reclaim_state = NULL;
+	lockdep_clear_current_reclaim_state();
 	p->flags &= ~PF_MEMALLOC;
 
 	cond_resched();
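Note: lockdep_trace_alloc() marks each allocation with its gfp mask, and lockdep_set_current_reclaim_state()/lockdep_clear_current_reclaim_state() bracket direct reclaim, so that lockdep can relate locks held across __GFP_FS allocations to locks taken from within reclaim. A minimal sketch of the deadlock shape this is meant to report, assuming a hypothetical myfs_lock that is also taken when the filesystem is called back from direct reclaim (all myfs_* names are invented for this example):

#include <linux/mutex.h>
#include <linux/slab.h>

/* Assumed to also be taken from the filesystem's reclaim/writeback path. */
static DEFINE_MUTEX(myfs_lock);

static void *myfs_alloc_buffer(size_t size)
{
	void *p;

	mutex_lock(&myfs_lock);
	/*
	 * GFP_KERNEL includes __GFP_FS, so this allocation may enter direct
	 * reclaim and call back into the filesystem, which would then block
	 * on myfs_lock while we already hold it.  GFP_NOFS here would break
	 * the cycle.
	 */
	p = kmalloc(size, GFP_KERNEL);
	mutex_unlock(&myfs_lock);

	return p;
}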
diff --git a/mm/pdflush.c b/mm/pdflush.c
index 15de509b68fd..118905e3d788 100644
--- a/mm/pdflush.c
+++ b/mm/pdflush.c
@@ -191,7 +191,7 @@ static int pdflush(void *dummy)
 
 	/*
 	 * Some configs put our parent kthread in a limited cpuset,
-	 * which kthread() overrides, forcing cpus_allowed == CPU_MASK_ALL.
+	 * which kthread() overrides, forcing cpus_allowed == cpu_all_mask.
 	 * Our needs are more modest - cut back to our cpusets cpus_allowed.
 	 * This is needed as pdflush's are dynamically created and destroyed.
 	 * The boottime pdflush's are easily placed w/o these 2 lines.
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3318,6 +3318,8 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	unsigned long save_flags;
 	void *ptr;
 
+	lockdep_trace_alloc(flags);
+
 	if (slab_should_failslab(cachep, flags))
 		return NULL;
 
@@ -3394,6 +3396,8 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 	unsigned long save_flags;
 	void *objp;
 
+	lockdep_trace_alloc(flags);
+
 	if (slab_should_failslab(cachep, flags))
 		return NULL;
 
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -475,6 +475,8 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 	unsigned int *m;
 	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 
+	lockdep_trace_alloc(gfp);
+
 	if (size < PAGE_SIZE - align) {
 		if (!size)
 			return ZERO_SIZE_PTR;
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1590,6 +1590,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	unsigned long flags;
 	unsigned int objsize;
 
+	lockdep_trace_alloc(gfpflags);
 	might_sleep_if(gfpflags & __GFP_WAIT);
 
 	if (should_failslab(s->objsize, gfpflags))
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 56ddf41149eb..479e46719394 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1965,6 +1965,8 @@ static int kswapd(void *p)
 	};
 	node_to_cpumask_ptr(cpumask, pgdat->node_id);
 
+	lockdep_set_current_reclaim_state(GFP_KERNEL);
+
 	if (!cpumask_empty(cpumask))
 		set_cpus_allowed_ptr(tsk, cpumask);
 	current->reclaim_state = &reclaim_state;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 91149746bb8d..8cd81ea1ddc1 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -27,7 +27,7 @@ static void sum_vm_events(unsigned long *ret, const struct cpumask *cpumask)
 
 	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
 
-	for_each_cpu_mask_nr(cpu, *cpumask) {
+	for_each_cpu(cpu, cpumask) {
 		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
 
 		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
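Note: for_each_cpu() takes a const struct cpumask pointer, replacing the by-value for_each_cpu_mask_nr() iterator, in line with the cpu_possible_mask and cpu_all_mask conversions earlier in this diff. A small hedged sketch of the same summation pattern over a hypothetical per-CPU counter (my_counter and sum_my_counter are not part of this patch):

#include <linux/cpumask.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, my_counter);	/* hypothetical per-CPU counter */

static unsigned long sum_my_counter(const struct cpumask *mask)
{
	unsigned long sum = 0;
	int cpu;

	/* Visit only the CPUs set in *mask, e.g. cpu_possible_mask. */
	for_each_cpu(cpu, mask)
		sum += per_cpu(my_counter, cpu);

	return sum;
}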
