Diffstat (limited to 'mm')

-rw-r--r--	mm/highmem.c	 9
-rw-r--r--	mm/mmap.c	 4
-rw-r--r--	mm/slab.c	 7
-rw-r--r--	mm/vmalloc.c	14

4 files changed, 29 insertions(+), 5 deletions(-)
diff --git a/mm/highmem.c b/mm/highmem.c
index 51e1c1995fec..be8f8d36a8b9 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -99,6 +99,15 @@ static void flush_all_zero_pkmaps(void)
 	flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
 }
 
+/* Flush all unused kmap mappings in order to remove stray
+   mappings. */
+void kmap_flush_unused(void)
+{
+	spin_lock(&kmap_lock);
+	flush_all_zero_pkmaps();
+	spin_unlock(&kmap_lock);
+}
+
 static inline unsigned long map_new_virtual(struct page *page)
 {
 	unsigned long vaddr;
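The new kmap_flush_unused() is just flush_all_zero_pkmaps() under kmap_lock: it tears down pkmap entries whose reference count has dropped to zero, giving code outside highmem.c a way to purge stale kernel aliases of highmem pages. A minimal sketch of a caller, assuming a hypothetical arch helper set_pages_uncached() (an illustrative stand-in, not a real kernel API) that must not race with leftover kernel mappings:

	/* Hypothetical caller: purge stray pkmap aliases before changing
	 * the cache attributes of the pages. set_pages_uncached() is an
	 * illustrative stand-in, not an existing kernel interface. */
	#include <linux/highmem.h>

	static int make_uncached(struct page **pages, int nr)
	{
		kmap_flush_unused();
		return set_pages_uncached(pages, nr);
	}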
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -29,6 +29,7 @@
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
 #include <asm/tlb.h>
+#include <asm/mmu_context.h>
 
 #ifndef arch_mmap_check
 #define arch_mmap_check(addr, len, flags)	(0)
@@ -1979,6 +1980,9 @@ void exit_mmap(struct mm_struct *mm)
 	unsigned long nr_accounted = 0;
 	unsigned long end;
 
+	/* mm's last user has gone, and it's about to be pulled down */
+	arch_exit_mmap(mm);
+
 	lru_add_drain();
 	flush_cache_mm(mm);
 	tlb = tlb_gather_mmu(mm, 1);
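arch_exit_mmap() runs once the mm's last user is gone but before the address space is torn down, giving the architecture a hook at exactly that point (hooks of this kind were introduced for paravirtualized guests, which may need to unpin or release pagetable state first). Architectures with nothing to do are expected to supply an empty inline; a sketch of that no-op default, assuming the usual shared-header pattern for such hooks:

	/* No-op fallback an architecture with no teardown work of its
	 * own would provide, typically via a shared generic header. */
	static inline void arch_exit_mmap(struct mm_struct *mm)
	{
	}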
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1146,7 +1146,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 	 * Make sure we are not freeing an object from another node to the array
 	 * cache on this cpu.
 	 */
-	if (likely(slabp->nodeid == node) || unlikely(!use_alien_caches))
+	if (likely(slabp->nodeid == node))
 		return 0;
 
 	l3 = cachep->nodelists[node];
@@ -1394,6 +1394,9 @@ void __init kmem_cache_init(void)
 	int order;
 	int node;
 
+	if (num_possible_nodes() == 1)
+		use_alien_caches = 0;
+
 	for (i = 0; i < NUM_INIT_LISTS; i++) {
 		kmem_list3_init(&initkmem_list3[i]);
 		if (i < MAX_NUMNODES)
@@ -3563,7 +3566,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
 	check_irq_off();
 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
 
-	if (cache_free_alien(cachep, objp))
+	if (use_alien_caches && cache_free_alien(cachep, objp))
 		return;
 
 	if (likely(ac->avail < ac->limit)) {
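Taken together, the slab changes are a fast-path micro-optimization: kmem_cache_init() clears use_alien_caches once when only a single node is possible, and __cache_free() now tests that global before calling cache_free_alien() at all, so UMA machines skip the call (and the slab lookup it implies) entirely instead of discovering inside the callee that there is nothing to do. A distilled, kernel-independent sketch of the pattern, where free_hits_remote_node() stands in for cache_free_alien():

	/* Boot-time flag short-circuits an expensive predicate;
	 * free_hits_remote_node() is a stand-in for cache_free_alien(). */
	static int use_alien_caches = 1;	/* cleared at init on UMA */

	static int free_hits_remote_node(void *obj)
	{
		return 0;	/* stand-in: object belongs to this node */
	}

	void free_obj(void *obj)
	{
		if (use_alien_caches && free_hits_remote_node(obj))
			return;	/* handed to the remote node's alien cache */
		/* ... fast local free path ... */
	}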
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 9eef486da909..cb5aabda7046 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -431,7 +431,7 @@ void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 		area->flags |= VM_VPAGES;
 	} else {
 		pages = kmalloc_node(array_size,
-				(gfp_mask & ~(__GFP_HIGHMEM | __GFP_ZERO)),
+				(gfp_mask & GFP_LEVEL_MASK),
 				node);
 	}
 	area->pages = pages;
@@ -577,6 +577,14 @@ void *vmalloc_exec(unsigned long size)
 	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
 }
 
+#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
+#define GFP_VMALLOC32 GFP_DMA32
+#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
+#define GFP_VMALLOC32 GFP_DMA
+#else
+#define GFP_VMALLOC32 GFP_KERNEL
+#endif
+
 /**
  * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
  * @size: allocation size
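GFP_VMALLOC32 selects the strictest zone that still guarantees 32-bit addressable pages: ZONE_DMA32 on 64-bit kernels that have it, ZONE_DMA as the 64-bit fallback, and plain GFP_KERNEL on 32-bit, where lowmem already sits below 4GB (highmem never qualifies, since __GFP_HIGHMEM is not passed). A hedged usage sketch, assuming a driver whose device can only address 32 bits (the mydev_ name is illustrative):

	/* Illustrative allocation for a device without 64-bit DMA
	 * support: every page backing the buffer is 32-bit addressable. */
	static void *mydev_alloc_buf(unsigned long size)
	{
		return vmalloc_32(size);
	}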
@@ -586,7 +594,7 @@ void *vmalloc_exec(unsigned long size)
  */
 void *vmalloc_32(unsigned long size)
 {
-	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
+	return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL);
 }
 EXPORT_SYMBOL(vmalloc_32);
 
@@ -602,7 +610,7 @@ void *vmalloc_32_user(unsigned long size)
 	struct vm_struct *area;
 	void *ret;
 
-	ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+	ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
 	if (ret) {
 		write_lock(&vmlist_lock);
 		area = __find_vm_area(ret);
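vmalloc_32_user() adds __GFP_ZERO on top of the same 32-bit guarantee and, in the lines just past this hunk, tags the area VM_USERMAP so it may be mapped into userspace. The intended pairing is remap_vmalloc_range() from a driver's mmap handler, which refuses areas lacking VM_USERMAP; a sketch, assuming mydrv_buf holds the pointer returned by vmalloc_32_user() (the mydrv_ names are illustrative):

	/* Illustrative mmap handler exposing a vmalloc_32_user() buffer;
	 * mydrv_buf is assumed to be the vmalloc_32_user() return value. */
	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
	{
		return remap_vmalloc_range(vma, mydrv_buf, vma->vm_pgoff);
	}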