author     Linus Torvalds <torvalds@linux-foundation.org>   2014-10-13 21:54:50 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-10-13 21:54:50 -0400
commit     dfe2c6dcc8ca2cdc662d7c0473e9811b72ef3370
tree       9ed639a08c16322cdf136d576f42df5b97cd1549 /mm
parent     a45d572841a24db02a62cf05e1157c35fdd3705b
parent     64e455079e1bd7787cc47be30b7f601ce682a5f6
Merge branch 'akpm' (patches from Andrew Morton)
Merge second patch-bomb from Andrew Morton:
- a few hotfixes
- drivers/dma updates
- MAINTAINERS updates
- Quite a lot of lib/ updates
- checkpatch updates
- binfmt updates
- autofs4
- drivers/rtc/
- various small tweaks to less used filesystems
- ipc/ updates
- kernel/watchdog.c changes
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (135 commits)
mm: softdirty: enable write notifications on VMAs after VM_SOFTDIRTY cleared
kernel/param: consolidate __{start,stop}___param[] in <linux/moduleparam.h>
ia64: remove duplicate declarations of __per_cpu_start[] and __per_cpu_end[]
frv: remove unused declarations of __start___ex_table and __stop___ex_table
kvm: ensure hard lockup detection is disabled by default
kernel/watchdog.c: control hard lockup detection default
staging: rtl8192u: use %*pEn to escape buffer
staging: rtl8192e: use %*pEn to escape buffer
staging: wlan-ng: use %*pEhp to print SN
lib80211: remove unused print_ssid()
wireless: hostap: proc: print properly escaped SSID
wireless: ipw2x00: print SSID via %*pE
wireless: libertas: print esaped string via %*pE
lib/vsprintf: add %*pE[achnops] format specifier
lib / string_helpers: introduce string_escape_mem()
lib / string_helpers: refactoring the test suite
lib / string_helpers: move documentation to c-file
include/linux: remove strict_strto* definitions
arch/x86/mm/numa.c: fix boot failure when all nodes are hotpluggable
fs: check bh blocknr earlier when searching lru
...
Diffstat (limited to 'mm')
-rw-r--r--   mm/cma.c        66
-rw-r--r--   mm/memory.c      3
-rw-r--r--   mm/mmap.c       45
-rw-r--r--   mm/mprotect.c   20
-rw-r--r--   mm/slab.c        2
5 files changed, 90 insertions, 46 deletions
diff --git a/mm/cma.c b/mm/cma.c
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -58,7 +58,9 @@ unsigned long cma_get_size(struct cma *cma)
 
 static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
 {
-	return (1UL << (align_order >> cma->order_per_bit)) - 1;
+	if (align_order <= cma->order_per_bit)
+		return 0;
+	return (1UL << (align_order - cma->order_per_bit)) - 1;
 }
 
 static unsigned long cma_bitmap_maxno(struct cma *cma)
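The hunk above fixes the bitmap mask computation: the old code shifted align_order right by order_per_bit instead of subtracting it, so the mask could come out far too small whenever order_per_bit is non-zero; the new code also returns 0 outright when the requested alignment is no larger than the bitmap granularity. A minimal userspace sketch of the before/after arithmetic (hypothetical values, not kernel code):

/*
 * Sketch only: userspace illustration of the cma_bitmap_aligned_mask() fix.
 */
#include <stdio.h>

static unsigned long mask_old(int align_order, int order_per_bit)
{
        /* pre-fix: shifts align_order right by order_per_bit */
        return (1UL << (align_order >> order_per_bit)) - 1;
}

static unsigned long mask_new(int align_order, int order_per_bit)
{
        /* post-fix: one bitmap bit covers 2^order_per_bit pages */
        if (align_order <= order_per_bit)
                return 0;
        return (1UL << (align_order - order_per_bit)) - 1;
}

int main(void)
{
        /* e.g. align_order = 9 (2 MiB with 4 KiB pages), order_per_bit = 1 */
        printf("old: %#lx  new: %#lx\n", mask_old(9, 1), mask_new(9, 1));
        /* prints "old: 0xf  new: 0xff": the old mask spans too few bitmap bits */
        return 0;
}

With order_per_bit == 0 the two expressions agree, which is why the bug only shows up for CMA areas whose bitmap bits cover more than one page.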
@@ -141,6 +143,54 @@ static int __init cma_init_reserved_areas(void)
 core_initcall(cma_init_reserved_areas);
 
 /**
+ * cma_init_reserved_mem() - create custom contiguous area from reserved memory
+ * @base: Base address of the reserved area
+ * @size: Size of the reserved area (in bytes),
+ * @order_per_bit: Order of pages represented by one bit on bitmap.
+ * @res_cma: Pointer to store the created cma region.
+ *
+ * This function creates custom contiguous area from already reserved memory.
+ */
+int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
+				 int order_per_bit, struct cma **res_cma)
+{
+	struct cma *cma;
+	phys_addr_t alignment;
+
+	/* Sanity checks */
+	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
+		pr_err("Not enough slots for CMA reserved regions!\n");
+		return -ENOSPC;
+	}
+
+	if (!size || !memblock_is_region_reserved(base, size))
+		return -EINVAL;
+
+	/* ensure minimal alignment requied by mm core */
+	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
+
+	/* alignment should be aligned with order_per_bit */
+	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
+		return -EINVAL;
+
+	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
+		return -EINVAL;
+
+	/*
+	 * Each reserved area must be initialised later, when more kernel
+	 * subsystems (like slab allocator) are available.
+	 */
+	cma = &cma_areas[cma_area_count];
+	cma->base_pfn = PFN_DOWN(base);
+	cma->count = size >> PAGE_SHIFT;
+	cma->order_per_bit = order_per_bit;
+	*res_cma = cma;
+	cma_area_count++;
+
+	return 0;
+}
+
+/**
  * cma_declare_contiguous() - reserve custom contiguous area
  * @base: Base address of the reserved area optional, use 0 for any
  * @size: Size of the reserved area (in bytes),
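cma_init_reserved_mem() factors the bookkeeping out of cma_declare_contiguous(): it only validates an already-reserved memblock region and records it in cma_areas[] for later activation. The central checks are the alignment ones: the region must be aligned to PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order), and that alignment must itself be a multiple of the bitmap granularity of 2^order_per_bit pages. A rough userspace model of those checks (PAGE_SIZE, MAX_ORDER and pageblock_order values are assumptions for illustration, not taken from a real configuration):

/*
 * Sketch only: userspace model of the sanity checks in cma_init_reserved_mem().
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define MAX_ORDER	11	/* assumed default */
#define PAGEBLOCK_ORDER	9	/* assumed huge-page order with 4 KiB pages */

#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((uint64_t)(a) - 1))
#define MAX(a, b)		((a) > (b) ? (a) : (b))

static int check(uint64_t base, uint64_t size, int order_per_bit)
{
        /* minimal alignment required by the mm core: 4 MiB with these constants */
        uint64_t alignment = PAGE_SIZE << MAX(MAX_ORDER - 1, PAGEBLOCK_ORDER);

        if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1UL << order_per_bit))
                return -1;	/* bitmap granularity coarser than the alignment */
        if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
                return -1;	/* region not aligned to the CMA alignment */
        return 0;
}

int main(void)
{
        printf("%d\n", check(0x20000000, 0x1000000, 0));	/* 0: accepted  */
        printf("%d\n", check(0x20001000, 0x1000000, 0));	/* -1: misaligned base */
        return 0;
}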
@@ -163,7 +213,6 @@ int __init cma_declare_contiguous(phys_addr_t base,
 			phys_addr_t alignment, unsigned int order_per_bit,
 			bool fixed, struct cma **res_cma)
 {
-	struct cma *cma;
 	phys_addr_t memblock_end = memblock_end_of_DRAM();
 	phys_addr_t highmem_start = __pa(high_memory);
 	int ret = 0;
@@ -235,16 +284,9 @@ int __init cma_declare_contiguous(phys_addr_t base,
 		}
 	}
 
-	/*
-	 * Each reserved area must be initialised later, when more kernel
-	 * subsystems (like slab allocator) are available.
-	 */
-	cma = &cma_areas[cma_area_count];
-	cma->base_pfn = PFN_DOWN(base);
-	cma->count = size >> PAGE_SHIFT;
-	cma->order_per_bit = order_per_bit;
-	*res_cma = cma;
-	cma_area_count++;
+	ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
+	if (ret)
+		goto err;
 
 	pr_info("Reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
 		(unsigned long)base);
diff --git a/mm/memory.c b/mm/memory.c
index e229970e4223..1cc6bfbd872e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2053,7 +2053,8 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	old_page = vm_normal_page(vma, address, orig_pte);
 	if (!old_page) {
 		/*
-		 * VM_MIXEDMAP !pfn_valid() case
+		 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
+		 * VM_PFNMAP VMA.
 		 *
 		 * We should not cow pages in a shared writeable mapping.
 		 * Just mark the pages writable as we can't do any dirty
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -89,6 +89,25 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags)
 }
 EXPORT_SYMBOL(vm_get_page_prot);
 
+static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
+{
+	return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
+}
+
+/* Update vma->vm_page_prot to reflect vma->vm_flags. */
+void vma_set_page_prot(struct vm_area_struct *vma)
+{
+	unsigned long vm_flags = vma->vm_flags;
+
+	vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
+	if (vma_wants_writenotify(vma)) {
+		vm_flags &= ~VM_SHARED;
+		vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot,
+						     vm_flags);
+	}
+}
+
+
 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;	/* heuristic overcommit */
 int sysctl_overcommit_ratio __read_mostly = 50;	/* default is 50% */
 unsigned long sysctl_overcommit_kbytes __read_mostly;
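vma_set_page_prot() becomes the one place where vma->vm_page_prot is derived from vma->vm_flags: it recomputes the protection through pgprot_modify() (keeping whatever bits the architecture's pgprot_modify() preserves, such as caching attributes), and when vma_wants_writenotify() says the VMA needs write notifications it recomputes once more with VM_SHARED cleared, i.e. with the write-protected "private" protection so the first write to each page faults. A simplified userspace model of that flag logic (the flag values and the prot encoding below are made up; the real vm_get_page_prot()/pgprot_modify() are per-architecture):

/*
 * Sketch only: userspace model of the vma_set_page_prot() decision.
 */
#include <stdbool.h>
#include <stdio.h>

#define VM_WRITE	0x2UL
#define VM_SHARED	0x8UL
#define VM_SOFTDIRTY	0x10UL	/* hypothetical value for illustration */

/* pretend "prot": bit 0 = present, bit 1 = hardware-writable */
static unsigned long vm_get_page_prot(unsigned long vm_flags)
{
        unsigned long prot = 0x1;

        /* only shared-writable mappings get a hardware-writable prot */
        if ((vm_flags & (VM_WRITE | VM_SHARED)) == (VM_WRITE | VM_SHARED))
                prot |= 0x2;
        return prot;
}

static bool vma_wants_writenotify(unsigned long vm_flags)
{
        /* simplified: notify for shared-writable VMAs without VM_SOFTDIRTY */
        return (vm_flags & (VM_WRITE | VM_SHARED)) == (VM_WRITE | VM_SHARED) &&
               !(vm_flags & VM_SOFTDIRTY);
}

static unsigned long vma_set_page_prot(unsigned long vm_flags)
{
        unsigned long prot = vm_get_page_prot(vm_flags);

        if (vma_wants_writenotify(vm_flags))
                /* drop VM_SHARED so the resulting prot is write-protected */
                prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
        return prot;
}

int main(void)
{
        printf("%#lx\n", vma_set_page_prot(VM_WRITE | VM_SHARED));                 /* 0x1: write faults */
        printf("%#lx\n", vma_set_page_prot(VM_WRITE | VM_SHARED | VM_SOFTDIRTY));  /* 0x3: writable    */
        return 0;
}

Under this model the writable bit only survives for shared-writable VMAs that do not need write notifications, which mirrors what mmap_region() and mprotect_fixup() now get by calling vma_set_page_prot() instead of open-coding the logic.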
@@ -1475,11 +1494,16 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
 	if (vma->vm_ops && vma->vm_ops->page_mkwrite)
 		return 1;
 
-	/* The open routine did something to the protections already? */
+	/* The open routine did something to the protections that pgprot_modify
+	 * won't preserve? */
 	if (pgprot_val(vma->vm_page_prot) !=
-	    pgprot_val(vm_get_page_prot(vm_flags)))
+	    pgprot_val(vm_pgprot_modify(vma->vm_page_prot, vm_flags)))
 		return 0;
 
+	/* Do we need to track softdirty? */
+	if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY))
+		return 1;
+
 	/* Specialty mapping? */
 	if (vm_flags & VM_PFNMAP)
 		return 0;
@@ -1615,21 +1639,6 @@ munmap_back:
 			goto free_vma;
 	}
 
-	if (vma_wants_writenotify(vma)) {
-		pgprot_t pprot = vma->vm_page_prot;
-
-		/* Can vma->vm_page_prot have changed??
-		 *
-		 * Answer: Yes, drivers may have changed it in their
-		 *         f_op->mmap method.
-		 *
-		 * Ensures that vmas marked as uncached stay that way.
-		 */
-		vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
-		if (pgprot_val(pprot) == pgprot_val(pgprot_noncached(pprot)))
-			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-	}
-
 	vma_link(mm, vma, prev, rb_link, rb_parent);
 	/* Once vma denies write, undo our temporary denial count */
 	if (file) {
@@ -1663,6 +1672,8 @@ out:
 	 */
 	vma->vm_flags |= VM_SOFTDIRTY;
 
+	vma_set_page_prot(vma);
+
 	return addr;
 
 unmap_and_free_vma:
diff --git a/mm/mprotect.c b/mm/mprotect.c
index c43d557941f8..ace93454ce8e 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -29,13 +29,6 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
-#ifndef pgprot_modify
-static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
-{
-	return newprot;
-}
-#endif
-
 /*
  * For a prot_numa update we only hold mmap_sem for read so there is a
  * potential race with faulting where a pmd was temporarily none. This
@@ -93,7 +86,9 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 			 * Avoid taking write faults for pages we
 			 * know to be dirty.
 			 */
-			if (dirty_accountable && pte_dirty(ptent))
+			if (dirty_accountable && pte_dirty(ptent) &&
+			    (pte_soft_dirty(ptent) ||
+			     !(vma->vm_flags & VM_SOFTDIRTY)))
 				ptent = pte_mkwrite(ptent);
 			ptep_modify_prot_commit(mm, addr, pte, ptent);
 			updated = true;
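This hunk narrows the mprotect fast path that maps already-dirty ptes writable: in addition to dirty_accountable and pte_dirty(), the pte must now carry the soft-dirty bit, or the VMA must have no VM_SOFTDIRTY flag set, before the future write fault is skipped. A small truth-table sketch of exactly that predicate (plain C, not kernel code):

/*
 * Sketch only: the boolean condition used above when deciding whether
 * change_pte_range() may map an already-dirty pte writable.
 */
#include <stdbool.h>
#include <stdio.h>

static bool may_mkwrite(bool dirty_accountable, bool pte_dirty,
                        bool pte_soft_dirty, bool vma_softdirty)
{
        return dirty_accountable && pte_dirty &&
               (pte_soft_dirty || !vma_softdirty);
}

int main(void)
{
        /* blocked: VM_SOFTDIRTY set on the vma but the pte is not soft-dirty */
        printf("%d\n", may_mkwrite(true, true, false, true));   /* 0 */
        /* allowed: the pte already carries the soft-dirty bit */
        printf("%d\n", may_mkwrite(true, true, true, true));    /* 1 */
        /* allowed: the vma has no VM_SOFTDIRTY flag set */
        printf("%d\n", may_mkwrite(true, true, false, false));  /* 1 */
        return 0;
}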
@@ -320,13 +315,8 @@ success:
 	 * held in write mode.
 	 */
 	vma->vm_flags = newflags;
-	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
-					  vm_get_page_prot(newflags));
-
-	if (vma_wants_writenotify(vma)) {
-		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
-		dirty_accountable = 1;
-	}
+	dirty_accountable = vma_wants_writenotify(vma);
+	vma_set_page_prot(vma);
 
 	change_protection(vma, start, end, vma->vm_page_prot,
 			  dirty_accountable, 0);
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1992,7 +1992,7 @@ static struct array_cache __percpu *alloc_kmem_cache_cpus(
 	struct array_cache __percpu *cpu_cache;
 
 	size = sizeof(void *) * entries + sizeof(struct array_cache);
-	cpu_cache = __alloc_percpu(size, 0);
+	cpu_cache = __alloc_percpu(size, sizeof(void *));
 
 	if (!cpu_cache)
 		return NULL;
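The slab change requests sizeof(void *) alignment instead of 0 for the per-cpu array_cache allocation, presumably so the pointer array that follows the header is at least pointer-aligned. A userspace sketch of the size/alignment arithmetic, using aligned_alloc() as a stand-in for __alloc_percpu() and a simplified stand-in for struct array_cache:

/*
 * Sketch only: why pointer alignment matters for a header followed by a
 * flexible array of pointers.  Not the kernel's allocator or struct layout.
 */
#include <stdalign.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct array_cache {
        unsigned int avail;
        unsigned int limit;
        unsigned int batchcount;
        unsigned int touched;
        void *entry[];          /* cached object pointers follow the header */
};

int main(void)
{
        size_t entries = 16;
        size_t size = sizeof(void *) * entries + sizeof(struct array_cache);
        /* round up so the C11 aligned_alloc() size requirement is met */
        size_t rounded = (size + alignof(void *) - 1) & ~(alignof(void *) - 1);

        /* aligned_alloc() plays the role of __alloc_percpu(size, align):
         * requesting sizeof(void *) keeps entry[] naturally aligned. */
        struct array_cache *ac = aligned_alloc(alignof(void *), rounded);
        if (!ac)
                return 1;
        printf("entry[] at %p, pointer-aligned: %d\n", (void *)ac->entry,
               (int)(((uintptr_t)ac->entry % alignof(void *)) == 0));
        free(ac);
        return 0;
}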