commit 7b46588f364f4f40c25f43ceabb6f705d20793e2
tree   29e80019ee791abe58176161f3ae2b766749b808
parent 915f3e3f76c05b2da93c4cc278eebc2d9219d9f4
parent 95330473636e5e4546f94874c957c3be66bb2140
author    Linus Torvalds <torvalds@linux-foundation.org>  2017-02-25 13:29:09 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-02-25 13:29:09 -0500
Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:
- almost all of the rest of MM
- misc bits
- KASAN updates
- procfs
- lib/ updates
- checkpatch updates
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (124 commits)
checkpatch: remove false unbalanced braces warning
checkpatch: notice unbalanced else braces in a patch
checkpatch: add another old address for the FSF
checkpatch: update $logFunctions
checkpatch: warn on logging continuations
checkpatch: warn on embedded function names
lib/lz4: remove back-compat wrappers
fs/pstore: fs/squashfs: change usage of LZ4 to work with new LZ4 version
crypto: change LZ4 modules to work with new LZ4 module version
lib/decompress_unlz4: change module to work with new LZ4 module version
lib: update LZ4 compressor module
lib/test_sort.c: make it explicitly non-modular
lib: add CONFIG_TEST_SORT to enable self-test of sort()
rbtree: use designated initializers
linux/kernel.h: fix DIV_ROUND_CLOSEST to support negative divisors
lib/find_bit.c: micro-optimise find_next_*_bit
lib: add module support to atomic64 tests
lib: add module support to glob tests
lib: add module support to crc32 tests
kernel/ksysfs.c: add __ro_after_init to bin_attribute structure
...
247 files changed, 7290 insertions(+), 4008 deletions(-)
diff --git a/Documentation/blockdev/zram.txt b/Documentation/blockdev/zram.txt
index 1c0c08d9206b..4fced8a21307 100644
--- a/Documentation/blockdev/zram.txt
+++ b/Documentation/blockdev/zram.txt
@@ -201,8 +201,8 @@ File /sys/block/zram<id>/mm_stat
 The stat file represents device's mm statistics. It consists of a single
 line of text and contains the following stats separated by whitespace:
  orig_data_size   uncompressed size of data stored in this disk.
-                  This excludes zero-filled pages (zero_pages) since no
-                  memory is allocated for them.
+                  This excludes same-element-filled pages (same_pages) since
+                  no memory is allocated for them.
                   Unit: bytes
  compr_data_size  compressed size of data stored in this disk
  mem_used_total   the amount of memory allocated for this disk. This
@@ -214,7 +214,7 @@ line of text and contains the following stats separated by whitespace:
                   the compressed data
  mem_used_max     the maximum amount of memory zram have consumed to
                   store the data
- zero_pages       the number of zero filled pages written to this disk.
+ same_pages       the number of same element filled pages written to this disk.
                   No memory is allocated for such pages.
  pages_compacted  the number of pages freed during compaction
 
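The renamed counter is user-visible, so tooling that parses mm_stat needs updating. A minimal userspace sketch, assuming a configured zram0 device and the seven-field order of this kernel (same_pages is the sixth field); this is illustration, not part of the patch:

    /* Sketch: parse /sys/block/zram0/mm_stat and report the renamed
     * same_pages counter (sixth field, assuming the order documented
     * above). Error handling kept minimal. */
    #include <stdio.h>

    int main(void)
    {
    	unsigned long long orig, compr, used, limit, used_max, same, compacted;
    	FILE *f = fopen("/sys/block/zram0/mm_stat", "r");

    	if (!f)
    		return 1;
    	if (fscanf(f, "%llu %llu %llu %llu %llu %llu %llu",
    		   &orig, &compr, &used, &limit, &used_max,
    		   &same, &compacted) == 7)
    		printf("same_pages: %llu (no memory allocated for these)\n",
    		       same);
    	fclose(f);
    	return 0;
    }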
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 95ccbe6d79ce..b4ad97f10b8e 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -376,8 +376,8 @@ max_map_count:
 
 This file contains the maximum number of memory map areas a process
 may have. Memory map areas are used as a side-effect of calling
-malloc, directly by mmap and mprotect, and also when loading shared
-libraries.
+malloc, directly by mmap, mprotect, and madvise, and also when loading
+shared libraries.
 
 While most applications need less than a thousand maps, certain
 programs, particularly malloc debuggers, may consume lots of them,
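For reference, the limit that the madvise cases below are checked against can be read from procfs. A tiny sketch (path from the documentation above; trivial error handling):

    /* Sketch: print the current vm.max_map_count limit. */
    #include <stdio.h>

    int main(void)
    {
    	unsigned long max_maps;
    	FILE *f = fopen("/proc/sys/vm/max_map_count", "r");

    	if (!f)
    		return 1;
    	if (fscanf(f, "%lu", &max_maps) == 1)
    		printf("vm.max_map_count = %lu\n", max_maps);
    	fclose(f);
    	return 0;
    }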
diff --git a/Documentation/vm/ksm.txt b/Documentation/vm/ksm.txt
index f34a8ee6f860..6b0ca7feb135 100644
--- a/Documentation/vm/ksm.txt
+++ b/Documentation/vm/ksm.txt
@@ -38,6 +38,10 @@ the range for whenever the KSM daemon is started; even if the range
 cannot contain any pages which KSM could actually merge; even if
 MADV_UNMERGEABLE is applied to a range which was never MADV_MERGEABLE.
 
+If a region of memory must be split into at least one new MADV_MERGEABLE
+or MADV_UNMERGEABLE region, the madvise may return ENOMEM if the process
+will exceed vm.max_map_count (see Documentation/sysctl/vm.txt).
+
 Like other madvise calls, they are intended for use on mapped areas of
 the user address space: they will report ENOMEM if the specified range
 includes unmapped gaps (though working on the intervening mapped areas),
@@ -80,6 +84,20 @@ run - set 0 to stop ksmd from running but keep merged pages,
             Default: 0 (must be changed to 1 to activate KSM,
                         except if CONFIG_SYSFS is disabled)
 
+use_zero_pages - specifies whether empty pages (i.e. allocated pages
+                 that only contain zeroes) should be treated specially.
+                 When set to 1, empty pages are merged with the kernel
+                 zero page(s) instead of with each other as it would
+                 happen normally. This can improve the performance on
+                 architectures with coloured zero pages, depending on
+                 the workload. Care should be taken when enabling this
+                 setting, as it can potentially degrade the performance
+                 of KSM for some workloads, for example if the checksums
+                 of pages candidate for merging match the checksum of
+                 an empty page. This setting can be changed at any time,
+                 it is only effective for pages merged after the change.
+                 Default: 0 (normal KSM behaviour as in earlier releases)
+
 The effectiveness of KSM and MADV_MERGEABLE is shown in /sys/kernel/mm/ksm/:
 
 pages_shared - how many shared pages are being used
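Tying the two documentation changes together: a hedged userspace sketch that enables ksmd and the new use_zero_pages knob, then opts an all-zero region into merging. Paths and flags are taken from the documentation above; the final madvise can fail with ENOMEM if splitting the VMA would exceed vm.max_map_count, as just noted:

    /* Sketch only: enable KSM, turn on use_zero_pages, and mark an
     * all-zero anonymous region MADV_MERGEABLE. Error handling trimmed. */
    #include <fcntl.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static void write_knob(const char *path, const char *val)
    {
    	int fd = open(path, O_WRONLY);

    	if (fd >= 0) {
    		write(fd, val, strlen(val));
    		close(fd);
    	}
    }

    int main(void)
    {
    	size_t len = 64 * 4096;
    	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
    			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    	if (buf == MAP_FAILED)
    		return 1;
    	memset(buf, 0, len);	/* every page is "empty" */
    	write_knob("/sys/kernel/mm/ksm/use_zero_pages", "1");
    	write_knob("/sys/kernel/mm/ksm/run", "1");
    	/* May return ENOMEM if splitting the VMA would exceed
    	 * vm.max_map_count, as documented above. */
    	return madvise(buf, len, MADV_MERGEABLE);
    }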
diff --git a/Documentation/vm/userfaultfd.txt b/Documentation/vm/userfaultfd.txt
index 70a3c94d1941..fe51a5aa8963 100644
--- a/Documentation/vm/userfaultfd.txt
+++ b/Documentation/vm/userfaultfd.txt
@@ -54,6 +54,26 @@ uffdio_api.features and uffdio_api.ioctls two 64bit bitmasks of
 respectively all the available features of the read(2) protocol and
 the generic ioctl available.
 
+The uffdio_api.features bitmask returned by the UFFDIO_API ioctl
+defines what memory types are supported by the userfaultfd and what
+events, except page fault notifications, may be generated.
+
+If the kernel supports registering userfaultfd ranges on hugetlbfs
+virtual memory areas, UFFD_FEATURE_MISSING_HUGETLBFS will be set in
+uffdio_api.features. Similarly, UFFD_FEATURE_MISSING_SHMEM will be
+set if the kernel supports registering userfaultfd ranges on shared
+memory (covering all shmem APIs, i.e. tmpfs, IPCSHM, /dev/zero
+MAP_SHARED, memfd_create, etc).
+
+A userland application that wants to use userfaultfd with hugetlbfs
+or shared memory needs to set the corresponding flag in
+uffdio_api.features to enable those features.
+
+If userland desires to receive notifications for events other than
+page faults, it has to verify that uffdio_api.features has the
+appropriate UFFD_FEATURE_EVENT_* bits set. These events are described
+in more detail below, in the "Non-cooperative userfaultfd" section.
+
 Once the userfaultfd has been enabled the UFFDIO_REGISTER ioctl should
 be invoked (if present in the returned uffdio_api.ioctls bitmask) to
 register a memory range in the userfaultfd by setting the
@@ -142,3 +162,72 @@ course the bitmap is updated accordingly. It's also useful to avoid
 sending the same page twice (in case the userfault is read by the
 postcopy thread just before UFFDIO_COPY|ZEROPAGE runs in the migration
 thread).
+
+== Non-cooperative userfaultfd ==
+
+When the userfaultfd is monitored by an external manager, the manager
+must be able to track changes in the process virtual memory
+layout. Userfaultfd can notify the manager about such changes using
+the same read(2) protocol as for the page fault notifications. The
+manager has to explicitly enable these events by setting the
+appropriate bits in uffdio_api.features passed to the UFFDIO_API
+ioctl:
+
+UFFD_FEATURE_EVENT_EXIT - enable notification about exit() of the
+non-cooperative process. When the monitored process exits, the uffd
+manager will get UFFD_EVENT_EXIT.
+
+UFFD_FEATURE_EVENT_FORK - enable userfaultfd hooks for fork(). When
+this feature is enabled, the userfaultfd context of the parent process
+is duplicated into the newly created process. The manager receives
+UFFD_EVENT_FORK with the file descriptor of the new userfaultfd
+context in uffd_msg.fork.
+
+UFFD_FEATURE_EVENT_REMAP - enable notifications about mremap()
+calls. When the non-cooperative process moves a virtual memory area to
+a different location, the manager will receive UFFD_EVENT_REMAP. The
+uffd_msg.remap will contain the old and new addresses of the area and
+its original length.
+
+UFFD_FEATURE_EVENT_REMOVE - enable notifications about
+madvise(MADV_REMOVE) and madvise(MADV_DONTNEED) calls. The event
+UFFD_EVENT_REMOVE will be generated upon these calls to madvise. The
+uffd_msg.remove will contain the start and end addresses of the
+removed area.
+
+UFFD_FEATURE_EVENT_UNMAP - enable notifications about memory
+unmapping. The manager will get UFFD_EVENT_UNMAP with uffd_msg.remove
+containing the start and end addresses of the unmapped area.
+
+Although UFFD_FEATURE_EVENT_REMOVE and UFFD_FEATURE_EVENT_UNMAP are
+similar, they differ in the action expected from the userfaultfd
+manager. In the former case, the virtual memory is removed but the
+area is not: the area remains monitored by the userfaultfd, and if a
+page fault occurs in it the fault will be delivered to the
+manager. The proper resolution for such a page fault is to zeromap the
+faulting address. In the latter case, when an area is unmapped, either
+explicitly (with the munmap() system call) or implicitly (e.g. during
+mremap()), the area is removed and the userfaultfd context for that
+area disappears too; the manager will not get further userland page
+faults from the removed area. Still, the notification is required to
+prevent the manager from using UFFDIO_COPY on the unmapped area.
+
+Unlike userland page faults, which have to be synchronous and require
+an explicit or implicit wakeup, all the events are delivered
+asynchronously and the non-cooperative process resumes execution as
+soon as the manager executes read(). The userfaultfd manager should
+carefully synchronize calls to UFFDIO_COPY with the event
+processing. To aid the synchronization, the UFFDIO_COPY ioctl will
+return -ENOSPC when the monitored process exits at the time of
+UFFDIO_COPY, and -ENOENT when the non-cooperative process has changed
+its virtual memory layout simultaneously with an outstanding
+UFFDIO_COPY operation.
+
+The current asynchronous model of event delivery is optimal for
+single-threaded non-cooperative userfaultfd manager implementations. A
+synchronous event delivery model can be added later as a new
+userfaultfd feature to facilitate multithreading enhancements of the
+non-cooperative manager, for example to allow UFFDIO_COPY ioctls to
+run in parallel with event reception. Single-threaded implementations
+should continue to use the current async event delivery model instead.
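A minimal sketch of the UFFDIO_API handshake described above: with uffdio_api.features left at zero, the kernel reports everything it supports, which a manager can inspect for the memory-type and UFFD_FEATURE_EVENT_* bits before registering ranges. Assumes the uapi header from this series; error handling trimmed:

    /* Sketch: probe userfaultfd features via the UFFDIO_API handshake. */
    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <linux/userfaultfd.h>

    int main(void)
    {
    	struct uffdio_api api = { .api = UFFD_API, .features = 0 };
    	int fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

    	if (fd < 0 || ioctl(fd, UFFDIO_API, &api)) {
    		perror("userfaultfd handshake");
    		return 1;
    	}
    	if (api.features & UFFD_FEATURE_MISSING_HUGETLBFS)
    		printf("hugetlbfs registration supported\n");
    	if (api.features & UFFD_FEATURE_MISSING_SHMEM)
    		printf("shmem registration supported\n");
    	if (api.features & UFFD_FEATURE_EVENT_FORK)
    		printf("fork events can be requested\n");
    	close(fd);
    	return 0;
    }

To actually enable an event, a manager would set the corresponding bit in uffdio_api.features before the ioctl, as the documentation above describes.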
diff --git a/arch/Kconfig b/arch/Kconfig
index f761142976e5..d0012add6b19 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -571,6 +571,9 @@ config HAVE_IRQ_TIME_ACCOUNTING
 config HAVE_ARCH_TRANSPARENT_HUGEPAGE
 	bool
 
+config HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+	bool
+
 config HAVE_ARCH_HUGE_VMAP
 	bool
 
diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild
index baa152b9348e..46e47c088622 100644
--- a/arch/alpha/include/asm/Kbuild
+++ b/arch/alpha/include/asm/Kbuild
@@ -9,3 +9,4 @@ generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += sections.h
 generic-y += trace_clock.h
+generic-y += current.h
diff --git a/arch/alpha/include/asm/current.h b/arch/alpha/include/asm/current.h
deleted file mode 100644
index 094d285a1b34..000000000000
--- a/arch/alpha/include/asm/current.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef _ALPHA_CURRENT_H
-#define _ALPHA_CURRENT_H
-
-#include <linux/thread_info.h>
-
-#define get_current()	(current_thread_info()->task)
-#define current		get_current()
-
-#endif /* _ALPHA_CURRENT_H */
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 82d3e79ec82b..6ffdf17e0d5c 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -349,7 +349,7 @@ static void __dma_free_buffer(struct page *page, size_t size)
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
 				     pgprot_t prot, struct page **ret_page,
 				     const void *caller, bool want_vaddr,
-				     int coherent_flag);
+				     int coherent_flag, gfp_t gfp);
 
 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
 				  pgprot_t prot, struct page **ret_page,
@@ -420,7 +420,8 @@ static int __init atomic_pool_init(void)
 	 */
 	if (dev_get_cma_area(NULL))
 		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
-					      &page, atomic_pool_init, true, NORMAL);
+					      &page, atomic_pool_init, true, NORMAL,
+					      GFP_KERNEL);
 	else
 		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
 					   &page, atomic_pool_init, true);
@@ -594,14 +595,14 @@ static int __free_from_pool(void *start, size_t size)
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
 				     pgprot_t prot, struct page **ret_page,
 				     const void *caller, bool want_vaddr,
-				     int coherent_flag)
+				     int coherent_flag, gfp_t gfp)
 {
 	unsigned long order = get_order(size);
 	size_t count = size >> PAGE_SHIFT;
 	struct page *page;
 	void *ptr = NULL;
 
-	page = dma_alloc_from_contiguous(dev, count, order);
+	page = dma_alloc_from_contiguous(dev, count, order, gfp);
 	if (!page)
 		return NULL;
 
@@ -655,7 +656,7 @@ static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
 #define __get_dma_pgprot(attrs, prot)				__pgprot(0)
 #define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv)	NULL
 #define __alloc_from_pool(size, ret_page)			NULL
-#define __alloc_from_contiguous(dev, size, prot, ret, c, wv, coherent_flag)	NULL
+#define __alloc_from_contiguous(dev, size, prot, ret, c, wv, coherent_flag, gfp)	NULL
 #define __free_from_pool(cpu_addr, size)			do { } while (0)
 #define __free_from_contiguous(dev, page, cpu_addr, size, wv)	do { } while (0)
 #define __dma_free_remap(cpu_addr, size)			do { } while (0)
@@ -697,7 +698,8 @@ static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
 {
 	return __alloc_from_contiguous(args->dev, args->size, args->prot,
 				       ret_page, args->caller,
-				       args->want_vaddr, args->coherent_flag);
+				       args->want_vaddr, args->coherent_flag,
+				       args->gfp);
 }
 
 static void cma_allocator_free(struct arm_dma_free_args *args)
@@ -1312,7 +1314,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
 	unsigned long order = get_order(size);
 	struct page *page;
 
-	page = dma_alloc_from_contiguous(dev, count, order);
+	page = dma_alloc_from_contiguous(dev, count, order, gfp);
 	if (!page)
 		goto error;
 
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 351f7595cb3e..aff1d0afeb1e 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -107,7 +107,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
 		void *addr;
 
 		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
-						 get_order(size));
+						 get_order(size), flags);
 		if (!page)
 			return NULL;
 
@@ -390,7 +390,7 @@ static int __init atomic_pool_init(void)
 
 	if (dev_get_cma_area(NULL))
 		page = dma_alloc_from_contiguous(NULL, nr_pages,
-						 pool_size_order);
+						 pool_size_order, GFP_KERNEL);
 	else
 		page = alloc_pages(GFP_DMA, pool_size_order);
 
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index 9f19e19bff9d..8e4ef321001f 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -4,6 +4,7 @@ generic-y += barrier.h
 generic-y += bitsperlong.h
 generic-y += clkdev.h
 generic-y += cmpxchg.h
+generic-y += current.h
 generic-y += device.h
 generic-y += div64.h
 generic-y += errno.h
diff --git a/arch/cris/include/asm/current.h b/arch/cris/include/asm/current.h
deleted file mode 100644
index 5f5c0efd00be..000000000000
--- a/arch/cris/include/asm/current.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef _CRIS_CURRENT_H
-#define _CRIS_CURRENT_H
-
-#include <linux/thread_info.h>
-
-struct task_struct;
-
-static inline struct task_struct * get_current(void)
-{
-	return current_thread_info()->task;
-}
-
-#define current get_current()
-
-#endif /* !(_CRIS_CURRENT_H) */
diff --git a/arch/frv/mb93090-mb00/pci-frv.c b/arch/frv/mb93090-mb00/pci-frv.c
index 34bb4b13e079..c452ddb5620f 100644
--- a/arch/frv/mb93090-mb00/pci-frv.c
+++ b/arch/frv/mb93090-mb00/pci-frv.c
@@ -147,7 +147,7 @@ static void __init pcibios_allocate_resources(int pass)
 static void __init pcibios_assign_resources(void)
 {
 	struct pci_dev *dev = NULL;
-	int idx;
+	int idx, err;
 	struct resource *r;
 
 	for_each_pci_dev(dev) {
@@ -172,8 +172,13 @@ static void __init pcibios_assign_resources(void)
 		 *  the BIOS forgot to do so or because we have decided the old
 		 *  address was unusable for some reason.
 		 */
-		if (!r->start && r->end)
-			pci_assign_resource(dev, idx);
+		if (!r->start && r->end) {
+			err = pci_assign_resource(dev, idx);
+			if (err)
+				dev_err(&dev->dev,
+					"Failed to assign new address to %d\n",
+					idx);
+		}
 	}
 }
 }
diff --git a/arch/m68k/68000/bootlogo-vz.h b/arch/m68k/68000/bootlogo-vz.h
index b38e2b255142..6ff09beba1ba 100644
--- a/arch/m68k/68000/bootlogo-vz.h
+++ b/arch/m68k/68000/bootlogo-vz.h
@@ -1,6 +1,8 @@
+#include <linux/compiler.h>
+
 #define splash_width 640
 #define splash_height 480
-unsigned char __attribute__ ((aligned(16))) bootlogo_bits[] = {
+unsigned char __aligned(16) bootlogo_bits[] = {
   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
diff --git a/arch/m68k/68000/bootlogo.h b/arch/m68k/68000/bootlogo.h
index b896c933fafc..c466db3ca3a8 100644
--- a/arch/m68k/68000/bootlogo.h
+++ b/arch/m68k/68000/bootlogo.h
@@ -1,6 +1,8 @@
+#include <linux/compiler.h>
+
 #define bootlogo_width 160
 #define bootlogo_height 160
-unsigned char __attribute__ ((aligned(16))) bootlogo_bits[] = {
+unsigned char __aligned(16) bootlogo_bits[] = {
   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x01, 0x00, 0x00, 0x00,
   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
   0x00, 0x00, 0x40, 0x55, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
diff --git a/arch/m68k/include/asm/MC68328.h b/arch/m68k/include/asm/MC68328.h
index 1a8080c4cc40..b61230e74e63 100644
--- a/arch/m68k/include/asm/MC68328.h
+++ b/arch/m68k/include/asm/MC68328.h
@@ -8,6 +8,7 @@
  * Copyright (C) 1998 Kenneth Albanowski <kjahds@kjahds.com>,
  *
  */
+#include <linux/compiler.h>
 
 #ifndef _MC68328_H_
 #define _MC68328_H_
@@ -993,7 +994,7 @@ typedef volatile struct {
   volatile unsigned short int pad1;
   volatile unsigned short int pad2;
   volatile unsigned short int pad3;
-} __attribute__((packed)) m68328_uart;
+} __packed m68328_uart;
 
 
 /**********
diff --git a/arch/m68k/include/asm/MC68EZ328.h b/arch/m68k/include/asm/MC68EZ328.h
index fedac87c5d13..703331ece328 100644
--- a/arch/m68k/include/asm/MC68EZ328.h
+++ b/arch/m68k/include/asm/MC68EZ328.h
@@ -9,6 +9,7 @@
  * The Silver Hammer Group, Ltd.
  *
  */
+#include <linux/compiler.h>
 
 #ifndef _MC68EZ328_H_
 #define _MC68EZ328_H_
@@ -815,7 +816,7 @@ typedef volatile struct {
   volatile unsigned short int nipr;
   volatile unsigned short int pad1;
   volatile unsigned short int pad2;
-} __attribute__((packed)) m68328_uart;
+} __packed m68328_uart;
 
 
 /**********
diff --git a/arch/m68k/include/asm/MC68VZ328.h b/arch/m68k/include/asm/MC68VZ328.h
index 34a51b2c784f..fbaed7ddfb41 100644
--- a/arch/m68k/include/asm/MC68VZ328.h
+++ b/arch/m68k/include/asm/MC68VZ328.h
@@ -909,7 +909,7 @@ typedef struct {
   volatile unsigned short int nipr;
   volatile unsigned short int hmark;
   volatile unsigned short int unused;
-} __attribute__((packed)) m68328_uart;
+} __packed m68328_uart;
 
 
 
diff --git a/arch/m68k/include/asm/natfeat.h b/arch/m68k/include/asm/natfeat.h
index a3521b80c3b9..2d2424de1d65 100644
--- a/arch/m68k/include/asm/natfeat.h
+++ b/arch/m68k/include/asm/natfeat.h
@@ -6,6 +6,7 @@
  * This software may be used and distributed according to the terms of
  * the GNU General Public License (GPL), incorporated herein by reference.
  */
+#include <linux/compiler.h>
 
 #ifndef _NATFEAT_H
 #define _NATFEAT_H
@@ -17,6 +18,6 @@ void nf_init(void);
 void nf_shutdown(void);
 
 void nfprint(const char *fmt, ...)
-	__attribute__ ((format (printf, 1, 2)));
+	__printf(1, 2);
 
 # endif /* _NATFEAT_H */
diff --git a/arch/m68k/lib/ashldi3.c b/arch/m68k/lib/ashldi3.c
index 8dffd36ec4f2..ac08f8141390 100644
--- a/arch/m68k/lib/ashldi3.c
+++ b/arch/m68k/lib/ashldi3.c
@@ -18,10 +18,10 @@ GNU General Public License for more details. */
 
 #define BITS_PER_UNIT 8
 
-typedef int SItype __attribute__ ((mode (SI)));
-typedef unsigned int USItype __attribute__ ((mode (SI)));
-typedef int DItype __attribute__ ((mode (DI)));
-typedef int word_type __attribute__ ((mode (__word__)));
+typedef int SItype __mode(SI);
+typedef unsigned int USItype __mode(SI);
+typedef int DItype __mode(DI);
+typedef int word_type __mode(__word__);
 
 struct DIstruct {SItype high, low;};
 
diff --git a/arch/m68k/lib/ashrdi3.c b/arch/m68k/lib/ashrdi3.c
index e6565a3ee2c3..5837b1dd3334 100644
--- a/arch/m68k/lib/ashrdi3.c
+++ b/arch/m68k/lib/ashrdi3.c
@@ -18,10 +18,10 @@ GNU General Public License for more details. */
 
 #define BITS_PER_UNIT 8
 
-typedef int SItype __attribute__ ((mode (SI)));
-typedef unsigned int USItype __attribute__ ((mode (SI)));
-typedef int DItype __attribute__ ((mode (DI)));
-typedef int word_type __attribute__ ((mode (__word__)));
+typedef int SItype __mode(SI);
+typedef unsigned int USItype __mode(SI);
+typedef int DItype __mode(DI);
+typedef int word_type __mode(__word__);
 
 struct DIstruct {SItype high, low;};
 
diff --git a/arch/m68k/lib/lshrdi3.c b/arch/m68k/lib/lshrdi3.c
index 039779737c7d..7f40566be6c8 100644
--- a/arch/m68k/lib/lshrdi3.c
+++ b/arch/m68k/lib/lshrdi3.c
@@ -18,10 +18,10 @@ GNU General Public License for more details. */
 
 #define BITS_PER_UNIT 8
 
-typedef int SItype __attribute__ ((mode (SI)));
-typedef unsigned int USItype __attribute__ ((mode (SI)));
-typedef int DItype __attribute__ ((mode (DI)));
-typedef int word_type __attribute__ ((mode (__word__)));
+typedef int SItype __mode(SI);
+typedef unsigned int USItype __mode(SI);
+typedef int DItype __mode(DI);
+typedef int word_type __mode(__word__);
 
 struct DIstruct {SItype high, low;};
 
diff --git a/arch/m68k/lib/muldi3.c b/arch/m68k/lib/muldi3.c
index 6459af5b2af0..3fb05c698c41 100644
--- a/arch/m68k/lib/muldi3.c
+++ b/arch/m68k/lib/muldi3.c
@@ -65,10 +65,10 @@ GNU General Public License for more details. */
   umul_ppmm (__w.s.high, __w.s.low, u, v);			\
   __w.ll; })
 
-typedef int SItype __attribute__ ((mode (SI)));
-typedef unsigned int USItype __attribute__ ((mode (SI)));
-typedef int DItype __attribute__ ((mode (DI)));
-typedef int word_type __attribute__ ((mode (__word__)));
+typedef int SItype __mode(SI);
+typedef unsigned int USItype __mode(SI);
+typedef int DItype __mode(DI);
+typedef int word_type __mode(__word__);
 
 struct DIstruct {SItype high, low;};
 
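The m68k cleanups above lean on the shorthand attribute macros from <linux/compiler.h>. To the best of my knowledge, the gcc variants expand roughly as follows (paraphrased for illustration, not quoted from this merge):

    /* Approximate expansions of the compiler.h shorthands used above. */
    #define __packed	__attribute__((packed))
    #define __aligned(x)	__attribute__((aligned(x)))
    #define __printf(a, b)	__attribute__((format(printf, a, b)))
    #define __mode(x)	__attribute__((mode(x)))

Using the shared macros keeps attribute syntax in one place, which is also what the new checkpatch warnings in this series push for.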
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 7f696f97f9dd..13bc93242c0c 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -22,6 +22,7 @@
 #include <linux/init.h>
 #include <linux/bootmem.h>
 #include <linux/mm.h>
+#include <linux/shmem_fs.h>
 #include <linux/list.h>
 #include <linux/syscalls.h>
 #include <linux/irq.h>
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index f9dbfb14af33..093517e85a6c 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -111,7 +111,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	base = mmap_region(NULL, STACK_TOP, PAGE_SIZE,
 			   VM_READ|VM_WRITE|VM_EXEC|
 			   VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
-			   0);
+			   0, NULL);
 	if (IS_ERR_VALUE(base)) {
 		ret = base;
 		goto out;
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index a39c36af97ad..1895a692efd4 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -148,8 +148,8 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
 	gfp = massage_gfp_flags(dev, gfp);
 
 	if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp))
-		page = dma_alloc_from_contiguous(dev,
-					count, get_order(size));
+		page = dma_alloc_from_contiguous(dev, count, get_order(size),
+						 gfp);
 	if (!page)
 		page = alloc_pages(gfp, get_order(size));
 
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index fef738229a68..1eeeb72c7015 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1,6 +1,9 @@
 #ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
 #define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
 
+#ifndef __ASSEMBLY__
+#include <linux/mmdebug.h>
+#endif
 /*
  * Common bits between hash and Radix page table
  */
@@ -434,15 +437,47 @@ static inline pte_t pte_clear_soft_dirty(pte_t pte)
 #endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
 
 #ifdef CONFIG_NUMA_BALANCING
-/*
- * These work without NUMA balancing but the kernel does not care. See the
- * comment in include/asm-generic/pgtable.h . On powerpc, this will only
- * work for user pages and always return true for kernel pages.
- */
 static inline int pte_protnone(pte_t pte)
 {
-	return (pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_PRIVILEGED)) ==
-		cpu_to_be64(_PAGE_PRESENT | _PAGE_PRIVILEGED);
+	return (pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE | _PAGE_RWX)) ==
+		cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE);
+}
+
+#define pte_mk_savedwrite pte_mk_savedwrite
+static inline pte_t pte_mk_savedwrite(pte_t pte)
+{
+	/*
+	 * Used by Autonuma subsystem to preserve the write bit
+	 * while marking the pte PROT_NONE. Only allow this
+	 * on PROT_NONE ptes.
+	 */
+	VM_BUG_ON((pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_RWX | _PAGE_PRIVILEGED)) !=
+		  cpu_to_be64(_PAGE_PRESENT | _PAGE_PRIVILEGED));
+	return __pte(pte_val(pte) & ~_PAGE_PRIVILEGED);
+}
+
+#define pte_clear_savedwrite pte_clear_savedwrite
+static inline pte_t pte_clear_savedwrite(pte_t pte)
+{
+	/*
+	 * Used by KSM subsystem to make a protnone pte readonly.
+	 */
+	VM_BUG_ON(!pte_protnone(pte));
+	return __pte(pte_val(pte) | _PAGE_PRIVILEGED);
+}
+
+#define pte_savedwrite pte_savedwrite
+static inline bool pte_savedwrite(pte_t pte)
+{
+	/*
+	 * Saved write ptes are prot none ptes that don't have the
+	 * privileged bit set. We mark prot none as one which has
+	 * present and privileged bit set and RWX cleared. To mark
+	 * protnone which used to have _PAGE_WRITE set we clear
+	 * the privileged bit.
+	 */
+	VM_BUG_ON(!pte_protnone(pte));
+	return !(pte_raw(pte) & cpu_to_be64(_PAGE_RWX | _PAGE_PRIVILEGED));
 }
 #endif /* CONFIG_NUMA_BALANCING */
 
@@ -873,6 +908,8 @@ static inline pte_t *pmdp_ptep(pmd_t *pmd)
 #define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
 #define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
 #define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+#define pmd_mk_savedwrite(pmd)	pte_pmd(pte_mk_savedwrite(pmd_pte(pmd)))
+#define pmd_clear_savedwrite(pmd)	pte_pmd(pte_clear_savedwrite(pmd_pte(pmd)))
 
 #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
 #define pmd_soft_dirty(pmd)    pte_soft_dirty(pmd_pte(pmd))
@@ -889,6 +926,7 @@ static inline int pmd_protnone(pmd_t pmd)
 
 #define __HAVE_ARCH_PMD_WRITE
 #define pmd_write(pmd)		pte_write(pmd_pte(pmd))
+#define pmd_savedwrite(pmd)	pte_savedwrite(pmd_pte(pmd))
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 74bec5498972..a3f5334f5d8c 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -25,6 +25,7 @@
 #include <linux/of_address.h>
 #include <linux/of_pci.h>
 #include <linux/mm.h>
+#include <linux/shmem_fs.h>
 #include <linux/list.h>
 #include <linux/syscalls.h>
 #include <linux/irq.h>
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 491c5d8120f7..ab9d14c0e460 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -102,9 +102,9 @@ static void release_spapr_tce_table(struct rcu_head *head)
 	kfree(stt);
 }
 
-static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int kvm_spapr_tce_fault(struct vm_fault *vmf)
 {
-	struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;
+	struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
 	struct page *page;
 
 	if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index c42a7e63b39e..4d6c64b3041c 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -56,7 +56,8 @@ struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
 {
 	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
 
-	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES));
+	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES),
+			 GFP_KERNEL);
 }
 EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);
 
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index a35e2c29d7ee..e5ec1368f0cd 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -233,8 +233,9 @@ spufs_mem_write(struct file *file, const char __user *buffer,
 }
 
 static int
-spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+spufs_mem_mmap_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct spu_context *ctx = vma->vm_file->private_data;
 	unsigned long pfn, offset;
 
@@ -311,12 +312,11 @@ static const struct file_operations spufs_mem_fops = {
 	.mmap			= spufs_mem_mmap,
 };
 
-static int spufs_ps_fault(struct vm_area_struct *vma,
-			  struct vm_fault *vmf,
+static int spufs_ps_fault(struct vm_fault *vmf,
 			  unsigned long ps_offs,
 			  unsigned long ps_size)
 {
-	struct spu_context *ctx = vma->vm_file->private_data;
+	struct spu_context *ctx = vmf->vma->vm_file->private_data;
 	unsigned long area, offset = vmf->pgoff << PAGE_SHIFT;
 	int ret = 0;
 
@@ -354,7 +354,7 @@ static int spufs_ps_fault(struct vm_area_struct *vma,
 		down_read(&current->mm->mmap_sem);
 	} else {
 		area = ctx->spu->problem_phys + ps_offs;
-		vm_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT);
+		vm_insert_pfn(vmf->vma, vmf->address, (area + offset) >> PAGE_SHIFT);
 		spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);
 	}
 
@@ -367,10 +367,9 @@ refault:
 }
 
 #if SPUFS_MMAP_4K
-static int spufs_cntl_mmap_fault(struct vm_area_struct *vma,
-		struct vm_fault *vmf)
+static int spufs_cntl_mmap_fault(struct vm_fault *vmf)
 {
-	return spufs_ps_fault(vma, vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
+	return spufs_ps_fault(vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
 }
 
 static const struct vm_operations_struct spufs_cntl_mmap_vmops = {
@@ -1067,15 +1066,15 @@ static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
 }
 
 static int
-spufs_signal1_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+spufs_signal1_mmap_fault(struct vm_fault *vmf)
 {
 #if SPUFS_SIGNAL_MAP_SIZE == 0x1000
-	return spufs_ps_fault(vma, vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);
+	return spufs_ps_fault(vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);
 #elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
 	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
 	 * signal 1 and 2 area
 	 */
-	return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
+	return spufs_ps_fault(vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
 #else
 #error unsupported page size
 #endif
@@ -1205,15 +1204,15 @@ static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
 
 #if SPUFS_MMAP_4K
 static int
-spufs_signal2_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+spufs_signal2_mmap_fault(struct vm_fault *vmf)
 {
 #if SPUFS_SIGNAL_MAP_SIZE == 0x1000
-	return spufs_ps_fault(vma, vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE);
+	return spufs_ps_fault(vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE);
 #elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
 	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
 	 * signal 1 and 2 area
 	 */
-	return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
+	return spufs_ps_fault(vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
 #else
 #error unsupported page size
 #endif
@@ -1334,9 +1333,9 @@ DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
 
 #if SPUFS_MMAP_4K
 static int
-spufs_mss_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+spufs_mss_mmap_fault(struct vm_fault *vmf)
 {
-	return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
+	return spufs_ps_fault(vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
 }
 
 static const struct vm_operations_struct spufs_mss_mmap_vmops = {
@@ -1396,9 +1395,9 @@ static const struct file_operations spufs_mss_fops = {
 };
 
 static int
-spufs_psmap_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+spufs_psmap_mmap_fault(struct vm_fault *vmf)
 {
-	return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_PS_MAP_SIZE);
+	return spufs_ps_fault(vmf, 0x0000, SPUFS_PS_MAP_SIZE);
 }
 
 static const struct vm_operations_struct spufs_psmap_mmap_vmops = {
@@ -1456,9 +1455,9 @@ static const struct file_operations spufs_psmap_fops = {
 
 #if SPUFS_MMAP_4K
 static int
-spufs_mfc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+spufs_mfc_mmap_fault(struct vm_fault *vmf)
 {
-	return spufs_ps_fault(vma, vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
+	return spufs_ps_fault(vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
 }
 
 static const struct vm_operations_struct spufs_mfc_mmap_vmops = {
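The spufs and KVM conversions above follow this merge's mm-wide change that drops the vma argument from ->fault handlers; the VMA is now reached through vmf->vma. A hedged sketch of the resulting pattern (my_fault and my_vm_ops are illustration names, not from the patch):

    /* Sketch of the new ->fault signature used throughout this series. */
    #include <linux/mm.h>

    static int my_fault(struct vm_fault *vmf)
    {
    	/* The vma used to be a separate argument; now derive it. */
    	struct vm_area_struct *vma = vmf->vma;
    	void *priv = vma->vm_file->private_data;

    	(void)priv;
    	return VM_FAULT_SIGBUS;	/* a real handler inserts a page here */
    }

    static const struct vm_operations_struct my_vm_ops = {
    	.fault = my_fault,
    };

Dropping the redundant argument shrinks every handler's signature and removes the possibility of a handler being passed a vma that disagrees with vmf->vma.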
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index e2293c662bdf..dd1d5c62c374 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -32,6 +32,7 @@ static struct memblock_type oldmem_type = {
 	.max = 1,
 	.total_size = 0,
 	.regions = &oldmem_region,
+	.name = "oldmem",
 };
 
 struct save_area {
diff --git a/arch/tile/mm/elf.c b/arch/tile/mm/elf.c
index 6225cc998db1..889901824400 100644
--- a/arch/tile/mm/elf.c
+++ b/arch/tile/mm/elf.c
@@ -143,7 +143,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 		unsigned long addr = MEM_USER_INTRPT;
 		addr = mmap_region(NULL, addr, INTRPT_SIZE,
 				   VM_READ|VM_EXEC|
-				   VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, 0);
+				   VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, 0, NULL);
 		if (addr > (unsigned long) -PAGE_SIZE)
 			retval = (int) addr;
 	}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 874c1238dffd..33007aa74111 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -109,6 +109,7 @@ config X86
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+	select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD if X86_64
 	select HAVE_ARCH_VMAP_STACK if X86_64
 	select HAVE_ARCH_WITHIN_STACK_FRAMES
 	select HAVE_CC_STACKPROTECTOR
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 10820f6cefbf..572cee3fccff 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -186,7 +186,7 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
 
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
-		do_munmap(mm, text_start, image->size);
+		do_munmap(mm, text_start, image->size, NULL);
 	} else {
 		current->mm->context.vdso = (void __user *)text_start;
 		current->mm->context.vdso_image = image;
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index f75fbfe550f2..0489884fdc44 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -475,6 +475,17 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 			    native_pmd_val(pmd));
 }
 
+static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
+			      pud_t *pudp, pud_t pud)
+{
+	if (sizeof(pudval_t) > sizeof(long))
+		/* 5 arg words */
+		pv_mmu_ops.set_pud_at(mm, addr, pudp, pud);
+	else
+		PVOP_VCALL4(pv_mmu_ops.set_pud_at, mm, addr, pudp,
+			    native_pud_val(pud));
+}
+
 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
 	pmdval_t val = native_pmd_val(pmd);
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index bb2de45a60f2..b060f962d581 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -249,6 +249,8 @@ struct pv_mmu_ops {
 	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
 	void (*set_pmd_at)(struct mm_struct *mm, unsigned long addr,
 			   pmd_t *pmdp, pmd_t pmdval);
+	void (*set_pud_at)(struct mm_struct *mm, unsigned long addr,
+			   pud_t *pudp, pud_t pudval);
 	void (*pte_update)(struct mm_struct *mm, unsigned long addr,
 			   pte_t *ptep);
 
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
index fd74a11959de..a8b96e708c2b 100644
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
@@ -21,6 +21,10 @@ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
 	*pmdp = pmd;
 }
 
+static inline void native_set_pud(pud_t *pudp, pud_t pud)
+{
+}
+
 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
 {
 	native_set_pte(ptep, pte);
@@ -31,6 +35,10 @@ static inline void native_pmd_clear(pmd_t *pmdp)
 	native_set_pmd(pmdp, __pmd(0));
 }
 
+static inline void native_pud_clear(pud_t *pudp)
+{
+}
+
 static inline void native_pte_clear(struct mm_struct *mm,
 				    unsigned long addr, pte_t *xp)
 {
@@ -55,6 +63,15 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
 #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
 #endif
 
+#ifdef CONFIG_SMP
+static inline pud_t native_pudp_get_and_clear(pud_t *xp)
+{
+	return __pud(xchg((pudval_t *)xp, 0));
+}
+#else
+#define native_pudp_get_and_clear(xp) native_local_pudp_get_and_clear(xp)
+#endif
+
 /* Bit manipulation helper on pte/pgoff entry */
 static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshift,
 				      unsigned long mask, unsigned int leftshift)
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index cdaa58c9b39e..8f50fb3f04e1 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h | |||
@@ -121,6 +121,12 @@ static inline void native_pmd_clear(pmd_t *pmd) | |||
121 | *(tmp + 1) = 0; | 121 | *(tmp + 1) = 0; |
122 | } | 122 | } |
123 | 123 | ||
124 | #ifndef CONFIG_SMP | ||
125 | static inline void native_pud_clear(pud_t *pudp) | ||
126 | { | ||
127 | } | ||
128 | #endif | ||
129 | |||
124 | static inline void pud_clear(pud_t *pudp) | 130 | static inline void pud_clear(pud_t *pudp) |
125 | { | 131 | { |
126 | set_pud(pudp, __pud(0)); | 132 | set_pud(pudp, __pud(0)); |
@@ -176,6 +182,30 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp) | |||
176 | #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp) | 182 | #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp) |
177 | #endif | 183 | #endif |
178 | 184 | ||
185 | #ifdef CONFIG_SMP | ||
186 | union split_pud { | ||
187 | struct { | ||
188 | u32 pud_low; | ||
189 | u32 pud_high; | ||
190 | }; | ||
191 | pud_t pud; | ||
192 | }; | ||
193 | |||
194 | static inline pud_t native_pudp_get_and_clear(pud_t *pudp) | ||
195 | { | ||
196 | union split_pud res, *orig = (union split_pud *)pudp; | ||
197 | |||
198 | /* xchg acts as a barrier before setting of the high bits */ | ||
199 | res.pud_low = xchg(&orig->pud_low, 0); | ||
200 | res.pud_high = orig->pud_high; | ||
201 | orig->pud_high = 0; | ||
202 | |||
203 | return res.pud; | ||
204 | } | ||
205 | #else | ||
206 | #define native_pudp_get_and_clear(xp) native_local_pudp_get_and_clear(xp) | ||
207 | #endif | ||
208 | |||
179 | /* Encode and de-code a swap entry */ | 209 | /* Encode and de-code a swap entry */ |
180 | #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5) | 210 | #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5) |
181 | #define __swp_type(x) (((x).val) & 0x1f) | 211 | #define __swp_type(x) (((x).val) & 0x1f) |
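On 32-bit PAE the 64-bit pud cannot be cleared with one naturally atomic store, so native_pudp_get_and_clear() above zeroes the low word with xchg() first; once the present bit (which lives in the low word) is gone, the hardware walker ignores the entry and the high word can be read and cleared without atomics. A hedged sketch of the same split read-and-clear, using C11 atomics as a stand-in for the kernel's xchg():

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

union split_entry {
	struct {
		_Atomic uint32_t low;	/* the present bit lives here */
		uint32_t high;
	};
	uint64_t whole;
};

/*
 * Zero the low half atomically first: once the present bit is gone the
 * walker stops reading the entry, so the high half needs no atomics.
 */
static uint64_t split_get_and_clear(union split_entry *e)
{
	union split_entry res;

	res.low = atomic_exchange(&e->low, 0);
	res.high = e->high;
	e->high = 0;
	return res.whole;
}

int main(void)
{
	union split_entry e = { .whole = 0x123456789abcd067ULL };

	printf("old=%#llx\n", (unsigned long long)split_get_and_clear(&e));
	return 0;
}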
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 437feb436efa..1cfb36b8c024 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h | |||
@@ -46,6 +46,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page); | |||
46 | #define set_pte(ptep, pte) native_set_pte(ptep, pte) | 46 | #define set_pte(ptep, pte) native_set_pte(ptep, pte) |
47 | #define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte) | 47 | #define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte) |
48 | #define set_pmd_at(mm, addr, pmdp, pmd) native_set_pmd_at(mm, addr, pmdp, pmd) | 48 | #define set_pmd_at(mm, addr, pmdp, pmd) native_set_pmd_at(mm, addr, pmdp, pmd) |
49 | #define set_pud_at(mm, addr, pudp, pud) native_set_pud_at(mm, addr, pudp, pud) | ||
49 | 50 | ||
50 | #define set_pte_atomic(ptep, pte) \ | 51 | #define set_pte_atomic(ptep, pte) \ |
51 | native_set_pte_atomic(ptep, pte) | 52 | native_set_pte_atomic(ptep, pte) |
@@ -128,6 +129,16 @@ static inline int pmd_young(pmd_t pmd) | |||
128 | return pmd_flags(pmd) & _PAGE_ACCESSED; | 129 | return pmd_flags(pmd) & _PAGE_ACCESSED; |
129 | } | 130 | } |
130 | 131 | ||
132 | static inline int pud_dirty(pud_t pud) | ||
133 | { | ||
134 | return pud_flags(pud) & _PAGE_DIRTY; | ||
135 | } | ||
136 | |||
137 | static inline int pud_young(pud_t pud) | ||
138 | { | ||
139 | return pud_flags(pud) & _PAGE_ACCESSED; | ||
140 | } | ||
141 | |||
131 | static inline int pte_write(pte_t pte) | 142 | static inline int pte_write(pte_t pte) |
132 | { | 143 | { |
133 | return pte_flags(pte) & _PAGE_RW; | 144 | return pte_flags(pte) & _PAGE_RW; |
@@ -181,6 +192,13 @@ static inline int pmd_trans_huge(pmd_t pmd) | |||
181 | return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE; | 192 | return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE; |
182 | } | 193 | } |
183 | 194 | ||
195 | #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD | ||
196 | static inline int pud_trans_huge(pud_t pud) | ||
197 | { | ||
198 | return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE; | ||
199 | } | ||
200 | #endif | ||
201 | |||
184 | #define has_transparent_hugepage has_transparent_hugepage | 202 | #define has_transparent_hugepage has_transparent_hugepage |
185 | static inline int has_transparent_hugepage(void) | 203 | static inline int has_transparent_hugepage(void) |
186 | { | 204 | { |
@@ -192,6 +210,18 @@ static inline int pmd_devmap(pmd_t pmd) | |||
192 | { | 210 | { |
193 | return !!(pmd_val(pmd) & _PAGE_DEVMAP); | 211 | return !!(pmd_val(pmd) & _PAGE_DEVMAP); |
194 | } | 212 | } |
213 | |||
214 | #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD | ||
215 | static inline int pud_devmap(pud_t pud) | ||
216 | { | ||
217 | return !!(pud_val(pud) & _PAGE_DEVMAP); | ||
218 | } | ||
219 | #else | ||
220 | static inline int pud_devmap(pud_t pud) | ||
221 | { | ||
222 | return 0; | ||
223 | } | ||
224 | #endif | ||
195 | #endif | 225 | #endif |
196 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ | 226 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ |
197 | 227 | ||
@@ -333,6 +363,65 @@ static inline pmd_t pmd_mknotpresent(pmd_t pmd) | |||
333 | return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE); | 363 | return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE); |
334 | } | 364 | } |
335 | 365 | ||
366 | static inline pud_t pud_set_flags(pud_t pud, pudval_t set) | ||
367 | { | ||
368 | pudval_t v = native_pud_val(pud); | ||
369 | |||
370 | return __pud(v | set); | ||
371 | } | ||
372 | |||
373 | static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear) | ||
374 | { | ||
375 | pudval_t v = native_pud_val(pud); | ||
376 | |||
377 | return __pud(v & ~clear); | ||
378 | } | ||
379 | |||
380 | static inline pud_t pud_mkold(pud_t pud) | ||
381 | { | ||
382 | return pud_clear_flags(pud, _PAGE_ACCESSED); | ||
383 | } | ||
384 | |||
385 | static inline pud_t pud_mkclean(pud_t pud) | ||
386 | { | ||
387 | return pud_clear_flags(pud, _PAGE_DIRTY); | ||
388 | } | ||
389 | |||
390 | static inline pud_t pud_wrprotect(pud_t pud) | ||
391 | { | ||
392 | return pud_clear_flags(pud, _PAGE_RW); | ||
393 | } | ||
394 | |||
395 | static inline pud_t pud_mkdirty(pud_t pud) | ||
396 | { | ||
397 | return pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY); | ||
398 | } | ||
399 | |||
400 | static inline pud_t pud_mkdevmap(pud_t pud) | ||
401 | { | ||
402 | return pud_set_flags(pud, _PAGE_DEVMAP); | ||
403 | } | ||
404 | |||
405 | static inline pud_t pud_mkhuge(pud_t pud) | ||
406 | { | ||
407 | return pud_set_flags(pud, _PAGE_PSE); | ||
408 | } | ||
409 | |||
410 | static inline pud_t pud_mkyoung(pud_t pud) | ||
411 | { | ||
412 | return pud_set_flags(pud, _PAGE_ACCESSED); | ||
413 | } | ||
414 | |||
415 | static inline pud_t pud_mkwrite(pud_t pud) | ||
416 | { | ||
417 | return pud_set_flags(pud, _PAGE_RW); | ||
418 | } | ||
419 | |||
420 | static inline pud_t pud_mknotpresent(pud_t pud) | ||
421 | { | ||
422 | return pud_clear_flags(pud, _PAGE_PRESENT | _PAGE_PROTNONE); | ||
423 | } | ||
424 | |||
336 | #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY | 425 | #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY |
337 | static inline int pte_soft_dirty(pte_t pte) | 426 | static inline int pte_soft_dirty(pte_t pte) |
338 | { | 427 | { |
@@ -344,6 +433,11 @@ static inline int pmd_soft_dirty(pmd_t pmd) | |||
344 | return pmd_flags(pmd) & _PAGE_SOFT_DIRTY; | 433 | return pmd_flags(pmd) & _PAGE_SOFT_DIRTY; |
345 | } | 434 | } |
346 | 435 | ||
436 | static inline int pud_soft_dirty(pud_t pud) | ||
437 | { | ||
438 | return pud_flags(pud) & _PAGE_SOFT_DIRTY; | ||
439 | } | ||
440 | |||
347 | static inline pte_t pte_mksoft_dirty(pte_t pte) | 441 | static inline pte_t pte_mksoft_dirty(pte_t pte) |
348 | { | 442 | { |
349 | return pte_set_flags(pte, _PAGE_SOFT_DIRTY); | 443 | return pte_set_flags(pte, _PAGE_SOFT_DIRTY); |
@@ -354,6 +448,11 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) | |||
354 | return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY); | 448 | return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY); |
355 | } | 449 | } |
356 | 450 | ||
451 | static inline pud_t pud_mksoft_dirty(pud_t pud) | ||
452 | { | ||
453 | return pud_set_flags(pud, _PAGE_SOFT_DIRTY); | ||
454 | } | ||
455 | |||
357 | static inline pte_t pte_clear_soft_dirty(pte_t pte) | 456 | static inline pte_t pte_clear_soft_dirty(pte_t pte) |
358 | { | 457 | { |
359 | return pte_clear_flags(pte, _PAGE_SOFT_DIRTY); | 458 | return pte_clear_flags(pte, _PAGE_SOFT_DIRTY); |
@@ -364,6 +463,11 @@ static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd) | |||
364 | return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY); | 463 | return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY); |
365 | } | 464 | } |
366 | 465 | ||
466 | static inline pud_t pud_clear_soft_dirty(pud_t pud) | ||
467 | { | ||
468 | return pud_clear_flags(pud, _PAGE_SOFT_DIRTY); | ||
469 | } | ||
470 | |||
367 | #endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */ | 471 | #endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */ |
368 | 472 | ||
369 | /* | 473 | /* |
@@ -392,6 +496,12 @@ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) | |||
392 | massage_pgprot(pgprot)); | 496 | massage_pgprot(pgprot)); |
393 | } | 497 | } |
394 | 498 | ||
499 | static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot) | ||
500 | { | ||
501 | return __pud(((phys_addr_t)page_nr << PAGE_SHIFT) | | ||
502 | massage_pgprot(pgprot)); | ||
503 | } | ||
504 | |||
395 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | 505 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) |
396 | { | 506 | { |
397 | pteval_t val = pte_val(pte); | 507 | pteval_t val = pte_val(pte); |
@@ -771,6 +881,14 @@ static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp) | |||
771 | return res; | 881 | return res; |
772 | } | 882 | } |
773 | 883 | ||
884 | static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp) | ||
885 | { | ||
886 | pud_t res = *pudp; | ||
887 | |||
888 | native_pud_clear(pudp); | ||
889 | return res; | ||
890 | } | ||
891 | |||
774 | static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr, | 892 | static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr, |
775 | pte_t *ptep , pte_t pte) | 893 | pte_t *ptep , pte_t pte) |
776 | { | 894 | { |
@@ -783,6 +901,12 @@ static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr, | |||
783 | native_set_pmd(pmdp, pmd); | 901 | native_set_pmd(pmdp, pmd); |
784 | } | 902 | } |
785 | 903 | ||
904 | static inline void native_set_pud_at(struct mm_struct *mm, unsigned long addr, | ||
905 | pud_t *pudp, pud_t pud) | ||
906 | { | ||
907 | native_set_pud(pudp, pud); | ||
908 | } | ||
909 | |||
786 | #ifndef CONFIG_PARAVIRT | 910 | #ifndef CONFIG_PARAVIRT |
787 | /* | 911 | /* |
788 | * Rules for using pte_update - it must be called after any PTE update which | 912 | * Rules for using pte_update - it must be called after any PTE update which |
@@ -861,10 +985,15 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, | |||
861 | extern int pmdp_set_access_flags(struct vm_area_struct *vma, | 985 | extern int pmdp_set_access_flags(struct vm_area_struct *vma, |
862 | unsigned long address, pmd_t *pmdp, | 986 | unsigned long address, pmd_t *pmdp, |
863 | pmd_t entry, int dirty); | 987 | pmd_t entry, int dirty); |
988 | extern int pudp_set_access_flags(struct vm_area_struct *vma, | ||
989 | unsigned long address, pud_t *pudp, | ||
990 | pud_t entry, int dirty); | ||
864 | 991 | ||
865 | #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG | 992 | #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG |
866 | extern int pmdp_test_and_clear_young(struct vm_area_struct *vma, | 993 | extern int pmdp_test_and_clear_young(struct vm_area_struct *vma, |
867 | unsigned long addr, pmd_t *pmdp); | 994 | unsigned long addr, pmd_t *pmdp); |
995 | extern int pudp_test_and_clear_young(struct vm_area_struct *vma, | ||
996 | unsigned long addr, pud_t *pudp); | ||
868 | 997 | ||
869 | #define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH | 998 | #define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH |
870 | extern int pmdp_clear_flush_young(struct vm_area_struct *vma, | 999 | extern int pmdp_clear_flush_young(struct vm_area_struct *vma, |
@@ -884,6 +1013,13 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long | |||
884 | return native_pmdp_get_and_clear(pmdp); | 1013 | return native_pmdp_get_and_clear(pmdp); |
885 | } | 1014 | } |
886 | 1015 | ||
1016 | #define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR | ||
1017 | static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm, | ||
1018 | unsigned long addr, pud_t *pudp) | ||
1019 | { | ||
1020 | return native_pudp_get_and_clear(pudp); | ||
1021 | } | ||
1022 | |||
887 | #define __HAVE_ARCH_PMDP_SET_WRPROTECT | 1023 | #define __HAVE_ARCH_PMDP_SET_WRPROTECT |
888 | static inline void pmdp_set_wrprotect(struct mm_struct *mm, | 1024 | static inline void pmdp_set_wrprotect(struct mm_struct *mm, |
889 | unsigned long addr, pmd_t *pmdp) | 1025 | unsigned long addr, pmd_t *pmdp) |
@@ -932,6 +1068,10 @@ static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, | |||
932 | unsigned long addr, pmd_t *pmd) | 1068 | unsigned long addr, pmd_t *pmd) |
933 | { | 1069 | { |
934 | } | 1070 | } |
1071 | static inline void update_mmu_cache_pud(struct vm_area_struct *vma, | ||
1072 | unsigned long addr, pud_t *pud) | ||
1073 | { | ||
1074 | } | ||
935 | 1075 | ||
936 | #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY | 1076 | #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY |
937 | static inline pte_t pte_swp_mksoft_dirty(pte_t pte) | 1077 | static inline pte_t pte_swp_mksoft_dirty(pte_t pte) |
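The new pud_*() helpers above deliberately mirror their pmd_*() counterparts one level up: each is a pure function that ORs flags into, or masks flags out of, the entry value, so callers can compose them freely. A sketch of that composition under illustrative assumptions (the _PAGE_* values below are placeholders, not the real bits from pgtable_types.h):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pudval_t;
typedef struct { pudval_t pud; } pud_t;

/* Placeholder flag bits; the real _PAGE_* values live in pgtable_types.h. */
#define _PAGE_RW	(1ULL << 1)
#define _PAGE_PSE	(1ULL << 7)
#define _PAGE_DEVMAP	(1ULL << 58)
#define PAGE_SHIFT	12

static pud_t __pud(pudval_t v) { return (pud_t){ v }; }

/* Same shape as the helpers in the hunk: pure set/clear of flag bits. */
static pud_t pud_set_flags(pud_t pud, pudval_t set)
{
	return __pud(pud.pud | set);
}

static pud_t pud_mkhuge(pud_t pud)   { return pud_set_flags(pud, _PAGE_PSE); }
static pud_t pud_mkwrite(pud_t pud)  { return pud_set_flags(pud, _PAGE_RW); }
static pud_t pud_mkdevmap(pud_t pud) { return pud_set_flags(pud, _PAGE_DEVMAP); }

int main(void)
{
	/* pfn_pud() analogue: the pfn shifted into place, flags layered on top. */
	pud_t pud = __pud((pudval_t)0x12345 << PAGE_SHIFT);

	pud = pud_mkdevmap(pud_mkwrite(pud_mkhuge(pud)));
	printf("entry=%#llx\n", (unsigned long long)pud.pud);
	return 0;
}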
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 62b775926045..73c7ccc38912 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h | |||
@@ -106,6 +106,21 @@ static inline void native_pud_clear(pud_t *pud) | |||
106 | native_set_pud(pud, native_make_pud(0)); | 106 | native_set_pud(pud, native_make_pud(0)); |
107 | } | 107 | } |
108 | 108 | ||
109 | static inline pud_t native_pudp_get_and_clear(pud_t *xp) | ||
110 | { | ||
111 | #ifdef CONFIG_SMP | ||
112 | return native_make_pud(xchg(&xp->pud, 0)); | ||
113 | #else | ||
114 | /* native_local_pudp_get_and_clear, | ||
115 | * but duplicated because of cyclic dependency | ||
116 | */ | ||
117 | pud_t ret = *xp; | ||
118 | |||
119 | native_pud_clear(xp); | ||
120 | return ret; | ||
121 | #endif | ||
122 | } | ||
123 | |||
109 | static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd) | 124 | static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd) |
110 | { | 125 | { |
111 | *pgdp = pgd; | 126 | *pgdp = pgd; |
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index a1bfba0f7234..4797e87b0fb6 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c | |||
@@ -425,6 +425,7 @@ struct pv_mmu_ops pv_mmu_ops __ro_after_init = { | |||
425 | .pmd_clear = native_pmd_clear, | 425 | .pmd_clear = native_pmd_clear, |
426 | #endif | 426 | #endif |
427 | .set_pud = native_set_pud, | 427 | .set_pud = native_set_pud, |
428 | .set_pud_at = native_set_pud_at, | ||
428 | 429 | ||
429 | .pmd_val = PTE_IDENT, | 430 | .pmd_val = PTE_IDENT, |
430 | .make_pmd = PTE_IDENT, | 431 | .make_pmd = PTE_IDENT, |
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index d30c37750765..d5c223c9cf11 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c | |||
@@ -91,7 +91,8 @@ again: | |||
91 | page = NULL; | 91 | page = NULL; |
92 | /* CMA can be used only in the context which permits sleeping */ | 92 | /* CMA can be used only in the context which permits sleeping */ |
93 | if (gfpflags_allow_blocking(flag)) { | 93 | if (gfpflags_allow_blocking(flag)) { |
94 | page = dma_alloc_from_contiguous(dev, count, get_order(size)); | 94 | page = dma_alloc_from_contiguous(dev, count, get_order(size), |
95 | flag); | ||
95 | if (page && page_to_phys(page) + size > dma_mask) { | 96 | if (page && page_to_phys(page) + size > dma_mask) { |
96 | dma_release_from_contiguous(dev, page, count); | 97 | dma_release_from_contiguous(dev, page, count); |
97 | page = NULL; | 98 | page = NULL; |
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index 0d4fb3ebbbac..99c7805a9693 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c | |||
@@ -154,14 +154,12 @@ static inline void get_head_page_multiple(struct page *page, int nr) | |||
154 | SetPageReferenced(page); | 154 | SetPageReferenced(page); |
155 | } | 155 | } |
156 | 156 | ||
157 | static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr, | 157 | static int __gup_device_huge(unsigned long pfn, unsigned long addr, |
158 | unsigned long end, struct page **pages, int *nr) | 158 | unsigned long end, struct page **pages, int *nr) |
159 | { | 159 | { |
160 | int nr_start = *nr; | 160 | int nr_start = *nr; |
161 | unsigned long pfn = pmd_pfn(pmd); | ||
162 | struct dev_pagemap *pgmap = NULL; | 161 | struct dev_pagemap *pgmap = NULL; |
163 | 162 | ||
164 | pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT; | ||
165 | do { | 163 | do { |
166 | struct page *page = pfn_to_page(pfn); | 164 | struct page *page = pfn_to_page(pfn); |
167 | 165 | ||
@@ -180,6 +178,24 @@ static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr, | |||
180 | return 1; | 178 | return 1; |
181 | } | 179 | } |
182 | 180 | ||
181 | static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr, | ||
182 | unsigned long end, struct page **pages, int *nr) | ||
183 | { | ||
184 | unsigned long fault_pfn; | ||
185 | |||
186 | fault_pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); | ||
187 | return __gup_device_huge(fault_pfn, addr, end, pages, nr); | ||
188 | } | ||
189 | |||
190 | static int __gup_device_huge_pud(pud_t pud, unsigned long addr, | ||
191 | unsigned long end, struct page **pages, int *nr) | ||
192 | { | ||
193 | unsigned long fault_pfn; | ||
194 | |||
195 | fault_pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); | ||
196 | return __gup_device_huge(fault_pfn, addr, end, pages, nr); | ||
197 | } | ||
198 | |||
183 | static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr, | 199 | static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr, |
184 | unsigned long end, int write, struct page **pages, int *nr) | 200 | unsigned long end, int write, struct page **pages, int *nr) |
185 | { | 201 | { |
@@ -251,9 +267,13 @@ static noinline int gup_huge_pud(pud_t pud, unsigned long addr, | |||
251 | 267 | ||
252 | if (!pte_allows_gup(pud_val(pud), write)) | 268 | if (!pte_allows_gup(pud_val(pud), write)) |
253 | return 0; | 269 | return 0; |
270 | |||
271 | VM_BUG_ON(!pfn_valid(pud_pfn(pud))); | ||
272 | if (pud_devmap(pud)) | ||
273 | return __gup_device_huge_pud(pud, addr, end, pages, nr); | ||
274 | |||
254 | /* hugepages are never "special" */ | 275 | /* hugepages are never "special" */ |
255 | VM_BUG_ON(pud_flags(pud) & _PAGE_SPECIAL); | 276 | VM_BUG_ON(pud_flags(pud) & _PAGE_SPECIAL); |
256 | VM_BUG_ON(!pfn_valid(pud_pfn(pud))); | ||
257 | 277 | ||
258 | refs = 0; | 278 | refs = 0; |
259 | head = pud_page(pud); | 279 | head = pud_page(pud); |
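The gup refactor above moves the shared loop into __gup_device_huge() and leaves only the pfn arithmetic in the pmd/pud wrappers: the base pfn of the huge entry plus the page offset of addr within the mapping. A worked example of that offset computation, assuming the usual x86-64 PUD_SHIFT/PAGE_SHIFT values:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PUD_SHIFT	30			/* 1 GiB huge entry on x86-64 */
#define PUD_MASK	(~((1UL << PUD_SHIFT) - 1))

int main(void)
{
	unsigned long base_pfn = 0x40000;	/* pud_pfn(pud): first pfn of the 1 GiB page */
	unsigned long addr = 0x7f0040123000UL;	/* faulting address inside it */

	/* Offset of addr within the huge page, converted to whole pages. */
	unsigned long fault_pfn = base_pfn + ((addr & ~PUD_MASK) >> PAGE_SHIFT);

	printf("fault_pfn=%#lx (offset %lu pages)\n",
	       fault_pfn, fault_pfn - base_pfn);
	return 0;
}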
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c index aad4ac386f98..c98079684bdb 100644 --- a/arch/x86/mm/mpx.c +++ b/arch/x86/mm/mpx.c | |||
@@ -51,7 +51,7 @@ static unsigned long mpx_mmap(unsigned long len) | |||
51 | 51 | ||
52 | down_write(&mm->mmap_sem); | 52 | down_write(&mm->mmap_sem); |
53 | addr = do_mmap(NULL, 0, len, PROT_READ | PROT_WRITE, | 53 | addr = do_mmap(NULL, 0, len, PROT_READ | PROT_WRITE, |
54 | MAP_ANONYMOUS | MAP_PRIVATE, VM_MPX, 0, &populate); | 54 | MAP_ANONYMOUS | MAP_PRIVATE, VM_MPX, 0, &populate, NULL); |
55 | up_write(&mm->mmap_sem); | 55 | up_write(&mm->mmap_sem); |
56 | if (populate) | 56 | if (populate) |
57 | mm_populate(addr, populate); | 57 | mm_populate(addr, populate); |
@@ -893,7 +893,7 @@ static int unmap_entire_bt(struct mm_struct *mm, | |||
893 | * avoid recursion, do_munmap() will check whether it comes | 893 | * avoid recursion, do_munmap() will check whether it comes |
894 | * from one bounds table through VM_MPX flag. | 894 | * from one bounds table through VM_MPX flag. |
895 | */ | 895 | */ |
896 | return do_munmap(mm, bt_addr, mpx_bt_size_bytes(mm)); | 896 | return do_munmap(mm, bt_addr, mpx_bt_size_bytes(mm), NULL); |
897 | } | 897 | } |
898 | 898 | ||
899 | static int try_unmap_single_bt(struct mm_struct *mm, | 899 | static int try_unmap_single_bt(struct mm_struct *mm, |
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 3feec5af4e67..6cbdff26bb96 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c | |||
@@ -445,6 +445,26 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, | |||
445 | 445 | ||
446 | return changed; | 446 | return changed; |
447 | } | 447 | } |
448 | |||
449 | int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address, | ||
450 | pud_t *pudp, pud_t entry, int dirty) | ||
451 | { | ||
452 | int changed = !pud_same(*pudp, entry); | ||
453 | |||
454 | VM_BUG_ON(address & ~HPAGE_PUD_MASK); | ||
455 | |||
456 | if (changed && dirty) { | ||
457 | *pudp = entry; | ||
458 | /* | ||
459 | * We had a write-protection fault here and changed the pud | ||
460 | * to be more permissive. No need to flush the TLB for that, | ||
461 | * #PF is architecturally guaranteed to do that and in the | ||
462 | * worst case we'll generate a spurious fault. | ||

463 | */ | ||
464 | } | ||
465 | |||
466 | return changed; | ||
467 | } | ||
448 | #endif | 468 | #endif |
449 | 469 | ||
450 | int ptep_test_and_clear_young(struct vm_area_struct *vma, | 470 | int ptep_test_and_clear_young(struct vm_area_struct *vma, |
@@ -474,6 +494,17 @@ int pmdp_test_and_clear_young(struct vm_area_struct *vma, | |||
474 | 494 | ||
475 | return ret; | 495 | return ret; |
476 | } | 496 | } |
497 | int pudp_test_and_clear_young(struct vm_area_struct *vma, | ||
498 | unsigned long addr, pud_t *pudp) | ||
499 | { | ||
500 | int ret = 0; | ||
501 | |||
502 | if (pud_young(*pudp)) | ||
503 | ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, | ||
504 | (unsigned long *)pudp); | ||
505 | |||
506 | return ret; | ||
507 | } | ||
477 | #endif | 508 | #endif |
478 | 509 | ||
479 | int ptep_clear_flush_young(struct vm_area_struct *vma, | 510 | int ptep_clear_flush_young(struct vm_area_struct *vma, |
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c index 70e362e6038e..34c1f9fa6acc 100644 --- a/arch/xtensa/kernel/pci-dma.c +++ b/arch/xtensa/kernel/pci-dma.c | |||
@@ -158,7 +158,8 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size, | |||
158 | flag |= GFP_DMA; | 158 | flag |= GFP_DMA; |
159 | 159 | ||
160 | if (gfpflags_allow_blocking(flag)) | 160 | if (gfpflags_allow_blocking(flag)) |
161 | page = dma_alloc_from_contiguous(dev, count, get_order(size)); | 161 | page = dma_alloc_from_contiguous(dev, count, get_order(size), |
162 | flag); | ||
162 | 163 | ||
163 | if (!page) | 164 | if (!page) |
164 | page = alloc_pages(flag, get_order(size)); | 165 | page = alloc_pages(flag, get_order(size)); |
diff --git a/crypto/lz4.c b/crypto/lz4.c index 99c1b2cc2976..71eff9b01b12 100644 --- a/crypto/lz4.c +++ b/crypto/lz4.c | |||
@@ -66,15 +66,13 @@ static void lz4_exit(struct crypto_tfm *tfm) | |||
66 | static int __lz4_compress_crypto(const u8 *src, unsigned int slen, | 66 | static int __lz4_compress_crypto(const u8 *src, unsigned int slen, |
67 | u8 *dst, unsigned int *dlen, void *ctx) | 67 | u8 *dst, unsigned int *dlen, void *ctx) |
68 | { | 68 | { |
69 | size_t tmp_len = *dlen; | 69 | int out_len = LZ4_compress_default(src, dst, |
70 | int err; | 70 | slen, *dlen, ctx); |
71 | 71 | ||
72 | err = lz4_compress(src, slen, dst, &tmp_len, ctx); | 72 | if (!out_len) |
73 | |||
74 | if (err < 0) | ||
75 | return -EINVAL; | 73 | return -EINVAL; |
76 | 74 | ||
77 | *dlen = tmp_len; | 75 | *dlen = out_len; |
78 | return 0; | 76 | return 0; |
79 | } | 77 | } |
80 | 78 | ||
@@ -96,16 +94,13 @@ static int lz4_compress_crypto(struct crypto_tfm *tfm, const u8 *src, | |||
96 | static int __lz4_decompress_crypto(const u8 *src, unsigned int slen, | 94 | static int __lz4_decompress_crypto(const u8 *src, unsigned int slen, |
97 | u8 *dst, unsigned int *dlen, void *ctx) | 95 | u8 *dst, unsigned int *dlen, void *ctx) |
98 | { | 96 | { |
99 | int err; | 97 | int out_len = LZ4_decompress_safe(src, dst, slen, *dlen); |
100 | size_t tmp_len = *dlen; | ||
101 | size_t __slen = slen; | ||
102 | 98 | ||
103 | err = lz4_decompress_unknownoutputsize(src, __slen, dst, &tmp_len); | 99 | if (out_len < 0) |
104 | if (err < 0) | 100 | return out_len; |
105 | return -EINVAL; | ||
106 | 101 | ||
107 | *dlen = tmp_len; | 102 | *dlen = out_len; |
108 | return err; | 103 | return 0; |
109 | } | 104 | } |
110 | 105 | ||
111 | static int lz4_sdecompress(struct crypto_scomp *tfm, const u8 *src, | 106 | static int lz4_sdecompress(struct crypto_scomp *tfm, const u8 *src, |
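The conversion above follows the new LZ4 module's upstream-style API: LZ4_compress_default() returns the number of bytes written (0 on failure) and LZ4_decompress_safe() returns the decompressed length or a negative error, replacing the old calls that passed lengths back through size_t pointers. A hedged round-trip sketch against userspace liblz4, whose calling convention the updated kernel module mirrors:

#include <lz4.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char src[] = "Join us now and share the software "
			   "Join us now and share the software ";
	char comp[LZ4_COMPRESSBOUND(sizeof(src))];
	char out[sizeof(src)];

	/* New-style API: the length is returned directly, 0 means failure. */
	int clen = LZ4_compress_default(src, comp, sizeof(src), sizeof(comp));
	if (!clen)
		return 1;

	/* A negative return means corrupt input or a too-small output buffer. */
	int dlen = LZ4_decompress_safe(comp, out, clen, sizeof(out));
	if (dlen < 0)
		return 1;

	printf("%d -> %d -> %d bytes, match=%d\n", (int)sizeof(src), clen, dlen,
	       !memcmp(src, out, sizeof(src)));
	return 0;
}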
diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c index 75ffc4a3f786..03a34a8109c0 100644 --- a/crypto/lz4hc.c +++ b/crypto/lz4hc.c | |||
@@ -65,15 +65,13 @@ static void lz4hc_exit(struct crypto_tfm *tfm) | |||
65 | static int __lz4hc_compress_crypto(const u8 *src, unsigned int slen, | 65 | static int __lz4hc_compress_crypto(const u8 *src, unsigned int slen, |
66 | u8 *dst, unsigned int *dlen, void *ctx) | 66 | u8 *dst, unsigned int *dlen, void *ctx) |
67 | { | 67 | { |
68 | size_t tmp_len = *dlen; | 68 | int out_len = LZ4_compress_HC(src, dst, slen, |
69 | int err; | 69 | *dlen, LZ4HC_DEFAULT_CLEVEL, ctx); |
70 | 70 | ||
71 | err = lz4hc_compress(src, slen, dst, &tmp_len, ctx); | 71 | if (!out_len) |
72 | |||
73 | if (err < 0) | ||
74 | return -EINVAL; | 72 | return -EINVAL; |
75 | 73 | ||
76 | *dlen = tmp_len; | 74 | *dlen = out_len; |
77 | return 0; | 75 | return 0; |
78 | } | 76 | } |
79 | 77 | ||
@@ -97,16 +95,13 @@ static int lz4hc_compress_crypto(struct crypto_tfm *tfm, const u8 *src, | |||
97 | static int __lz4hc_decompress_crypto(const u8 *src, unsigned int slen, | 95 | static int __lz4hc_decompress_crypto(const u8 *src, unsigned int slen, |
98 | u8 *dst, unsigned int *dlen, void *ctx) | 96 | u8 *dst, unsigned int *dlen, void *ctx) |
99 | { | 97 | { |
100 | int err; | 98 | int out_len = LZ4_decompress_safe(src, dst, slen, *dlen); |
101 | size_t tmp_len = *dlen; | ||
102 | size_t __slen = slen; | ||
103 | 99 | ||
104 | err = lz4_decompress_unknownoutputsize(src, __slen, dst, &tmp_len); | 100 | if (out_len < 0) |
105 | if (err < 0) | 101 | return out_len; |
106 | return -EINVAL; | ||
107 | 102 | ||
108 | *dlen = tmp_len; | 103 | *dlen = out_len; |
109 | return err; | 104 | return 0; |
110 | } | 105 | } |
111 | 106 | ||
112 | static int lz4hc_sdecompress(struct crypto_scomp *tfm, const u8 *src, | 107 | static int lz4hc_sdecompress(struct crypto_scomp *tfm, const u8 *src, |
diff --git a/crypto/testmgr.h b/crypto/testmgr.h index f85e51cf7dcc..006ecc434351 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h | |||
@@ -34293,61 +34293,123 @@ static struct hash_testvec bfin_crc_tv_template[] = { | |||
34293 | 34293 | ||
34294 | static struct comp_testvec lz4_comp_tv_template[] = { | 34294 | static struct comp_testvec lz4_comp_tv_template[] = { |
34295 | { | 34295 | { |
34296 | .inlen = 70, | 34296 | .inlen = 255, |
34297 | .outlen = 45, | 34297 | .outlen = 218, |
34298 | .input = "Join us now and share the software " | 34298 | .input = "LZ4 is lossless compression algorithm, providing" |
34299 | "Join us now and share the software ", | 34299 | " compression speed at 400 MB/s per core, scalable " |
34300 | .output = "\xf0\x10\x4a\x6f\x69\x6e\x20\x75" | 34300 | "with multi-cores CPU. It features an extremely fast " |
34301 | "\x73\x20\x6e\x6f\x77\x20\x61\x6e" | 34301 | "decoder, with speed in multiple GB/s per core, " |
34302 | "\x64\x20\x73\x68\x61\x72\x65\x20" | 34302 | "typically reaching RAM speed limits on multi-core " |
34303 | "\x74\x68\x65\x20\x73\x6f\x66\x74" | 34303 | "systems.", |
34304 | "\x77\x0d\x00\x0f\x23\x00\x0b\x50" | 34304 | .output = "\xf9\x21\x4c\x5a\x34\x20\x69\x73\x20\x6c\x6f\x73\x73" |
34305 | "\x77\x61\x72\x65\x20", | 34305 | "\x6c\x65\x73\x73\x20\x63\x6f\x6d\x70\x72\x65\x73\x73" |
34306 | "\x69\x6f\x6e\x20\x61\x6c\x67\x6f\x72\x69\x74\x68\x6d" | ||
34307 | "\x2c\x20\x70\x72\x6f\x76\x69\x64\x69\x6e\x67\x21\x00" | ||
34308 | "\xf0\x21\x73\x70\x65\x65\x64\x20\x61\x74\x20\x34\x30" | ||
34309 | "\x30\x20\x4d\x42\x2f\x73\x20\x70\x65\x72\x20\x63\x6f" | ||
34310 | "\x72\x65\x2c\x20\x73\x63\x61\x6c\x61\x62\x6c\x65\x20" | ||
34311 | "\x77\x69\x74\x68\x20\x6d\x75\x6c\x74\x69\x2d\x1a\x00" | ||
34312 | "\xf0\x00\x73\x20\x43\x50\x55\x2e\x20\x49\x74\x20\x66" | ||
34313 | "\x65\x61\x74\x75\x11\x00\xf2\x0b\x61\x6e\x20\x65\x78" | ||
34314 | "\x74\x72\x65\x6d\x65\x6c\x79\x20\x66\x61\x73\x74\x20" | ||
34315 | "\x64\x65\x63\x6f\x64\x65\x72\x2c\x3d\x00\x02\x67\x00" | ||
34316 | "\x22\x69\x6e\x46\x00\x5a\x70\x6c\x65\x20\x47\x6c\x00" | ||
34317 | "\xf0\x00\x74\x79\x70\x69\x63\x61\x6c\x6c\x79\x20\x72" | ||
34318 | "\x65\x61\x63\x68\xa7\x00\x33\x52\x41\x4d\x38\x00\x83" | ||
34319 | "\x6c\x69\x6d\x69\x74\x73\x20\x6f\x3f\x00\x01\x85\x00" | ||
34320 | "\x90\x20\x73\x79\x73\x74\x65\x6d\x73\x2e", | ||
34321 | |||
34306 | }, | 34322 | }, |
34307 | }; | 34323 | }; |
34308 | 34324 | ||
34309 | static struct comp_testvec lz4_decomp_tv_template[] = { | 34325 | static struct comp_testvec lz4_decomp_tv_template[] = { |
34310 | { | 34326 | { |
34311 | .inlen = 45, | 34327 | .inlen = 218, |
34312 | .outlen = 70, | 34328 | .outlen = 255, |
34313 | .input = "\xf0\x10\x4a\x6f\x69\x6e\x20\x75" | 34329 | .input = "\xf9\x21\x4c\x5a\x34\x20\x69\x73\x20\x6c\x6f\x73\x73" |
34314 | "\x73\x20\x6e\x6f\x77\x20\x61\x6e" | 34330 | "\x6c\x65\x73\x73\x20\x63\x6f\x6d\x70\x72\x65\x73\x73" |
34315 | "\x64\x20\x73\x68\x61\x72\x65\x20" | 34331 | "\x69\x6f\x6e\x20\x61\x6c\x67\x6f\x72\x69\x74\x68\x6d" |
34316 | "\x74\x68\x65\x20\x73\x6f\x66\x74" | 34332 | "\x2c\x20\x70\x72\x6f\x76\x69\x64\x69\x6e\x67\x21\x00" |
34317 | "\x77\x0d\x00\x0f\x23\x00\x0b\x50" | 34333 | "\xf0\x21\x73\x70\x65\x65\x64\x20\x61\x74\x20\x34\x30" |
34318 | "\x77\x61\x72\x65\x20", | 34334 | "\x30\x20\x4d\x42\x2f\x73\x20\x70\x65\x72\x20\x63\x6f" |
34319 | .output = "Join us now and share the software " | 34335 | "\x72\x65\x2c\x20\x73\x63\x61\x6c\x61\x62\x6c\x65\x20" |
34320 | "Join us now and share the software ", | 34336 | "\x77\x69\x74\x68\x20\x6d\x75\x6c\x74\x69\x2d\x1a\x00" |
34337 | "\xf0\x00\x73\x20\x43\x50\x55\x2e\x20\x49\x74\x20\x66" | ||
34338 | "\x65\x61\x74\x75\x11\x00\xf2\x0b\x61\x6e\x20\x65\x78" | ||
34339 | "\x74\x72\x65\x6d\x65\x6c\x79\x20\x66\x61\x73\x74\x20" | ||
34340 | "\x64\x65\x63\x6f\x64\x65\x72\x2c\x3d\x00\x02\x67\x00" | ||
34341 | "\x22\x69\x6e\x46\x00\x5a\x70\x6c\x65\x20\x47\x6c\x00" | ||
34342 | "\xf0\x00\x74\x79\x70\x69\x63\x61\x6c\x6c\x79\x20\x72" | ||
34343 | "\x65\x61\x63\x68\xa7\x00\x33\x52\x41\x4d\x38\x00\x83" | ||
34344 | "\x6c\x69\x6d\x69\x74\x73\x20\x6f\x3f\x00\x01\x85\x00" | ||
34345 | "\x90\x20\x73\x79\x73\x74\x65\x6d\x73\x2e", | ||
34346 | .output = "LZ4 is lossless compression algorithm, providing" | ||
34347 | " compression speed at 400 MB/s per core, scalable " | ||
34348 | "with multi-cores CPU. It features an extremely fast " | ||
34349 | "decoder, with speed in multiple GB/s per core, " | ||
34350 | "typically reaching RAM speed limits on multi-core " | ||
34351 | "systems.", | ||
34321 | }, | 34352 | }, |
34322 | }; | 34353 | }; |
34323 | 34354 | ||
34324 | static struct comp_testvec lz4hc_comp_tv_template[] = { | 34355 | static struct comp_testvec lz4hc_comp_tv_template[] = { |
34325 | { | 34356 | { |
34326 | .inlen = 70, | 34357 | .inlen = 255, |
34327 | .outlen = 45, | 34358 | .outlen = 216, |
34328 | .input = "Join us now and share the software " | 34359 | .input = "LZ4 is lossless compression algorithm, providing" |
34329 | "Join us now and share the software ", | 34360 | " compression speed at 400 MB/s per core, scalable " |
34330 | .output = "\xf0\x10\x4a\x6f\x69\x6e\x20\x75" | 34361 | "with multi-cores CPU. It features an extremely fast " |
34331 | "\x73\x20\x6e\x6f\x77\x20\x61\x6e" | 34362 | "decoder, with speed in multiple GB/s per core, " |
34332 | "\x64\x20\x73\x68\x61\x72\x65\x20" | 34363 | "typically reaching RAM speed limits on multi-core " |
34333 | "\x74\x68\x65\x20\x73\x6f\x66\x74" | 34364 | "systems.", |
34334 | "\x77\x0d\x00\x0f\x23\x00\x0b\x50" | 34365 | .output = "\xf9\x21\x4c\x5a\x34\x20\x69\x73\x20\x6c\x6f\x73\x73" |
34335 | "\x77\x61\x72\x65\x20", | 34366 | "\x6c\x65\x73\x73\x20\x63\x6f\x6d\x70\x72\x65\x73\x73" |
34367 | "\x69\x6f\x6e\x20\x61\x6c\x67\x6f\x72\x69\x74\x68\x6d" | ||
34368 | "\x2c\x20\x70\x72\x6f\x76\x69\x64\x69\x6e\x67\x21\x00" | ||
34369 | "\xf0\x21\x73\x70\x65\x65\x64\x20\x61\x74\x20\x34\x30" | ||
34370 | "\x30\x20\x4d\x42\x2f\x73\x20\x70\x65\x72\x20\x63\x6f" | ||
34371 | "\x72\x65\x2c\x20\x73\x63\x61\x6c\x61\x62\x6c\x65\x20" | ||
34372 | "\x77\x69\x74\x68\x20\x6d\x75\x6c\x74\x69\x2d\x1a\x00" | ||
34373 | "\xf0\x00\x73\x20\x43\x50\x55\x2e\x20\x49\x74\x20\x66" | ||
34374 | "\x65\x61\x74\x75\x11\x00\xf2\x0b\x61\x6e\x20\x65\x78" | ||
34375 | "\x74\x72\x65\x6d\x65\x6c\x79\x20\x66\x61\x73\x74\x20" | ||
34376 | "\x64\x65\x63\x6f\x64\x65\x72\x2c\x3d\x00\x02\x67\x00" | ||
34377 | "\x22\x69\x6e\x46\x00\x5a\x70\x6c\x65\x20\x47\x6c\x00" | ||
34378 | "\xf0\x00\x74\x79\x70\x69\x63\x61\x6c\x6c\x79\x20\x72" | ||
34379 | "\x65\x61\x63\x68\xa7\x00\x33\x52\x41\x4d\x38\x00\x97" | ||
34380 | "\x6c\x69\x6d\x69\x74\x73\x20\x6f\x6e\x85\x00\x90\x20" | ||
34381 | "\x73\x79\x73\x74\x65\x6d\x73\x2e", | ||
34382 | |||
34336 | }, | 34383 | }, |
34337 | }; | 34384 | }; |
34338 | 34385 | ||
34339 | static struct comp_testvec lz4hc_decomp_tv_template[] = { | 34386 | static struct comp_testvec lz4hc_decomp_tv_template[] = { |
34340 | { | 34387 | { |
34341 | .inlen = 45, | 34388 | .inlen = 216, |
34342 | .outlen = 70, | 34389 | .outlen = 255, |
34343 | .input = "\xf0\x10\x4a\x6f\x69\x6e\x20\x75" | 34390 | .input = "\xf9\x21\x4c\x5a\x34\x20\x69\x73\x20\x6c\x6f\x73\x73" |
34344 | "\x73\x20\x6e\x6f\x77\x20\x61\x6e" | 34391 | "\x6c\x65\x73\x73\x20\x63\x6f\x6d\x70\x72\x65\x73\x73" |
34345 | "\x64\x20\x73\x68\x61\x72\x65\x20" | 34392 | "\x69\x6f\x6e\x20\x61\x6c\x67\x6f\x72\x69\x74\x68\x6d" |
34346 | "\x74\x68\x65\x20\x73\x6f\x66\x74" | 34393 | "\x2c\x20\x70\x72\x6f\x76\x69\x64\x69\x6e\x67\x21\x00" |
34347 | "\x77\x0d\x00\x0f\x23\x00\x0b\x50" | 34394 | "\xf0\x21\x73\x70\x65\x65\x64\x20\x61\x74\x20\x34\x30" |
34348 | "\x77\x61\x72\x65\x20", | 34395 | "\x30\x20\x4d\x42\x2f\x73\x20\x70\x65\x72\x20\x63\x6f" |
34349 | .output = "Join us now and share the software " | 34396 | "\x72\x65\x2c\x20\x73\x63\x61\x6c\x61\x62\x6c\x65\x20" |
34350 | "Join us now and share the software ", | 34397 | "\x77\x69\x74\x68\x20\x6d\x75\x6c\x74\x69\x2d\x1a\x00" |
34398 | "\xf0\x00\x73\x20\x43\x50\x55\x2e\x20\x49\x74\x20\x66" | ||
34399 | "\x65\x61\x74\x75\x11\x00\xf2\x0b\x61\x6e\x20\x65\x78" | ||
34400 | "\x74\x72\x65\x6d\x65\x6c\x79\x20\x66\x61\x73\x74\x20" | ||
34401 | "\x64\x65\x63\x6f\x64\x65\x72\x2c\x3d\x00\x02\x67\x00" | ||
34402 | "\x22\x69\x6e\x46\x00\x5a\x70\x6c\x65\x20\x47\x6c\x00" | ||
34403 | "\xf0\x00\x74\x79\x70\x69\x63\x61\x6c\x6c\x79\x20\x72" | ||
34404 | "\x65\x61\x63\x68\xa7\x00\x33\x52\x41\x4d\x38\x00\x97" | ||
34405 | "\x6c\x69\x6d\x69\x74\x73\x20\x6f\x6e\x85\x00\x90\x20" | ||
34406 | "\x73\x79\x73\x74\x65\x6d\x73\x2e", | ||
34407 | .output = "LZ4 is lossless compression algorithm, providing" | ||
34408 | " compression speed at 400 MB/s per core, scalable " | ||
34409 | "with multi-cores CPU. It features an extremely fast " | ||
34410 | "decoder, with speed in multiple GB/s per core, " | ||
34411 | "typically reaching RAM speed limits on multi-core " | ||
34412 | "systems.", | ||
34351 | }, | 34413 | }, |
34352 | }; | 34414 | }; |
34353 | 34415 | ||
diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 15b263a420e8..2bbcdc6fdfee 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c | |||
@@ -3342,7 +3342,7 @@ static void binder_vma_close(struct vm_area_struct *vma) | |||
3342 | binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); | 3342 | binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); |
3343 | } | 3343 | } |
3344 | 3344 | ||
3345 | static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 3345 | static int binder_vm_fault(struct vm_fault *vmf) |
3346 | { | 3346 | { |
3347 | return VM_FAULT_SIGBUS; | 3347 | return VM_FAULT_SIGBUS; |
3348 | } | 3348 | } |
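The binder hunk is one instance of a tree-wide signature change: ->fault() handlers no longer receive the vm_area_struct explicitly and instead read it from vmf->vma. A self-contained sketch of the conversion pattern, with stand-in structs carrying only the fields used here:

#include <stdio.h>

/* Stand-ins for the kernel structures; only the fields used below. */
struct vm_area_struct { unsigned long vm_start; };
struct vm_fault {
	struct vm_area_struct *vma;	/* the vma now travels inside vmf */
	unsigned long address;
};

/*
 * Old signature: int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
 * New signature: the handler takes only vmf and dereferences vmf->vma.
 */
static int example_fault(struct vm_fault *vmf)
{
	unsigned long offset = vmf->address - vmf->vma->vm_start;

	printf("fault at offset %#lx into the mapping\n", offset);
	return 0;	/* a real handler would return VM_FAULT_* here */
}

int main(void)
{
	struct vm_area_struct vma = { .vm_start = 0x7f0000000000UL };
	struct vm_fault vmf = { .vma = &vma, .address = 0x7f0000003000UL };

	return example_fault(&vmf);
}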
diff --git a/drivers/base/core.c b/drivers/base/core.c index 8c25e68e67d7..3050e6f99403 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c | |||
@@ -638,6 +638,11 @@ int lock_device_hotplug_sysfs(void) | |||
638 | return restart_syscall(); | 638 | return restart_syscall(); |
639 | } | 639 | } |
640 | 640 | ||
641 | void assert_held_device_hotplug(void) | ||
642 | { | ||
643 | lockdep_assert_held(&device_hotplug_lock); | ||
644 | } | ||
645 | |||
641 | #ifdef CONFIG_BLOCK | 646 | #ifdef CONFIG_BLOCK |
642 | static inline int device_is_not_partition(struct device *dev) | 647 | static inline int device_is_not_partition(struct device *dev) |
643 | { | 648 | { |
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c index e167a1e1bccb..b55804cac4c4 100644 --- a/drivers/base/dma-contiguous.c +++ b/drivers/base/dma-contiguous.c | |||
@@ -181,6 +181,7 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base, | |||
181 | * @dev: Pointer to device for which the allocation is performed. | 181 | * @dev: Pointer to device for which the allocation is performed. |
182 | * @count: Requested number of pages. | 182 | * @count: Requested number of pages. |
183 | * @align: Requested alignment of pages (in PAGE_SIZE order). | 183 | * @align: Requested alignment of pages (in PAGE_SIZE order). |
184 | * @gfp_mask: GFP flags to use for this allocation. | ||
184 | * | 185 | * |
185 | * This function allocates memory buffer for specified device. It uses | 186 | * This function allocates memory buffer for specified device. It uses |
186 | * device specific contiguous memory area if available or the default | 187 | * device specific contiguous memory area if available or the default |
@@ -188,12 +189,12 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base, | |||
188 | * function. | 189 | * function. |
189 | */ | 190 | */ |
190 | struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, | 191 | struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, |
191 | unsigned int align) | 192 | unsigned int align, gfp_t gfp_mask) |
192 | { | 193 | { |
193 | if (align > CONFIG_CMA_ALIGNMENT) | 194 | if (align > CONFIG_CMA_ALIGNMENT) |
194 | align = CONFIG_CMA_ALIGNMENT; | 195 | align = CONFIG_CMA_ALIGNMENT; |
195 | 196 | ||
196 | return cma_alloc(dev_get_cma_area(dev), count, align); | 197 | return cma_alloc(dev_get_cma_area(dev), count, align, gfp_mask); |
197 | } | 198 | } |
198 | 199 | ||
199 | /** | 200 | /** |
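The new gfp_mask parameter lets dma_alloc_from_contiguous() forward the caller's allocation context to cma_alloc() instead of CMA assuming a blocking GFP_KERNEL allocation; the x86 and xtensa pci-dma hunks earlier in this series pass their flag through accordingly. A loose user-space analogue of threading such a context flag through an allocator wrapper (the flag values here are illustrative, not the kernel's):

#include <stdio.h>
#include <stdlib.h>

typedef unsigned int gfp_t;
#define GFP_KERNEL	0x01u	/* caller may sleep */
#define GFP_ATOMIC	0x02u	/* caller may not sleep */

/*
 * Analogue of the change: the allocator receives the caller's context
 * flags instead of hard-coding a blocking allocation internally.
 */
static void *alloc_from_contiguous(size_t count, gfp_t gfp_mask)
{
	if (!(gfp_mask & GFP_KERNEL))
		return NULL;	/* non-blocking context: refuse rather than sleep */
	return calloc(count, 4096);
}

int main(void)
{
	void *p = alloc_from_contiguous(4, GFP_KERNEL);	/* flags forwarded */

	printf("blocking alloc %s\n", p ? "succeeded" : "failed");
	free(p);
	printf("atomic alloc %s\n",
	       alloc_from_contiguous(4, GFP_ATOMIC) ? "succeeded" : "refused");
	return 0;
}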
diff --git a/drivers/base/memory.c b/drivers/base/memory.c index fa26ffd25fa6..cc4f1d0cbffe 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c | |||
@@ -249,7 +249,7 @@ memory_block_action(unsigned long phys_index, unsigned long action, int online_t | |||
249 | return ret; | 249 | return ret; |
250 | } | 250 | } |
251 | 251 | ||
252 | int memory_block_change_state(struct memory_block *mem, | 252 | static int memory_block_change_state(struct memory_block *mem, |
253 | unsigned long to_state, unsigned long from_state_req) | 253 | unsigned long to_state, unsigned long from_state_req) |
254 | { | 254 | { |
255 | int ret = 0; | 255 | int ret = 0; |
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index c73fede582f7..e27d89a36c34 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c | |||
@@ -74,6 +74,17 @@ static void zram_clear_flag(struct zram_meta *meta, u32 index, | |||
74 | meta->table[index].value &= ~BIT(flag); | 74 | meta->table[index].value &= ~BIT(flag); |
75 | } | 75 | } |
76 | 76 | ||
77 | static inline void zram_set_element(struct zram_meta *meta, u32 index, | ||
78 | unsigned long element) | ||
79 | { | ||
80 | meta->table[index].element = element; | ||
81 | } | ||
82 | |||
83 | static inline void zram_clear_element(struct zram_meta *meta, u32 index) | ||
84 | { | ||
85 | meta->table[index].element = 0; | ||
86 | } | ||
87 | |||
77 | static size_t zram_get_obj_size(struct zram_meta *meta, u32 index) | 88 | static size_t zram_get_obj_size(struct zram_meta *meta, u32 index) |
78 | { | 89 | { |
79 | return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1); | 90 | return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1); |
@@ -146,31 +157,46 @@ static inline void update_used_max(struct zram *zram, | |||
146 | } while (old_max != cur_max); | 157 | } while (old_max != cur_max); |
147 | } | 158 | } |
148 | 159 | ||
149 | static bool page_zero_filled(void *ptr) | 160 | static inline void zram_fill_page(char *ptr, unsigned long len, |
161 | unsigned long value) | ||
162 | { | ||
163 | int i; | ||
164 | unsigned long *page = (unsigned long *)ptr; | ||
165 | |||
166 | WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long))); | ||
167 | |||
168 | if (likely(value == 0)) { | ||
169 | memset(ptr, 0, len); | ||
170 | } else { | ||
171 | for (i = 0; i < len / sizeof(*page); i++) | ||
172 | page[i] = value; | ||
173 | } | ||
174 | } | ||
175 | |||
176 | static bool page_same_filled(void *ptr, unsigned long *element) | ||
150 | { | 177 | { |
151 | unsigned int pos; | 178 | unsigned int pos; |
152 | unsigned long *page; | 179 | unsigned long *page; |
153 | 180 | ||
154 | page = (unsigned long *)ptr; | 181 | page = (unsigned long *)ptr; |
155 | 182 | ||
156 | for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) { | 183 | for (pos = 0; pos < PAGE_SIZE / sizeof(*page) - 1; pos++) { |
157 | if (page[pos]) | 184 | if (page[pos] != page[pos + 1]) |
158 | return false; | 185 | return false; |
159 | } | 186 | } |
160 | 187 | ||
188 | *element = page[pos]; | ||
189 | |||
161 | return true; | 190 | return true; |
162 | } | 191 | } |
163 | 192 | ||
164 | static void handle_zero_page(struct bio_vec *bvec) | 193 | static void handle_same_page(struct bio_vec *bvec, unsigned long element) |
165 | { | 194 | { |
166 | struct page *page = bvec->bv_page; | 195 | struct page *page = bvec->bv_page; |
167 | void *user_mem; | 196 | void *user_mem; |
168 | 197 | ||
169 | user_mem = kmap_atomic(page); | 198 | user_mem = kmap_atomic(page); |
170 | if (is_partial_io(bvec)) | 199 | zram_fill_page(user_mem + bvec->bv_offset, bvec->bv_len, element); |
171 | memset(user_mem + bvec->bv_offset, 0, bvec->bv_len); | ||
172 | else | ||
173 | clear_page(user_mem); | ||
174 | kunmap_atomic(user_mem); | 200 | kunmap_atomic(user_mem); |
175 | 201 | ||
176 | flush_dcache_page(page); | 202 | flush_dcache_page(page); |
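page_same_filled() above generalizes the old zero-page test: rather than checking every word against zero, it compares each word with its neighbour, so a page filled with any repeating unsigned long is detected and only that single element needs to be stored. A user-space sketch of the detect-and-refill pair, mirroring page_same_filled() and zram_fill_page():

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE	4096
#define WORDS		(PAGE_SIZE / sizeof(unsigned long))

/* Detect a page whose unsigned longs are all identical, zero or not. */
static bool page_same_filled(const unsigned long *page, unsigned long *element)
{
	for (size_t pos = 0; pos < WORDS - 1; pos++)
		if (page[pos] != page[pos + 1])
			return false;
	*element = page[WORDS - 1];
	return true;
}

/*
 * Rebuild the page from the stored element on read, as zram_fill_page()
 * does; the zero case keeps the cheap memset() fast path.
 */
static void fill_page(unsigned long *page, unsigned long element)
{
	if (element == 0) {
		memset(page, 0, PAGE_SIZE);
		return;
	}
	for (size_t i = 0; i < WORDS; i++)
		page[i] = element;
}

int main(void)
{
	unsigned long page[WORDS], element = 0;

	fill_page(page, 0x5a5a5a5aUL);
	printf("same=%d element=%#lx\n",
	       page_same_filled(page, &element), element);
	return 0;
}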
@@ -363,7 +389,7 @@ static ssize_t mm_stat_show(struct device *dev, | |||
363 | mem_used << PAGE_SHIFT, | 389 | mem_used << PAGE_SHIFT, |
364 | zram->limit_pages << PAGE_SHIFT, | 390 | zram->limit_pages << PAGE_SHIFT, |
365 | max_used << PAGE_SHIFT, | 391 | max_used << PAGE_SHIFT, |
366 | (u64)atomic64_read(&zram->stats.zero_pages), | 392 | (u64)atomic64_read(&zram->stats.same_pages), |
367 | pool_stats.pages_compacted); | 393 | pool_stats.pages_compacted); |
368 | up_read(&zram->init_lock); | 394 | up_read(&zram->init_lock); |
369 | 395 | ||
@@ -391,18 +417,6 @@ static DEVICE_ATTR_RO(io_stat); | |||
391 | static DEVICE_ATTR_RO(mm_stat); | 417 | static DEVICE_ATTR_RO(mm_stat); |
392 | static DEVICE_ATTR_RO(debug_stat); | 418 | static DEVICE_ATTR_RO(debug_stat); |
393 | 419 | ||
394 | static inline bool zram_meta_get(struct zram *zram) | ||
395 | { | ||
396 | if (atomic_inc_not_zero(&zram->refcount)) | ||
397 | return true; | ||
398 | return false; | ||
399 | } | ||
400 | |||
401 | static inline void zram_meta_put(struct zram *zram) | ||
402 | { | ||
403 | atomic_dec(&zram->refcount); | ||
404 | } | ||
405 | |||
406 | static void zram_meta_free(struct zram_meta *meta, u64 disksize) | 420 | static void zram_meta_free(struct zram_meta *meta, u64 disksize) |
407 | { | 421 | { |
408 | size_t num_pages = disksize >> PAGE_SHIFT; | 422 | size_t num_pages = disksize >> PAGE_SHIFT; |
@@ -411,8 +425,11 @@ static void zram_meta_free(struct zram_meta *meta, u64 disksize) | |||
411 | /* Free all pages that are still in this zram device */ | 425 | /* Free all pages that are still in this zram device */ |
412 | for (index = 0; index < num_pages; index++) { | 426 | for (index = 0; index < num_pages; index++) { |
413 | unsigned long handle = meta->table[index].handle; | 427 | unsigned long handle = meta->table[index].handle; |
414 | 428 | /* | |
415 | if (!handle) | 429 | * No memory is allocated for same element filled pages. |
430 | * Simply clear same page flag. | ||
431 | */ | ||
432 | if (!handle || zram_test_flag(meta, index, ZRAM_SAME)) | ||
416 | continue; | 433 | continue; |
417 | 434 | ||
418 | zs_free(meta->mem_pool, handle); | 435 | zs_free(meta->mem_pool, handle); |
@@ -462,18 +479,20 @@ static void zram_free_page(struct zram *zram, size_t index) | |||
462 | struct zram_meta *meta = zram->meta; | 479 | struct zram_meta *meta = zram->meta; |
463 | unsigned long handle = meta->table[index].handle; | 480 | unsigned long handle = meta->table[index].handle; |
464 | 481 | ||
465 | if (unlikely(!handle)) { | 482 | /* |
466 | /* | 483 | * No memory is allocated for same element filled pages. |
467 | * No memory is allocated for zero filled pages. | 484 | * Simply clear same page flag. |
468 | * Simply clear zero page flag. | 485 | */ |
469 | */ | 486 | if (zram_test_flag(meta, index, ZRAM_SAME)) { |
470 | if (zram_test_flag(meta, index, ZRAM_ZERO)) { | 487 | zram_clear_flag(meta, index, ZRAM_SAME); |
471 | zram_clear_flag(meta, index, ZRAM_ZERO); | 488 | zram_clear_element(meta, index); |
472 | atomic64_dec(&zram->stats.zero_pages); | 489 | atomic64_dec(&zram->stats.same_pages); |
473 | } | ||
474 | return; | 490 | return; |
475 | } | 491 | } |
476 | 492 | ||
493 | if (!handle) | ||
494 | return; | ||
495 | |||
477 | zs_free(meta->mem_pool, handle); | 496 | zs_free(meta->mem_pool, handle); |
478 | 497 | ||
479 | atomic64_sub(zram_get_obj_size(meta, index), | 498 | atomic64_sub(zram_get_obj_size(meta, index), |
@@ -496,9 +515,9 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index) | |||
496 | handle = meta->table[index].handle; | 515 | handle = meta->table[index].handle; |
497 | size = zram_get_obj_size(meta, index); | 516 | size = zram_get_obj_size(meta, index); |
498 | 517 | ||
499 | if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) { | 518 | if (!handle || zram_test_flag(meta, index, ZRAM_SAME)) { |
500 | bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); | 519 | bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); |
501 | clear_page(mem); | 520 | zram_fill_page(mem, PAGE_SIZE, meta->table[index].element); |
502 | return 0; | 521 | return 0; |
503 | } | 522 | } |
504 | 523 | ||
@@ -534,9 +553,9 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec, | |||
534 | 553 | ||
535 | bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); | 554 | bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); |
536 | if (unlikely(!meta->table[index].handle) || | 555 | if (unlikely(!meta->table[index].handle) || |
537 | zram_test_flag(meta, index, ZRAM_ZERO)) { | 556 | zram_test_flag(meta, index, ZRAM_SAME)) { |
538 | bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); | 557 | bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); |
539 | handle_zero_page(bvec); | 558 | handle_same_page(bvec, meta->table[index].element); |
540 | return 0; | 559 | return 0; |
541 | } | 560 | } |
542 | bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); | 561 | bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); |
@@ -584,6 +603,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index, | |||
584 | struct zram_meta *meta = zram->meta; | 603 | struct zram_meta *meta = zram->meta; |
585 | struct zcomp_strm *zstrm = NULL; | 604 | struct zcomp_strm *zstrm = NULL; |
586 | unsigned long alloced_pages; | 605 | unsigned long alloced_pages; |
606 | unsigned long element; | ||
587 | 607 | ||
588 | page = bvec->bv_page; | 608 | page = bvec->bv_page; |
589 | if (is_partial_io(bvec)) { | 609 | if (is_partial_io(bvec)) { |
@@ -612,16 +632,17 @@ compress_again: | |||
612 | uncmem = user_mem; | 632 | uncmem = user_mem; |
613 | } | 633 | } |
614 | 634 | ||
615 | if (page_zero_filled(uncmem)) { | 635 | if (page_same_filled(uncmem, &element)) { |
616 | if (user_mem) | 636 | if (user_mem) |
617 | kunmap_atomic(user_mem); | 637 | kunmap_atomic(user_mem); |
618 | /* Free memory associated with this sector now. */ | 638 | /* Free memory associated with this sector now. */ |
619 | bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); | 639 | bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); |
620 | zram_free_page(zram, index); | 640 | zram_free_page(zram, index); |
621 | zram_set_flag(meta, index, ZRAM_ZERO); | 641 | zram_set_flag(meta, index, ZRAM_SAME); |
642 | zram_set_element(meta, index, element); | ||
622 | bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); | 643 | bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); |
623 | 644 | ||
624 | atomic64_inc(&zram->stats.zero_pages); | 645 | atomic64_inc(&zram->stats.same_pages); |
625 | ret = 0; | 646 | ret = 0; |
626 | goto out; | 647 | goto out; |
627 | } | 648 | } |
@@ -859,22 +880,17 @@ static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio) | |||
859 | { | 880 | { |
860 | struct zram *zram = queue->queuedata; | 881 | struct zram *zram = queue->queuedata; |
861 | 882 | ||
862 | if (unlikely(!zram_meta_get(zram))) | ||
863 | goto error; | ||
864 | |||
865 | blk_queue_split(queue, &bio, queue->bio_split); | 883 | blk_queue_split(queue, &bio, queue->bio_split); |
866 | 884 | ||
867 | if (!valid_io_request(zram, bio->bi_iter.bi_sector, | 885 | if (!valid_io_request(zram, bio->bi_iter.bi_sector, |
868 | bio->bi_iter.bi_size)) { | 886 | bio->bi_iter.bi_size)) { |
869 | atomic64_inc(&zram->stats.invalid_io); | 887 | atomic64_inc(&zram->stats.invalid_io); |
870 | goto put_zram; | 888 | goto error; |
871 | } | 889 | } |
872 | 890 | ||
873 | __zram_make_request(zram, bio); | 891 | __zram_make_request(zram, bio); |
874 | zram_meta_put(zram); | ||
875 | return BLK_QC_T_NONE; | 892 | return BLK_QC_T_NONE; |
876 | put_zram: | 893 | |
877 | zram_meta_put(zram); | ||
878 | error: | 894 | error: |
879 | bio_io_error(bio); | 895 | bio_io_error(bio); |
880 | return BLK_QC_T_NONE; | 896 | return BLK_QC_T_NONE; |
@@ -904,13 +920,11 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector, | |||
904 | struct bio_vec bv; | 920 | struct bio_vec bv; |
905 | 921 | ||
906 | zram = bdev->bd_disk->private_data; | 922 | zram = bdev->bd_disk->private_data; |
907 | if (unlikely(!zram_meta_get(zram))) | ||
908 | goto out; | ||
909 | 923 | ||
910 | if (!valid_io_request(zram, sector, PAGE_SIZE)) { | 924 | if (!valid_io_request(zram, sector, PAGE_SIZE)) { |
911 | atomic64_inc(&zram->stats.invalid_io); | 925 | atomic64_inc(&zram->stats.invalid_io); |
912 | err = -EINVAL; | 926 | err = -EINVAL; |
913 | goto put_zram; | 927 | goto out; |
914 | } | 928 | } |
915 | 929 | ||
916 | index = sector >> SECTORS_PER_PAGE_SHIFT; | 930 | index = sector >> SECTORS_PER_PAGE_SHIFT; |
@@ -921,8 +935,6 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector, | |||
921 | bv.bv_offset = 0; | 935 | bv.bv_offset = 0; |
922 | 936 | ||
923 | err = zram_bvec_rw(zram, &bv, index, offset, is_write); | 937 | err = zram_bvec_rw(zram, &bv, index, offset, is_write); |
924 | put_zram: | ||
925 | zram_meta_put(zram); | ||
926 | out: | 938 | out: |
927 | /* | 939 | /* |
928 | * If I/O fails, just return error(ie, non-zero) without | 940 | * If I/O fails, just return error(ie, non-zero) without |
@@ -955,17 +967,6 @@ static void zram_reset_device(struct zram *zram) | |||
955 | meta = zram->meta; | 967 | meta = zram->meta; |
956 | comp = zram->comp; | 968 | comp = zram->comp; |
957 | disksize = zram->disksize; | 969 | disksize = zram->disksize; |
958 | /* | ||
959 | * Refcount will go down to 0 eventually and r/w handler | ||
960 | * cannot handle further I/O so it will bail out by | ||
961 | * check zram_meta_get. | ||
962 | */ | ||
963 | zram_meta_put(zram); | ||
964 | /* | ||
965 | * We want to free zram_meta in process context to avoid | ||
966 | * deadlock between reclaim path and any other locks. | ||
967 | */ | ||
968 | wait_event(zram->io_done, atomic_read(&zram->refcount) == 0); | ||
969 | 970 | ||
970 | /* Reset stats */ | 971 | /* Reset stats */ |
971 | memset(&zram->stats, 0, sizeof(zram->stats)); | 972 | memset(&zram->stats, 0, sizeof(zram->stats)); |
@@ -1013,8 +1014,6 @@ static ssize_t disksize_store(struct device *dev, | |||
1013 | goto out_destroy_comp; | 1014 | goto out_destroy_comp; |
1014 | } | 1015 | } |
1015 | 1016 | ||
1016 | init_waitqueue_head(&zram->io_done); | ||
1017 | atomic_set(&zram->refcount, 1); | ||
1018 | zram->meta = meta; | 1017 | zram->meta = meta; |
1019 | zram->comp = comp; | 1018 | zram->comp = comp; |
1020 | zram->disksize = disksize; | 1019 | zram->disksize = disksize; |
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h index 74fcf10da374..caeff51f1571 100644 --- a/drivers/block/zram/zram_drv.h +++ b/drivers/block/zram/zram_drv.h | |||
@@ -61,7 +61,7 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3; | |||
61 | /* Flags for zram pages (table[page_no].value) */ | 61 | /* Flags for zram pages (table[page_no].value) */ |
62 | enum zram_pageflags { | 62 | enum zram_pageflags { |
63 | /* Page consists entirely of zeros */ | 63 | /* Page consists entirely of zeros */ |
64 | ZRAM_ZERO = ZRAM_FLAG_SHIFT, | 64 | ZRAM_SAME = ZRAM_FLAG_SHIFT, |
65 | ZRAM_ACCESS, /* page is now accessed */ | 65 | ZRAM_ACCESS, /* page is now accessed */ |
66 | 66 | ||
67 | __NR_ZRAM_PAGEFLAGS, | 67 | __NR_ZRAM_PAGEFLAGS, |
@@ -71,7 +71,10 @@ enum zram_pageflags { | |||
71 | 71 | ||
72 | /* Allocated for each disk page */ | 72 | /* Allocated for each disk page */ |
73 | struct zram_table_entry { | 73 | struct zram_table_entry { |
74 | unsigned long handle; | 74 | union { |
75 | unsigned long handle; | ||
76 | unsigned long element; | ||
77 | }; | ||
75 | unsigned long value; | 78 | unsigned long value; |
76 | }; | 79 | }; |
77 | 80 | ||
@@ -83,7 +86,7 @@ struct zram_stats { | |||
83 | atomic64_t failed_writes; /* can happen when memory is too low */ | 86 | atomic64_t failed_writes; /* can happen when memory is too low */ |
84 | atomic64_t invalid_io; /* non-page-aligned I/O requests */ | 87 | atomic64_t invalid_io; /* non-page-aligned I/O requests */ |
85 | atomic64_t notify_free; /* no. of swap slot free notifications */ | 88 | atomic64_t notify_free; /* no. of swap slot free notifications */ |
86 | atomic64_t zero_pages; /* no. of zero filled pages */ | 89 | atomic64_t same_pages; /* no. of same element filled pages */ |
87 | atomic64_t pages_stored; /* no. of pages currently stored */ | 90 | atomic64_t pages_stored; /* no. of pages currently stored */ |
88 | atomic_long_t max_used_pages; /* no. of maximum pages stored */ | 91 | atomic_long_t max_used_pages; /* no. of maximum pages stored */ |
89 | atomic64_t writestall; /* no. of write slow paths */ | 92 | atomic64_t writestall; /* no. of write slow paths */ |
@@ -106,9 +109,6 @@ struct zram { | |||
106 | unsigned long limit_pages; | 109 | unsigned long limit_pages; |
107 | 110 | ||
108 | struct zram_stats stats; | 111 | struct zram_stats stats; |
109 | atomic_t refcount; /* refcount for zram_meta */ | ||
110 | /* wait all IO under all of cpu are done */ | ||
111 | wait_queue_head_t io_done; | ||
112 | /* | 112 | /* |
113 | * This is the limit on amount of *uncompressed* worth of data | 113 | * This is the limit on amount of *uncompressed* worth of data |
114 | * we can store in a disk. | 114 | * we can store in a disk. |
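The union above is what makes same-filled pages free to store: a table slot holds either a zsmalloc handle or, when ZRAM_SAME is set in value, the repeated element itself, so no extra per-page field is needed. A minimal sketch of that pattern (the flag bit below is chosen arbitrarily for illustration):

#include <stdio.h>

#define ZRAM_SAME_BIT	(1UL << 0)	/* illustrative; real flags sit above ZRAM_FLAG_SHIFT */

struct table_entry {
	union {
		unsigned long handle;	/* valid when the page was compressed */
		unsigned long element;	/* valid when ZRAM_SAME is set in value */
	};
	unsigned long value;		/* flags plus object size */
};

int main(void)
{
	struct table_entry e = { .element = 0xfefefefe, .value = ZRAM_SAME_BIT };

	if (e.value & ZRAM_SAME_BIT)
		printf("same-filled page, element=%#lx\n", e.element);
	else
		printf("compressed page, handle=%#lx\n", e.handle);
	return 0;
}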
diff --git a/drivers/char/agp/alpha-agp.c b/drivers/char/agp/alpha-agp.c index 737187865269..53fe633df1e8 100644 --- a/drivers/char/agp/alpha-agp.c +++ b/drivers/char/agp/alpha-agp.c | |||
@@ -11,15 +11,14 @@ | |||
11 | 11 | ||
12 | #include "agp.h" | 12 | #include "agp.h" |
13 | 13 | ||
14 | static int alpha_core_agp_vm_fault(struct vm_area_struct *vma, | 14 | static int alpha_core_agp_vm_fault(struct vm_fault *vmf) |
15 | struct vm_fault *vmf) | ||
16 | { | 15 | { |
17 | alpha_agp_info *agp = agp_bridge->dev_private_data; | 16 | alpha_agp_info *agp = agp_bridge->dev_private_data; |
18 | dma_addr_t dma_addr; | 17 | dma_addr_t dma_addr; |
19 | unsigned long pa; | 18 | unsigned long pa; |
20 | struct page *page; | 19 | struct page *page; |
21 | 20 | ||
22 | dma_addr = vmf->address - vma->vm_start + agp->aperture.bus_base; | 21 | dma_addr = vmf->address - vmf->vma->vm_start + agp->aperture.bus_base; |
23 | pa = agp->ops->translate(agp, dma_addr); | 22 | pa = agp->ops->translate(agp, dma_addr); |
24 | 23 | ||
25 | if (pa == (unsigned long)-EINVAL) | 24 | if (pa == (unsigned long)-EINVAL) |
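
This alpha-agp hunk is the template for most of the driver churn in this series: ->fault handlers lose their vm_area_struct argument because struct vm_fault now carries the VMA as vmf->vma. A hedged sketch of the new handler shape, with illustrative names (handlers that touch the VMA heavily usually just open with a local alias, as many hunks below do):

    #include <linux/mm.h>

    /* Not from this commit: the post-change ->fault prototype. */
    static int example_fault(struct vm_fault *vmf)
    {
        struct vm_area_struct *vma = vmf->vma;  /* formerly a parameter */
        unsigned long offset = vmf->address - vma->vm_start;

        /* 'offset' would index the backing store here */
        (void)offset;
        return VM_FAULT_SIGBUS;
    }

    static const struct vm_operations_struct example_vm_ops = {
        .fault = example_fault,
    };
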
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c index a697ca0cab1e..a9c2fa3c81e5 100644 --- a/drivers/char/mspec.c +++ b/drivers/char/mspec.c | |||
@@ -191,12 +191,12 @@ mspec_close(struct vm_area_struct *vma) | |||
191 | * Creates a mspec page and maps it to user space. | 191 | * Creates a mspec page and maps it to user space. |
192 | */ | 192 | */ |
193 | static int | 193 | static int |
194 | mspec_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 194 | mspec_fault(struct vm_fault *vmf) |
195 | { | 195 | { |
196 | unsigned long paddr, maddr; | 196 | unsigned long paddr, maddr; |
197 | unsigned long pfn; | 197 | unsigned long pfn; |
198 | pgoff_t index = vmf->pgoff; | 198 | pgoff_t index = vmf->pgoff; |
199 | struct vma_data *vdata = vma->vm_private_data; | 199 | struct vma_data *vdata = vmf->vma->vm_private_data; |
200 | 200 | ||
201 | maddr = (volatile unsigned long) vdata->maddr[index]; | 201 | maddr = (volatile unsigned long) vdata->maddr[index]; |
202 | if (maddr == 0) { | 202 | if (maddr == 0) { |
@@ -227,7 +227,7 @@ mspec_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
227 | * be because another thread has installed the pte first, so it | 227 | * be because another thread has installed the pte first, so it |
228 | * is no problem. | 228 | * is no problem. |
229 | */ | 229 | */ |
230 | vm_insert_pfn(vma, vmf->address, pfn); | 230 | vm_insert_pfn(vmf->vma, vmf->address, pfn); |
231 | 231 | ||
232 | return VM_FAULT_NOPAGE; | 232 | return VM_FAULT_NOPAGE; |
233 | } | 233 | } |
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c index 18e9875f6277..b75c77254fdb 100644 --- a/drivers/dax/dax.c +++ b/drivers/dax/dax.c | |||
@@ -419,8 +419,7 @@ static phys_addr_t pgoff_to_phys(struct dax_dev *dax_dev, pgoff_t pgoff, | |||
419 | return -1; | 419 | return -1; |
420 | } | 420 | } |
421 | 421 | ||
422 | static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma, | 422 | static int __dax_dev_pte_fault(struct dax_dev *dax_dev, struct vm_fault *vmf) |
423 | struct vm_fault *vmf) | ||
424 | { | 423 | { |
425 | struct device *dev = &dax_dev->dev; | 424 | struct device *dev = &dax_dev->dev; |
426 | struct dax_region *dax_region; | 425 | struct dax_region *dax_region; |
@@ -428,7 +427,7 @@ static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma, | |||
428 | phys_addr_t phys; | 427 | phys_addr_t phys; |
429 | pfn_t pfn; | 428 | pfn_t pfn; |
430 | 429 | ||
431 | if (check_vma(dax_dev, vma, __func__)) | 430 | if (check_vma(dax_dev, vmf->vma, __func__)) |
432 | return VM_FAULT_SIGBUS; | 431 | return VM_FAULT_SIGBUS; |
433 | 432 | ||
434 | dax_region = dax_dev->region; | 433 | dax_region = dax_dev->region; |
@@ -446,7 +445,7 @@ static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma, | |||
446 | 445 | ||
447 | pfn = phys_to_pfn_t(phys, dax_region->pfn_flags); | 446 | pfn = phys_to_pfn_t(phys, dax_region->pfn_flags); |
448 | 447 | ||
449 | rc = vm_insert_mixed(vma, vmf->address, pfn); | 448 | rc = vm_insert_mixed(vmf->vma, vmf->address, pfn); |
450 | 449 | ||
451 | if (rc == -ENOMEM) | 450 | if (rc == -ENOMEM) |
452 | return VM_FAULT_OOM; | 451 | return VM_FAULT_OOM; |
@@ -456,22 +455,6 @@ static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma, | |||
456 | return VM_FAULT_NOPAGE; | 455 | return VM_FAULT_NOPAGE; |
457 | } | 456 | } |
458 | 457 | ||
459 | static int dax_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | ||
460 | { | ||
461 | int rc; | ||
462 | struct file *filp = vma->vm_file; | ||
463 | struct dax_dev *dax_dev = filp->private_data; | ||
464 | |||
465 | dev_dbg(&dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__, | ||
466 | current->comm, (vmf->flags & FAULT_FLAG_WRITE) | ||
467 | ? "write" : "read", vma->vm_start, vma->vm_end); | ||
468 | rcu_read_lock(); | ||
469 | rc = __dax_dev_fault(dax_dev, vma, vmf); | ||
470 | rcu_read_unlock(); | ||
471 | |||
472 | return rc; | ||
473 | } | ||
474 | |||
475 | static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf) | 458 | static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf) |
476 | { | 459 | { |
477 | unsigned long pmd_addr = vmf->address & PMD_MASK; | 460 | unsigned long pmd_addr = vmf->address & PMD_MASK; |
@@ -510,7 +493,53 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf) | |||
510 | vmf->flags & FAULT_FLAG_WRITE); | 493 | vmf->flags & FAULT_FLAG_WRITE); |
511 | } | 494 | } |
512 | 495 | ||
513 | static int dax_dev_pmd_fault(struct vm_fault *vmf) | 496 | #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD |
497 | static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf) | ||
498 | { | ||
499 | unsigned long pud_addr = vmf->address & PUD_MASK; | ||
500 | struct device *dev = &dax_dev->dev; | ||
501 | struct dax_region *dax_region; | ||
502 | phys_addr_t phys; | ||
503 | pgoff_t pgoff; | ||
504 | pfn_t pfn; | ||
505 | |||
506 | if (check_vma(dax_dev, vmf->vma, __func__)) | ||
507 | return VM_FAULT_SIGBUS; | ||
508 | |||
509 | dax_region = dax_dev->region; | ||
510 | if (dax_region->align > PUD_SIZE) { | ||
511 | dev_dbg(dev, "%s: alignment > fault size\n", __func__); | ||
512 | return VM_FAULT_SIGBUS; | ||
513 | } | ||
514 | |||
515 | /* dax pud mappings require pfn_t_devmap() */ | ||
516 | if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) { | ||
517 | dev_dbg(dev, "%s: alignment > fault size\n", __func__); | ||
518 | return VM_FAULT_SIGBUS; | ||
519 | } | ||
520 | |||
521 | pgoff = linear_page_index(vmf->vma, pud_addr); | ||
522 | phys = pgoff_to_phys(dax_dev, pgoff, PUD_SIZE); | ||
523 | if (phys == -1) { | ||
524 | dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__, | ||
525 | pgoff); | ||
526 | return VM_FAULT_SIGBUS; | ||
527 | } | ||
528 | |||
529 | pfn = phys_to_pfn_t(phys, dax_region->pfn_flags); | ||
530 | |||
531 | return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, pfn, | ||
532 | vmf->flags & FAULT_FLAG_WRITE); | ||
533 | } | ||
534 | #else | ||
535 | static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf) | ||
536 | { | ||
537 | return VM_FAULT_FALLBACK; | ||
538 | } | ||
539 | #endif /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ | ||
540 | |||
541 | static int dax_dev_huge_fault(struct vm_fault *vmf, | ||
542 | enum page_entry_size pe_size) | ||
514 | { | 543 | { |
515 | int rc; | 544 | int rc; |
516 | struct file *filp = vmf->vma->vm_file; | 545 | struct file *filp = vmf->vma->vm_file; |
@@ -522,15 +551,32 @@ static int dax_dev_pmd_fault(struct vm_fault *vmf) | |||
522 | vmf->vma->vm_start, vmf->vma->vm_end); | 551 | vmf->vma->vm_start, vmf->vma->vm_end); |
523 | 552 | ||
524 | rcu_read_lock(); | 553 | rcu_read_lock(); |
525 | rc = __dax_dev_pmd_fault(dax_dev, vmf); | 554 | switch (pe_size) { |
555 | case PE_SIZE_PTE: | ||
556 | rc = __dax_dev_pte_fault(dax_dev, vmf); | ||
557 | break; | ||
558 | case PE_SIZE_PMD: | ||
559 | rc = __dax_dev_pmd_fault(dax_dev, vmf); | ||
560 | break; | ||
561 | case PE_SIZE_PUD: | ||
562 | rc = __dax_dev_pud_fault(dax_dev, vmf); | ||
563 | break; | ||
564 | default: | ||
565 | return VM_FAULT_FALLBACK; | ||
566 | } | ||
526 | rcu_read_unlock(); | 567 | rcu_read_unlock(); |
527 | 568 | ||
528 | return rc; | 569 | return rc; |
529 | } | 570 | } |
530 | 571 | ||
572 | static int dax_dev_fault(struct vm_fault *vmf) | ||
573 | { | ||
574 | return dax_dev_huge_fault(vmf, PE_SIZE_PTE); | ||
575 | } | ||
576 | |||
531 | static const struct vm_operations_struct dax_dev_vm_ops = { | 577 | static const struct vm_operations_struct dax_dev_vm_ops = { |
532 | .fault = dax_dev_fault, | 578 | .fault = dax_dev_fault, |
533 | .pmd_fault = dax_dev_pmd_fault, | 579 | .huge_fault = dax_dev_huge_fault, |
534 | }; | 580 | }; |
535 | 581 | ||
536 | static int dax_mmap(struct file *filp, struct vm_area_struct *vma) | 582 | static int dax_mmap(struct file *filp, struct vm_area_struct *vma) |
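
The device-dax rework above also replaces the dedicated .pmd_fault hook with a single .huge_fault callback that is told the fault granularity; returning VM_FAULT_FALLBACK asks the core to retry at the next smaller size. A sketch of that contract with hypothetical handler names (not the driver's code):

    #include <linux/mm.h>

    /* Size-specific paths would go here; stubs keep the sketch buildable. */
    static int demo_pte_fault(struct vm_fault *vmf) { return VM_FAULT_SIGBUS; }
    static int demo_pmd_fault(struct vm_fault *vmf) { return VM_FAULT_FALLBACK; }
    static int demo_pud_fault(struct vm_fault *vmf) { return VM_FAULT_FALLBACK; }

    static int demo_huge_fault(struct vm_fault *vmf,
                               enum page_entry_size pe_size)
    {
        switch (pe_size) {
        case PE_SIZE_PTE:
            return demo_pte_fault(vmf);
        case PE_SIZE_PMD:
            return demo_pmd_fault(vmf);
        case PE_SIZE_PUD:
            return demo_pud_fault(vmf); /* stub without ..._HUGEPAGE_PUD */
        }
        return VM_FAULT_FALLBACK;
    }

    static int demo_fault(struct vm_fault *vmf)
    {
        return demo_huge_fault(vmf, PE_SIZE_PTE);
    }

    static const struct vm_operations_struct demo_vm_ops = {
        .fault      = demo_fault,
        .huge_fault = demo_huge_fault,
    };

As in the dax hunk, PUD support compiles to a fallback stub unless the architecture provides transparent huge PUD mappings.
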
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c index 560d416deab2..1597458d884e 100644 --- a/drivers/gpu/drm/armada/armada_gem.c +++ b/drivers/gpu/drm/armada/armada_gem.c | |||
@@ -14,14 +14,15 @@ | |||
14 | #include <drm/armada_drm.h> | 14 | #include <drm/armada_drm.h> |
15 | #include "armada_ioctlP.h" | 15 | #include "armada_ioctlP.h" |
16 | 16 | ||
17 | static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 17 | static int armada_gem_vm_fault(struct vm_fault *vmf) |
18 | { | 18 | { |
19 | struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data); | 19 | struct drm_gem_object *gobj = vmf->vma->vm_private_data; |
20 | struct armada_gem_object *obj = drm_to_armada_gem(gobj); | ||
20 | unsigned long pfn = obj->phys_addr >> PAGE_SHIFT; | 21 | unsigned long pfn = obj->phys_addr >> PAGE_SHIFT; |
21 | int ret; | 22 | int ret; |
22 | 23 | ||
23 | pfn += (vmf->address - vma->vm_start) >> PAGE_SHIFT; | 24 | pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT; |
24 | ret = vm_insert_pfn(vma, vmf->address, pfn); | 25 | ret = vm_insert_pfn(vmf->vma, vmf->address, pfn); |
25 | 26 | ||
26 | switch (ret) { | 27 | switch (ret) { |
27 | case 0: | 28 | case 0: |
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index bd311c77c254..1170b3209a12 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c | |||
@@ -96,8 +96,9 @@ static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma) | |||
96 | * map, get the page, increment the use count and return it. | 96 | * map, get the page, increment the use count and return it. |
97 | */ | 97 | */ |
98 | #if IS_ENABLED(CONFIG_AGP) | 98 | #if IS_ENABLED(CONFIG_AGP) |
99 | static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 99 | static int drm_vm_fault(struct vm_fault *vmf) |
100 | { | 100 | { |
101 | struct vm_area_struct *vma = vmf->vma; | ||
101 | struct drm_file *priv = vma->vm_file->private_data; | 102 | struct drm_file *priv = vma->vm_file->private_data; |
102 | struct drm_device *dev = priv->minor->dev; | 103 | struct drm_device *dev = priv->minor->dev; |
103 | struct drm_local_map *map = NULL; | 104 | struct drm_local_map *map = NULL; |
@@ -168,7 +169,7 @@ vm_fault_error: | |||
168 | return VM_FAULT_SIGBUS; /* Disallow mremap */ | 169 | return VM_FAULT_SIGBUS; /* Disallow mremap */ |
169 | } | 170 | } |
170 | #else | 171 | #else |
171 | static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 172 | static int drm_vm_fault(struct vm_fault *vmf) |
172 | { | 173 | { |
173 | return VM_FAULT_SIGBUS; | 174 | return VM_FAULT_SIGBUS; |
174 | } | 175 | } |
@@ -184,8 +185,9 @@ static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
184 | * Get the mapping, find the real physical page to map, get the page, and | 185 | * Get the mapping, find the real physical page to map, get the page, and |
185 | * return it. | 186 | * return it. |
186 | */ | 187 | */ |
187 | static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 188 | static int drm_vm_shm_fault(struct vm_fault *vmf) |
188 | { | 189 | { |
190 | struct vm_area_struct *vma = vmf->vma; | ||
189 | struct drm_local_map *map = vma->vm_private_data; | 191 | struct drm_local_map *map = vma->vm_private_data; |
190 | unsigned long offset; | 192 | unsigned long offset; |
191 | unsigned long i; | 193 | unsigned long i; |
@@ -280,14 +282,14 @@ static void drm_vm_shm_close(struct vm_area_struct *vma) | |||
280 | /** | 282 | /** |
281 | * \c fault method for DMA virtual memory. | 283 | * \c fault method for DMA virtual memory. |
282 | * | 284 | * |
283 | * \param vma virtual memory area. | ||
284 | * \param address access address. | 285 | * \param address access address. |
285 | * \return pointer to the page structure. | 286 | * \return pointer to the page structure. |
286 | * | 287 | * |
287 | * Determine the page number from the page offset and get it from drm_device_dma::pagelist. | 288 | * Determine the page number from the page offset and get it from drm_device_dma::pagelist. |
288 | */ | 289 | */ |
289 | static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 290 | static int drm_vm_dma_fault(struct vm_fault *vmf) |
290 | { | 291 | { |
292 | struct vm_area_struct *vma = vmf->vma; | ||
291 | struct drm_file *priv = vma->vm_file->private_data; | 293 | struct drm_file *priv = vma->vm_file->private_data; |
292 | struct drm_device *dev = priv->minor->dev; | 294 | struct drm_device *dev = priv->minor->dev; |
293 | struct drm_device_dma *dma = dev->dma; | 295 | struct drm_device_dma *dma = dev->dma; |
@@ -315,14 +317,14 @@ static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
315 | /** | 317 | /** |
316 | * \c fault method for scatter-gather virtual memory. | 318 | * \c fault method for scatter-gather virtual memory. |
317 | * | 319 | * |
318 | * \param vma virtual memory area. | ||
319 | * \param address access address. | 320 | * \param address access address. |
320 | * \return pointer to the page structure. | 321 | * \return pointer to the page structure. |
321 | * | 322 | * |
322 | * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist. | 323 | * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist. |
323 | */ | 324 | */ |
324 | static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 325 | static int drm_vm_sg_fault(struct vm_fault *vmf) |
325 | { | 326 | { |
327 | struct vm_area_struct *vma = vmf->vma; | ||
326 | struct drm_local_map *map = vma->vm_private_data; | 328 | struct drm_local_map *map = vma->vm_private_data; |
327 | struct drm_file *priv = vma->vm_file->private_data; | 329 | struct drm_file *priv = vma->vm_file->private_data; |
328 | struct drm_device *dev = priv->minor->dev; | 330 | struct drm_device *dev = priv->minor->dev; |
@@ -347,26 +349,6 @@ static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
347 | return 0; | 349 | return 0; |
348 | } | 350 | } |
349 | 351 | ||
350 | static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | ||
351 | { | ||
352 | return drm_do_vm_fault(vma, vmf); | ||
353 | } | ||
354 | |||
355 | static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | ||
356 | { | ||
357 | return drm_do_vm_shm_fault(vma, vmf); | ||
358 | } | ||
359 | |||
360 | static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | ||
361 | { | ||
362 | return drm_do_vm_dma_fault(vma, vmf); | ||
363 | } | ||
364 | |||
365 | static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | ||
366 | { | ||
367 | return drm_do_vm_sg_fault(vma, vmf); | ||
368 | } | ||
369 | |||
370 | /** AGP virtual memory operations */ | 352 | /** AGP virtual memory operations */ |
371 | static const struct vm_operations_struct drm_vm_ops = { | 353 | static const struct vm_operations_struct drm_vm_ops = { |
372 | .fault = drm_vm_fault, | 354 | .fault = drm_vm_fault, |
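
Two things fall out of the prototype change in drm_vm.c: the four one-line adapter wrappers become pointless (the do_ variants are renamed and plug straight into the ops tables), and the existing CONFIG_AGP stub split carries over unchanged. An illustrative sketch of that stub pattern, not the driver's code:

    #include <linux/mm.h>

    /*
     * The ops table always points at one name; the disabled
     * configuration supplies a SIGBUS stub instead of #ifdefs
     * in the table itself.
     */
    #if IS_ENABLED(CONFIG_AGP)
    static int demo_agp_fault(struct vm_fault *vmf)
    {
        /* translate vmf->address through the AGP aperture here */
        return VM_FAULT_NOPAGE;
    }
    #else
    static int demo_agp_fault(struct vm_fault *vmf)
    {
        return VM_FAULT_SIGBUS;
    }
    #endif
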
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h index c255eda40526..e41f38667c1c 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h | |||
@@ -73,7 +73,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, | |||
73 | struct drm_file *file); | 73 | struct drm_file *file); |
74 | 74 | ||
75 | int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma); | 75 | int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma); |
76 | int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); | 76 | int etnaviv_gem_fault(struct vm_fault *vmf); |
77 | int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset); | 77 | int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset); |
78 | struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj); | 78 | struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj); |
79 | void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj); | 79 | void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj); |
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index aa6e35ddc87f..e78f1406885d 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c | |||
@@ -175,8 +175,9 @@ int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma) | |||
175 | return obj->ops->mmap(obj, vma); | 175 | return obj->ops->mmap(obj, vma); |
176 | } | 176 | } |
177 | 177 | ||
178 | int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 178 | int etnaviv_gem_fault(struct vm_fault *vmf) |
179 | { | 179 | { |
180 | struct vm_area_struct *vma = vmf->vma; | ||
180 | struct drm_gem_object *obj = vma->vm_private_data; | 181 | struct drm_gem_object *obj = vma->vm_private_data; |
181 | struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); | 182 | struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); |
182 | struct page **pages, *page; | 183 | struct page **pages, *page; |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index 57b81460fec8..4c28f7ffcc4d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c | |||
@@ -447,8 +447,9 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv, | |||
447 | return ret; | 447 | return ret; |
448 | } | 448 | } |
449 | 449 | ||
450 | int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 450 | int exynos_drm_gem_fault(struct vm_fault *vmf) |
451 | { | 451 | { |
452 | struct vm_area_struct *vma = vmf->vma; | ||
452 | struct drm_gem_object *obj = vma->vm_private_data; | 453 | struct drm_gem_object *obj = vma->vm_private_data; |
453 | struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj); | 454 | struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj); |
454 | unsigned long pfn; | 455 | unsigned long pfn; |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index df7c543d6558..85457255fcd1 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h | |||
@@ -116,7 +116,7 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv, | |||
116 | uint64_t *offset); | 116 | uint64_t *offset); |
117 | 117 | ||
118 | /* page fault handler and mmap fault address(virtual) to physical memory. */ | 118 | /* page fault handler and mmap fault address(virtual) to physical memory. */ |
119 | int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); | 119 | int exynos_drm_gem_fault(struct vm_fault *vmf); |
120 | 120 | ||
121 | /* set vm_flags and we can change the vm attribute to other one at here. */ | 121 | /* set vm_flags and we can change the vm attribute to other one at here. */ |
122 | int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); | 122 | int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); |
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c index da42d2e1d397..ffe6b4ffa1a8 100644 --- a/drivers/gpu/drm/gma500/framebuffer.c +++ b/drivers/gpu/drm/gma500/framebuffer.c | |||
@@ -111,8 +111,9 @@ static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info) | |||
111 | return 0; | 111 | return 0; |
112 | } | 112 | } |
113 | 113 | ||
114 | static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 114 | static int psbfb_vm_fault(struct vm_fault *vmf) |
115 | { | 115 | { |
116 | struct vm_area_struct *vma = vmf->vma; | ||
116 | struct psb_framebuffer *psbfb = vma->vm_private_data; | 117 | struct psb_framebuffer *psbfb = vma->vm_private_data; |
117 | struct drm_device *dev = psbfb->base.dev; | 118 | struct drm_device *dev = psbfb->base.dev; |
118 | struct drm_psb_private *dev_priv = dev->dev_private; | 119 | struct drm_psb_private *dev_priv = dev->dev_private; |
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c index 527c62917660..7da061aab729 100644 --- a/drivers/gpu/drm/gma500/gem.c +++ b/drivers/gpu/drm/gma500/gem.c | |||
@@ -164,8 +164,9 @@ int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev, | |||
164 | * vma->vm_private_data points to the GEM object that is backing this | 164 | * vma->vm_private_data points to the GEM object that is backing this |
165 | * mapping. | 165 | * mapping. |
166 | */ | 166 | */ |
167 | int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 167 | int psb_gem_fault(struct vm_fault *vmf) |
168 | { | 168 | { |
169 | struct vm_area_struct *vma = vmf->vma; | ||
169 | struct drm_gem_object *obj; | 170 | struct drm_gem_object *obj; |
170 | struct gtt_range *r; | 171 | struct gtt_range *r; |
171 | int ret; | 172 | int ret; |
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h index 05d7aaf47eea..83e22fd4cfc0 100644 --- a/drivers/gpu/drm/gma500/psb_drv.h +++ b/drivers/gpu/drm/gma500/psb_drv.h | |||
@@ -752,7 +752,7 @@ extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev, | |||
752 | struct drm_mode_create_dumb *args); | 752 | struct drm_mode_create_dumb *args); |
753 | extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev, | 753 | extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev, |
754 | uint32_t handle, uint64_t *offset); | 754 | uint32_t handle, uint64_t *offset); |
755 | extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); | 755 | extern int psb_gem_fault(struct vm_fault *vmf); |
756 | 756 | ||
757 | /* psb_device.c */ | 757 | /* psb_device.c */ |
758 | extern const struct psb_ops psb_chip_ops; | 758 | extern const struct psb_ops psb_chip_ops; |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index bcc81912b5e5..0a4b42d31391 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -3352,7 +3352,7 @@ int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, | |||
3352 | unsigned int flags); | 3352 | unsigned int flags); |
3353 | int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv); | 3353 | int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv); |
3354 | void i915_gem_resume(struct drm_i915_private *dev_priv); | 3354 | void i915_gem_resume(struct drm_i915_private *dev_priv); |
3355 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); | 3355 | int i915_gem_fault(struct vm_fault *vmf); |
3356 | int i915_gem_object_wait(struct drm_i915_gem_object *obj, | 3356 | int i915_gem_object_wait(struct drm_i915_gem_object *obj, |
3357 | unsigned int flags, | 3357 | unsigned int flags, |
3358 | long timeout, | 3358 | long timeout, |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 88f3628b4e29..6908123162d1 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -1772,7 +1772,6 @@ compute_partial_view(struct drm_i915_gem_object *obj, | |||
1772 | 1772 | ||
1773 | /** | 1773 | /** |
1774 | * i915_gem_fault - fault a page into the GTT | 1774 | * i915_gem_fault - fault a page into the GTT |
1775 | * @area: CPU VMA in question | ||
1776 | * @vmf: fault info | 1775 | * @vmf: fault info |
1777 | * | 1776 | * |
1778 | * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped | 1777 | * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped |
@@ -1789,9 +1788,10 @@ compute_partial_view(struct drm_i915_gem_object *obj, | |||
1789 | * The current feature set supported by i915_gem_fault() and thus GTT mmaps | 1788 | * The current feature set supported by i915_gem_fault() and thus GTT mmaps |
1790 | * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version). | 1789 | * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version). |
1791 | */ | 1790 | */ |
1792 | int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf) | 1791 | int i915_gem_fault(struct vm_fault *vmf) |
1793 | { | 1792 | { |
1794 | #define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */ | 1793 | #define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */ |
1794 | struct vm_area_struct *area = vmf->vma; | ||
1795 | struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data); | 1795 | struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data); |
1796 | struct drm_device *dev = obj->base.dev; | 1796 | struct drm_device *dev = obj->base.dev; |
1797 | struct drm_i915_private *dev_priv = to_i915(dev); | 1797 | struct drm_i915_private *dev_priv = to_i915(dev); |
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index cdd7b2f8e977..c3b14876edaa 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h | |||
@@ -206,7 +206,7 @@ void msm_gem_shrinker_cleanup(struct drm_device *dev); | |||
206 | int msm_gem_mmap_obj(struct drm_gem_object *obj, | 206 | int msm_gem_mmap_obj(struct drm_gem_object *obj, |
207 | struct vm_area_struct *vma); | 207 | struct vm_area_struct *vma); |
208 | int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma); | 208 | int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma); |
209 | int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); | 209 | int msm_gem_fault(struct vm_fault *vmf); |
210 | uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj); | 210 | uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj); |
211 | int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id, | 211 | int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id, |
212 | uint64_t *iova); | 212 | uint64_t *iova); |
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index e140b05af134..59811f29607d 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c | |||
@@ -191,8 +191,9 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma) | |||
191 | return msm_gem_mmap_obj(vma->vm_private_data, vma); | 191 | return msm_gem_mmap_obj(vma->vm_private_data, vma); |
192 | } | 192 | } |
193 | 193 | ||
194 | int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 194 | int msm_gem_fault(struct vm_fault *vmf) |
195 | { | 195 | { |
196 | struct vm_area_struct *vma = vmf->vma; | ||
196 | struct drm_gem_object *obj = vma->vm_private_data; | 197 | struct drm_gem_object *obj = vma->vm_private_data; |
197 | struct drm_device *dev = obj->dev; | 198 | struct drm_device *dev = obj->dev; |
198 | struct msm_drm_private *priv = dev->dev_private; | 199 | struct msm_drm_private *priv = dev->dev_private; |
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h index 36d93ce84a29..65977982f15f 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.h +++ b/drivers/gpu/drm/omapdrm/omap_drv.h | |||
@@ -188,7 +188,7 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev, | |||
188 | int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma); | 188 | int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma); |
189 | int omap_gem_mmap_obj(struct drm_gem_object *obj, | 189 | int omap_gem_mmap_obj(struct drm_gem_object *obj, |
190 | struct vm_area_struct *vma); | 190 | struct vm_area_struct *vma); |
191 | int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); | 191 | int omap_gem_fault(struct vm_fault *vmf); |
192 | int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op); | 192 | int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op); |
193 | int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op); | 193 | int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op); |
194 | int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op); | 194 | int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op); |
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index 74a9968df421..5d5a9f517c30 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c | |||
@@ -518,7 +518,6 @@ static int fault_2d(struct drm_gem_object *obj, | |||
518 | 518 | ||
519 | /** | 519 | /** |
520 | * omap_gem_fault - pagefault handler for GEM objects | 520 | * omap_gem_fault - pagefault handler for GEM objects |
521 | * @vma: the VMA of the GEM object | ||
522 | * @vmf: fault detail | 521 | * @vmf: fault detail |
523 | * | 522 | * |
524 | * Invoked when a fault occurs on an mmap of a GEM managed area. GEM | 523 | * Invoked when a fault occurs on an mmap of a GEM managed area. GEM |
@@ -529,8 +528,9 @@ static int fault_2d(struct drm_gem_object *obj, | |||
529 | * vma->vm_private_data points to the GEM object that is backing this | 528 | * vma->vm_private_data points to the GEM object that is backing this |
530 | * mapping. | 529 | * mapping. |
531 | */ | 530 | */ |
532 | int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 531 | int omap_gem_fault(struct vm_fault *vmf) |
533 | { | 532 | { |
533 | struct vm_area_struct *vma = vmf->vma; | ||
534 | struct drm_gem_object *obj = vma->vm_private_data; | 534 | struct drm_gem_object *obj = vma->vm_private_data; |
535 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | 535 | struct omap_gem_object *omap_obj = to_omap_bo(obj); |
536 | struct drm_device *dev = obj->dev; | 536 | struct drm_device *dev = obj->dev; |
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 4e1a40389964..7d1cab57c89e 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c | |||
@@ -105,15 +105,15 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev) | |||
105 | static struct vm_operations_struct qxl_ttm_vm_ops; | 105 | static struct vm_operations_struct qxl_ttm_vm_ops; |
106 | static const struct vm_operations_struct *ttm_vm_ops; | 106 | static const struct vm_operations_struct *ttm_vm_ops; |
107 | 107 | ||
108 | static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 108 | static int qxl_ttm_fault(struct vm_fault *vmf) |
109 | { | 109 | { |
110 | struct ttm_buffer_object *bo; | 110 | struct ttm_buffer_object *bo; |
111 | int r; | 111 | int r; |
112 | 112 | ||
113 | bo = (struct ttm_buffer_object *)vma->vm_private_data; | 113 | bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data; |
114 | if (bo == NULL) | 114 | if (bo == NULL) |
115 | return VM_FAULT_NOPAGE; | 115 | return VM_FAULT_NOPAGE; |
116 | r = ttm_vm_ops->fault(vma, vmf); | 116 | r = ttm_vm_ops->fault(vmf); |
117 | return r; | 117 | return r; |
118 | } | 118 | } |
119 | 119 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 7a10b3852970..684f1703aa5c 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -979,19 +979,19 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size) | |||
979 | static struct vm_operations_struct radeon_ttm_vm_ops; | 979 | static struct vm_operations_struct radeon_ttm_vm_ops; |
980 | static const struct vm_operations_struct *ttm_vm_ops = NULL; | 980 | static const struct vm_operations_struct *ttm_vm_ops = NULL; |
981 | 981 | ||
982 | static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 982 | static int radeon_ttm_fault(struct vm_fault *vmf) |
983 | { | 983 | { |
984 | struct ttm_buffer_object *bo; | 984 | struct ttm_buffer_object *bo; |
985 | struct radeon_device *rdev; | 985 | struct radeon_device *rdev; |
986 | int r; | 986 | int r; |
987 | 987 | ||
988 | bo = (struct ttm_buffer_object *)vma->vm_private_data; | 988 | bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data; |
989 | if (bo == NULL) { | 989 | if (bo == NULL) { |
990 | return VM_FAULT_NOPAGE; | 990 | return VM_FAULT_NOPAGE; |
991 | } | 991 | } |
992 | rdev = radeon_get_rdev(bo->bdev); | 992 | rdev = radeon_get_rdev(bo->bdev); |
993 | down_read(&rdev->pm.mclk_lock); | 993 | down_read(&rdev->pm.mclk_lock); |
994 | r = ttm_vm_ops->fault(vma, vmf); | 994 | r = ttm_vm_ops->fault(vmf); |
995 | up_read(&rdev->pm.mclk_lock); | 995 | up_read(&rdev->pm.mclk_lock); |
996 | return r; | 996 | return r; |
997 | } | 997 | } |
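
qxl and radeon above (and virtio-gpu below) do not implement their own fault path; they interpose on TTM's, and with the new prototype the saved ttm_vm_ops->fault is likewise called with just the vm_fault. A sketch of the clone-and-wrap pattern these drivers share, with illustrative names and a hypothetical demo_bdev:

    #include <linux/mm.h>
    #include <drm/ttm/ttm_bo_api.h>

    static struct vm_operations_struct demo_vm_ops;
    static const struct vm_operations_struct *saved_ttm_vm_ops;
    static struct ttm_bo_device *demo_bdev;  /* set at driver init */

    static int demo_ttm_fault(struct vm_fault *vmf)
    {
        struct ttm_buffer_object *bo = vmf->vma->vm_private_data;

        if (bo == NULL)
            return VM_FAULT_NOPAGE;
        /* take driver locks here, as radeon does with pm.mclk_lock */
        return saved_ttm_vm_ops->fault(vmf);
    }

    static int demo_mmap(struct file *filp, struct vm_area_struct *vma)
    {
        int r = ttm_bo_mmap(filp, vma, demo_bdev);

        if (r != 0)
            return r;
        if (saved_ttm_vm_ops == NULL) {
            saved_ttm_vm_ops = vma->vm_ops;      /* TTM's own table */
            demo_vm_ops = *saved_ttm_vm_ops;     /* clone it */
            demo_vm_ops.fault = demo_ttm_fault;  /* wrap ->fault only */
        }
        vma->vm_ops = &demo_vm_ops;
        return 0;
    }
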
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index b523a5d4a38c..17e62ecb5d4d 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c | |||
@@ -441,8 +441,9 @@ int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm, | |||
441 | return 0; | 441 | return 0; |
442 | } | 442 | } |
443 | 443 | ||
444 | static int tegra_bo_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 444 | static int tegra_bo_fault(struct vm_fault *vmf) |
445 | { | 445 | { |
446 | struct vm_area_struct *vma = vmf->vma; | ||
446 | struct drm_gem_object *gem = vma->vm_private_data; | 447 | struct drm_gem_object *gem = vma->vm_private_data; |
447 | struct tegra_bo *bo = to_tegra_bo(gem); | 448 | struct tegra_bo *bo = to_tegra_bo(gem); |
448 | struct page *page; | 449 | struct page *page; |
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 88169141bef5..35ffb3754feb 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c | |||
@@ -43,7 +43,6 @@ | |||
43 | #define TTM_BO_VM_NUM_PREFAULT 16 | 43 | #define TTM_BO_VM_NUM_PREFAULT 16 |
44 | 44 | ||
45 | static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, | 45 | static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, |
46 | struct vm_area_struct *vma, | ||
47 | struct vm_fault *vmf) | 46 | struct vm_fault *vmf) |
48 | { | 47 | { |
49 | int ret = 0; | 48 | int ret = 0; |
@@ -67,7 +66,7 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, | |||
67 | goto out_unlock; | 66 | goto out_unlock; |
68 | 67 | ||
69 | ttm_bo_reference(bo); | 68 | ttm_bo_reference(bo); |
70 | up_read(&vma->vm_mm->mmap_sem); | 69 | up_read(&vmf->vma->vm_mm->mmap_sem); |
71 | (void) dma_fence_wait(bo->moving, true); | 70 | (void) dma_fence_wait(bo->moving, true); |
72 | ttm_bo_unreserve(bo); | 71 | ttm_bo_unreserve(bo); |
73 | ttm_bo_unref(&bo); | 72 | ttm_bo_unref(&bo); |
@@ -92,8 +91,9 @@ out_unlock: | |||
92 | return ret; | 91 | return ret; |
93 | } | 92 | } |
94 | 93 | ||
95 | static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 94 | static int ttm_bo_vm_fault(struct vm_fault *vmf) |
96 | { | 95 | { |
96 | struct vm_area_struct *vma = vmf->vma; | ||
97 | struct ttm_buffer_object *bo = (struct ttm_buffer_object *) | 97 | struct ttm_buffer_object *bo = (struct ttm_buffer_object *) |
98 | vma->vm_private_data; | 98 | vma->vm_private_data; |
99 | struct ttm_bo_device *bdev = bo->bdev; | 99 | struct ttm_bo_device *bdev = bo->bdev; |
@@ -124,7 +124,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
124 | if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) { | 124 | if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) { |
125 | if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { | 125 | if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { |
126 | ttm_bo_reference(bo); | 126 | ttm_bo_reference(bo); |
127 | up_read(&vma->vm_mm->mmap_sem); | 127 | up_read(&vmf->vma->vm_mm->mmap_sem); |
128 | (void) ttm_bo_wait_unreserved(bo); | 128 | (void) ttm_bo_wait_unreserved(bo); |
129 | ttm_bo_unref(&bo); | 129 | ttm_bo_unref(&bo); |
130 | } | 130 | } |
@@ -168,7 +168,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
168 | * Wait for buffer data in transit, due to a pipelined | 168 | * Wait for buffer data in transit, due to a pipelined |
169 | * move. | 169 | * move. |
170 | */ | 170 | */ |
171 | ret = ttm_bo_vm_fault_idle(bo, vma, vmf); | 171 | ret = ttm_bo_vm_fault_idle(bo, vmf); |
172 | if (unlikely(ret != 0)) { | 172 | if (unlikely(ret != 0)) { |
173 | retval = ret; | 173 | retval = ret; |
174 | 174 | ||
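
With the vma argument gone, ttm_bo_vm_fault reaches mmap_sem through vmf->vma->vm_mm before dropping it for a retryable fault. A condensed sketch of that retry protocol (placeholders stand in for the TTM-specific pin and wait steps):

    #include <linux/mm.h>
    #include <linux/rwsem.h>

    static int demo_retry_or_block(struct vm_fault *vmf)
    {
        if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
            if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
                /* pin the object first so it outlives the unlock */
                up_read(&vmf->vma->vm_mm->mmap_sem);
                /* ... sleep until the buffer is idle, then unpin ... */
            }
            return VM_FAULT_RETRY;
        }
        /* retries not allowed: block here with mmap_sem still held */
        return 0;
    }

Dropping mmap_sem before sleeping keeps a slow GPU move from stalling every other fault in the process.
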
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h index 6c4286e57362..2a75ab80527a 100644 --- a/drivers/gpu/drm/udl/udl_drv.h +++ b/drivers/gpu/drm/udl/udl_drv.h | |||
@@ -134,7 +134,7 @@ void udl_gem_put_pages(struct udl_gem_object *obj); | |||
134 | int udl_gem_vmap(struct udl_gem_object *obj); | 134 | int udl_gem_vmap(struct udl_gem_object *obj); |
135 | void udl_gem_vunmap(struct udl_gem_object *obj); | 135 | void udl_gem_vunmap(struct udl_gem_object *obj); |
136 | int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); | 136 | int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); |
137 | int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); | 137 | int udl_gem_fault(struct vm_fault *vmf); |
138 | 138 | ||
139 | int udl_handle_damage(struct udl_framebuffer *fb, int x, int y, | 139 | int udl_handle_damage(struct udl_framebuffer *fb, int x, int y, |
140 | int width, int height); | 140 | int width, int height); |
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c index 3c0c4bd3f750..775c50e4f02c 100644 --- a/drivers/gpu/drm/udl/udl_gem.c +++ b/drivers/gpu/drm/udl/udl_gem.c | |||
@@ -100,8 +100,9 @@ int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) | |||
100 | return ret; | 100 | return ret; |
101 | } | 101 | } |
102 | 102 | ||
103 | int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 103 | int udl_gem_fault(struct vm_fault *vmf) |
104 | { | 104 | { |
105 | struct vm_area_struct *vma = vmf->vma; | ||
105 | struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data); | 106 | struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data); |
106 | struct page *page; | 107 | struct page *page; |
107 | unsigned int page_offset; | 108 | unsigned int page_offset; |
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c index 477e07f0ecb6..7ccbb03e98de 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.c +++ b/drivers/gpu/drm/vgem/vgem_drv.c | |||
@@ -50,8 +50,9 @@ static void vgem_gem_free_object(struct drm_gem_object *obj) | |||
50 | kfree(vgem_obj); | 50 | kfree(vgem_obj); |
51 | } | 51 | } |
52 | 52 | ||
53 | static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 53 | static int vgem_gem_fault(struct vm_fault *vmf) |
54 | { | 54 | { |
55 | struct vm_area_struct *vma = vmf->vma; | ||
55 | struct drm_vgem_gem_object *obj = vma->vm_private_data; | 56 | struct drm_vgem_gem_object *obj = vma->vm_private_data; |
56 | /* We don't use vmf->pgoff since that has the fake offset */ | 57 | /* We don't use vmf->pgoff since that has the fake offset */ |
57 | unsigned long vaddr = vmf->address; | 58 | unsigned long vaddr = vmf->address; |
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c index 9cc7079f7aca..70ec8ca8d9b1 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ttm.c +++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c | |||
@@ -114,18 +114,17 @@ static void virtio_gpu_ttm_global_fini(struct virtio_gpu_device *vgdev) | |||
114 | static struct vm_operations_struct virtio_gpu_ttm_vm_ops; | 114 | static struct vm_operations_struct virtio_gpu_ttm_vm_ops; |
115 | static const struct vm_operations_struct *ttm_vm_ops; | 115 | static const struct vm_operations_struct *ttm_vm_ops; |
116 | 116 | ||
117 | static int virtio_gpu_ttm_fault(struct vm_area_struct *vma, | 117 | static int virtio_gpu_ttm_fault(struct vm_fault *vmf) |
118 | struct vm_fault *vmf) | ||
119 | { | 118 | { |
120 | struct ttm_buffer_object *bo; | 119 | struct ttm_buffer_object *bo; |
121 | struct virtio_gpu_device *vgdev; | 120 | struct virtio_gpu_device *vgdev; |
122 | int r; | 121 | int r; |
123 | 122 | ||
124 | bo = (struct ttm_buffer_object *)vma->vm_private_data; | 123 | bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data; |
125 | if (bo == NULL) | 124 | if (bo == NULL) |
126 | return VM_FAULT_NOPAGE; | 125 | return VM_FAULT_NOPAGE; |
127 | vgdev = virtio_gpu_get_vgdev(bo->bdev); | 126 | vgdev = virtio_gpu_get_vgdev(bo->bdev); |
128 | r = ttm_vm_ops->fault(vma, vmf); | 127 | r = ttm_vm_ops->fault(vmf); |
129 | return r; | 128 | return r; |
130 | } | 129 | } |
131 | #endif | 130 | #endif |
diff --git a/drivers/hsi/clients/cmt_speech.c b/drivers/hsi/clients/cmt_speech.c index 3deef6cc7d7c..7175e6bedf21 100644 --- a/drivers/hsi/clients/cmt_speech.c +++ b/drivers/hsi/clients/cmt_speech.c | |||
@@ -1098,9 +1098,9 @@ static void cs_hsi_stop(struct cs_hsi_iface *hi) | |||
1098 | kfree(hi); | 1098 | kfree(hi); |
1099 | } | 1099 | } |
1100 | 1100 | ||
1101 | static int cs_char_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 1101 | static int cs_char_vma_fault(struct vm_fault *vmf) |
1102 | { | 1102 | { |
1103 | struct cs_char *csdata = vma->vm_private_data; | 1103 | struct cs_char *csdata = vmf->vma->vm_private_data; |
1104 | struct page *page; | 1104 | struct page *page; |
1105 | 1105 | ||
1106 | page = virt_to_page(csdata->mmap_base); | 1106 | page = virt_to_page(csdata->mmap_base); |
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c index e8d55a153a65..e88afe1a435c 100644 --- a/drivers/hwtracing/intel_th/msu.c +++ b/drivers/hwtracing/intel_th/msu.c | |||
@@ -1188,9 +1188,9 @@ static void msc_mmap_close(struct vm_area_struct *vma) | |||
1188 | mutex_unlock(&msc->buf_mutex); | 1188 | mutex_unlock(&msc->buf_mutex); |
1189 | } | 1189 | } |
1190 | 1190 | ||
1191 | static int msc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 1191 | static int msc_mmap_fault(struct vm_fault *vmf) |
1192 | { | 1192 | { |
1193 | struct msc_iter *iter = vma->vm_file->private_data; | 1193 | struct msc_iter *iter = vmf->vma->vm_file->private_data; |
1194 | struct msc *msc = iter->msc; | 1194 | struct msc *msc = iter->msc; |
1195 | 1195 | ||
1196 | vmf->page = msc_buffer_get_page(msc, vmf->pgoff); | 1196 | vmf->page = msc_buffer_get_page(msc, vmf->pgoff); |
@@ -1198,7 +1198,7 @@ static int msc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
1198 | return VM_FAULT_SIGBUS; | 1198 | return VM_FAULT_SIGBUS; |
1199 | 1199 | ||
1200 | get_page(vmf->page); | 1200 | get_page(vmf->page); |
1201 | vmf->page->mapping = vma->vm_file->f_mapping; | 1201 | vmf->page->mapping = vmf->vma->vm_file->f_mapping; |
1202 | vmf->page->index = vmf->pgoff; | 1202 | vmf->page->index = vmf->pgoff; |
1203 | 1203 | ||
1204 | return 0; | 1204 | return 0; |
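
The msu hunk shows the other common handler shape in this series: rather than inserting a PFN, the handler fills in vmf->page with a referenced page and returns 0, and the only churn is vma->vm_file becoming vmf->vma->vm_file. A sketch with a hypothetical demo_dev backing store:

    #include <linux/fs.h>
    #include <linux/mm.h>

    struct demo_dev {
        struct page **pages;     /* illustrative backing ring */
        unsigned long nr_pages;
    };

    static int demo_page_fault(struct vm_fault *vmf)
    {
        struct demo_dev *dev = vmf->vma->vm_file->private_data;
        struct page *page;

        if (vmf->pgoff >= dev->nr_pages)
            return VM_FAULT_SIGBUS;

        page = dev->pages[vmf->pgoff];
        get_page(page);          /* the fault core drops this reference */
        vmf->page = page;
        return 0;
    }
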
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index bd786b7bd30b..f46033984d07 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c | |||
@@ -92,7 +92,7 @@ static unsigned int poll_next(struct file *, struct poll_table_struct *); | |||
92 | static int user_event_ack(struct hfi1_ctxtdata *, int, unsigned long); | 92 | static int user_event_ack(struct hfi1_ctxtdata *, int, unsigned long); |
93 | static int set_ctxt_pkey(struct hfi1_ctxtdata *, unsigned, u16); | 93 | static int set_ctxt_pkey(struct hfi1_ctxtdata *, unsigned, u16); |
94 | static int manage_rcvq(struct hfi1_ctxtdata *, unsigned, int); | 94 | static int manage_rcvq(struct hfi1_ctxtdata *, unsigned, int); |
95 | static int vma_fault(struct vm_area_struct *, struct vm_fault *); | 95 | static int vma_fault(struct vm_fault *); |
96 | static long hfi1_file_ioctl(struct file *fp, unsigned int cmd, | 96 | static long hfi1_file_ioctl(struct file *fp, unsigned int cmd, |
97 | unsigned long arg); | 97 | unsigned long arg); |
98 | 98 | ||
@@ -695,7 +695,7 @@ done: | |||
695 | * Local (non-chip) user memory is not mapped right away but as it is | 695 | * Local (non-chip) user memory is not mapped right away but as it is |
696 | * accessed by the user-level code. | 696 | * accessed by the user-level code. |
697 | */ | 697 | */ |
698 | static int vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 698 | static int vma_fault(struct vm_fault *vmf) |
699 | { | 699 | { |
700 | struct page *page; | 700 | struct page *page; |
701 | 701 | ||
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index 2d1eacf1dfed..9396c1807cc3 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c | |||
@@ -893,7 +893,7 @@ bail: | |||
893 | /* | 893 | /* |
894 | * qib_file_vma_fault - handle a VMA page fault. | 894 | * qib_file_vma_fault - handle a VMA page fault. |
895 | */ | 895 | */ |
896 | static int qib_file_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 896 | static int qib_file_vma_fault(struct vm_fault *vmf) |
897 | { | 897 | { |
898 | struct page *page; | 898 | struct page *page; |
899 | 899 | ||
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 1b5b8c5361c5..09bd3b290bb8 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c | |||
@@ -2672,7 +2672,7 @@ static void *alloc_coherent(struct device *dev, size_t size, | |||
2672 | return NULL; | 2672 | return NULL; |
2673 | 2673 | ||
2674 | page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, | 2674 | page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, |
2675 | get_order(size)); | 2675 | get_order(size), flag); |
2676 | if (!page) | 2676 | if (!page) |
2677 | return NULL; | 2677 | return NULL; |
2678 | } | 2678 | } |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index f5e02f8e7371..a8f7ae0eb7a4 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
@@ -3829,7 +3829,7 @@ static void *intel_alloc_coherent(struct device *dev, size_t size, | |||
3829 | if (gfpflags_allow_blocking(flags)) { | 3829 | if (gfpflags_allow_blocking(flags)) { |
3830 | unsigned int count = size >> PAGE_SHIFT; | 3830 | unsigned int count = size >> PAGE_SHIFT; |
3831 | 3831 | ||
3832 | page = dma_alloc_from_contiguous(dev, count, order); | 3832 | page = dma_alloc_from_contiguous(dev, count, order, flags); |
3833 | if (page && iommu_no_mapping(dev) && | 3833 | if (page && iommu_no_mapping(dev) && |
3834 | page_to_phys(page) + size > dev->coherent_dma_mask) { | 3834 | page_to_phys(page) + size > dev->coherent_dma_mask) { |
3835 | dma_release_from_contiguous(dev, page, count); | 3835 | dma_release_from_contiguous(dev, page, count); |
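
The two IOMMU hunks above belong to a different series in this merge: dma_alloc_from_contiguous() grew a gfp_t parameter so CMA allocations see the caller's flags (amd passes its 'flag', intel its 'flags'). An illustrative caller of the widened signature, not from this commit:

    #include <linux/dma-contiguous.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>

    static struct page *demo_cma_alloc(struct device *dev, size_t size,
                                       gfp_t gfp)
    {
        /* the allocator can now honour e.g. __GFP_NOWARN from 'gfp' */
        return dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
                                         get_order(size), gfp);
    }
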
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c index ba63ca57ed7e..36bd904946bd 100644 --- a/drivers/media/v4l2-core/videobuf-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf-dma-sg.c | |||
@@ -434,8 +434,9 @@ static void videobuf_vm_close(struct vm_area_struct *vma) | |||
434 | * now ...). Bounce buffers don't work very well for the data rates | 434 | * now ...). Bounce buffers don't work very well for the data rates |
435 | * video capture has. | 435 | * video capture has. |
436 | */ | 436 | */ |
437 | static int videobuf_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 437 | static int videobuf_vm_fault(struct vm_fault *vmf) |
438 | { | 438 | { |
439 | struct vm_area_struct *vma = vmf->vma; | ||
439 | struct page *page; | 440 | struct page *page; |
440 | 441 | ||
441 | dprintk(3, "fault: fault @ %08lx [vma %08lx-%08lx]\n", | 442 | dprintk(3, "fault: fault @ %08lx [vma %08lx-%08lx]\n", |
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c index 3907387b6d15..062bf6ca2625 100644 --- a/drivers/misc/cxl/context.c +++ b/drivers/misc/cxl/context.c | |||
@@ -121,8 +121,9 @@ void cxl_context_set_mapping(struct cxl_context *ctx, | |||
121 | mutex_unlock(&ctx->mapping_lock); | 121 | mutex_unlock(&ctx->mapping_lock); |
122 | } | 122 | } |
123 | 123 | ||
124 | static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 124 | static int cxl_mmap_fault(struct vm_fault *vmf) |
125 | { | 125 | { |
126 | struct vm_area_struct *vma = vmf->vma; | ||
126 | struct cxl_context *ctx = vma->vm_file->private_data; | 127 | struct cxl_context *ctx = vma->vm_file->private_data; |
127 | u64 area, offset; | 128 | u64 area, offset; |
128 | 129 | ||
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c index af2e077da4b8..3641f1334cf0 100644 --- a/drivers/misc/sgi-gru/grumain.c +++ b/drivers/misc/sgi-gru/grumain.c | |||
@@ -926,8 +926,9 @@ again: | |||
926 | * | 926 | * |
927 | * Note: gru segments alway mmaped on GRU_GSEG_PAGESIZE boundaries. | 927 | * Note: gru segments alway mmaped on GRU_GSEG_PAGESIZE boundaries. |
928 | */ | 928 | */ |
929 | int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 929 | int gru_fault(struct vm_fault *vmf) |
930 | { | 930 | { |
931 | struct vm_area_struct *vma = vmf->vma; | ||
931 | struct gru_thread_state *gts; | 932 | struct gru_thread_state *gts; |
932 | unsigned long paddr, vaddr; | 933 | unsigned long paddr, vaddr; |
933 | unsigned long expires; | 934 | unsigned long expires; |
diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h index 5c3ce2459675..b5e308b50ed1 100644 --- a/drivers/misc/sgi-gru/grutables.h +++ b/drivers/misc/sgi-gru/grutables.h | |||
@@ -665,7 +665,7 @@ extern unsigned long gru_reserve_cb_resources(struct gru_state *gru, | |||
665 | int cbr_au_count, char *cbmap); | 665 | int cbr_au_count, char *cbmap); |
666 | extern unsigned long gru_reserve_ds_resources(struct gru_state *gru, | 666 | extern unsigned long gru_reserve_ds_resources(struct gru_state *gru, |
667 | int dsr_au_count, char *dsmap); | 667 | int dsr_au_count, char *dsmap); |
668 | extern int gru_fault(struct vm_area_struct *, struct vm_fault *vmf); | 668 | extern int gru_fault(struct vm_fault *vmf); |
669 | extern struct gru_mm_struct *gru_register_mmu_notifier(void); | 669 | extern struct gru_mm_struct *gru_register_mmu_notifier(void); |
670 | extern void gru_drop_mmu_notifier(struct gru_mm_struct *gms); | 670 | extern void gru_drop_mmu_notifier(struct gru_mm_struct *gms); |
671 | 671 | ||
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c index 90869cee2b20..ef5bf55f08a4 100644 --- a/drivers/scsi/cxlflash/superpipe.c +++ b/drivers/scsi/cxlflash/superpipe.c | |||
@@ -1053,7 +1053,6 @@ out: | |||
1053 | 1053 | ||
1054 | /** | 1054 | /** |
1055 | * cxlflash_mmap_fault() - mmap fault handler for adapter file descriptor | 1055 | * cxlflash_mmap_fault() - mmap fault handler for adapter file descriptor |
1056 | * @vma: VM area associated with mapping. | ||
1057 | * @vmf: VM fault associated with current fault. | 1056 | * @vmf: VM fault associated with current fault. |
1058 | * | 1057 | * |
1059 | * To support error notification via MMIO, faults are 'caught' by this routine | 1058 | * To support error notification via MMIO, faults are 'caught' by this routine |
@@ -1067,8 +1066,9 @@ out: | |||
1067 | * | 1066 | * |
1068 | * Return: 0 on success, VM_FAULT_SIGBUS on failure | 1067 | * Return: 0 on success, VM_FAULT_SIGBUS on failure |
1069 | */ | 1068 | */ |
1070 | static int cxlflash_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 1069 | static int cxlflash_mmap_fault(struct vm_fault *vmf) |
1071 | { | 1070 | { |
1071 | struct vm_area_struct *vma = vmf->vma; | ||
1072 | struct file *file = vma->vm_file; | 1072 | struct file *file = vma->vm_file; |
1073 | struct cxl_context *ctx = cxl_fops_get_context(file); | 1073 | struct cxl_context *ctx = cxl_fops_get_context(file); |
1074 | struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg, | 1074 | struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg, |
@@ -1097,7 +1097,7 @@ static int cxlflash_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
1097 | 1097 | ||
1098 | if (likely(!ctxi->err_recovery_active)) { | 1098 | if (likely(!ctxi->err_recovery_active)) { |
1099 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | 1099 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); |
1100 | rc = ctxi->cxl_mmap_vmops->fault(vma, vmf); | 1100 | rc = ctxi->cxl_mmap_vmops->fault(vmf); |
1101 | } else { | 1101 | } else { |
1102 | dev_dbg(dev, "%s: err recovery active, use err_page\n", | 1102 | dev_dbg(dev, "%s: err recovery active, use err_page\n", |
1103 | __func__); | 1103 | __func__); |
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index e831e01f9fa6..29b86505f796 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c | |||
@@ -1185,8 +1185,9 @@ sg_fasync(int fd, struct file *filp, int mode) | |||
1185 | } | 1185 | } |
1186 | 1186 | ||
1187 | static int | 1187 | static int |
1188 | sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 1188 | sg_vma_fault(struct vm_fault *vmf) |
1189 | { | 1189 | { |
1190 | struct vm_area_struct *vma = vmf->vma; | ||
1190 | Sg_fd *sfp; | 1191 | Sg_fd *sfp; |
1191 | unsigned long offset, len, sa; | 1192 | unsigned long offset, len, sa; |
1192 | Sg_scatter_hold *rsv_schp; | 1193 | Sg_scatter_hold *rsv_schp; |
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index 969600779e44..2c3ffbcbd621 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c | |||
@@ -870,9 +870,9 @@ static void ion_buffer_sync_for_device(struct ion_buffer *buffer, | |||
870 | mutex_unlock(&buffer->lock); | 870 | mutex_unlock(&buffer->lock); |
871 | } | 871 | } |
872 | 872 | ||
873 | static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 873 | static int ion_vm_fault(struct vm_fault *vmf) |
874 | { | 874 | { |
875 | struct ion_buffer *buffer = vma->vm_private_data; | 875 | struct ion_buffer *buffer = vmf->vma->vm_private_data; |
876 | unsigned long pfn; | 876 | unsigned long pfn; |
877 | int ret; | 877 | int ret; |
878 | 878 | ||
@@ -881,7 +881,7 @@ static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
881 | BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]); | 881 | BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]); |
882 | 882 | ||
883 | pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff])); | 883 | pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff])); |
884 | ret = vm_insert_pfn(vma, vmf->address, pfn); | 884 | ret = vm_insert_pfn(vmf->vma, vmf->address, pfn); |
885 | mutex_unlock(&buffer->lock); | 885 | mutex_unlock(&buffer->lock); |
886 | if (ret) | 886 | if (ret) |
887 | return VM_FAULT_ERROR; | 887 | return VM_FAULT_ERROR; |
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c index 9afa6bec3e6f..896196c74cd2 100644 --- a/drivers/staging/lustre/lustre/llite/llite_mmap.c +++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c | |||
@@ -321,7 +321,7 @@ out: | |||
321 | return fault_ret; | 321 | return fault_ret; |
322 | } | 322 | } |
323 | 323 | ||
324 | static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 324 | static int ll_fault(struct vm_fault *vmf) |
325 | { | 325 | { |
326 | int count = 0; | 326 | int count = 0; |
327 | bool printed = false; | 327 | bool printed = false; |
@@ -335,7 +335,7 @@ static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
335 | set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM)); | 335 | set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM)); |
336 | 336 | ||
337 | restart: | 337 | restart: |
338 | result = ll_fault0(vma, vmf); | 338 | result = ll_fault0(vmf->vma, vmf); |
339 | LASSERT(!(result & VM_FAULT_LOCKED)); | 339 | LASSERT(!(result & VM_FAULT_LOCKED)); |
340 | if (result == 0) { | 340 | if (result == 0) { |
341 | struct page *vmpage = vmf->page; | 341 | struct page *vmpage = vmf->page; |
@@ -362,8 +362,9 @@ restart: | |||
362 | return result; | 362 | return result; |
363 | } | 363 | } |
364 | 364 | ||
365 | static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | 365 | static int ll_page_mkwrite(struct vm_fault *vmf) |
366 | { | 366 | { |
367 | struct vm_area_struct *vma = vmf->vma; | ||
367 | int count = 0; | 368 | int count = 0; |
368 | bool printed = false; | 369 | bool printed = false; |
369 | bool retry; | 370 | bool retry; |
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c index 3e9cf710501b..4c57755e06e7 100644 --- a/drivers/staging/lustre/lustre/llite/vvp_io.c +++ b/drivers/staging/lustre/lustre/llite/vvp_io.c | |||
@@ -1014,7 +1014,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio) | |||
1014 | { | 1014 | { |
1015 | struct vm_fault *vmf = cfio->ft_vmf; | 1015 | struct vm_fault *vmf = cfio->ft_vmf; |
1016 | 1016 | ||
1017 | cfio->ft_flags = filemap_fault(cfio->ft_vma, vmf); | 1017 | cfio->ft_flags = filemap_fault(vmf); |
1018 | cfio->ft_flags_valid = 1; | 1018 | cfio->ft_flags_valid = 1; |
1019 | 1019 | ||
1020 | if (vmf->page) { | 1020 | if (vmf->page) { |
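
The lustre wrapper can simply forward vmf because filemap_fault() itself lost the vma argument in this series; its prototype is now

        int filemap_fault(struct vm_fault *vmf);

so callers that used to cache the VMA separately (cfio->ft_vma above) no longer need to pass it along.
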
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 8041710b6972..5c1cb2df3a54 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c | |||
@@ -783,15 +783,15 @@ static int tcmu_find_mem_index(struct vm_area_struct *vma) | |||
783 | return -1; | 783 | return -1; |
784 | } | 784 | } |
785 | 785 | ||
786 | static int tcmu_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 786 | static int tcmu_vma_fault(struct vm_fault *vmf) |
787 | { | 787 | { |
788 | struct tcmu_dev *udev = vma->vm_private_data; | 788 | struct tcmu_dev *udev = vmf->vma->vm_private_data; |
789 | struct uio_info *info = &udev->uio_info; | 789 | struct uio_info *info = &udev->uio_info; |
790 | struct page *page; | 790 | struct page *page; |
791 | unsigned long offset; | 791 | unsigned long offset; |
792 | void *addr; | 792 | void *addr; |
793 | 793 | ||
794 | int mi = tcmu_find_mem_index(vma); | 794 | int mi = tcmu_find_mem_index(vmf->vma); |
795 | if (mi < 0) | 795 | if (mi < 0) |
796 | return VM_FAULT_SIGBUS; | 796 | return VM_FAULT_SIGBUS; |
797 | 797 | ||
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index fba021f5736a..31d95dc9c202 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c | |||
@@ -597,14 +597,14 @@ static int uio_find_mem_index(struct vm_area_struct *vma) | |||
597 | return -1; | 597 | return -1; |
598 | } | 598 | } |
599 | 599 | ||
600 | static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 600 | static int uio_vma_fault(struct vm_fault *vmf) |
601 | { | 601 | { |
602 | struct uio_device *idev = vma->vm_private_data; | 602 | struct uio_device *idev = vmf->vma->vm_private_data; |
603 | struct page *page; | 603 | struct page *page; |
604 | unsigned long offset; | 604 | unsigned long offset; |
605 | void *addr; | 605 | void *addr; |
606 | 606 | ||
607 | int mi = uio_find_mem_index(vma); | 607 | int mi = uio_find_mem_index(vmf->vma); |
608 | if (mi < 0) | 608 | if (mi < 0) |
609 | return VM_FAULT_SIGBUS; | 609 | return VM_FAULT_SIGBUS; |
610 | 610 | ||
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c index 91c22276c03b..9fb8b1e6ecc2 100644 --- a/drivers/usb/mon/mon_bin.c +++ b/drivers/usb/mon/mon_bin.c | |||
@@ -1223,9 +1223,9 @@ static void mon_bin_vma_close(struct vm_area_struct *vma) | |||
1223 | /* | 1223 | /* |
1224 | * Map ring pages to user space. | 1224 | * Map ring pages to user space. |
1225 | */ | 1225 | */ |
1226 | static int mon_bin_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 1226 | static int mon_bin_vma_fault(struct vm_fault *vmf) |
1227 | { | 1227 | { |
1228 | struct mon_reader_bin *rp = vma->vm_private_data; | 1228 | struct mon_reader_bin *rp = vmf->vma->vm_private_data; |
1229 | unsigned long offset, chunk_idx; | 1229 | unsigned long offset, chunk_idx; |
1230 | struct page *pageptr; | 1230 | struct page *pageptr; |
1231 | 1231 | ||
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c index 74b5bcac8bf2..37f69c061210 100644 --- a/drivers/video/fbdev/core/fb_defio.c +++ b/drivers/video/fbdev/core/fb_defio.c | |||
@@ -37,12 +37,11 @@ static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs | |||
37 | } | 37 | } |
38 | 38 | ||
39 | /* this is to find and return the vmalloc-ed fb pages */ | 39 | /* this is to find and return the vmalloc-ed fb pages */ |
40 | static int fb_deferred_io_fault(struct vm_area_struct *vma, | 40 | static int fb_deferred_io_fault(struct vm_fault *vmf) |
41 | struct vm_fault *vmf) | ||
42 | { | 41 | { |
43 | unsigned long offset; | 42 | unsigned long offset; |
44 | struct page *page; | 43 | struct page *page; |
45 | struct fb_info *info = vma->vm_private_data; | 44 | struct fb_info *info = vmf->vma->vm_private_data; |
46 | 45 | ||
47 | offset = vmf->pgoff << PAGE_SHIFT; | 46 | offset = vmf->pgoff << PAGE_SHIFT; |
48 | if (offset >= info->fix.smem_len) | 47 | if (offset >= info->fix.smem_len) |
@@ -54,8 +53,8 @@ static int fb_deferred_io_fault(struct vm_area_struct *vma, | |||
54 | 53 | ||
55 | get_page(page); | 54 | get_page(page); |
56 | 55 | ||
57 | if (vma->vm_file) | 56 | if (vmf->vma->vm_file) |
58 | page->mapping = vma->vm_file->f_mapping; | 57 | page->mapping = vmf->vma->vm_file->f_mapping; |
59 | else | 58 | else |
60 | printk(KERN_ERR "no mapping available\n"); | 59 | printk(KERN_ERR "no mapping available\n"); |
61 | 60 | ||
@@ -91,11 +90,10 @@ int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasy | |||
91 | EXPORT_SYMBOL_GPL(fb_deferred_io_fsync); | 90 | EXPORT_SYMBOL_GPL(fb_deferred_io_fsync); |
92 | 91 | ||
93 | /* vm_ops->page_mkwrite handler */ | 92 | /* vm_ops->page_mkwrite handler */ |
94 | static int fb_deferred_io_mkwrite(struct vm_area_struct *vma, | 93 | static int fb_deferred_io_mkwrite(struct vm_fault *vmf) |
95 | struct vm_fault *vmf) | ||
96 | { | 94 | { |
97 | struct page *page = vmf->page; | 95 | struct page *page = vmf->page; |
98 | struct fb_info *info = vma->vm_private_data; | 96 | struct fb_info *info = vmf->vma->vm_private_data; |
99 | struct fb_deferred_io *fbdefio = info->fbdefio; | 97 | struct fb_deferred_io *fbdefio = info->fbdefio; |
100 | struct page *cur; | 98 | struct page *cur; |
101 | 99 | ||
@@ -105,7 +103,7 @@ static int fb_deferred_io_mkwrite(struct vm_area_struct *vma, | |||
105 | deferred framebuffer IO. then if userspace touches a page | 103 | deferred framebuffer IO. then if userspace touches a page |
106 | again, we repeat the same scheme */ | 104 | again, we repeat the same scheme */ |
107 | 105 | ||
108 | file_update_time(vma->vm_file); | 106 | file_update_time(vmf->vma->vm_file); |
109 | 107 | ||
110 | /* protect against the workqueue changing the page list */ | 108 | /* protect against the workqueue changing the page list */ |
111 | mutex_lock(&fbdefio->lock); | 109 | mutex_lock(&fbdefio->lock); |
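
.page_mkwrite handlers get the same conversion: the faulting page still arrives in vmf->page, and the file is reached through vmf->vma->vm_file. Condensed to its skeleton (hypothetical example_* name; locking and writeback details omitted):

        static int example_page_mkwrite(struct vm_fault *vmf)
        {
                struct page *page = vmf->page;  /* page being made writable */
                struct inode *inode = file_inode(vmf->vma->vm_file);

                file_update_time(vmf->vma->vm_file);
                lock_page(page);
                if (page->mapping != inode->i_mapping) {
                        /* raced with truncate */
                        unlock_page(page);
                        return VM_FAULT_NOPAGE;
                }
                return VM_FAULT_LOCKED;         /* page returned locked */
        }
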
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 181793f07852..9d2738e9217f 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c | |||
@@ -615,8 +615,12 @@ static void virtballoon_remove(struct virtio_device *vdev) | |||
615 | cancel_work_sync(&vb->update_balloon_stats_work); | 615 | cancel_work_sync(&vb->update_balloon_stats_work); |
616 | 616 | ||
617 | remove_common(vb); | 617 | remove_common(vb); |
618 | #ifdef CONFIG_BALLOON_COMPACTION | ||
618 | if (vb->vb_dev_info.inode) | 619 | if (vb->vb_dev_info.inode) |
619 | iput(vb->vb_dev_info.inode); | 620 | iput(vb->vb_dev_info.inode); |
621 | |||
622 | kern_unmount(balloon_mnt); | ||
623 | #endif | ||
620 | kfree(vb); | 624 | kfree(vb); |
621 | } | 625 | } |
622 | 626 | ||
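
This hunk is a leak fix riding in the same batch: with CONFIG_BALLOON_COMPACTION set, probe pins a private mount that remove() previously never released. A sketch of the probe-side pairing being unwound here (abridged; details may vary from the driver source):

        #ifdef CONFIG_BALLOON_COMPACTION
                balloon_mnt = kern_mount(&balloon_fs);
                ...
                vb->vb_dev_info.inode = alloc_anon_inode(balloon_mnt->mnt_sb);
        #endif
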
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index 2077a3ac7c0c..7a92a5e1d40c 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c | |||
@@ -804,10 +804,10 @@ static void privcmd_close(struct vm_area_struct *vma) | |||
804 | kfree(pages); | 804 | kfree(pages); |
805 | } | 805 | } |
806 | 806 | ||
807 | static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 807 | static int privcmd_fault(struct vm_fault *vmf) |
808 | { | 808 | { |
809 | printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n", | 809 | printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n", |
810 | vma, vma->vm_start, vma->vm_end, | 810 | vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end, |
811 | vmf->pgoff, (void *)vmf->address); | 811 | vmf->pgoff, (void *)vmf->address); |
812 | 812 | ||
813 | return VM_FAULT_SIGBUS; | 813 | return VM_FAULT_SIGBUS; |
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c index 6a0f3fa85ef7..3de3b4a89d89 100644 --- a/fs/9p/vfs_file.c +++ b/fs/9p/vfs_file.c | |||
@@ -534,11 +534,11 @@ v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma) | |||
534 | } | 534 | } |
535 | 535 | ||
536 | static int | 536 | static int |
537 | v9fs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | 537 | v9fs_vm_page_mkwrite(struct vm_fault *vmf) |
538 | { | 538 | { |
539 | struct v9fs_inode *v9inode; | 539 | struct v9fs_inode *v9inode; |
540 | struct page *page = vmf->page; | 540 | struct page *page = vmf->page; |
541 | struct file *filp = vma->vm_file; | 541 | struct file *filp = vmf->vma->vm_file; |
542 | struct inode *inode = file_inode(filp); | 542 | struct inode *inode = file_inode(filp); |
543 | 543 | ||
544 | 544 | ||
diff --git a/fs/aio.c b/fs/aio.c --- a/fs/aio.c +++ b/fs/aio.c | |||
@@ -512,7 +512,7 @@ static int aio_setup_ring(struct kioctx *ctx) | |||
512 | 512 | ||
513 | ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size, | 513 | ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size, |
514 | PROT_READ | PROT_WRITE, | 514 | PROT_READ | PROT_WRITE, |
515 | MAP_SHARED, 0, &unused); | 515 | MAP_SHARED, 0, &unused, NULL); |
516 | up_write(&mm->mmap_sem); | 516 | up_write(&mm->mmap_sem); |
517 | if (IS_ERR((void *)ctx->mmap_base)) { | 517 | if (IS_ERR((void *)ctx->mmap_base)) { |
518 | ctx->mmap_size = 0; | 518 | ctx->mmap_size = 0; |
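
The new trailing NULL is from a different series in the same merge: do_mmap_pgoff() (and do_munmap(), further down) grew a struct list_head * parameter on which userfaultfd collects unmap events, and callers that don't participate pass NULL. The prototype now reads, roughly:

        unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
                                    unsigned long len, unsigned long prot,
                                    unsigned long flags, unsigned long pgoff,
                                    unsigned long *populate,
                                    struct list_head *uf);
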
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 6a823719b6c5..adf16307842a 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
@@ -3147,7 +3147,7 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, | |||
3147 | int btrfs_merge_bio_hook(struct page *page, unsigned long offset, | 3147 | int btrfs_merge_bio_hook(struct page *page, unsigned long offset, |
3148 | size_t size, struct bio *bio, | 3148 | size_t size, struct bio *bio, |
3149 | unsigned long bio_flags); | 3149 | unsigned long bio_flags); |
3150 | int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); | 3150 | int btrfs_page_mkwrite(struct vm_fault *vmf); |
3151 | int btrfs_readpage(struct file *file, struct page *page); | 3151 | int btrfs_readpage(struct file *file, struct page *page); |
3152 | void btrfs_evict_inode(struct inode *inode); | 3152 | void btrfs_evict_inode(struct inode *inode); |
3153 | int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc); | 3153 | int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc); |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 1e861a063721..ea1d500cfba6 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -8964,10 +8964,10 @@ again: | |||
8964 | * beyond EOF, then the page is guaranteed safe against truncation until we | 8964 | * beyond EOF, then the page is guaranteed safe against truncation until we |
8965 | * unlock the page. | 8965 | * unlock the page. |
8966 | */ | 8966 | */ |
8967 | int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | 8967 | int btrfs_page_mkwrite(struct vm_fault *vmf) |
8968 | { | 8968 | { |
8969 | struct page *page = vmf->page; | 8969 | struct page *page = vmf->page; |
8970 | struct inode *inode = file_inode(vma->vm_file); | 8970 | struct inode *inode = file_inode(vmf->vma->vm_file); |
8971 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); | 8971 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
8972 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; | 8972 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; |
8973 | struct btrfs_ordered_extent *ordered; | 8973 | struct btrfs_ordered_extent *ordered; |
@@ -9000,7 +9000,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
9000 | ret = btrfs_delalloc_reserve_space(inode, page_start, | 9000 | ret = btrfs_delalloc_reserve_space(inode, page_start, |
9001 | reserved_space); | 9001 | reserved_space); |
9002 | if (!ret) { | 9002 | if (!ret) { |
9003 | ret = file_update_time(vma->vm_file); | 9003 | ret = file_update_time(vmf->vma->vm_file); |
9004 | reserved = 1; | 9004 | reserved = 1; |
9005 | } | 9005 | } |
9006 | if (ret) { | 9006 | if (ret) { |
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index e4b066cd912a..09860c0ec7c1 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c | |||
@@ -1386,8 +1386,9 @@ static void ceph_restore_sigs(sigset_t *oldset) | |||
1386 | /* | 1386 | /* |
1387 | * vm ops | 1387 | * vm ops |
1388 | */ | 1388 | */ |
1389 | static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 1389 | static int ceph_filemap_fault(struct vm_fault *vmf) |
1390 | { | 1390 | { |
1391 | struct vm_area_struct *vma = vmf->vma; | ||
1391 | struct inode *inode = file_inode(vma->vm_file); | 1392 | struct inode *inode = file_inode(vma->vm_file); |
1392 | struct ceph_inode_info *ci = ceph_inode(inode); | 1393 | struct ceph_inode_info *ci = ceph_inode(inode); |
1393 | struct ceph_file_info *fi = vma->vm_file->private_data; | 1394 | struct ceph_file_info *fi = vma->vm_file->private_data; |
@@ -1416,7 +1417,7 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
1416 | if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) || | 1417 | if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) || |
1417 | ci->i_inline_version == CEPH_INLINE_NONE) { | 1418 | ci->i_inline_version == CEPH_INLINE_NONE) { |
1418 | current->journal_info = vma->vm_file; | 1419 | current->journal_info = vma->vm_file; |
1419 | ret = filemap_fault(vma, vmf); | 1420 | ret = filemap_fault(vmf); |
1420 | current->journal_info = NULL; | 1421 | current->journal_info = NULL; |
1421 | } else | 1422 | } else |
1422 | ret = -EAGAIN; | 1423 | ret = -EAGAIN; |
@@ -1477,8 +1478,9 @@ out_restore: | |||
1477 | /* | 1478 | /* |
1478 | * Reuse write_begin here for simplicity. | 1479 | * Reuse write_begin here for simplicity. |
1479 | */ | 1480 | */ |
1480 | static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | 1481 | static int ceph_page_mkwrite(struct vm_fault *vmf) |
1481 | { | 1482 | { |
1483 | struct vm_area_struct *vma = vmf->vma; | ||
1482 | struct inode *inode = file_inode(vma->vm_file); | 1484 | struct inode *inode = file_inode(vma->vm_file); |
1483 | struct ceph_inode_info *ci = ceph_inode(inode); | 1485 | struct ceph_inode_info *ci = ceph_inode(inode); |
1484 | struct ceph_file_info *fi = vma->vm_file->private_data; | 1486 | struct ceph_file_info *fi = vma->vm_file->private_data; |
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 98dc842e7245..aa3debbba826 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -3282,7 +3282,7 @@ cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset) | |||
3282 | * sure that it doesn't change while being written back. | 3282 | * sure that it doesn't change while being written back. |
3283 | */ | 3283 | */ |
3284 | static int | 3284 | static int |
3285 | cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | 3285 | cifs_page_mkwrite(struct vm_fault *vmf) |
3286 | { | 3286 | { |
3287 | struct page *page = vmf->page; | 3287 | struct page *page = vmf->page; |
3288 | 3288 | ||
diff --git a/fs/dax.c b/fs/dax.c --- a/fs/dax.c +++ b/fs/dax.c | |||
@@ -925,12 +925,11 @@ static int dax_insert_mapping(struct address_space *mapping, | |||
925 | 925 | ||
926 | /** | 926 | /** |
927 | * dax_pfn_mkwrite - handle first write to DAX page | 927 | * dax_pfn_mkwrite - handle first write to DAX page |
928 | * @vma: The virtual memory area where the fault occurred | ||
929 | * @vmf: The description of the fault | 928 | * @vmf: The description of the fault |
930 | */ | 929 | */ |
931 | int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | 930 | int dax_pfn_mkwrite(struct vm_fault *vmf) |
932 | { | 931 | { |
933 | struct file *file = vma->vm_file; | 932 | struct file *file = vmf->vma->vm_file; |
934 | struct address_space *mapping = file->f_mapping; | 933 | struct address_space *mapping = file->f_mapping; |
935 | void *entry, **slot; | 934 | void *entry, **slot; |
936 | pgoff_t index = vmf->pgoff; | 935 | pgoff_t index = vmf->pgoff; |
@@ -1119,20 +1118,10 @@ static int dax_fault_return(int error) | |||
1119 | return VM_FAULT_SIGBUS; | 1118 | return VM_FAULT_SIGBUS; |
1120 | } | 1119 | } |
1121 | 1120 | ||
1122 | /** | 1121 | static int dax_iomap_pte_fault(struct vm_fault *vmf, |
1123 | * dax_iomap_fault - handle a page fault on a DAX file | 1122 | const struct iomap_ops *ops) |
1124 | * @vma: The virtual memory area where the fault occurred | ||
1125 | * @vmf: The description of the fault | ||
1126 | * @ops: iomap ops passed from the file system | ||
1127 | * | ||
1128 | * When a page fault occurs, filesystems may call this helper in their fault | ||
1129 | * or mkwrite handler for DAX files. Assumes the caller has done all the | ||
1130 | * necessary locking for the page fault to proceed successfully. | ||
1131 | */ | ||
1132 | int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, | ||
1133 | const struct iomap_ops *ops) | ||
1134 | { | 1123 | { |
1135 | struct address_space *mapping = vma->vm_file->f_mapping; | 1124 | struct address_space *mapping = vmf->vma->vm_file->f_mapping; |
1136 | struct inode *inode = mapping->host; | 1125 | struct inode *inode = mapping->host; |
1137 | unsigned long vaddr = vmf->address; | 1126 | unsigned long vaddr = vmf->address; |
1138 | loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT; | 1127 | loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT; |
@@ -1205,11 +1194,11 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, | |||
1205 | case IOMAP_MAPPED: | 1194 | case IOMAP_MAPPED: |
1206 | if (iomap.flags & IOMAP_F_NEW) { | 1195 | if (iomap.flags & IOMAP_F_NEW) { |
1207 | count_vm_event(PGMAJFAULT); | 1196 | count_vm_event(PGMAJFAULT); |
1208 | mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); | 1197 | mem_cgroup_count_vm_event(vmf->vma->vm_mm, PGMAJFAULT); |
1209 | major = VM_FAULT_MAJOR; | 1198 | major = VM_FAULT_MAJOR; |
1210 | } | 1199 | } |
1211 | error = dax_insert_mapping(mapping, iomap.bdev, sector, | 1200 | error = dax_insert_mapping(mapping, iomap.bdev, sector, |
1212 | PAGE_SIZE, &entry, vma, vmf); | 1201 | PAGE_SIZE, &entry, vmf->vma, vmf); |
1213 | /* -EBUSY is fine, somebody else faulted on the same PTE */ | 1202 | /* -EBUSY is fine, somebody else faulted on the same PTE */ |
1214 | if (error == -EBUSY) | 1203 | if (error == -EBUSY) |
1215 | error = 0; | 1204 | error = 0; |
@@ -1247,7 +1236,6 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, | |||
1247 | } | 1236 | } |
1248 | return vmf_ret; | 1237 | return vmf_ret; |
1249 | } | 1238 | } |
1250 | EXPORT_SYMBOL_GPL(dax_iomap_fault); | ||
1251 | 1239 | ||
1252 | #ifdef CONFIG_FS_DAX_PMD | 1240 | #ifdef CONFIG_FS_DAX_PMD |
1253 | /* | 1241 | /* |
@@ -1338,7 +1326,8 @@ fallback: | |||
1338 | return VM_FAULT_FALLBACK; | 1326 | return VM_FAULT_FALLBACK; |
1339 | } | 1327 | } |
1340 | 1328 | ||
1341 | int dax_iomap_pmd_fault(struct vm_fault *vmf, const struct iomap_ops *ops) | 1329 | static int dax_iomap_pmd_fault(struct vm_fault *vmf, |
1330 | const struct iomap_ops *ops) | ||
1342 | { | 1331 | { |
1343 | struct vm_area_struct *vma = vmf->vma; | 1332 | struct vm_area_struct *vma = vmf->vma; |
1344 | struct address_space *mapping = vma->vm_file->f_mapping; | 1333 | struct address_space *mapping = vma->vm_file->f_mapping; |
@@ -1446,5 +1435,33 @@ out: | |||
1446 | trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result); | 1435 | trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result); |
1447 | return result; | 1436 | return result; |
1448 | } | 1437 | } |
1449 | EXPORT_SYMBOL_GPL(dax_iomap_pmd_fault); | 1438 | #else |
1439 | static int dax_iomap_pmd_fault(struct vm_fault *vmf, const struct iomap_ops *ops) | ||
1440 | { | ||
1441 | return VM_FAULT_FALLBACK; | ||
1442 | } | ||
1450 | #endif /* CONFIG_FS_DAX_PMD */ | 1443 | #endif /* CONFIG_FS_DAX_PMD */ |
1444 | |||
1445 | /** | ||
1446 | * dax_iomap_fault - handle a page fault on a DAX file | ||
1447 | * @vmf: The description of the fault | ||
1448 | * @ops: iomap ops passed from the file system | ||
1449 | * | ||
1450 | * When a page fault occurs, filesystems may call this helper in | ||
1451 | * their fault handler for DAX files. dax_iomap_fault() assumes the caller | ||
1452 | * has done all the necessary locking for page fault to proceed | ||
1453 | * successfully. | ||
1454 | */ | ||
1455 | int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, | ||
1456 | const struct iomap_ops *ops) | ||
1457 | { | ||
1458 | switch (pe_size) { | ||
1459 | case PE_SIZE_PTE: | ||
1460 | return dax_iomap_pte_fault(vmf, ops); | ||
1461 | case PE_SIZE_PMD: | ||
1462 | return dax_iomap_pmd_fault(vmf, ops); | ||
1463 | default: | ||
1464 | return VM_FAULT_FALLBACK; | ||
1465 | } | ||
1466 | } | ||
1467 | EXPORT_SYMBOL_GPL(dax_iomap_fault); | ||
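
With the PTE and PMD paths both made static, filesystems reach DAX faults only through the exported dispatcher, typically once per vm_operations entry point. A caller sketch (exfs_* is hypothetical; per-fs locking such as sb_start_pagefault() omitted):

        static int exfs_huge_fault(struct vm_fault *vmf,
                                   enum page_entry_size pe_size)
        {
                /* dax_iomap_fault() picks the PTE or PMD path internally */
                return dax_iomap_fault(vmf, pe_size, &exfs_iomap_ops);
        }

        static int exfs_fault(struct vm_fault *vmf)
        {
                return exfs_huge_fault(vmf, PE_SIZE_PTE);
        }
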
diff --git a/fs/ext2/file.c b/fs/ext2/file.c index b0f241528a30..b21891a6bfca 100644 --- a/fs/ext2/file.c +++ b/fs/ext2/file.c | |||
@@ -87,19 +87,19 @@ out_unlock: | |||
87 | * The default page_lock and i_size verification done by non-DAX fault paths | 87 | * The default page_lock and i_size verification done by non-DAX fault paths |
88 | * is sufficient because ext2 doesn't support hole punching. | 88 | * is sufficient because ext2 doesn't support hole punching. |
89 | */ | 89 | */ |
90 | static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 90 | static int ext2_dax_fault(struct vm_fault *vmf) |
91 | { | 91 | { |
92 | struct inode *inode = file_inode(vma->vm_file); | 92 | struct inode *inode = file_inode(vmf->vma->vm_file); |
93 | struct ext2_inode_info *ei = EXT2_I(inode); | 93 | struct ext2_inode_info *ei = EXT2_I(inode); |
94 | int ret; | 94 | int ret; |
95 | 95 | ||
96 | if (vmf->flags & FAULT_FLAG_WRITE) { | 96 | if (vmf->flags & FAULT_FLAG_WRITE) { |
97 | sb_start_pagefault(inode->i_sb); | 97 | sb_start_pagefault(inode->i_sb); |
98 | file_update_time(vma->vm_file); | 98 | file_update_time(vmf->vma->vm_file); |
99 | } | 99 | } |
100 | down_read(&ei->dax_sem); | 100 | down_read(&ei->dax_sem); |
101 | 101 | ||
102 | ret = dax_iomap_fault(vma, vmf, &ext2_iomap_ops); | 102 | ret = dax_iomap_fault(vmf, PE_SIZE_PTE, &ext2_iomap_ops); |
103 | 103 | ||
104 | up_read(&ei->dax_sem); | 104 | up_read(&ei->dax_sem); |
105 | if (vmf->flags & FAULT_FLAG_WRITE) | 105 | if (vmf->flags & FAULT_FLAG_WRITE) |
@@ -107,16 +107,15 @@ static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
107 | return ret; | 107 | return ret; |
108 | } | 108 | } |
109 | 109 | ||
110 | static int ext2_dax_pfn_mkwrite(struct vm_area_struct *vma, | 110 | static int ext2_dax_pfn_mkwrite(struct vm_fault *vmf) |
111 | struct vm_fault *vmf) | ||
112 | { | 111 | { |
113 | struct inode *inode = file_inode(vma->vm_file); | 112 | struct inode *inode = file_inode(vmf->vma->vm_file); |
114 | struct ext2_inode_info *ei = EXT2_I(inode); | 113 | struct ext2_inode_info *ei = EXT2_I(inode); |
115 | loff_t size; | 114 | loff_t size; |
116 | int ret; | 115 | int ret; |
117 | 116 | ||
118 | sb_start_pagefault(inode->i_sb); | 117 | sb_start_pagefault(inode->i_sb); |
119 | file_update_time(vma->vm_file); | 118 | file_update_time(vmf->vma->vm_file); |
120 | down_read(&ei->dax_sem); | 119 | down_read(&ei->dax_sem); |
121 | 120 | ||
122 | /* check that the faulting page hasn't raced with truncate */ | 121 | /* check that the faulting page hasn't raced with truncate */ |
@@ -124,7 +123,7 @@ static int ext2_dax_pfn_mkwrite(struct vm_area_struct *vma, | |||
124 | if (vmf->pgoff >= size) | 123 | if (vmf->pgoff >= size) |
125 | ret = VM_FAULT_SIGBUS; | 124 | ret = VM_FAULT_SIGBUS; |
126 | else | 125 | else |
127 | ret = dax_pfn_mkwrite(vma, vmf); | 126 | ret = dax_pfn_mkwrite(vmf); |
128 | 127 | ||
129 | up_read(&ei->dax_sem); | 128 | up_read(&ei->dax_sem); |
130 | sb_end_pagefault(inode->i_sb); | 129 | sb_end_pagefault(inode->i_sb); |
@@ -134,7 +133,7 @@ static int ext2_dax_pfn_mkwrite(struct vm_area_struct *vma, | |||
134 | static const struct vm_operations_struct ext2_dax_vm_ops = { | 133 | static const struct vm_operations_struct ext2_dax_vm_ops = { |
135 | .fault = ext2_dax_fault, | 134 | .fault = ext2_dax_fault, |
136 | /* | 135 | /* |
137 | * .pmd_fault is not supported for DAX because allocation in ext2 | 136 | * .huge_fault is not supported for DAX because allocation in ext2 |
138 | * cannot be reliably aligned to huge page sizes and so pmd faults | 137 | * cannot be reliably aligned to huge page sizes and so pmd faults |
139 | * will always fail and fail back to regular faults. | 138 | * will always fail and fail back to regular faults. |
140 | */ | 139 | */ |
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index cee23b684f47..2fd17e8e4984 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h | |||
@@ -2483,8 +2483,8 @@ extern int ext4_writepage_trans_blocks(struct inode *); | |||
2483 | extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks); | 2483 | extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks); |
2484 | extern int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode, | 2484 | extern int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode, |
2485 | loff_t lstart, loff_t lend); | 2485 | loff_t lstart, loff_t lend); |
2486 | extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); | 2486 | extern int ext4_page_mkwrite(struct vm_fault *vmf); |
2487 | extern int ext4_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf); | 2487 | extern int ext4_filemap_fault(struct vm_fault *vmf); |
2488 | extern qsize_t *ext4_get_reserved_space(struct inode *inode); | 2488 | extern qsize_t *ext4_get_reserved_space(struct inode *inode); |
2489 | extern int ext4_get_projid(struct inode *inode, kprojid_t *projid); | 2489 | extern int ext4_get_projid(struct inode *inode, kprojid_t *projid); |
2490 | extern void ext4_da_update_reserve_space(struct inode *inode, | 2490 | extern void ext4_da_update_reserve_space(struct inode *inode, |
diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 13021a054fc0..8210c1f43556 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c | |||
@@ -253,19 +253,20 @@ out: | |||
253 | } | 253 | } |
254 | 254 | ||
255 | #ifdef CONFIG_FS_DAX | 255 | #ifdef CONFIG_FS_DAX |
256 | static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 256 | static int ext4_dax_huge_fault(struct vm_fault *vmf, |
257 | enum page_entry_size pe_size) | ||
257 | { | 258 | { |
258 | int result; | 259 | int result; |
259 | struct inode *inode = file_inode(vma->vm_file); | 260 | struct inode *inode = file_inode(vmf->vma->vm_file); |
260 | struct super_block *sb = inode->i_sb; | 261 | struct super_block *sb = inode->i_sb; |
261 | bool write = vmf->flags & FAULT_FLAG_WRITE; | 262 | bool write = vmf->flags & FAULT_FLAG_WRITE; |
262 | 263 | ||
263 | if (write) { | 264 | if (write) { |
264 | sb_start_pagefault(sb); | 265 | sb_start_pagefault(sb); |
265 | file_update_time(vma->vm_file); | 266 | file_update_time(vmf->vma->vm_file); |
266 | } | 267 | } |
267 | down_read(&EXT4_I(inode)->i_mmap_sem); | 268 | down_read(&EXT4_I(inode)->i_mmap_sem); |
268 | result = dax_iomap_fault(vma, vmf, &ext4_iomap_ops); | 269 | result = dax_iomap_fault(vmf, pe_size, &ext4_iomap_ops); |
269 | up_read(&EXT4_I(inode)->i_mmap_sem); | 270 | up_read(&EXT4_I(inode)->i_mmap_sem); |
270 | if (write) | 271 | if (write) |
271 | sb_end_pagefault(sb); | 272 | sb_end_pagefault(sb); |
@@ -273,25 +274,9 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
273 | return result; | 274 | return result; |
274 | } | 275 | } |
275 | 276 | ||
276 | static int | 277 | static int ext4_dax_fault(struct vm_fault *vmf) |
277 | ext4_dax_pmd_fault(struct vm_fault *vmf) | ||
278 | { | 278 | { |
279 | int result; | 279 | return ext4_dax_huge_fault(vmf, PE_SIZE_PTE); |
280 | struct inode *inode = file_inode(vmf->vma->vm_file); | ||
281 | struct super_block *sb = inode->i_sb; | ||
282 | bool write = vmf->flags & FAULT_FLAG_WRITE; | ||
283 | |||
284 | if (write) { | ||
285 | sb_start_pagefault(sb); | ||
286 | file_update_time(vmf->vma->vm_file); | ||
287 | } | ||
288 | down_read(&EXT4_I(inode)->i_mmap_sem); | ||
289 | result = dax_iomap_pmd_fault(vmf, &ext4_iomap_ops); | ||
290 | up_read(&EXT4_I(inode)->i_mmap_sem); | ||
291 | if (write) | ||
292 | sb_end_pagefault(sb); | ||
293 | |||
294 | return result; | ||
295 | } | 280 | } |
296 | 281 | ||
297 | /* | 282 | /* |
@@ -303,22 +288,21 @@ ext4_dax_pmd_fault(struct vm_fault *vmf) | |||
303 | * wp_pfn_shared() fails. Thus fault gets retried and things work out as | 288 | * wp_pfn_shared() fails. Thus fault gets retried and things work out as |
304 | * desired. | 289 | * desired. |
305 | */ | 290 | */ |
306 | static int ext4_dax_pfn_mkwrite(struct vm_area_struct *vma, | 291 | static int ext4_dax_pfn_mkwrite(struct vm_fault *vmf) |
307 | struct vm_fault *vmf) | ||
308 | { | 292 | { |
309 | struct inode *inode = file_inode(vma->vm_file); | 293 | struct inode *inode = file_inode(vmf->vma->vm_file); |
310 | struct super_block *sb = inode->i_sb; | 294 | struct super_block *sb = inode->i_sb; |
311 | loff_t size; | 295 | loff_t size; |
312 | int ret; | 296 | int ret; |
313 | 297 | ||
314 | sb_start_pagefault(sb); | 298 | sb_start_pagefault(sb); |
315 | file_update_time(vma->vm_file); | 299 | file_update_time(vmf->vma->vm_file); |
316 | down_read(&EXT4_I(inode)->i_mmap_sem); | 300 | down_read(&EXT4_I(inode)->i_mmap_sem); |
317 | size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT; | 301 | size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT; |
318 | if (vmf->pgoff >= size) | 302 | if (vmf->pgoff >= size) |
319 | ret = VM_FAULT_SIGBUS; | 303 | ret = VM_FAULT_SIGBUS; |
320 | else | 304 | else |
321 | ret = dax_pfn_mkwrite(vma, vmf); | 305 | ret = dax_pfn_mkwrite(vmf); |
322 | up_read(&EXT4_I(inode)->i_mmap_sem); | 306 | up_read(&EXT4_I(inode)->i_mmap_sem); |
323 | sb_end_pagefault(sb); | 307 | sb_end_pagefault(sb); |
324 | 308 | ||
@@ -327,7 +311,7 @@ static int ext4_dax_pfn_mkwrite(struct vm_area_struct *vma, | |||
327 | 311 | ||
328 | static const struct vm_operations_struct ext4_dax_vm_ops = { | 312 | static const struct vm_operations_struct ext4_dax_vm_ops = { |
329 | .fault = ext4_dax_fault, | 313 | .fault = ext4_dax_fault, |
330 | .pmd_fault = ext4_dax_pmd_fault, | 314 | .huge_fault = ext4_dax_huge_fault, |
331 | .page_mkwrite = ext4_dax_fault, | 315 | .page_mkwrite = ext4_dax_fault, |
332 | .pfn_mkwrite = ext4_dax_pfn_mkwrite, | 316 | .pfn_mkwrite = ext4_dax_pfn_mkwrite, |
333 | }; | 317 | }; |
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 75212a6e69f8..41d8e53e5a7f 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
@@ -5821,8 +5821,9 @@ static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh) | |||
5821 | return !buffer_mapped(bh); | 5821 | return !buffer_mapped(bh); |
5822 | } | 5822 | } |
5823 | 5823 | ||
5824 | int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | 5824 | int ext4_page_mkwrite(struct vm_fault *vmf) |
5825 | { | 5825 | { |
5826 | struct vm_area_struct *vma = vmf->vma; | ||
5826 | struct page *page = vmf->page; | 5827 | struct page *page = vmf->page; |
5827 | loff_t size; | 5828 | loff_t size; |
5828 | unsigned long len; | 5829 | unsigned long len; |
@@ -5912,13 +5913,13 @@ out: | |||
5912 | return ret; | 5913 | return ret; |
5913 | } | 5914 | } |
5914 | 5915 | ||
5915 | int ext4_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 5916 | int ext4_filemap_fault(struct vm_fault *vmf) |
5916 | { | 5917 | { |
5917 | struct inode *inode = file_inode(vma->vm_file); | 5918 | struct inode *inode = file_inode(vmf->vma->vm_file); |
5918 | int err; | 5919 | int err; |
5919 | 5920 | ||
5920 | down_read(&EXT4_I(inode)->i_mmap_sem); | 5921 | down_read(&EXT4_I(inode)->i_mmap_sem); |
5921 | err = filemap_fault(vma, vmf); | 5922 | err = filemap_fault(vmf); |
5922 | up_read(&EXT4_I(inode)->i_mmap_sem); | 5923 | up_read(&EXT4_I(inode)->i_mmap_sem); |
5923 | 5924 | ||
5924 | return err; | 5925 | return err; |
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 49f10dce817d..1edc86e874e3 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c | |||
@@ -32,11 +32,10 @@ | |||
32 | #include "trace.h" | 32 | #include "trace.h" |
33 | #include <trace/events/f2fs.h> | 33 | #include <trace/events/f2fs.h> |
34 | 34 | ||
35 | static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma, | 35 | static int f2fs_vm_page_mkwrite(struct vm_fault *vmf) |
36 | struct vm_fault *vmf) | ||
37 | { | 36 | { |
38 | struct page *page = vmf->page; | 37 | struct page *page = vmf->page; |
39 | struct inode *inode = file_inode(vma->vm_file); | 38 | struct inode *inode = file_inode(vmf->vma->vm_file); |
40 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); | 39 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
41 | struct dnode_of_data dn; | 40 | struct dnode_of_data dn; |
42 | int err; | 41 | int err; |
@@ -58,7 +57,7 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma, | |||
58 | 57 | ||
59 | f2fs_balance_fs(sbi, dn.node_changed); | 58 | f2fs_balance_fs(sbi, dn.node_changed); |
60 | 59 | ||
61 | file_update_time(vma->vm_file); | 60 | file_update_time(vmf->vma->vm_file); |
62 | lock_page(page); | 61 | lock_page(page); |
63 | if (unlikely(page->mapping != inode->i_mapping || | 62 | if (unlikely(page->mapping != inode->i_mapping || |
64 | page_offset(page) > i_size_read(inode) || | 63 | page_offset(page) > i_size_read(inode) || |
diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 2401c5dabb2a..e80bfd06daf5 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c | |||
@@ -2043,12 +2043,12 @@ static void fuse_vma_close(struct vm_area_struct *vma) | |||
2043 | * - sync(2) | 2043 | * - sync(2) |
2044 | * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER | 2044 | * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER |
2045 | */ | 2045 | */ |
2046 | static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | 2046 | static int fuse_page_mkwrite(struct vm_fault *vmf) |
2047 | { | 2047 | { |
2048 | struct page *page = vmf->page; | 2048 | struct page *page = vmf->page; |
2049 | struct inode *inode = file_inode(vma->vm_file); | 2049 | struct inode *inode = file_inode(vmf->vma->vm_file); |
2050 | 2050 | ||
2051 | file_update_time(vma->vm_file); | 2051 | file_update_time(vmf->vma->vm_file); |
2052 | lock_page(page); | 2052 | lock_page(page); |
2053 | if (page->mapping != inode->i_mapping) { | 2053 | if (page->mapping != inode->i_mapping) { |
2054 | unlock_page(page); | 2054 | unlock_page(page); |
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index 016c11eaca7c..6fe2a59c6a9a 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c | |||
@@ -379,10 +379,10 @@ static int gfs2_allocate_page_backing(struct page *page) | |||
379 | * blocks allocated on disk to back that page. | 379 | * blocks allocated on disk to back that page. |
380 | */ | 380 | */ |
381 | 381 | ||
382 | static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | 382 | static int gfs2_page_mkwrite(struct vm_fault *vmf) |
383 | { | 383 | { |
384 | struct page *page = vmf->page; | 384 | struct page *page = vmf->page; |
385 | struct inode *inode = file_inode(vma->vm_file); | 385 | struct inode *inode = file_inode(vmf->vma->vm_file); |
386 | struct gfs2_inode *ip = GFS2_I(inode); | 386 | struct gfs2_inode *ip = GFS2_I(inode); |
387 | struct gfs2_sbd *sdp = GFS2_SB(inode); | 387 | struct gfs2_sbd *sdp = GFS2_SB(inode); |
388 | struct gfs2_alloc_parms ap = { .aflags = 0, }; | 388 | struct gfs2_alloc_parms ap = { .aflags = 0, }; |
@@ -399,7 +399,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
399 | if (ret) | 399 | if (ret) |
400 | goto out; | 400 | goto out; |
401 | 401 | ||
402 | gfs2_size_hint(vma->vm_file, pos, PAGE_SIZE); | 402 | gfs2_size_hint(vmf->vma->vm_file, pos, PAGE_SIZE); |
403 | 403 | ||
404 | gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); | 404 | gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); |
405 | ret = gfs2_glock_nq(&gh); | 405 | ret = gfs2_glock_nq(&gh); |
@@ -407,7 +407,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
407 | goto out_uninit; | 407 | goto out_uninit; |
408 | 408 | ||
409 | /* Update file times before taking page lock */ | 409 | /* Update file times before taking page lock */ |
410 | file_update_time(vma->vm_file); | 410 | file_update_time(vmf->vma->vm_file); |
411 | 411 | ||
412 | set_bit(GLF_DIRTY, &ip->i_gl->gl_flags); | 412 | set_bit(GLF_DIRTY, &ip->i_gl->gl_flags); |
413 | set_bit(GIF_SW_PAGED, &ip->i_flags); | 413 | set_bit(GIF_SW_PAGED, &ip->i_flags); |
diff --git a/fs/iomap.c b/fs/iomap.c index d89f70bbb952..d209f42cdcb8 100644 --- a/fs/iomap.c +++ b/fs/iomap.c | |||
@@ -445,11 +445,10 @@ iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length, | |||
445 | return length; | 445 | return length; |
446 | } | 446 | } |
447 | 447 | ||
448 | int iomap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, | 448 | int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops) |
449 | const struct iomap_ops *ops) | ||
450 | { | 449 | { |
451 | struct page *page = vmf->page; | 450 | struct page *page = vmf->page; |
452 | struct inode *inode = file_inode(vma->vm_file); | 451 | struct inode *inode = file_inode(vmf->vma->vm_file); |
453 | unsigned long length; | 452 | unsigned long length; |
454 | loff_t offset, size; | 453 | loff_t offset, size; |
455 | ssize_t ret; | 454 | ssize_t ret; |
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c index 78219d5644e9..4f0535890b30 100644 --- a/fs/kernfs/file.c +++ b/fs/kernfs/file.c | |||
@@ -348,9 +348,9 @@ static void kernfs_vma_open(struct vm_area_struct *vma) | |||
348 | kernfs_put_active(of->kn); | 348 | kernfs_put_active(of->kn); |
349 | } | 349 | } |
350 | 350 | ||
351 | static int kernfs_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 351 | static int kernfs_vma_fault(struct vm_fault *vmf) |
352 | { | 352 | { |
353 | struct file *file = vma->vm_file; | 353 | struct file *file = vmf->vma->vm_file; |
354 | struct kernfs_open_file *of = kernfs_of(file); | 354 | struct kernfs_open_file *of = kernfs_of(file); |
355 | int ret; | 355 | int ret; |
356 | 356 | ||
@@ -362,16 +362,15 @@ static int kernfs_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
362 | 362 | ||
363 | ret = VM_FAULT_SIGBUS; | 363 | ret = VM_FAULT_SIGBUS; |
364 | if (of->vm_ops->fault) | 364 | if (of->vm_ops->fault) |
365 | ret = of->vm_ops->fault(vma, vmf); | 365 | ret = of->vm_ops->fault(vmf); |
366 | 366 | ||
367 | kernfs_put_active(of->kn); | 367 | kernfs_put_active(of->kn); |
368 | return ret; | 368 | return ret; |
369 | } | 369 | } |
370 | 370 | ||
371 | static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma, | 371 | static int kernfs_vma_page_mkwrite(struct vm_fault *vmf) |
372 | struct vm_fault *vmf) | ||
373 | { | 372 | { |
374 | struct file *file = vma->vm_file; | 373 | struct file *file = vmf->vma->vm_file; |
375 | struct kernfs_open_file *of = kernfs_of(file); | 374 | struct kernfs_open_file *of = kernfs_of(file); |
376 | int ret; | 375 | int ret; |
377 | 376 | ||
@@ -383,7 +382,7 @@ static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma, | |||
383 | 382 | ||
384 | ret = 0; | 383 | ret = 0; |
385 | if (of->vm_ops->page_mkwrite) | 384 | if (of->vm_ops->page_mkwrite) |
386 | ret = of->vm_ops->page_mkwrite(vma, vmf); | 385 | ret = of->vm_ops->page_mkwrite(vmf); |
387 | else | 386 | else |
388 | file_update_time(file); | 387 | file_update_time(file); |
389 | 388 | ||
diff --git a/fs/ncpfs/mmap.c b/fs/ncpfs/mmap.c index 39f57bef8531..0c3905e0542e 100644 --- a/fs/ncpfs/mmap.c +++ b/fs/ncpfs/mmap.c | |||
@@ -27,10 +27,9 @@ | |||
27 | * XXX: how are we excluding truncate/invalidate here? Maybe need to lock | 27 | * XXX: how are we excluding truncate/invalidate here? Maybe need to lock |
28 | * page? | 28 | * page? |
29 | */ | 29 | */ |
30 | static int ncp_file_mmap_fault(struct vm_area_struct *area, | 30 | static int ncp_file_mmap_fault(struct vm_fault *vmf) |
31 | struct vm_fault *vmf) | ||
32 | { | 31 | { |
33 | struct inode *inode = file_inode(area->vm_file); | 32 | struct inode *inode = file_inode(vmf->vma->vm_file); |
34 | char *pg_addr; | 33 | char *pg_addr; |
35 | unsigned int already_read; | 34 | unsigned int already_read; |
36 | unsigned int count; | 35 | unsigned int count; |
@@ -90,7 +89,7 @@ static int ncp_file_mmap_fault(struct vm_area_struct *area, | |||
90 | * -- nyc | 89 | * -- nyc |
91 | */ | 90 | */ |
92 | count_vm_event(PGMAJFAULT); | 91 | count_vm_event(PGMAJFAULT); |
93 | mem_cgroup_count_vm_event(area->vm_mm, PGMAJFAULT); | 92 | mem_cgroup_count_vm_event(vmf->vma->vm_mm, PGMAJFAULT); |
94 | return VM_FAULT_MAJOR; | 93 | return VM_FAULT_MAJOR; |
95 | } | 94 | } |
96 | 95 | ||
diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 26dbe8b0c10d..668213984d68 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c | |||
@@ -528,10 +528,10 @@ const struct address_space_operations nfs_file_aops = { | |||
528 | * writable, implying that someone is about to modify the page through a | 528 | * writable, implying that someone is about to modify the page through a |
529 | * shared-writable mapping | 529 | * shared-writable mapping |
530 | */ | 530 | */ |
531 | static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | 531 | static int nfs_vm_page_mkwrite(struct vm_fault *vmf) |
532 | { | 532 | { |
533 | struct page *page = vmf->page; | 533 | struct page *page = vmf->page; |
534 | struct file *filp = vma->vm_file; | 534 | struct file *filp = vmf->vma->vm_file; |
535 | struct inode *inode = file_inode(filp); | 535 | struct inode *inode = file_inode(filp); |
536 | unsigned pagelen; | 536 | unsigned pagelen; |
537 | int ret = VM_FAULT_NOPAGE; | 537 | int ret = VM_FAULT_NOPAGE; |
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c index 547381f3ce13..c5fa3dee72fc 100644 --- a/fs/nilfs2/file.c +++ b/fs/nilfs2/file.c | |||
@@ -51,8 +51,9 @@ int nilfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) | |||
51 | return err; | 51 | return err; |
52 | } | 52 | } |
53 | 53 | ||
54 | static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | 54 | static int nilfs_page_mkwrite(struct vm_fault *vmf) |
55 | { | 55 | { |
56 | struct vm_area_struct *vma = vmf->vma; | ||
56 | struct page *page = vmf->page; | 57 | struct page *page = vmf->page; |
57 | struct inode *inode = file_inode(vma->vm_file); | 58 | struct inode *inode = file_inode(vma->vm_file); |
58 | struct nilfs_transaction_info ti; | 59 | struct nilfs_transaction_info ti; |
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c index 429088786e93..098f5c712569 100644 --- a/fs/ocfs2/mmap.c +++ b/fs/ocfs2/mmap.c | |||
@@ -44,17 +44,18 @@ | |||
44 | #include "ocfs2_trace.h" | 44 | #include "ocfs2_trace.h" |
45 | 45 | ||
46 | 46 | ||
47 | static int ocfs2_fault(struct vm_area_struct *area, struct vm_fault *vmf) | 47 | static int ocfs2_fault(struct vm_fault *vmf) |
48 | { | 48 | { |
49 | struct vm_area_struct *vma = vmf->vma; | ||
49 | sigset_t oldset; | 50 | sigset_t oldset; |
50 | int ret; | 51 | int ret; |
51 | 52 | ||
52 | ocfs2_block_signals(&oldset); | 53 | ocfs2_block_signals(&oldset); |
53 | ret = filemap_fault(area, vmf); | 54 | ret = filemap_fault(vmf); |
54 | ocfs2_unblock_signals(&oldset); | 55 | ocfs2_unblock_signals(&oldset); |
55 | 56 | ||
56 | trace_ocfs2_fault(OCFS2_I(area->vm_file->f_mapping->host)->ip_blkno, | 57 | trace_ocfs2_fault(OCFS2_I(vma->vm_file->f_mapping->host)->ip_blkno, |
57 | area, vmf->page, vmf->pgoff); | 58 | vma, vmf->page, vmf->pgoff); |
58 | return ret; | 59 | return ret; |
59 | } | 60 | } |
60 | 61 | ||
@@ -127,10 +128,10 @@ out: | |||
127 | return ret; | 128 | return ret; |
128 | } | 129 | } |
129 | 130 | ||
130 | static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | 131 | static int ocfs2_page_mkwrite(struct vm_fault *vmf) |
131 | { | 132 | { |
132 | struct page *page = vmf->page; | 133 | struct page *page = vmf->page; |
133 | struct inode *inode = file_inode(vma->vm_file); | 134 | struct inode *inode = file_inode(vmf->vma->vm_file); |
134 | struct buffer_head *di_bh = NULL; | 135 | struct buffer_head *di_bh = NULL; |
135 | sigset_t oldset; | 136 | sigset_t oldset; |
136 | int ret; | 137 | int ret; |
@@ -160,7 +161,7 @@ static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
160 | */ | 161 | */ |
161 | down_write(&OCFS2_I(inode)->ip_alloc_sem); | 162 | down_write(&OCFS2_I(inode)->ip_alloc_sem); |
162 | 163 | ||
163 | ret = __ocfs2_page_mkwrite(vma->vm_file, di_bh, page); | 164 | ret = __ocfs2_page_mkwrite(vmf->vma->vm_file, di_bh, page); |
164 | 165 | ||
165 | up_write(&OCFS2_I(inode)->ip_alloc_sem); | 166 | up_write(&OCFS2_I(inode)->ip_alloc_sem); |
166 | 167 | ||
diff --git a/fs/proc/base.c b/fs/proc/base.c index b73b4de8fb36..b8f06273353e 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
@@ -292,101 +292,69 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf, | |||
292 | } | 292 | } |
293 | } else { | 293 | } else { |
294 | /* | 294 | /* |
295 | * Command line (1 string) occupies ARGV and maybe | ||
296 | * extends into ENVP. | ||
297 | */ | ||
298 | if (len1 + len2 <= *pos) | ||
299 | goto skip_argv_envp; | ||
300 | if (len1 <= *pos) | ||
301 | goto skip_argv; | ||
302 | |||
303 | p = arg_start + *pos; | ||
304 | len = len1 - *pos; | ||
305 | while (count > 0 && len > 0) { | ||
306 | unsigned int _count, l; | ||
307 | int nr_read; | ||
308 | bool final; | ||
309 | |||
310 | _count = min3(count, len, PAGE_SIZE); | ||
311 | nr_read = access_remote_vm(mm, p, page, _count, 0); | ||
312 | if (nr_read < 0) | ||
313 | rv = nr_read; | ||
314 | if (nr_read <= 0) | ||
315 | goto out_free_page; | ||
316 | |||
317 | /* | ||
318 | * Command line can be shorter than whole ARGV | ||
319 | * even if last "marker" byte says it is not. | ||
320 | */ | ||
321 | final = false; | ||
322 | l = strnlen(page, nr_read); | ||
323 | if (l < nr_read) { | ||
324 | nr_read = l; | ||
325 | final = true; | ||
326 | } | ||
327 | |||
328 | if (copy_to_user(buf, page, nr_read)) { | ||
329 | rv = -EFAULT; | ||
330 | goto out_free_page; | ||
331 | } | ||
332 | |||
333 | p += nr_read; | ||
334 | len -= nr_read; | ||
335 | buf += nr_read; | ||
336 | count -= nr_read; | ||
337 | rv += nr_read; | ||
338 | |||
339 | if (final) | ||
340 | goto out_free_page; | ||
341 | } | ||
342 | skip_argv: | ||
343 | /* | ||
344 | * Command line (1 string) occupies ARGV and | 295 | * Command line (1 string) occupies ARGV and |
345 | * extends into ENVP. | 296 | * extends into ENVP. |
346 | */ | 297 | */ |
347 | if (len1 <= *pos) { | 298 | struct { |
348 | p = env_start + *pos - len1; | 299 | unsigned long p; |
349 | len = len1 + len2 - *pos; | 300 | unsigned long len; |
350 | } else { | 301 | } cmdline[2] = { |
351 | p = env_start; | 302 | { .p = arg_start, .len = len1 }, |
352 | len = len2; | 303 | { .p = env_start, .len = len2 }, |
304 | }; | ||
305 | loff_t pos1 = *pos; | ||
306 | unsigned int i; | ||
307 | |||
308 | i = 0; | ||
309 | while (i < 2 && pos1 >= cmdline[i].len) { | ||
310 | pos1 -= cmdline[i].len; | ||
311 | i++; | ||
353 | } | 312 | } |
354 | while (count > 0 && len > 0) { | 313 | while (i < 2) { |
355 | unsigned int _count, l; | 314 | p = cmdline[i].p + pos1; |
356 | int nr_read; | 315 | len = cmdline[i].len - pos1; |
357 | bool final; | 316 | while (count > 0 && len > 0) { |
358 | 317 | unsigned int _count, l; | |
359 | _count = min3(count, len, PAGE_SIZE); | 318 | int nr_read; |
360 | nr_read = access_remote_vm(mm, p, page, _count, 0); | 319 | bool final; |
361 | if (nr_read < 0) | 320 | |
362 | rv = nr_read; | 321 | _count = min3(count, len, PAGE_SIZE); |
363 | if (nr_read <= 0) | 322 | nr_read = access_remote_vm(mm, p, page, _count, 0); |
364 | goto out_free_page; | 323 | if (nr_read < 0) |
365 | 324 | rv = nr_read; | |
366 | /* Find EOS. */ | 325 | if (nr_read <= 0) |
367 | final = false; | 326 | goto out_free_page; |
368 | l = strnlen(page, nr_read); | 327 | |
369 | if (l < nr_read) { | 328 | /* |
370 | nr_read = l; | 329 | * Command line can be shorter than whole ARGV |
371 | final = true; | 330 | * even if last "marker" byte says it is not. |
372 | } | 331 | */ |
373 | 332 | final = false; | |
374 | if (copy_to_user(buf, page, nr_read)) { | 333 | l = strnlen(page, nr_read); |
375 | rv = -EFAULT; | 334 | if (l < nr_read) { |
376 | goto out_free_page; | 335 | nr_read = l; |
336 | final = true; | ||
337 | } | ||
338 | |||
339 | if (copy_to_user(buf, page, nr_read)) { | ||
340 | rv = -EFAULT; | ||
341 | goto out_free_page; | ||
342 | } | ||
343 | |||
344 | p += nr_read; | ||
345 | len -= nr_read; | ||
346 | buf += nr_read; | ||
347 | count -= nr_read; | ||
348 | rv += nr_read; | ||
349 | |||
350 | if (final) | ||
351 | goto out_free_page; | ||
377 | } | 352 | } |
378 | 353 | ||
379 | p += nr_read; | 354 | /* Only first chunk can be read partially. */ |
380 | len -= nr_read; | 355 | pos1 = 0; |
381 | buf += nr_read; | 356 | i++; |
382 | count -= nr_read; | ||
383 | rv += nr_read; | ||
384 | |||
385 | if (final) | ||
386 | goto out_free_page; | ||
387 | } | 357 | } |
388 | skip_argv_envp: | ||
389 | ; | ||
390 | } | 358 | } |
391 | 359 | ||
392 | out_free_page: | 360 | out_free_page: |
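
The rewrite folds the duplicated ARGV and ENVP copy loops into one loop over a two-element {pointer, length} array; seeking consumes whole segments first, so only the first segment read can start mid-way. Stripped of access_remote_vm() and the EOS scan, the control flow reduces to this standalone model (hypothetical helper, for illustration only):

        #include <stddef.h>
        #include <string.h>

        struct seg { const char *p; size_t len; };

        /* Read up to count bytes starting at offset pos within the
         * concatenation seg[0] + seg[1]. */
        static size_t read_concat(const struct seg seg[2], size_t pos,
                                  char *out, size_t count)
        {
                size_t i = 0, done = 0;

                while (i < 2 && pos >= seg[i].len) {
                        pos -= seg[i].len;      /* skip whole segments */
                        i++;
                }
                for (; i < 2 && done < count; i++) {
                        size_t n = seg[i].len - pos;

                        if (n > count - done)
                                n = count - done;
                        memcpy(out + done, seg[i].p + pos, n);
                        done += n;
                        pos = 0;        /* later segments start at offset 0 */
                }
                return done;
        }
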
@@ -729,11 +697,11 @@ static int proc_pid_permission(struct inode *inode, int mask) | |||
729 | task = get_proc_task(inode); | 697 | task = get_proc_task(inode); |
730 | if (!task) | 698 | if (!task) |
731 | return -ESRCH; | 699 | return -ESRCH; |
732 | has_perms = has_pid_permissions(pid, task, 1); | 700 | has_perms = has_pid_permissions(pid, task, HIDEPID_NO_ACCESS); |
733 | put_task_struct(task); | 701 | put_task_struct(task); |
734 | 702 | ||
735 | if (!has_perms) { | 703 | if (!has_perms) { |
736 | if (pid->hide_pid == 2) { | 704 | if (pid->hide_pid == HIDEPID_INVISIBLE) { |
737 | /* | 705 | /* |
738 | * Let's make getdents(), stat(), and open() | 706 | * Let's make getdents(), stat(), and open() |
739 | * consistent with each other. If a process | 707 | * consistent with each other. If a process |
@@ -1769,7 +1737,7 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) | |||
1769 | stat->gid = GLOBAL_ROOT_GID; | 1737 | stat->gid = GLOBAL_ROOT_GID; |
1770 | task = pid_task(proc_pid(inode), PIDTYPE_PID); | 1738 | task = pid_task(proc_pid(inode), PIDTYPE_PID); |
1771 | if (task) { | 1739 | if (task) { |
1772 | if (!has_pid_permissions(pid, task, 2)) { | 1740 | if (!has_pid_permissions(pid, task, HIDEPID_INVISIBLE)) { |
1773 | rcu_read_unlock(); | 1741 | rcu_read_unlock(); |
1774 | /* | 1742 | /* |
1775 | * This doesn't prevent learning whether PID exists, | 1743 | * This doesn't prevent learning whether PID exists, |
@@ -3200,7 +3168,7 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx) | |||
3200 | int len; | 3168 | int len; |
3201 | 3169 | ||
3202 | cond_resched(); | 3170 | cond_resched(); |
3203 | if (!has_pid_permissions(ns, iter.task, 2)) | 3171 | if (!has_pid_permissions(ns, iter.task, HIDEPID_INVISIBLE)) |
3204 | continue; | 3172 | continue; |
3205 | 3173 | ||
3206 | len = snprintf(name, sizeof(name), "%d", iter.tgid); | 3174 | len = snprintf(name, sizeof(name), "%d", iter.tgid); |
diff --git a/fs/proc/generic.c b/fs/proc/generic.c index f6a01f09f79d..06c73904d497 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c | |||
@@ -57,9 +57,9 @@ static struct proc_dir_entry *pde_subdir_find(struct proc_dir_entry *dir, | |||
57 | struct rb_node *node = dir->subdir.rb_node; | 57 | struct rb_node *node = dir->subdir.rb_node; |
58 | 58 | ||
59 | while (node) { | 59 | while (node) { |
60 | struct proc_dir_entry *de = container_of(node, | 60 | struct proc_dir_entry *de = rb_entry(node, |
61 | struct proc_dir_entry, | 61 | struct proc_dir_entry, |
62 | subdir_node); | 62 | subdir_node); |
63 | int result = proc_match(len, name, de); | 63 | int result = proc_match(len, name, de); |
64 | 64 | ||
65 | if (result < 0) | 65 | if (result < 0) |
@@ -80,8 +80,9 @@ static bool pde_subdir_insert(struct proc_dir_entry *dir, | |||
80 | 80 | ||
81 | /* Figure out where to put new node */ | 81 | /* Figure out where to put new node */ |
82 | while (*new) { | 82 | while (*new) { |
83 | struct proc_dir_entry *this = | 83 | struct proc_dir_entry *this = rb_entry(*new, |
84 | container_of(*new, struct proc_dir_entry, subdir_node); | 84 | struct proc_dir_entry, |
85 | subdir_node); | ||
85 | int result = proc_match(de->namelen, de->name, this); | 86 | int result = proc_match(de->namelen, de->name, this); |
86 | 87 | ||
87 | parent = *new; | 88 | parent = *new; |
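
Both hunks swap an open-coded container_of() for rb_entry(), which is the same macro specialized for red-black tree nodes (include/linux/rbtree.h):

        #define rb_entry(ptr, type, member) container_of(ptr, type, member)
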
diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 7ad9ed7958af..2cc7a8030275 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c | |||
@@ -107,7 +107,7 @@ static int proc_show_options(struct seq_file *seq, struct dentry *root) | |||
107 | 107 | ||
108 | if (!gid_eq(pid->pid_gid, GLOBAL_ROOT_GID)) | 108 | if (!gid_eq(pid->pid_gid, GLOBAL_ROOT_GID)) |
109 | seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, pid->pid_gid)); | 109 | seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, pid->pid_gid)); |
110 | if (pid->hide_pid != 0) | 110 | if (pid->hide_pid != HIDEPID_OFF) |
111 | seq_printf(seq, ",hidepid=%u", pid->hide_pid); | 111 | seq_printf(seq, ",hidepid=%u", pid->hide_pid); |
112 | 112 | ||
113 | return 0; | 113 | return 0; |
diff --git a/fs/proc/root.c b/fs/proc/root.c index 1988440b2049..b90da888b81a 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c | |||
@@ -58,7 +58,8 @@ int proc_parse_options(char *options, struct pid_namespace *pid) | |||
58 | case Opt_hidepid: | 58 | case Opt_hidepid: |
59 | if (match_int(&args[0], &option)) | 59 | if (match_int(&args[0], &option)) |
60 | return 0; | 60 | return 0; |
61 | if (option < 0 || option > 2) { | 61 | if (option < HIDEPID_OFF || |
62 | option > HIDEPID_INVISIBLE) { | ||
62 | pr_err("proc: hidepid value must be between 0 and 2.\n"); | 63 | pr_err("proc: hidepid value must be between 0 and 2.\n"); |
63 | return 0; | 64 | return 0; |
64 | } | 65 | } |
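
All of the hidepid changes in fs/proc substitute named constants for the bare 0/1/2. The enum introduced by this series (in include/linux/pid_namespace.h; sketched here from the values the hunks imply) is:

        enum {  /* hidepid= mount option values */
                HIDEPID_OFF       = 0,
                HIDEPID_NO_ACCESS = 1,
                HIDEPID_INVISIBLE = 2,
        };
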
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c index 5105b1599981..885d445afa0d 100644 --- a/fs/proc/vmcore.c +++ b/fs/proc/vmcore.c | |||
@@ -265,10 +265,10 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer, | |||
265 | * On s390 the fault handler is used for memory regions that can't be mapped | 265 | * On s390 the fault handler is used for memory regions that can't be mapped |
266 | * directly with remap_pfn_range(). | 266 | * directly with remap_pfn_range(). |
267 | */ | 267 | */ |
268 | static int mmap_vmcore_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 268 | static int mmap_vmcore_fault(struct vm_fault *vmf) |
269 | { | 269 | { |
270 | #ifdef CONFIG_S390 | 270 | #ifdef CONFIG_S390 |
271 | struct address_space *mapping = vma->vm_file->f_mapping; | 271 | struct address_space *mapping = vmf->vma->vm_file->f_mapping; |
272 | pgoff_t index = vmf->pgoff; | 272 | pgoff_t index = vmf->pgoff; |
273 | struct page *page; | 273 | struct page *page; |
274 | loff_t offset; | 274 | loff_t offset; |
@@ -388,7 +388,7 @@ static int remap_oldmem_pfn_checked(struct vm_area_struct *vma, | |||
388 | } | 388 | } |
389 | return 0; | 389 | return 0; |
390 | fail: | 390 | fail: |
391 | do_munmap(vma->vm_mm, from, len); | 391 | do_munmap(vma->vm_mm, from, len, NULL); |
392 | return -EAGAIN; | 392 | return -EAGAIN; |
393 | } | 393 | } |
394 | 394 | ||
@@ -481,7 +481,7 @@ static int mmap_vmcore(struct file *file, struct vm_area_struct *vma) | |||
481 | 481 | ||
482 | return 0; | 482 | return 0; |
483 | fail: | 483 | fail: |
484 | do_munmap(vma->vm_mm, vma->vm_start, len); | 484 | do_munmap(vma->vm_mm, vma->vm_start, len, NULL); |
485 | return -EAGAIN; | 485 | return -EAGAIN; |
486 | } | 486 | } |
487 | #else | 487 | #else |
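
Same userfaultfd plumbing as the do_mmap_pgoff() call in fs/aio.c above: do_munmap() now takes a list head for unmap events, with NULL meaning the caller opts out. Roughly:

        int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
                      struct list_head *uf);
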
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c index 729677e18e36..efab7b64925b 100644 --- a/fs/pstore/platform.c +++ b/fs/pstore/platform.c | |||
@@ -342,31 +342,35 @@ static int compress_lz4(const void *in, void *out, size_t inlen, size_t outlen) | |||
342 | { | 342 | { |
343 | int ret; | 343 | int ret; |
344 | 344 | ||
345 | ret = lz4_compress(in, inlen, out, &outlen, workspace); | 345 | ret = LZ4_compress_default(in, out, inlen, outlen, workspace); |
346 | if (ret) { | 346 | if (!ret) { |
347 | pr_err("lz4_compress error, ret = %d!\n", ret); | 347 | pr_err("LZ4_compress_default error; compression failed!\n"); |
348 | return -EIO; | 348 | return -EIO; |
349 | } | 349 | } |
350 | 350 | ||
351 | return outlen; | 351 | return ret; |
352 | } | 352 | } |
353 | 353 | ||
354 | static int decompress_lz4(void *in, void *out, size_t inlen, size_t outlen) | 354 | static int decompress_lz4(void *in, void *out, size_t inlen, size_t outlen) |
355 | { | 355 | { |
356 | int ret; | 356 | int ret; |
357 | 357 | ||
358 | ret = lz4_decompress_unknownoutputsize(in, inlen, out, &outlen); | 358 | ret = LZ4_decompress_safe(in, out, inlen, outlen); |
359 | if (ret) { | 359 | if (ret < 0) { |
360 | pr_err("lz4_decompress error, ret = %d!\n", ret); | 360 | /* |
361 | * LZ4_decompress_safe will return an error code | ||
362 | * (< 0) if decompression failed | ||
363 | */ | ||
364 | pr_err("LZ4_decompress_safe error, ret = %d!\n", ret); | ||
361 | return -EIO; | 365 | return -EIO; |
362 | } | 366 | } |
363 | 367 | ||
364 | return outlen; | 368 | return ret; |
365 | } | 369 | } |
366 | 370 | ||
367 | static void allocate_lz4(void) | 371 | static void allocate_lz4(void) |
368 | { | 372 | { |
369 | big_oops_buf_sz = lz4_compressbound(psinfo->bufsize); | 373 | big_oops_buf_sz = LZ4_compressBound(psinfo->bufsize); |
370 | big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL); | 374 | big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL); |
371 | if (big_oops_buf) { | 375 | if (big_oops_buf) { |
372 | workspace = kmalloc(LZ4_MEM_COMPRESS, GFP_KERNEL); | 376 | workspace = kmalloc(LZ4_MEM_COMPRESS, GFP_KERNEL); |
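
The updated LZ4 interface reports sizes through the return value rather than an in/out parameter: LZ4_compress_default() returns the number of bytes written to the destination, with 0 meaning the output did not fit, and LZ4_decompress_safe() returns the decompressed length or a negative error code. A minimal round-trip sketch against the new API (buffer names and sizes are illustrative):

    void *wrkmem = kmalloc(LZ4_MEM_COMPRESS, GFP_KERNEL); /* ~16 KiB scratch */
    int clen, dlen;

    if (!wrkmem)
            return -ENOMEM;
    clen = LZ4_compress_default(src, cbuf, src_len, cbuf_len, wrkmem);
    kfree(wrkmem);
    if (!clen)
            return -EIO;    /* 0 means the output did not fit */

    dlen = LZ4_decompress_safe(cbuf, dbuf, clen, dbuf_len);
    if (dlen < 0)
            return -EIO;    /* negative return is a decode error */
    /* dlen bytes of dbuf are now valid */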
diff --git a/fs/squashfs/lz4_wrapper.c b/fs/squashfs/lz4_wrapper.c index ff4468bd18b0..95da65366548 100644 --- a/fs/squashfs/lz4_wrapper.c +++ b/fs/squashfs/lz4_wrapper.c | |||
@@ -97,7 +97,6 @@ static int lz4_uncompress(struct squashfs_sb_info *msblk, void *strm, | |||
97 | struct squashfs_lz4 *stream = strm; | 97 | struct squashfs_lz4 *stream = strm; |
98 | void *buff = stream->input, *data; | 98 | void *buff = stream->input, *data; |
99 | int avail, i, bytes = length, res; | 99 | int avail, i, bytes = length, res; |
100 | size_t dest_len = output->length; | ||
101 | 100 | ||
102 | for (i = 0; i < b; i++) { | 101 | for (i = 0; i < b; i++) { |
103 | avail = min(bytes, msblk->devblksize - offset); | 102 | avail = min(bytes, msblk->devblksize - offset); |
@@ -108,12 +107,13 @@ static int lz4_uncompress(struct squashfs_sb_info *msblk, void *strm, | |||
108 | put_bh(bh[i]); | 107 | put_bh(bh[i]); |
109 | } | 108 | } |
110 | 109 | ||
111 | res = lz4_decompress_unknownoutputsize(stream->input, length, | 110 | res = LZ4_decompress_safe(stream->input, stream->output, |
112 | stream->output, &dest_len); | 111 | length, output->length); |
113 | if (res) | 112 | |
113 | if (res < 0) | ||
114 | return -EIO; | 114 | return -EIO; |
115 | 115 | ||
116 | bytes = dest_len; | 116 | bytes = res; |
117 | data = squashfs_first_page(output); | 117 | data = squashfs_first_page(output); |
118 | buff = stream->output; | 118 | buff = stream->output; |
119 | while (data) { | 119 | while (data) { |
@@ -128,7 +128,7 @@ static int lz4_uncompress(struct squashfs_sb_info *msblk, void *strm, | |||
128 | } | 128 | } |
129 | squashfs_finish_page(output); | 129 | squashfs_finish_page(output); |
130 | 130 | ||
131 | return dest_len; | 131 | return res; |
132 | } | 132 | } |
133 | 133 | ||
134 | const struct squashfs_decompressor squashfs_lz4_comp_ops = { | 134 | const struct squashfs_decompressor squashfs_lz4_comp_ops = { |
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index b0d783774c96..d9ae86f96df7 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c | |||
@@ -1506,11 +1506,10 @@ static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags) | |||
1506 | * mmap()d file has taken write protection fault and is being made writable. | 1506 | * mmap()d file has taken write protection fault and is being made writable. |
1507 | * UBIFS must ensure page is budgeted for. | 1507 | * UBIFS must ensure page is budgeted for. |
1508 | */ | 1508 | */ |
1509 | static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma, | 1509 | static int ubifs_vm_page_mkwrite(struct vm_fault *vmf) |
1510 | struct vm_fault *vmf) | ||
1511 | { | 1510 | { |
1512 | struct page *page = vmf->page; | 1511 | struct page *page = vmf->page; |
1513 | struct inode *inode = file_inode(vma->vm_file); | 1512 | struct inode *inode = file_inode(vmf->vma->vm_file); |
1514 | struct ubifs_info *c = inode->i_sb->s_fs_info; | 1513 | struct ubifs_info *c = inode->i_sb->s_fs_info; |
1515 | struct timespec now = ubifs_current_time(inode); | 1514 | struct timespec now = ubifs_current_time(inode); |
1516 | struct ubifs_budget_req req = { .new_page = 1 }; | 1515 | struct ubifs_budget_req req = { .new_page = 1 }; |
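
The same vm_fault conversion as in fs/proc/vmcore.c above: ->page_mkwrite (and the other vm_operations_struct callbacks touched by this series) now receives only the fault descriptor. A minimal sketch of what a converted handler looks like under the new callback shape:

    /* sketch: the VMA is recovered from the fault descriptor */
    static int example_page_mkwrite(struct vm_fault *vmf)
    {
            struct inode *inode = file_inode(vmf->vma->vm_file);

            /* ...filesystem-specific budgeting and locking on inode... */
            return VM_FAULT_LOCKED;
    }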
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 18406158e13f..625b7285a37b 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c | |||
@@ -71,6 +71,13 @@ struct userfaultfd_fork_ctx { | |||
71 | struct list_head list; | 71 | struct list_head list; |
72 | }; | 72 | }; |
73 | 73 | ||
74 | struct userfaultfd_unmap_ctx { | ||
75 | struct userfaultfd_ctx *ctx; | ||
76 | unsigned long start; | ||
77 | unsigned long end; | ||
78 | struct list_head list; | ||
79 | }; | ||
80 | |||
74 | struct userfaultfd_wait_queue { | 81 | struct userfaultfd_wait_queue { |
75 | struct uffd_msg msg; | 82 | struct uffd_msg msg; |
76 | wait_queue_t wq; | 83 | wait_queue_t wq; |
@@ -681,16 +688,16 @@ void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx, | |||
681 | userfaultfd_event_wait_completion(ctx, &ewq); | 688 | userfaultfd_event_wait_completion(ctx, &ewq); |
682 | } | 689 | } |
683 | 690 | ||
684 | void madvise_userfault_dontneed(struct vm_area_struct *vma, | 691 | void userfaultfd_remove(struct vm_area_struct *vma, |
685 | struct vm_area_struct **prev, | 692 | struct vm_area_struct **prev, |
686 | unsigned long start, unsigned long end) | 693 | unsigned long start, unsigned long end) |
687 | { | 694 | { |
688 | struct mm_struct *mm = vma->vm_mm; | 695 | struct mm_struct *mm = vma->vm_mm; |
689 | struct userfaultfd_ctx *ctx; | 696 | struct userfaultfd_ctx *ctx; |
690 | struct userfaultfd_wait_queue ewq; | 697 | struct userfaultfd_wait_queue ewq; |
691 | 698 | ||
692 | ctx = vma->vm_userfaultfd_ctx.ctx; | 699 | ctx = vma->vm_userfaultfd_ctx.ctx; |
693 | if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_MADVDONTNEED)) | 700 | if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE)) |
694 | return; | 701 | return; |
695 | 702 | ||
696 | userfaultfd_ctx_get(ctx); | 703 | userfaultfd_ctx_get(ctx); |
@@ -700,15 +707,101 @@ void madvise_userfault_dontneed(struct vm_area_struct *vma, | |||
700 | 707 | ||
701 | msg_init(&ewq.msg); | 708 | msg_init(&ewq.msg); |
702 | 709 | ||
703 | ewq.msg.event = UFFD_EVENT_MADVDONTNEED; | 710 | ewq.msg.event = UFFD_EVENT_REMOVE; |
704 | ewq.msg.arg.madv_dn.start = start; | 711 | ewq.msg.arg.remove.start = start; |
705 | ewq.msg.arg.madv_dn.end = end; | 712 | ewq.msg.arg.remove.end = end; |
706 | 713 | ||
707 | userfaultfd_event_wait_completion(ctx, &ewq); | 714 | userfaultfd_event_wait_completion(ctx, &ewq); |
708 | 715 | ||
709 | down_read(&mm->mmap_sem); | 716 | down_read(&mm->mmap_sem); |
710 | } | 717 | } |
711 | 718 | ||
719 | static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps, | ||
720 | unsigned long start, unsigned long end) | ||
721 | { | ||
722 | struct userfaultfd_unmap_ctx *unmap_ctx; | ||
723 | |||
724 | list_for_each_entry(unmap_ctx, unmaps, list) | ||
725 | if (unmap_ctx->ctx == ctx && unmap_ctx->start == start && | ||
726 | unmap_ctx->end == end) | ||
727 | return true; | ||
728 | |||
729 | return false; | ||
730 | } | ||
731 | |||
732 | int userfaultfd_unmap_prep(struct vm_area_struct *vma, | ||
733 | unsigned long start, unsigned long end, | ||
734 | struct list_head *unmaps) | ||
735 | { | ||
736 | for ( ; vma && vma->vm_start < end; vma = vma->vm_next) { | ||
737 | struct userfaultfd_unmap_ctx *unmap_ctx; | ||
738 | struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx; | ||
739 | |||
740 | if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) || | ||
741 | has_unmap_ctx(ctx, unmaps, start, end)) | ||
742 | continue; | ||
743 | |||
744 | unmap_ctx = kzalloc(sizeof(*unmap_ctx), GFP_KERNEL); | ||
745 | if (!unmap_ctx) | ||
746 | return -ENOMEM; | ||
747 | |||
748 | userfaultfd_ctx_get(ctx); | ||
749 | unmap_ctx->ctx = ctx; | ||
750 | unmap_ctx->start = start; | ||
751 | unmap_ctx->end = end; | ||
752 | list_add_tail(&unmap_ctx->list, unmaps); | ||
753 | } | ||
754 | |||
755 | return 0; | ||
756 | } | ||
757 | |||
758 | void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf) | ||
759 | { | ||
760 | struct userfaultfd_unmap_ctx *ctx, *n; | ||
761 | struct userfaultfd_wait_queue ewq; | ||
762 | |||
763 | list_for_each_entry_safe(ctx, n, uf, list) { | ||
764 | msg_init(&ewq.msg); | ||
765 | |||
766 | ewq.msg.event = UFFD_EVENT_UNMAP; | ||
767 | ewq.msg.arg.remove.start = ctx->start; | ||
768 | ewq.msg.arg.remove.end = ctx->end; | ||
769 | |||
770 | userfaultfd_event_wait_completion(ctx->ctx, &ewq); | ||
771 | |||
772 | list_del(&ctx->list); | ||
773 | kfree(ctx); | ||
774 | } | ||
775 | } | ||
776 | |||
777 | void userfaultfd_exit(struct mm_struct *mm) | ||
778 | { | ||
779 | struct vm_area_struct *vma = mm->mmap; | ||
780 | |||
781 | /* | ||
782 | * We can do the vma walk without locking because the caller | ||
783 | * (exit_mm) knows it now has exclusive access | ||
784 | */ | ||
785 | while (vma) { | ||
786 | struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx; | ||
787 | |||
788 | if (ctx && (ctx->features & UFFD_FEATURE_EVENT_EXIT)) { | ||
789 | struct userfaultfd_wait_queue ewq; | ||
790 | |||
791 | userfaultfd_ctx_get(ctx); | ||
792 | |||
793 | msg_init(&ewq.msg); | ||
794 | ewq.msg.event = UFFD_EVENT_EXIT; | ||
795 | |||
796 | userfaultfd_event_wait_completion(ctx, &ewq); | ||
797 | |||
798 | ctx->features &= ~UFFD_FEATURE_EVENT_EXIT; | ||
799 | } | ||
800 | |||
801 | vma = vma->vm_next; | ||
802 | } | ||
803 | } | ||
804 | |||
712 | static int userfaultfd_release(struct inode *inode, struct file *file) | 805 | static int userfaultfd_release(struct inode *inode, struct file *file) |
713 | { | 806 | { |
714 | struct userfaultfd_ctx *ctx = file->private_data; | 807 | struct userfaultfd_ctx *ctx = file->private_data; |
@@ -1514,6 +1607,8 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx, | |||
1514 | ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src, | 1607 | ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src, |
1515 | uffdio_copy.len); | 1608 | uffdio_copy.len); |
1516 | mmput(ctx->mm); | 1609 | mmput(ctx->mm); |
1610 | } else { | ||
1611 | return -ENOSPC; | ||
1517 | } | 1612 | } |
1518 | if (unlikely(put_user(ret, &user_uffdio_copy->copy))) | 1613 | if (unlikely(put_user(ret, &user_uffdio_copy->copy))) |
1519 | return -EFAULT; | 1614 | return -EFAULT; |
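
From userspace, both new notifications arrive as ordinary messages on the userfaultfd and, as the kernel side above shows, both report their range through msg.arg.remove. A sketch of a monitor draining them (handle_gone() is a hypothetical callback; this assumes UFFD_FEATURE_EVENT_REMOVE and UFFD_FEATURE_EVENT_UNMAP were negotiated via UFFDIO_API):

    #include <linux/userfaultfd.h>
    #include <unistd.h>

    struct uffd_msg msg;

    if (read(uffd, &msg, sizeof(msg)) == sizeof(msg)) {
            switch (msg.event) {
            case UFFD_EVENT_REMOVE:  /* madvise(MADV_DONTNEED/MADV_REMOVE) */
            case UFFD_EVENT_UNMAP:   /* munmap() of a registered range */
                    handle_gone(msg.arg.remove.start, msg.arg.remove.end);
                    break;
            }
    }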
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 022014016d80..a50eca676670 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c | |||
@@ -1379,22 +1379,21 @@ xfs_file_llseek( | |||
1379 | */ | 1379 | */ |
1380 | STATIC int | 1380 | STATIC int |
1381 | xfs_filemap_page_mkwrite( | 1381 | xfs_filemap_page_mkwrite( |
1382 | struct vm_area_struct *vma, | ||
1383 | struct vm_fault *vmf) | 1382 | struct vm_fault *vmf) |
1384 | { | 1383 | { |
1385 | struct inode *inode = file_inode(vma->vm_file); | 1384 | struct inode *inode = file_inode(vmf->vma->vm_file); |
1386 | int ret; | 1385 | int ret; |
1387 | 1386 | ||
1388 | trace_xfs_filemap_page_mkwrite(XFS_I(inode)); | 1387 | trace_xfs_filemap_page_mkwrite(XFS_I(inode)); |
1389 | 1388 | ||
1390 | sb_start_pagefault(inode->i_sb); | 1389 | sb_start_pagefault(inode->i_sb); |
1391 | file_update_time(vma->vm_file); | 1390 | file_update_time(vmf->vma->vm_file); |
1392 | xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED); | 1391 | xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED); |
1393 | 1392 | ||
1394 | if (IS_DAX(inode)) { | 1393 | if (IS_DAX(inode)) { |
1395 | ret = dax_iomap_fault(vma, vmf, &xfs_iomap_ops); | 1394 | ret = dax_iomap_fault(vmf, PE_SIZE_PTE, &xfs_iomap_ops); |
1396 | } else { | 1395 | } else { |
1397 | ret = iomap_page_mkwrite(vma, vmf, &xfs_iomap_ops); | 1396 | ret = iomap_page_mkwrite(vmf, &xfs_iomap_ops); |
1398 | ret = block_page_mkwrite_return(ret); | 1397 | ret = block_page_mkwrite_return(ret); |
1399 | } | 1398 | } |
1400 | 1399 | ||
@@ -1406,23 +1405,22 @@ xfs_filemap_page_mkwrite( | |||
1406 | 1405 | ||
1407 | STATIC int | 1406 | STATIC int |
1408 | xfs_filemap_fault( | 1407 | xfs_filemap_fault( |
1409 | struct vm_area_struct *vma, | ||
1410 | struct vm_fault *vmf) | 1408 | struct vm_fault *vmf) |
1411 | { | 1409 | { |
1412 | struct inode *inode = file_inode(vma->vm_file); | 1410 | struct inode *inode = file_inode(vmf->vma->vm_file); |
1413 | int ret; | 1411 | int ret; |
1414 | 1412 | ||
1415 | trace_xfs_filemap_fault(XFS_I(inode)); | 1413 | trace_xfs_filemap_fault(XFS_I(inode)); |
1416 | 1414 | ||
1417 | /* DAX can shortcut the normal fault path on write faults! */ | 1415 | /* DAX can shortcut the normal fault path on write faults! */ |
1418 | if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(inode)) | 1416 | if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(inode)) |
1419 | return xfs_filemap_page_mkwrite(vma, vmf); | 1417 | return xfs_filemap_page_mkwrite(vmf); |
1420 | 1418 | ||
1421 | xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED); | 1419 | xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED); |
1422 | if (IS_DAX(inode)) | 1420 | if (IS_DAX(inode)) |
1423 | ret = dax_iomap_fault(vma, vmf, &xfs_iomap_ops); | 1421 | ret = dax_iomap_fault(vmf, PE_SIZE_PTE, &xfs_iomap_ops); |
1424 | else | 1422 | else |
1425 | ret = filemap_fault(vma, vmf); | 1423 | ret = filemap_fault(vmf); |
1426 | xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED); | 1424 | xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED); |
1427 | 1425 | ||
1428 | return ret; | 1426 | return ret; |
@@ -1431,13 +1429,14 @@ xfs_filemap_fault( | |||
1431 | /* | 1429 | /* |
1432 | * Similar to xfs_filemap_fault(), the DAX fault path can call into here on | 1430 | * Similar to xfs_filemap_fault(), the DAX fault path can call into here on |
1433 | * both read and write faults. Hence we need to handle both cases. There is no | 1431 | * both read and write faults. Hence we need to handle both cases. There is no |
1434 | * ->pmd_mkwrite callout for huge pages, so we have a single function here to | 1432 | * ->huge_mkwrite callout for huge pages, so we have a single function here to |
1435 | * handle both cases here. @flags carries the information on the type of fault | 1433 | * handle both cases here. @flags carries the information on the type of fault |
1436 | * occurring. | 1434 | * occurring. |
1437 | */ | 1435 | */ |
1438 | STATIC int | 1436 | STATIC int |
1439 | xfs_filemap_pmd_fault( | 1437 | xfs_filemap_huge_fault( |
1440 | struct vm_fault *vmf) | 1438 | struct vm_fault *vmf, |
1439 | enum page_entry_size pe_size) | ||
1441 | { | 1440 | { |
1442 | struct inode *inode = file_inode(vmf->vma->vm_file); | 1441 | struct inode *inode = file_inode(vmf->vma->vm_file); |
1443 | struct xfs_inode *ip = XFS_I(inode); | 1442 | struct xfs_inode *ip = XFS_I(inode); |
@@ -1446,7 +1445,7 @@ xfs_filemap_pmd_fault( | |||
1446 | if (!IS_DAX(inode)) | 1445 | if (!IS_DAX(inode)) |
1447 | return VM_FAULT_FALLBACK; | 1446 | return VM_FAULT_FALLBACK; |
1448 | 1447 | ||
1449 | trace_xfs_filemap_pmd_fault(ip); | 1448 | trace_xfs_filemap_huge_fault(ip); |
1450 | 1449 | ||
1451 | if (vmf->flags & FAULT_FLAG_WRITE) { | 1450 | if (vmf->flags & FAULT_FLAG_WRITE) { |
1452 | sb_start_pagefault(inode->i_sb); | 1451 | sb_start_pagefault(inode->i_sb); |
@@ -1454,7 +1453,7 @@ xfs_filemap_pmd_fault( | |||
1454 | } | 1453 | } |
1455 | 1454 | ||
1456 | xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED); | 1455 | xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED); |
1457 | ret = dax_iomap_pmd_fault(vmf, &xfs_iomap_ops); | 1456 | ret = dax_iomap_fault(vmf, pe_size, &xfs_iomap_ops); |
1458 | xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED); | 1457 | xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED); |
1459 | 1458 | ||
1460 | if (vmf->flags & FAULT_FLAG_WRITE) | 1459 | if (vmf->flags & FAULT_FLAG_WRITE) |
@@ -1471,11 +1470,10 @@ xfs_filemap_pmd_fault( | |||
1471 | */ | 1470 | */ |
1472 | static int | 1471 | static int |
1473 | xfs_filemap_pfn_mkwrite( | 1472 | xfs_filemap_pfn_mkwrite( |
1474 | struct vm_area_struct *vma, | ||
1475 | struct vm_fault *vmf) | 1473 | struct vm_fault *vmf) |
1476 | { | 1474 | { |
1477 | 1475 | ||
1478 | struct inode *inode = file_inode(vma->vm_file); | 1476 | struct inode *inode = file_inode(vmf->vma->vm_file); |
1479 | struct xfs_inode *ip = XFS_I(inode); | 1477 | struct xfs_inode *ip = XFS_I(inode); |
1480 | int ret = VM_FAULT_NOPAGE; | 1478 | int ret = VM_FAULT_NOPAGE; |
1481 | loff_t size; | 1479 | loff_t size; |
@@ -1483,7 +1481,7 @@ xfs_filemap_pfn_mkwrite( | |||
1483 | trace_xfs_filemap_pfn_mkwrite(ip); | 1481 | trace_xfs_filemap_pfn_mkwrite(ip); |
1484 | 1482 | ||
1485 | sb_start_pagefault(inode->i_sb); | 1483 | sb_start_pagefault(inode->i_sb); |
1486 | file_update_time(vma->vm_file); | 1484 | file_update_time(vmf->vma->vm_file); |
1487 | 1485 | ||
1488 | /* check if the faulting page hasn't raced with truncate */ | 1486 | /* check if the faulting page hasn't raced with truncate */ |
1489 | xfs_ilock(ip, XFS_MMAPLOCK_SHARED); | 1487 | xfs_ilock(ip, XFS_MMAPLOCK_SHARED); |
@@ -1491,7 +1489,7 @@ xfs_filemap_pfn_mkwrite( | |||
1491 | if (vmf->pgoff >= size) | 1489 | if (vmf->pgoff >= size) |
1492 | ret = VM_FAULT_SIGBUS; | 1490 | ret = VM_FAULT_SIGBUS; |
1493 | else if (IS_DAX(inode)) | 1491 | else if (IS_DAX(inode)) |
1494 | ret = dax_pfn_mkwrite(vma, vmf); | 1492 | ret = dax_pfn_mkwrite(vmf); |
1495 | xfs_iunlock(ip, XFS_MMAPLOCK_SHARED); | 1493 | xfs_iunlock(ip, XFS_MMAPLOCK_SHARED); |
1496 | sb_end_pagefault(inode->i_sb); | 1494 | sb_end_pagefault(inode->i_sb); |
1497 | return ret; | 1495 | return ret; |
@@ -1500,7 +1498,7 @@ xfs_filemap_pfn_mkwrite( | |||
1500 | 1498 | ||
1501 | static const struct vm_operations_struct xfs_file_vm_ops = { | 1499 | static const struct vm_operations_struct xfs_file_vm_ops = { |
1502 | .fault = xfs_filemap_fault, | 1500 | .fault = xfs_filemap_fault, |
1503 | .pmd_fault = xfs_filemap_pmd_fault, | 1501 | .huge_fault = xfs_filemap_huge_fault, |
1504 | .map_pages = filemap_map_pages, | 1502 | .map_pages = filemap_map_pages, |
1505 | .page_mkwrite = xfs_filemap_page_mkwrite, | 1503 | .page_mkwrite = xfs_filemap_page_mkwrite, |
1506 | .pfn_mkwrite = xfs_filemap_pfn_mkwrite, | 1504 | .pfn_mkwrite = xfs_filemap_pfn_mkwrite, |
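
->pmd_fault becomes ->huge_fault, which serves every huge mapping size and is told which one through the new page_entry_size argument. A sketch of the relevant declarations, assuming the include/linux/mm.h definitions introduced by this series:

    enum page_entry_size {
            PE_SIZE_PTE = 0,
            PE_SIZE_PMD,
            PE_SIZE_PUD,
    };

    /* member of struct vm_operations_struct */
    int (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size);

dax_iomap_fault() is folded the same way: the old PTE and PMD entry points collapse into one call that dispatches on pe_size, which is why xfs_filemap_page_mkwrite() above passes PE_SIZE_PTE explicitly.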
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index fb7555e73a62..383ac227ce2c 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h | |||
@@ -687,7 +687,7 @@ DEFINE_INODE_EVENT(xfs_inode_clear_cowblocks_tag); | |||
687 | DEFINE_INODE_EVENT(xfs_inode_free_cowblocks_invalid); | 687 | DEFINE_INODE_EVENT(xfs_inode_free_cowblocks_invalid); |
688 | 688 | ||
689 | DEFINE_INODE_EVENT(xfs_filemap_fault); | 689 | DEFINE_INODE_EVENT(xfs_filemap_fault); |
690 | DEFINE_INODE_EVENT(xfs_filemap_pmd_fault); | 690 | DEFINE_INODE_EVENT(xfs_filemap_huge_fault); |
691 | DEFINE_INODE_EVENT(xfs_filemap_page_mkwrite); | 691 | DEFINE_INODE_EVENT(xfs_filemap_page_mkwrite); |
692 | DEFINE_INODE_EVENT(xfs_filemap_pfn_mkwrite); | 692 | DEFINE_INODE_EVENT(xfs_filemap_pfn_mkwrite); |
693 | 693 | ||
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 18af2bcefe6a..f4ca23b158b3 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h | |||
@@ -36,6 +36,9 @@ extern int ptep_set_access_flags(struct vm_area_struct *vma, | |||
36 | extern int pmdp_set_access_flags(struct vm_area_struct *vma, | 36 | extern int pmdp_set_access_flags(struct vm_area_struct *vma, |
37 | unsigned long address, pmd_t *pmdp, | 37 | unsigned long address, pmd_t *pmdp, |
38 | pmd_t entry, int dirty); | 38 | pmd_t entry, int dirty); |
39 | extern int pudp_set_access_flags(struct vm_area_struct *vma, | ||
40 | unsigned long address, pud_t *pudp, | ||
41 | pud_t entry, int dirty); | ||
39 | #else | 42 | #else |
40 | static inline int pmdp_set_access_flags(struct vm_area_struct *vma, | 43 | static inline int pmdp_set_access_flags(struct vm_area_struct *vma, |
41 | unsigned long address, pmd_t *pmdp, | 44 | unsigned long address, pmd_t *pmdp, |
@@ -44,6 +47,13 @@ static inline int pmdp_set_access_flags(struct vm_area_struct *vma, | |||
44 | BUILD_BUG(); | 47 | BUILD_BUG(); |
45 | return 0; | 48 | return 0; |
46 | } | 49 | } |
50 | static inline int pudp_set_access_flags(struct vm_area_struct *vma, | ||
51 | unsigned long address, pud_t *pudp, | ||
52 | pud_t entry, int dirty) | ||
53 | { | ||
54 | BUILD_BUG(); | ||
55 | return 0; | ||
56 | } | ||
47 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ | 57 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ |
48 | #endif | 58 | #endif |
49 | 59 | ||
@@ -121,8 +131,8 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, | |||
121 | } | 131 | } |
122 | #endif | 132 | #endif |
123 | 133 | ||
124 | #ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR | ||
125 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 134 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
135 | #ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR | ||
126 | static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, | 136 | static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, |
127 | unsigned long address, | 137 | unsigned long address, |
128 | pmd_t *pmdp) | 138 | pmd_t *pmdp) |
@@ -131,20 +141,40 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, | |||
131 | pmd_clear(pmdp); | 141 | pmd_clear(pmdp); |
132 | return pmd; | 142 | return pmd; |
133 | } | 143 | } |
144 | #endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */ | ||
145 | #ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR | ||
146 | static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm, | ||
147 | unsigned long address, | ||
148 | pud_t *pudp) | ||
149 | { | ||
150 | pud_t pud = *pudp; | ||
151 | |||
152 | pud_clear(pudp); | ||
153 | return pud; | ||
154 | } | ||
155 | #endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */ | ||
134 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ | 156 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ |
135 | #endif | ||
136 | 157 | ||
137 | #ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL | ||
138 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 158 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
159 | #ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL | ||
139 | static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm, | 160 | static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm, |
140 | unsigned long address, pmd_t *pmdp, | 161 | unsigned long address, pmd_t *pmdp, |
141 | int full) | 162 | int full) |
142 | { | 163 | { |
143 | return pmdp_huge_get_and_clear(mm, address, pmdp); | 164 | return pmdp_huge_get_and_clear(mm, address, pmdp); |
144 | } | 165 | } |
145 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ | ||
146 | #endif | 166 | #endif |
147 | 167 | ||
168 | #ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL | ||
169 | static inline pud_t pudp_huge_get_and_clear_full(struct mm_struct *mm, | ||
170 | unsigned long address, pud_t *pudp, | ||
171 | int full) | ||
172 | { | ||
173 | return pudp_huge_get_and_clear(mm, address, pudp); | ||
174 | } | ||
175 | #endif | ||
176 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ | ||
177 | |||
148 | #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL | 178 | #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL |
149 | static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, | 179 | static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, |
150 | unsigned long address, pte_t *ptep, | 180 | unsigned long address, pte_t *ptep, |
@@ -181,6 +211,9 @@ extern pte_t ptep_clear_flush(struct vm_area_struct *vma, | |||
181 | extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, | 211 | extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, |
182 | unsigned long address, | 212 | unsigned long address, |
183 | pmd_t *pmdp); | 213 | pmd_t *pmdp); |
214 | extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, | ||
215 | unsigned long address, | ||
216 | pud_t *pudp); | ||
184 | #endif | 217 | #endif |
185 | 218 | ||
186 | #ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT | 219 | #ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT |
@@ -192,6 +225,30 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres | |||
192 | } | 225 | } |
193 | #endif | 226 | #endif |
194 | 227 | ||
228 | #ifndef pte_savedwrite | ||
229 | #define pte_savedwrite pte_write | ||
230 | #endif | ||
231 | |||
232 | #ifndef pte_mk_savedwrite | ||
233 | #define pte_mk_savedwrite pte_mkwrite | ||
234 | #endif | ||
235 | |||
236 | #ifndef pte_clear_savedwrite | ||
237 | #define pte_clear_savedwrite pte_wrprotect | ||
238 | #endif | ||
239 | |||
240 | #ifndef pmd_savedwrite | ||
241 | #define pmd_savedwrite pmd_write | ||
242 | #endif | ||
243 | |||
244 | #ifndef pmd_mk_savedwrite | ||
245 | #define pmd_mk_savedwrite pmd_mkwrite | ||
246 | #endif | ||
247 | |||
248 | #ifndef pmd_clear_savedwrite | ||
249 | #define pmd_clear_savedwrite pmd_wrprotect | ||
250 | #endif | ||
251 | |||
195 | #ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT | 252 | #ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT |
196 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 253 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
197 | static inline void pmdp_set_wrprotect(struct mm_struct *mm, | 254 | static inline void pmdp_set_wrprotect(struct mm_struct *mm, |
@@ -208,6 +265,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, | |||
208 | } | 265 | } |
209 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ | 266 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ |
210 | #endif | 267 | #endif |
268 | #ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT | ||
269 | #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD | ||
270 | static inline void pudp_set_wrprotect(struct mm_struct *mm, | ||
271 | unsigned long address, pud_t *pudp) | ||
272 | { | ||
273 | pud_t old_pud = *pudp; | ||
274 | |||
275 | set_pud_at(mm, address, pudp, pud_wrprotect(old_pud)); | ||
276 | } | ||
277 | #else | ||
278 | static inline void pudp_set_wrprotect(struct mm_struct *mm, | ||
279 | unsigned long address, pud_t *pudp) | ||
280 | { | ||
281 | BUILD_BUG(); | ||
282 | } | ||
283 | #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ | ||
284 | #endif | ||
211 | 285 | ||
212 | #ifndef pmdp_collapse_flush | 286 | #ifndef pmdp_collapse_flush |
213 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 287 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
@@ -273,12 +347,23 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) | |||
273 | { | 347 | { |
274 | return pmd_val(pmd_a) == pmd_val(pmd_b); | 348 | return pmd_val(pmd_a) == pmd_val(pmd_b); |
275 | } | 349 | } |
350 | |||
351 | static inline int pud_same(pud_t pud_a, pud_t pud_b) | ||
352 | { | ||
353 | return pud_val(pud_a) == pud_val(pud_b); | ||
354 | } | ||
276 | #else /* CONFIG_TRANSPARENT_HUGEPAGE */ | 355 | #else /* CONFIG_TRANSPARENT_HUGEPAGE */ |
277 | static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) | 356 | static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) |
278 | { | 357 | { |
279 | BUILD_BUG(); | 358 | BUILD_BUG(); |
280 | return 0; | 359 | return 0; |
281 | } | 360 | } |
361 | |||
362 | static inline int pud_same(pud_t pud_a, pud_t pud_b) | ||
363 | { | ||
364 | BUILD_BUG(); | ||
365 | return 0; | ||
366 | } | ||
282 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ | 367 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ |
283 | #endif | 368 | #endif |
284 | 369 | ||
@@ -640,6 +725,15 @@ static inline int pmd_write(pmd_t pmd) | |||
640 | #endif /* __HAVE_ARCH_PMD_WRITE */ | 725 | #endif /* __HAVE_ARCH_PMD_WRITE */ |
641 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ | 726 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ |
642 | 727 | ||
728 | #if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \ | ||
729 | (defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ | ||
730 | !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) | ||
731 | static inline int pud_trans_huge(pud_t pud) | ||
732 | { | ||
733 | return 0; | ||
734 | } | ||
735 | #endif | ||
736 | |||
643 | #ifndef pmd_read_atomic | 737 | #ifndef pmd_read_atomic |
644 | static inline pmd_t pmd_read_atomic(pmd_t *pmdp) | 738 | static inline pmd_t pmd_read_atomic(pmd_t *pmdp) |
645 | { | 739 | { |
@@ -785,8 +879,10 @@ static inline int pmd_clear_huge(pmd_t *pmd) | |||
785 | * e.g. see arch/arc: flush_pmd_tlb_range | 879 | * e.g. see arch/arc: flush_pmd_tlb_range |
786 | */ | 880 | */ |
787 | #define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) | 881 | #define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) |
882 | #define flush_pud_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) | ||
788 | #else | 883 | #else |
789 | #define flush_pmd_tlb_range(vma, addr, end) BUILD_BUG() | 884 | #define flush_pmd_tlb_range(vma, addr, end) BUILD_BUG() |
885 | #define flush_pud_tlb_range(vma, addr, end) BUILD_BUG() | ||
790 | #endif | 886 | #endif |
791 | #endif | 887 | #endif |
792 | 888 | ||
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 7eed8cf3130a..4329bc6ef04b 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h | |||
@@ -232,6 +232,20 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb, | |||
232 | __tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \ | 232 | __tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \ |
233 | } while (0) | 233 | } while (0) |
234 | 234 | ||
235 | /** | ||
236 | * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb | ||
237 | * invalidation. This is a nop so far, because only x86 needs it. | ||
238 | */ | ||
239 | #ifndef __tlb_remove_pud_tlb_entry | ||
240 | #define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0) | ||
241 | #endif | ||
242 | |||
243 | #define tlb_remove_pud_tlb_entry(tlb, pudp, address) \ | ||
244 | do { \ | ||
245 | __tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE); \ | ||
246 | __tlb_remove_pud_tlb_entry(tlb, pudp, address); \ | ||
247 | } while (0) | ||
248 | |||
235 | /* | 249 | /* |
236 | * For things like page tables caches (ie caching addresses "inside" the | 250 | * For things like page tables caches (ie caching addresses "inside" the |
237 | * page tables, like x86 does), for legacy reasons, flushing an | 251 | * page tables, like x86 does), for legacy reasons, flushing an |
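
The new hook is consumed by the PUD zap path the same way the PMD one is. A sketch of the expected call sequence, assuming the zap_huge_pud() shape added elsewhere in this series:

    /* clear the huge entry, then queue the HPAGE_PUD_SIZE range for flush */
    pud_t orig_pud = pudp_huge_get_and_clear_full(tlb->mm, addr, pud,
                                                  tlb->fullmm);
    tlb_remove_pud_tlb_entry(tlb, pud, addr);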
diff --git a/include/linux/bug.h b/include/linux/bug.h index baff2e8fc8a8..5828489309bb 100644 --- a/include/linux/bug.h +++ b/include/linux/bug.h | |||
@@ -124,18 +124,20 @@ static inline enum bug_trap_type report_bug(unsigned long bug_addr, | |||
124 | 124 | ||
125 | /* | 125 | /* |
126 | * Since detected data corruption should stop operation on the affected | 126 | * Since detected data corruption should stop operation on the affected |
127 | * structures, this returns false if the corruption condition is found. | 127 | * structures. Return value must be checked and sanely acted on by caller. |
128 | */ | 128 | */ |
129 | static inline __must_check bool check_data_corruption(bool v) { return v; } | ||
129 | #define CHECK_DATA_CORRUPTION(condition, fmt, ...) \ | 130 | #define CHECK_DATA_CORRUPTION(condition, fmt, ...) \ |
130 | do { \ | 131 | check_data_corruption(({ \ |
131 | if (unlikely(condition)) { \ | 132 | bool corruption = unlikely(condition); \ |
133 | if (corruption) { \ | ||
132 | if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) { \ | 134 | if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) { \ |
133 | pr_err(fmt, ##__VA_ARGS__); \ | 135 | pr_err(fmt, ##__VA_ARGS__); \ |
134 | BUG(); \ | 136 | BUG(); \ |
135 | } else \ | 137 | } else \ |
136 | WARN(1, fmt, ##__VA_ARGS__); \ | 138 | WARN(1, fmt, ##__VA_ARGS__); \ |
137 | return false; \ | ||
138 | } \ | 139 | } \ |
139 | } while (0) | 140 | corruption; \ |
141 | })) | ||
140 | 142 | ||
141 | #endif /* _LINUX_BUG_H */ | 143 | #endif /* _LINUX_BUG_H */ |
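
CHECK_DATA_CORRUPTION() stops issuing a hidden `return false` on the caller's behalf and instead becomes a __must_check boolean expression, so each call site spells out its own bail-out. A sketch in the style of the lib/list_debug.c callers:

    bool __list_add_valid(struct list_head *new, struct list_head *prev,
                          struct list_head *next)
    {
            if (CHECK_DATA_CORRUPTION(next->prev != prev,
                            "list_add corruption: next->prev should be prev (%p)\n",
                            prev))
                    return false;   /* explicit now; the macro no longer returns */
            return true;
    }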
diff --git a/include/linux/cma.h b/include/linux/cma.h index 6f0a91b37f68..03f32d0bd1d8 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h | |||
@@ -29,6 +29,7 @@ extern int __init cma_declare_contiguous(phys_addr_t base, | |||
29 | extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, | 29 | extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, |
30 | unsigned int order_per_bit, | 30 | unsigned int order_per_bit, |
31 | struct cma **res_cma); | 31 | struct cma **res_cma); |
32 | extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align); | 32 | extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, |
33 | gfp_t gfp_mask); | ||
33 | extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count); | 34 | extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count); |
34 | #endif | 35 | #endif |
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index fddd1a5eb322..811f7a915658 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h | |||
@@ -122,6 +122,7 @@ | |||
122 | #define __attribute_const__ __attribute__((__const__)) | 122 | #define __attribute_const__ __attribute__((__const__)) |
123 | #define __maybe_unused __attribute__((unused)) | 123 | #define __maybe_unused __attribute__((unused)) |
124 | #define __always_unused __attribute__((unused)) | 124 | #define __always_unused __attribute__((unused)) |
125 | #define __mode(x) __attribute__((mode(x))) | ||
125 | 126 | ||
126 | /* gcc version specific checks */ | 127 | /* gcc version specific checks */ |
127 | 128 | ||
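
__mode(x) wraps GCC's mode attribute, which selects an integer machine mode by name rather than by C type. A brief illustration (the typedef name is made up):

    /* "DI" (double-int) mode is 64 bits wide on GCC targets that provide it */
    typedef int __mode(DI) s64_alias;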
diff --git a/include/linux/dax.h b/include/linux/dax.h index 1e77ff5818f1..d8a3dc042e1c 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h | |||
@@ -38,8 +38,8 @@ static inline void *dax_radix_locked_entry(sector_t sector, unsigned long flags) | |||
38 | 38 | ||
39 | ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, | 39 | ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, |
40 | const struct iomap_ops *ops); | 40 | const struct iomap_ops *ops); |
41 | int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, | 41 | int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, |
42 | const struct iomap_ops *ops); | 42 | const struct iomap_ops *ops); |
43 | int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); | 43 | int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); |
44 | int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index); | 44 | int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index); |
45 | int dax_invalidate_mapping_entry_sync(struct address_space *mapping, | 45 | int dax_invalidate_mapping_entry_sync(struct address_space *mapping, |
@@ -71,19 +71,13 @@ static inline unsigned int dax_radix_order(void *entry) | |||
71 | return PMD_SHIFT - PAGE_SHIFT; | 71 | return PMD_SHIFT - PAGE_SHIFT; |
72 | return 0; | 72 | return 0; |
73 | } | 73 | } |
74 | int dax_iomap_pmd_fault(struct vm_fault *vmf, const struct iomap_ops *ops); | ||
75 | #else | 74 | #else |
76 | static inline unsigned int dax_radix_order(void *entry) | 75 | static inline unsigned int dax_radix_order(void *entry) |
77 | { | 76 | { |
78 | return 0; | 77 | return 0; |
79 | } | 78 | } |
80 | static inline int dax_iomap_pmd_fault(struct vm_fault *vmf, | ||
81 | const struct iomap_ops *ops) | ||
82 | { | ||
83 | return VM_FAULT_FALLBACK; | ||
84 | } | ||
85 | #endif | 79 | #endif |
86 | int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *); | 80 | int dax_pfn_mkwrite(struct vm_fault *vmf); |
87 | 81 | ||
88 | static inline bool vma_is_dax(struct vm_area_struct *vma) | 82 | static inline bool vma_is_dax(struct vm_area_struct *vma) |
89 | { | 83 | { |
diff --git a/include/linux/device.h b/include/linux/device.h index bd684fc8ec1d..a48a7ff70164 100644 --- a/include/linux/device.h +++ b/include/linux/device.h | |||
@@ -1139,6 +1139,7 @@ static inline bool device_supports_offline(struct device *dev) | |||
1139 | extern void lock_device_hotplug(void); | 1139 | extern void lock_device_hotplug(void); |
1140 | extern void unlock_device_hotplug(void); | 1140 | extern void unlock_device_hotplug(void); |
1141 | extern int lock_device_hotplug_sysfs(void); | 1141 | extern int lock_device_hotplug_sysfs(void); |
1142 | void assert_held_device_hotplug(void); | ||
1142 | extern int device_offline(struct device *dev); | 1143 | extern int device_offline(struct device *dev); |
1143 | extern int device_online(struct device *dev); | 1144 | extern int device_online(struct device *dev); |
1144 | extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode); | 1145 | extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode); |
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h index fec734df1524..b67bf6ac907d 100644 --- a/include/linux/dma-contiguous.h +++ b/include/linux/dma-contiguous.h | |||
@@ -112,7 +112,7 @@ static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size, | |||
112 | } | 112 | } |
113 | 113 | ||
114 | struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, | 114 | struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, |
115 | unsigned int order); | 115 | unsigned int order, gfp_t gfp_mask); |
116 | bool dma_release_from_contiguous(struct device *dev, struct page *pages, | 116 | bool dma_release_from_contiguous(struct device *dev, struct page *pages, |
117 | int count); | 117 | int count); |
118 | 118 | ||
@@ -145,7 +145,7 @@ int dma_declare_contiguous(struct device *dev, phys_addr_t size, | |||
145 | 145 | ||
146 | static inline | 146 | static inline |
147 | struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, | 147 | struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, |
148 | unsigned int order) | 148 | unsigned int order, gfp_t gfp_mask) |
149 | { | 149 | { |
150 | return NULL; | 150 | return NULL; |
151 | } | 151 | } |
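
Together with the cma.h change above and the gfp.h change below, this threads a gfp_t from CMA's callers down to the page migration and compaction work that alloc_contig_range() performs internally, instead of the previous hard-coded GFP_KERNEL behaviour. A hedged call sketch:

    /* callers can now relax or annotate the internal allocations, e.g.: */
    page = dma_alloc_from_contiguous(dev, count, order,
                                     GFP_KERNEL | __GFP_NOWARN);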
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 0fe0b6295ab5..db373b9d3223 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
@@ -541,7 +541,7 @@ static inline bool pm_suspended_storage(void) | |||
541 | #if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA) | 541 | #if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA) |
542 | /* The below functions must be run on a range from a single zone. */ | 542 | /* The below functions must be run on a range from a single zone. */ |
543 | extern int alloc_contig_range(unsigned long start, unsigned long end, | 543 | extern int alloc_contig_range(unsigned long start, unsigned long end, |
544 | unsigned migratetype); | 544 | unsigned migratetype, gfp_t gfp_mask); |
545 | extern void free_contig_range(unsigned long pfn, unsigned nr_pages); | 545 | extern void free_contig_range(unsigned long pfn, unsigned nr_pages); |
546 | #endif | 546 | #endif |
547 | 547 | ||
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index f0029e786205..a3762d49ba39 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h | |||
@@ -6,6 +6,18 @@ extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, | |||
6 | pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, | 6 | pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, |
7 | struct vm_area_struct *vma); | 7 | struct vm_area_struct *vma); |
8 | extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd); | 8 | extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd); |
9 | extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, | ||
10 | pud_t *dst_pud, pud_t *src_pud, unsigned long addr, | ||
11 | struct vm_area_struct *vma); | ||
12 | |||
13 | #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD | ||
14 | extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud); | ||
15 | #else | ||
16 | static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud) | ||
17 | { | ||
18 | } | ||
19 | #endif | ||
20 | |||
9 | extern int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd); | 21 | extern int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd); |
10 | extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, | 22 | extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, |
11 | unsigned long addr, | 23 | unsigned long addr, |
@@ -17,6 +29,9 @@ extern bool madvise_free_huge_pmd(struct mmu_gather *tlb, | |||
17 | extern int zap_huge_pmd(struct mmu_gather *tlb, | 29 | extern int zap_huge_pmd(struct mmu_gather *tlb, |
18 | struct vm_area_struct *vma, | 30 | struct vm_area_struct *vma, |
19 | pmd_t *pmd, unsigned long addr); | 31 | pmd_t *pmd, unsigned long addr); |
32 | extern int zap_huge_pud(struct mmu_gather *tlb, | ||
33 | struct vm_area_struct *vma, | ||
34 | pud_t *pud, unsigned long addr); | ||
20 | extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, | 35 | extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, |
21 | unsigned long addr, unsigned long end, | 36 | unsigned long addr, unsigned long end, |
22 | unsigned char *vec); | 37 | unsigned char *vec); |
@@ -26,8 +41,10 @@ extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, | |||
26 | extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, | 41 | extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, |
27 | unsigned long addr, pgprot_t newprot, | 42 | unsigned long addr, pgprot_t newprot, |
28 | int prot_numa); | 43 | int prot_numa); |
29 | int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *, | 44 | int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, |
30 | pfn_t pfn, bool write); | 45 | pmd_t *pmd, pfn_t pfn, bool write); |
46 | int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, | ||
47 | pud_t *pud, pfn_t pfn, bool write); | ||
31 | enum transparent_hugepage_flag { | 48 | enum transparent_hugepage_flag { |
32 | TRANSPARENT_HUGEPAGE_FLAG, | 49 | TRANSPARENT_HUGEPAGE_FLAG, |
33 | TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, | 50 | TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, |
@@ -58,13 +75,14 @@ extern struct kobj_attribute shmem_enabled_attr; | |||
58 | #define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER) | 75 | #define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER) |
59 | 76 | ||
60 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 77 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
61 | struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, | ||
62 | pmd_t *pmd, int flags); | ||
63 | |||
64 | #define HPAGE_PMD_SHIFT PMD_SHIFT | 78 | #define HPAGE_PMD_SHIFT PMD_SHIFT |
65 | #define HPAGE_PMD_SIZE ((1UL) << HPAGE_PMD_SHIFT) | 79 | #define HPAGE_PMD_SIZE ((1UL) << HPAGE_PMD_SHIFT) |
66 | #define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1)) | 80 | #define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1)) |
67 | 81 | ||
82 | #define HPAGE_PUD_SHIFT PUD_SHIFT | ||
83 | #define HPAGE_PUD_SIZE ((1UL) << HPAGE_PUD_SHIFT) | ||
84 | #define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1)) | ||
85 | |||
68 | extern bool is_vma_temporary_stack(struct vm_area_struct *vma); | 86 | extern bool is_vma_temporary_stack(struct vm_area_struct *vma); |
69 | 87 | ||
70 | #define transparent_hugepage_enabled(__vma) \ | 88 | #define transparent_hugepage_enabled(__vma) \ |
@@ -118,6 +136,17 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, | |||
118 | void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, | 136 | void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, |
119 | bool freeze, struct page *page); | 137 | bool freeze, struct page *page); |
120 | 138 | ||
139 | void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, | ||
140 | unsigned long address); | ||
141 | |||
142 | #define split_huge_pud(__vma, __pud, __address) \ | ||
143 | do { \ | ||
144 | pud_t *____pud = (__pud); \ | ||
145 | if (pud_trans_huge(*____pud) \ | ||
146 | || pud_devmap(*____pud)) \ | ||
147 | __split_huge_pud(__vma, __pud, __address); \ | ||
148 | } while (0) | ||
149 | |||
121 | extern int hugepage_madvise(struct vm_area_struct *vma, | 150 | extern int hugepage_madvise(struct vm_area_struct *vma, |
122 | unsigned long *vm_flags, int advice); | 151 | unsigned long *vm_flags, int advice); |
123 | extern void vma_adjust_trans_huge(struct vm_area_struct *vma, | 152 | extern void vma_adjust_trans_huge(struct vm_area_struct *vma, |
@@ -126,6 +155,8 @@ extern void vma_adjust_trans_huge(struct vm_area_struct *vma, | |||
126 | long adjust_next); | 155 | long adjust_next); |
127 | extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, | 156 | extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, |
128 | struct vm_area_struct *vma); | 157 | struct vm_area_struct *vma); |
158 | extern spinlock_t *__pud_trans_huge_lock(pud_t *pud, | ||
159 | struct vm_area_struct *vma); | ||
129 | /* mmap_sem must be held on entry */ | 160 | /* mmap_sem must be held on entry */ |
130 | static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, | 161 | static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, |
131 | struct vm_area_struct *vma) | 162 | struct vm_area_struct *vma) |
@@ -136,6 +167,15 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, | |||
136 | else | 167 | else |
137 | return NULL; | 168 | return NULL; |
138 | } | 169 | } |
170 | static inline spinlock_t *pud_trans_huge_lock(pud_t *pud, | ||
171 | struct vm_area_struct *vma) | ||
172 | { | ||
173 | VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma); | ||
174 | if (pud_trans_huge(*pud) || pud_devmap(*pud)) | ||
175 | return __pud_trans_huge_lock(pud, vma); | ||
176 | else | ||
177 | return NULL; | ||
178 | } | ||
139 | static inline int hpage_nr_pages(struct page *page) | 179 | static inline int hpage_nr_pages(struct page *page) |
140 | { | 180 | { |
141 | if (unlikely(PageTransHuge(page))) | 181 | if (unlikely(PageTransHuge(page))) |
@@ -143,6 +183,11 @@ static inline int hpage_nr_pages(struct page *page) | |||
143 | return 1; | 183 | return 1; |
144 | } | 184 | } |
145 | 185 | ||
186 | struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, | ||
187 | pmd_t *pmd, int flags); | ||
188 | struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, | ||
189 | pud_t *pud, int flags); | ||
190 | |||
146 | extern int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd); | 191 | extern int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd); |
147 | 192 | ||
148 | extern struct page *huge_zero_page; | 193 | extern struct page *huge_zero_page; |
@@ -157,6 +202,11 @@ static inline bool is_huge_zero_pmd(pmd_t pmd) | |||
157 | return is_huge_zero_page(pmd_page(pmd)); | 202 | return is_huge_zero_page(pmd_page(pmd)); |
158 | } | 203 | } |
159 | 204 | ||
205 | static inline bool is_huge_zero_pud(pud_t pud) | ||
206 | { | ||
207 | return false; | ||
208 | } | ||
209 | |||
160 | struct page *mm_get_huge_zero_page(struct mm_struct *mm); | 210 | struct page *mm_get_huge_zero_page(struct mm_struct *mm); |
161 | void mm_put_huge_zero_page(struct mm_struct *mm); | 211 | void mm_put_huge_zero_page(struct mm_struct *mm); |
162 | 212 | ||
@@ -167,6 +217,10 @@ void mm_put_huge_zero_page(struct mm_struct *mm); | |||
167 | #define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; }) | 217 | #define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; }) |
168 | #define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; }) | 218 | #define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; }) |
169 | 219 | ||
220 | #define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; }) | ||
221 | #define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; }) | ||
222 | #define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; }) | ||
223 | |||
170 | #define hpage_nr_pages(x) 1 | 224 | #define hpage_nr_pages(x) 1 |
171 | 225 | ||
172 | #define transparent_hugepage_enabled(__vma) 0 | 226 | #define transparent_hugepage_enabled(__vma) 0 |
@@ -195,6 +249,9 @@ static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, | |||
195 | static inline void split_huge_pmd_address(struct vm_area_struct *vma, | 249 | static inline void split_huge_pmd_address(struct vm_area_struct *vma, |
196 | unsigned long address, bool freeze, struct page *page) {} | 250 | unsigned long address, bool freeze, struct page *page) {} |
197 | 251 | ||
252 | #define split_huge_pud(__vma, __pmd, __address) \ | ||
253 | do { } while (0) | ||
254 | |||
198 | static inline int hugepage_madvise(struct vm_area_struct *vma, | 255 | static inline int hugepage_madvise(struct vm_area_struct *vma, |
199 | unsigned long *vm_flags, int advice) | 256 | unsigned long *vm_flags, int advice) |
200 | { | 257 | { |
@@ -212,6 +269,11 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, | |||
212 | { | 269 | { |
213 | return NULL; | 270 | return NULL; |
214 | } | 271 | } |
272 | static inline spinlock_t *pud_trans_huge_lock(pud_t *pud, | ||
273 | struct vm_area_struct *vma) | ||
274 | { | ||
275 | return NULL; | ||
276 | } | ||
215 | 277 | ||
216 | static inline int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd) | 278 | static inline int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd) |
217 | { | 279 | { |
@@ -223,6 +285,11 @@ static inline bool is_huge_zero_page(struct page *page) | |||
223 | return false; | 285 | return false; |
224 | } | 286 | } |
225 | 287 | ||
288 | static inline bool is_huge_zero_pud(pud_t pud) | ||
289 | { | ||
290 | return false; | ||
291 | } | ||
292 | |||
226 | static inline void mm_put_huge_zero_page(struct mm_struct *mm) | 293 | static inline void mm_put_huge_zero_page(struct mm_struct *mm) |
227 | { | 294 | { |
228 | return; | 295 | return; |
@@ -233,6 +300,12 @@ static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma, | |||
233 | { | 300 | { |
234 | return NULL; | 301 | return NULL; |
235 | } | 302 | } |
303 | |||
304 | static inline struct page *follow_devmap_pud(struct vm_area_struct *vma, | ||
305 | unsigned long addr, pud_t *pud, int flags) | ||
306 | { | ||
307 | return NULL; | ||
308 | } | ||
236 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ | 309 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ |
237 | 310 | ||
238 | #endif /* _LINUX_HUGE_MM_H */ | 311 | #endif /* _LINUX_HUGE_MM_H */ |
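
With vmf_insert_pfn_pud() and the PE_SIZE_PUD plumbing above, a DAX-style driver can service PUD-sized faults the same way it already handles PMDs. A sketch in the spirit of the device-dax consumer (example_lookup_pfn() is a hypothetical helper; vmf->pud is the new fault-descriptor field from this series):

    static int example_huge_fault(struct vm_fault *vmf,
                                  enum page_entry_size pe_size)
    {
            pfn_t pfn = example_lookup_pfn(vmf);

            if (pe_size == PE_SIZE_PUD)
                    return vmf_insert_pfn_pud(vmf->vma, vmf->address,
                                              vmf->pud, pfn,
                                              vmf->flags & FAULT_FLAG_WRITE);
            return VM_FAULT_FALLBACK;
    }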
diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 891459caa278..7291810067eb 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h | |||
@@ -79,8 +79,7 @@ int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, | |||
79 | bool *did_zero, const struct iomap_ops *ops); | 79 | bool *did_zero, const struct iomap_ops *ops); |
80 | int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, | 80 | int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, |
81 | const struct iomap_ops *ops); | 81 | const struct iomap_ops *ops); |
82 | int iomap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, | 82 | int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops); |
83 | const struct iomap_ops *ops); | ||
84 | int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | 83 | int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, |
85 | loff_t start, loff_t len, const struct iomap_ops *ops); | 84 | loff_t start, loff_t len, const struct iomap_ops *ops); |
86 | 85 | ||
diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h index 1c30014ed176..d29e1e21bf3f 100644 --- a/include/linux/iopoll.h +++ b/include/linux/iopoll.h | |||
@@ -17,7 +17,7 @@ | |||
17 | 17 | ||
18 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
19 | #include <linux/types.h> | 19 | #include <linux/types.h> |
20 | #include <linux/hrtimer.h> | 20 | #include <linux/ktime.h> |
21 | #include <linux/delay.h> | 21 | #include <linux/delay.h> |
22 | #include <linux/errno.h> | 22 | #include <linux/errno.h> |
23 | #include <linux/io.h> | 23 | #include <linux/io.h> |
diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 820c0ad54a01..c908b25bf5a5 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h | |||
@@ -52,7 +52,7 @@ void kasan_free_pages(struct page *page, unsigned int order); | |||
52 | void kasan_cache_create(struct kmem_cache *cache, size_t *size, | 52 | void kasan_cache_create(struct kmem_cache *cache, size_t *size, |
53 | unsigned long *flags); | 53 | unsigned long *flags); |
54 | void kasan_cache_shrink(struct kmem_cache *cache); | 54 | void kasan_cache_shrink(struct kmem_cache *cache); |
55 | void kasan_cache_destroy(struct kmem_cache *cache); | 55 | void kasan_cache_shutdown(struct kmem_cache *cache); |
56 | 56 | ||
57 | void kasan_poison_slab(struct page *page); | 57 | void kasan_poison_slab(struct page *page); |
58 | void kasan_unpoison_object_data(struct kmem_cache *cache, void *object); | 58 | void kasan_unpoison_object_data(struct kmem_cache *cache, void *object); |
@@ -98,7 +98,7 @@ static inline void kasan_cache_create(struct kmem_cache *cache, | |||
98 | size_t *size, | 98 | size_t *size, |
99 | unsigned long *flags) {} | 99 | unsigned long *flags) {} |
100 | static inline void kasan_cache_shrink(struct kmem_cache *cache) {} | 100 | static inline void kasan_cache_shrink(struct kmem_cache *cache) {} |
101 | static inline void kasan_cache_destroy(struct kmem_cache *cache) {} | 101 | static inline void kasan_cache_shutdown(struct kmem_cache *cache) {} |
102 | 102 | ||
103 | static inline void kasan_poison_slab(struct page *page) {} | 103 | static inline void kasan_poison_slab(struct page *page) {} |
104 | static inline void kasan_unpoison_object_data(struct kmem_cache *cache, | 104 | static inline void kasan_unpoison_object_data(struct kmem_cache *cache, |
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index cb09238f6d32..4c26dc3a8295 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
@@ -100,16 +100,18 @@ | |||
100 | ) | 100 | ) |
101 | 101 | ||
102 | /* | 102 | /* |
103 | * Divide positive or negative dividend by positive divisor and round | 103 | * Divide positive or negative dividend by positive or negative divisor |
104 | * to closest integer. Result is undefined for negative divisors and | 104 | * and round to closest integer. Result is undefined for negative |
105 | * for negative dividends if the divisor variable type is unsigned. | 105 | * divisors if the dividend variable type is unsigned and for negative |
106 | * dividends if the divisor variable type is unsigned. | ||
106 | */ | 107 | */ |
107 | #define DIV_ROUND_CLOSEST(x, divisor)( \ | 108 | #define DIV_ROUND_CLOSEST(x, divisor)( \ |
108 | { \ | 109 | { \ |
109 | typeof(x) __x = x; \ | 110 | typeof(x) __x = x; \ |
110 | typeof(divisor) __d = divisor; \ | 111 | typeof(divisor) __d = divisor; \ |
111 | (((typeof(x))-1) > 0 || \ | 112 | (((typeof(x))-1) > 0 || \ |
112 | ((typeof(divisor))-1) > 0 || (__x) > 0) ? \ | 113 | ((typeof(divisor))-1) > 0 || \ |
114 | (((__x) > 0) == ((__d) > 0))) ? \ | ||
113 | (((__x) + ((__d) / 2)) / (__d)) : \ | 115 | (((__x) + ((__d) / 2)) / (__d)) : \ |
114 | (((__x) - ((__d) / 2)) / (__d)); \ | 116 | (((__x) - ((__d) / 2)) / (__d)); \ |
115 | } \ | 117 | } \ |
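
The extra sign comparison makes the mixed-sign case round to the nearest integer instead of producing a result biased toward zero. Worked cases, with the arithmetic each branch performs shown in the comments:

    /* same signs: (x + d/2) / d */
    DIV_ROUND_CLOSEST( 7,  2);      /* ( 7 +  1) /  2 ==  4 */
    DIV_ROUND_CLOSEST(-7, -2);      /* (-7 + -1) / -2 ==  4 */
    /* mixed signs (the newly handled case): (x - d/2) / d */
    DIV_ROUND_CLOSEST(-7,  2);      /* (-7 -  1) /  2 == -4 */
    DIV_ROUND_CLOSEST( 7, -2);      /* ( 7 - -1) / -2 == -4 */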
diff --git a/include/linux/lz4.h b/include/linux/lz4.h index 6b784c59f321..394e3d9213b8 100644 --- a/include/linux/lz4.h +++ b/include/linux/lz4.h | |||
@@ -1,87 +1,648 @@ | |||
1 | #ifndef __LZ4_H__ | 1 | /* LZ4 Kernel Interface |
2 | #define __LZ4_H__ | ||
3 | /* | ||
4 | * LZ4 Kernel Interface | ||
5 | * | 2 | * |
6 | * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com> | 3 | * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com> |
4 | * Copyright (C) 2016, Sven Schmidt <4sschmid@informatik.uni-hamburg.de> | ||
7 | * | 5 | * |
8 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
10 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | * | ||
10 | * This file is based on the original header file | ||
11 | * for LZ4 - Fast LZ compression algorithm. | ||
12 | * | ||
13 | * LZ4 - Fast LZ compression algorithm | ||
14 | * Copyright (C) 2011-2016, Yann Collet. | ||
15 | * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) | ||
16 | * Redistribution and use in source and binary forms, with or without | ||
17 | * modification, are permitted provided that the following conditions are | ||
18 | * met: | ||
19 | * * Redistributions of source code must retain the above copyright | ||
20 | * notice, this list of conditions and the following disclaimer. | ||
21 | * * Redistributions in binary form must reproduce the above | ||
22 | * copyright notice, this list of conditions and the following disclaimer | ||
23 | * in the documentation and/or other materials provided with the | ||
24 | * distribution. | ||
25 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
26 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
27 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
28 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
29 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
30 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
31 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
32 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
33 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
34 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
35 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
36 | * You can contact the author at : | ||
37 | * - LZ4 homepage : http://www.lz4.org | ||
38 | * - LZ4 source repository : https://github.com/lz4/lz4 | ||
11 | */ | 39 | */ |
12 | #define LZ4_MEM_COMPRESS (16384) | ||
13 | #define LZ4HC_MEM_COMPRESS (262144 + (2 * sizeof(unsigned char *))) | ||
14 | 40 | ||
41 | #ifndef __LZ4_H__ | ||
42 | #define __LZ4_H__ | ||
43 | |||
44 | #include <linux/types.h> | ||
45 | #include <linux/string.h> /* memset, memcpy */ | ||
46 | |||
47 | /*-************************************************************************ | ||
48 | * CONSTANTS | ||
49 | **************************************************************************/ | ||
15 | /* | 50 | /* |
16 | * lz4_compressbound() | 51 | * LZ4_MEMORY_USAGE : |
17 | * Provides the maximum size that LZ4 may output in a "worst case" scenario | 52 | * Memory usage formula : N->2^N Bytes |
18 | * (input data not compressible) | 53 | * (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) |
54 | * Increasing memory usage improves compression ratio | ||
55 | * Reduced memory usage can improve speed, due to cache effect | ||
56 | * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache | ||
19 | */ | 57 | */ |
20 | static inline size_t lz4_compressbound(size_t isize) | 58 | #define LZ4_MEMORY_USAGE 14 |
21 | { | 59 | |
22 | return isize + (isize / 255) + 16; | 60 | #define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */ |
23 | } | 61 | #define LZ4_COMPRESSBOUND(isize) (\ |
62 | (unsigned int)(isize) > (unsigned int)LZ4_MAX_INPUT_SIZE \ | ||
63 | ? 0 \ | ||
64 | : (isize) + ((isize)/255) + 16) | ||
65 | |||
66 | #define LZ4_ACCELERATION_DEFAULT 1 | ||
67 | #define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2) | ||
68 | #define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE) | ||
69 | #define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG) | ||
70 | |||
71 | #define LZ4HC_MIN_CLEVEL 3 | ||
72 | #define LZ4HC_DEFAULT_CLEVEL 9 | ||
73 | #define LZ4HC_MAX_CLEVEL 16 | ||
74 | |||
75 | #define LZ4HC_DICTIONARY_LOGSIZE 16 | ||
76 | #define LZ4HC_MAXD (1<<LZ4HC_DICTIONARY_LOGSIZE) | ||
77 | #define LZ4HC_MAXD_MASK (LZ4HC_MAXD - 1) | ||
78 | #define LZ4HC_HASH_LOG (LZ4HC_DICTIONARY_LOGSIZE - 1) | ||
79 | #define LZ4HC_HASHTABLESIZE (1 << LZ4HC_HASH_LOG) | ||
80 | #define LZ4HC_HASH_MASK (LZ4HC_HASHTABLESIZE - 1) | ||
81 | |||
82 | /*-************************************************************************ | ||
83 | * STREAMING CONSTANTS AND STRUCTURES | ||
84 | **************************************************************************/ | ||
85 | #define LZ4_STREAMSIZE_U64 ((1 << (LZ4_MEMORY_USAGE - 3)) + 4) | ||
86 | #define LZ4_STREAMSIZE (LZ4_STREAMSIZE_U64 * sizeof(unsigned long long)) | ||
87 | |||
88 | #define LZ4_STREAMHCSIZE 262192 | ||
89 | #define LZ4_STREAMHCSIZE_SIZET (262192 / sizeof(size_t)) | ||
90 | |||
91 | #define LZ4_STREAMDECODESIZE_U64 4 | ||
92 | #define LZ4_STREAMDECODESIZE (LZ4_STREAMDECODESIZE_U64 * \ | ||
93 | sizeof(unsigned long long)) | ||
24 | 94 | ||
25 | /* | 95 | /* |
26 | * lz4_compress() | 96 | * LZ4_stream_t - information structure to track an LZ4 stream. |
27 | * src : source address of the original data | 97 | */ |
28 | * src_len : size of the original data | 98 | typedef struct { |
29 | * dst : output buffer address of the compressed data | 99 | uint32_t hashTable[LZ4_HASH_SIZE_U32]; |
30 | * This requires 'dst' of size LZ4_COMPRESSBOUND. | 100 | uint32_t currentOffset; |
31 | * dst_len : is the output size, which is returned after compress done | 101 | uint32_t initCheck; |
32 | * workmem : address of the working memory. | 102 | const uint8_t *dictionary; |
33 | * This requires 'workmem' of size LZ4_MEM_COMPRESS. | 103 | uint8_t *bufferStart; |
34 | * return : Success if return 0 | 104 | uint32_t dictSize; |
35 | * Error if return (< 0) | 105 | } LZ4_stream_t_internal; |
36 | * note : Destination buffer and workmem must be already allocated with | 106 | typedef union { |
37 | * the defined size. | 107 | unsigned long long table[LZ4_STREAMSIZE_U64]; |
38 | */ | 108 | LZ4_stream_t_internal internal_donotuse; |
39 | int lz4_compress(const unsigned char *src, size_t src_len, | 109 | } LZ4_stream_t; |
40 | unsigned char *dst, size_t *dst_len, void *wrkmem); | ||
41 | |||
42 | /* | ||
43 | * lz4hc_compress() | ||
44 | * src : source address of the original data | ||
45 | * src_len : size of the original data | ||
46 | * dst : output buffer address of the compressed data | ||
47 | * This requires 'dst' of size LZ4_COMPRESSBOUND. | ||
48 | * dst_len : is the output size, which is returned after compress done | ||
49 | * workmem : address of the working memory. | ||
50 | * This requires 'workmem' of size LZ4HC_MEM_COMPRESS. | ||
51 | * return : Success if return 0 | ||
52 | * Error if return (< 0) | ||
53 | * note : Destination buffer and workmem must be already allocated with | ||
54 | * the defined size. | ||
55 | */ | ||
56 | int lz4hc_compress(const unsigned char *src, size_t src_len, | ||
57 | unsigned char *dst, size_t *dst_len, void *wrkmem); | ||
58 | 110 | ||
59 | /* | 111 | /* |
60 | * lz4_decompress() | 112 | * LZ4_streamHC_t - information structure to track an LZ4HC stream. |
61 | * src : source address of the compressed data | 113 | */ |
62 | src_len : is the input size, which is returned after decompress done | 114 | typedef struct { |
63 | * dest : output buffer address of the decompressed data | 115 | unsigned int hashTable[LZ4HC_HASHTABLESIZE]; |
64 | * actual_dest_len: is the size of uncompressed data, supposing it's known | 116 | unsigned short chainTable[LZ4HC_MAXD]; |
65 | * return : Success if return 0 | 117 | /* next block to continue on current prefix */ |
66 | * Error if return (< 0) | 118 | const unsigned char *end; |
67 | * note : Destination buffer must be already allocated. | 119 | /* All index relative to this position */ |
68 | * slightly faster than lz4_decompress_unknownoutputsize() | 120 | const unsigned char *base; |
69 | */ | 121 | /* alternate base for extDict */ |
70 | int lz4_decompress(const unsigned char *src, size_t *src_len, | 122 | const unsigned char *dictBase; |
71 | unsigned char *dest, size_t actual_dest_len); | 123 | /* below that point, need extDict */ |
124 | unsigned int dictLimit; | ||
125 | /* below that point, no more dict */ | ||
126 | unsigned int lowLimit; | ||
127 | /* index from which to continue dict update */ | ||
128 | unsigned int nextToUpdate; | ||
129 | unsigned int compressionLevel; | ||
130 | } LZ4HC_CCtx_internal; | ||
131 | typedef union { | ||
132 | size_t table[LZ4_STREAMHCSIZE_SIZET]; | ||
133 | LZ4HC_CCtx_internal internal_donotuse; | ||
134 | } LZ4_streamHC_t; | ||
72 | 135 | ||
73 | /* | 136 | /* |
74 | * lz4_decompress_unknownoutputsize() | 137 | * LZ4_streamDecode_t - information structure to track an |
75 | * src : source address of the compressed data | 138 | * LZ4 stream during decompression. |
76 | * src_len : is the input size, therefore the compressed size | 139 | * |
77 | * dest : output buffer address of the decompressed data | 140 | * init this structure using LZ4_setStreamDecode (or memset()) before first use |
78 | * dest_len: is the max size of the destination buffer, which is | 141 | */ |
79 | * returned with actual size of decompressed data after | 142 | typedef struct { |
80 | * decompress done | 143 | const uint8_t *externalDict; |
81 | * return : Success if return 0 | 144 | size_t extDictSize; |
82 | * Error if return (< 0) | 145 | const uint8_t *prefixEnd; |
83 | * note : Destination buffer must be already allocated. | 146 | size_t prefixSize; |
84 | */ | 147 | } LZ4_streamDecode_t_internal; |
85 | int lz4_decompress_unknownoutputsize(const unsigned char *src, size_t src_len, | 148 | typedef union { |
86 | unsigned char *dest, size_t *dest_len); | 149 | unsigned long long table[LZ4_STREAMDECODESIZE_U64]; |
150 | LZ4_streamDecode_t_internal internal_donotuse; | ||
151 | } LZ4_streamDecode_t; | ||
152 | |||
153 | /*-************************************************************************ | ||
154 | * SIZE OF STATE | ||
155 | **************************************************************************/ | ||
156 | #define LZ4_MEM_COMPRESS LZ4_STREAMSIZE | ||
157 | #define LZ4HC_MEM_COMPRESS LZ4_STREAMHCSIZE | ||
158 | |||
159 | /*-************************************************************************ | ||
160 | * Compression Functions | ||
161 | **************************************************************************/ | ||
162 | |||
163 | /** | ||
164 | * LZ4_compressBound() - Max. output size in worst case scenarios | ||
165 | * @isize: Size of the input data | ||
166 | * | ||
167 | * Return: Max. size LZ4 may output in a "worst case" scenario | ||
168 | * (data not compressible) | ||
169 | */ | ||
170 | static inline int LZ4_compressBound(size_t isize) | ||
171 | { | ||
172 | return LZ4_COMPRESSBOUND(isize); | ||
173 | } | ||
174 | |||
175 | /** | ||
176 | * LZ4_compress_default() - Compress data from source to dest | ||
177 | * @source: source address of the original data | ||
178 | * @dest: output buffer address of the compressed data | ||
179 | * @inputSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE | ||
180 | * @maxOutputSize: full or partial size of buffer 'dest' | ||
181 | * which must be already allocated | ||
182 | * @wrkmem: address of the working memory. | ||
183 | * This requires 'wrkmem' of size LZ4_MEM_COMPRESS. | ||
184 | * | ||
185 | * Compresses 'inputSize' bytes from buffer 'source' | ||
186 | * into already allocated 'dest' buffer of size 'maxOutputSize'. | ||
187 | * Compression is guaranteed to succeed if | ||
188 | * 'maxOutputSize' >= LZ4_compressBound(inputSize). | ||
189 | * It also runs faster, so it's a recommended setting. | ||
190 | * If the function cannot compress 'source' into a more limited 'dest' budget, | ||
191 | * compression stops *immediately*, and the function result is zero. | ||
192 | * As a consequence, 'dest' content is not valid. | ||
193 | * | ||
194 | * Return: Number of bytes written into buffer 'dest' | ||
195 | * (necessarily <= maxOutputSize) or 0 if compression fails | ||
196 | */ | ||
197 | int LZ4_compress_default(const char *source, char *dest, int inputSize, | ||
198 | int maxOutputSize, void *wrkmem); | ||
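
A minimal usage sketch of the new one-shot API (the helper name and error mapping are illustrative, not part of the patch):

    #include <linux/errno.h>
    #include <linux/lz4.h>
    #include <linux/vmalloc.h>

    /* Compress src_len bytes into dst; returns compressed size or -errno. */
    static int example_compress(const char *src, int src_len,
                                char *dst, int dst_capacity)
    {
            void *wrkmem = vmalloc(LZ4_MEM_COMPRESS);   /* per-call state */
            int out_len;

            if (!wrkmem)
                    return -ENOMEM;
            out_len = LZ4_compress_default(src, dst, src_len,
                                           dst_capacity, wrkmem);
            vfree(wrkmem);
            /* 0 means dst was too small; sizing dst with
             * LZ4_compressBound(src_len) makes this path unreachable. */
            return out_len ? out_len : -ENOSPC;
    }
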
199 | |||
200 | /** | ||
201 | * LZ4_compress_fast() - As LZ4_compress_default providing an acceleration param | ||
202 | * @source: source address of the original data | ||
203 | * @dest: output buffer address of the compressed data | ||
204 | * @inputSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE | ||
205 | * @maxOutputSize: full or partial size of buffer 'dest' | ||
206 | * which must be already allocated | ||
207 | * @acceleration: acceleration factor | ||
208 | * @wrkmem: address of the working memory. | ||
209 | * This requires 'wrkmem' of size LZ4_MEM_COMPRESS. | ||
210 | * | ||
211 | * Same as LZ4_compress_default(), but allows selecting an "acceleration" | ||
212 | * factor. The larger the acceleration value, the faster the algorithm, | ||
213 | * but also the lower the compression ratio. It's a trade-off, and it can be | ||
214 | * fine-tuned, with each successive value providing roughly +3% more speed. | ||
215 | * An acceleration value of "1" is the same as regular LZ4_compress_default() | ||
216 | * Values <= 0 will be replaced by LZ4_ACCELERATION_DEFAULT, which is 1. | ||
217 | * | ||
218 | * Return: Number of bytes written into buffer 'dest' | ||
219 | * (necessarily <= maxOutputSize) or 0 if compression fails | ||
220 | */ | ||
221 | int LZ4_compress_fast(const char *source, char *dest, int inputSize, | ||
222 | int maxOutputSize, int acceleration, void *wrkmem); | ||
223 | |||
224 | /** | ||
225 | * LZ4_compress_destSize() - Compress as much data as possible | ||
226 | * from source to dest | ||
227 | * @source: source address of the original data | ||
228 | * @dest: output buffer address of the compressed data | ||
229 | * @sourceSizePtr: will be modified to indicate how many bytes were read | ||
230 | * from 'source' to fill 'dest'. New value is necessarily <= old value. | ||
231 | * @targetDestSize: Size of buffer 'dest' which must be already allocated | ||
232 | * @wrkmem: address of the working memory. | ||
233 | * This requires 'wrkmem' of size LZ4_MEM_COMPRESS. | ||
234 | * | ||
235 | * Reverses the logic by compressing as much data as possible | ||
236 | * from 'source' buffer into already allocated buffer 'dest' | ||
237 | * of size 'targetDestSize'. | ||
238 | * This function either compresses the entire 'source' content into 'dest' | ||
239 | * if it's large enough, or fills 'dest' buffer completely with as much data as | ||
240 | * possible from 'source'. | ||
241 | * | ||
242 | * Return: Number of bytes written into 'dest' (necessarily <= targetDestSize) | ||
243 | * or 0 if compression fails | ||
244 | */ | ||
245 | int LZ4_compress_destSize(const char *source, char *dest, int *sourceSizePtr, | ||
246 | int targetDestSize, void *wrkmem); | ||
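
For example, to pack as much of an input as fits into one fixed 4 KB output page (a sketch reusing the wrkmem convention above; all names illustrative):

    int consumed = src_len;   /* in: bytes available, out: bytes used */
    int n = LZ4_compress_destSize(src, dst, &consumed, 4096, wrkmem);
    /* n bytes of dst now hold a valid block covering the first
     * 'consumed' bytes of src; the caller resumes at src + consumed. */
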
247 | |||
248 | /*-************************************************************************ | ||
249 | * Decompression Functions | ||
250 | **************************************************************************/ | ||
251 | |||
252 | /** | ||
253 | * LZ4_decompress_fast() - Decompresses data from 'source' into 'dest' | ||
254 | * @source: source address of the compressed data | ||
255 | * @dest: output buffer address of the uncompressed data | ||
256 | * which must be already allocated with 'originalSize' bytes | ||
257 | * @originalSize: is the original and therefore uncompressed size | ||
258 | * | ||
259 | * Decompresses data from 'source' into 'dest'. | ||
260 | * This function fully respects memory boundaries for properly formed | ||
261 | * compressed data. | ||
262 | * It is a bit faster than LZ4_decompress_safe(). | ||
263 | * However, it does not provide any protection against intentionally | ||
264 | * modified data stream (malicious input). | ||
265 | * Use this function in a trusted environment only | ||
266 | * (data to decode comes from a trusted source). | ||
267 | * | ||
268 | * Return: number of bytes read from the source buffer | ||
269 | * or a negative result if decompression fails. | ||
270 | */ | ||
271 | int LZ4_decompress_fast(const char *source, char *dest, int originalSize); | ||
272 | |||
273 | /** | ||
274 | * LZ4_decompress_safe() - Decompression protected against buffer overflow | ||
275 | * @source: source address of the compressed data | ||
276 | * @dest: output buffer address of the uncompressed data | ||
277 | * which must be already allocated | ||
278 | * @compressedSize: is the precise full size of the compressed block | ||
279 | * @maxDecompressedSize: is the size of 'dest' buffer | ||
280 | * | ||
281 | * Decompresses data from 'source' into 'dest'. | ||
282 | * If the source stream is detected malformed, the function will | ||
283 | * stop decoding and return a negative result. | ||
284 | * This function is protected against buffer overflow exploits, | ||
285 | * including malicious data packets. It never writes outside output buffer, | ||
286 | * nor reads outside input buffer. | ||
287 | * | ||
288 | * Return: number of bytes decompressed into destination buffer | ||
289 | * (necessarily <= maxDecompressedSize) | ||
290 | * or a negative result in case of error | ||
291 | */ | ||
292 | int LZ4_decompress_safe(const char *source, char *dest, int compressedSize, | ||
293 | int maxDecompressedSize); | ||
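
The safe variant is the one to reach for when the input crosses a trust boundary; a hedged sketch:

    /* comp/comp_len arrived from an untrusted source. */
    int n = LZ4_decompress_safe(comp, out, comp_len, out_capacity);

    if (n < 0)
            return -EINVAL;   /* malformed or truncated stream, out is junk */
    /* out[0..n-1] holds the decompressed data */
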
294 | |||
295 | /** | ||
296 | * LZ4_decompress_safe_partial() - Decompress a block of size 'compressedSize' | ||
297 | * at position 'source' into buffer 'dest' | ||
298 | * @source: source address of the compressed data | ||
299 | * @dest: output buffer address of the decompressed data which must be | ||
300 | * already allocated | ||
301 | * @compressedSize: is the precise full size of the compressed block. | ||
302 | * @targetOutputSize: the decompression operation will try | ||
303 | * to stop as soon as 'targetOutputSize' has been reached | ||
304 | * @maxDecompressedSize: is the size of destination buffer | ||
305 | * | ||
306 | * This function decompresses a compressed block of size 'compressedSize' | ||
307 | * at position 'source' into destination buffer 'dest' | ||
308 | * of size 'maxDecompressedSize'. | ||
309 | * The function tries to stop the decompression operation as soon as | ||
310 | * 'targetOutputSize' has been reached, reducing decompression time. | ||
311 | * This function never writes outside of output buffer, | ||
312 | * and never reads outside of input buffer. | ||
313 | * It is therefore protected against malicious data packets. | ||
314 | * | ||
315 | * Return: the number of bytes decoded in the destination buffer | ||
316 | * (necessarily <= maxDecompressedSize) | ||
317 | * or a negative result in case of error | ||
318 | * | ||
319 | */ | ||
320 | int LZ4_decompress_safe_partial(const char *source, char *dest, | ||
321 | int compressedSize, int targetOutputSize, int maxDecompressedSize); | ||
322 | |||
323 | /*-************************************************************************ | ||
324 | * LZ4 HC Compression | ||
325 | **************************************************************************/ | ||
326 | |||
327 | /** | ||
328 | * LZ4_compress_HC() - Compress data from `src` into `dst`, using HC algorithm | ||
329 | * @src: source address of the original data | ||
330 | * @dst: output buffer address of the compressed data | ||
331 | * @srcSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE | ||
332 | * @dstCapacity: full or partial size of buffer 'dst', | ||
333 | * which must be already allocated | ||
334 | * @compressionLevel: Recommended values are between 4 and 9, although any | ||
335 | * value between 1 and LZ4HC_MAX_CLEVEL will work. | ||
336 | * Values >LZ4HC_MAX_CLEVEL behave the same as 16. | ||
337 | * @wrkmem: address of the working memory. | ||
338 | * This requires 'wrkmem' of size LZ4HC_MEM_COMPRESS. | ||
339 | * | ||
340 | * Compress data from 'src' into 'dst', using the more powerful | ||
341 | * but slower "HC" algorithm. Compression is guaranteed to succeed if | ||
342 | * 'dstCapacity' >= LZ4_compressBound(srcSize). | ||
343 | * | ||
344 | * Return: the number of bytes written into 'dst' or 0 if compression fails. | ||
345 | */ | ||
346 | int LZ4_compress_HC(const char *src, char *dst, int srcSize, int dstCapacity, | ||
347 | int compressionLevel, void *wrkmem); | ||
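
Usage mirrors LZ4_compress_default(), except the working state is the larger LZ4HC_MEM_COMPRESS (sketch; names and error handling illustrative):

    void *wrkmem = vmalloc(LZ4HC_MEM_COMPRESS);   /* HC state is larger */
    int n;

    if (!wrkmem)
            return -ENOMEM;
    n = LZ4_compress_HC(src, dst, src_len, dst_capacity,
                        LZ4HC_DEFAULT_CLEVEL, wrkmem);
    vfree(wrkmem);
    /* n == 0 means dst was too small */
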
348 | |||
349 | /** | ||
350 | * LZ4_resetStreamHC() - Init an allocated 'LZ4_streamHC_t' structure | ||
351 | * @streamHCPtr: pointer to the 'LZ4_streamHC_t' structure | ||
352 | * @compressionLevel: Recommended values are between 4 and 9, although any | ||
353 | * value between 1 and LZ4HC_MAX_CLEVEL will work. | ||
354 | * Values >LZ4HC_MAX_CLEVEL behave the same as 16. | ||
355 | * | ||
356 | * An LZ4_streamHC_t structure can be allocated once | ||
357 | * and re-used multiple times. | ||
358 | * Use this function to init an allocated `LZ4_streamHC_t` structure | ||
359 | * and start a new compression. | ||
360 | */ | ||
361 | void LZ4_resetStreamHC(LZ4_streamHC_t *streamHCPtr, int compressionLevel); | ||
362 | |||
363 | /** | ||
364 | * LZ4_loadDictHC() - Load a static dictionary into LZ4_streamHC | ||
365 | * @streamHCPtr: pointer to the LZ4HC_stream_t | ||
366 | * @dictionary: dictionary to load | ||
367 | * @dictSize: size of dictionary | ||
368 | * | ||
369 | * Use this function to load a static dictionary into LZ4HC_stream. | ||
370 | * Any previous data will be forgotten, only 'dictionary' | ||
371 | * will remain in memory. | ||
372 | * Loading a size of 0 is allowed. | ||
373 | * | ||
374 | * Return: dictionary size, in bytes (necessarily <= 64 KB) | ||
375 | */ | ||
376 | int LZ4_loadDictHC(LZ4_streamHC_t *streamHCPtr, const char *dictionary, | ||
377 | int dictSize); | ||
378 | |||
379 | /** | ||
380 | * LZ4_compress_HC_continue() - Compress 'src' using data from previously | ||
381 | * compressed blocks as a dictionary using the HC algorithm | ||
382 | * @streamHCPtr: Pointer to the previous 'LZ4_streamHC_t' structure | ||
383 | * @src: source address of the original data | ||
384 | * @dst: output buffer address of the compressed data, | ||
385 | * which must be already allocated | ||
386 | * @srcSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE | ||
387 | * @maxDstSize: full or partial size of buffer 'dst' | ||
388 | * which must be already allocated | ||
389 | * | ||
390 | * These functions compress data in successive blocks of any size, using | ||
391 | * previous blocks as dictionary. One key assumption is that previous | ||
392 | * blocks (up to 64 KB) remain read-accessible while | ||
393 | * compressing next blocks. There is an exception for ring buffers, | ||
394 | * which can be smaller than 64 KB. | ||
395 | * The ring buffer scenario is automatically detected and handled by | ||
396 | * LZ4_compress_HC_continue(). | ||
397 | * Before starting compression, state must be properly initialized, | ||
398 | * using LZ4_resetStreamHC(). | ||
399 | * A first "fictional block" can then be designated as | ||
400 | * initial dictionary, using LZ4_loadDictHC() (Optional). | ||
401 | * Then, use LZ4_compress_HC_continue() | ||
402 | * to compress each successive block. Previous memory blocks | ||
403 | * (including initial dictionary when present) must remain accessible | ||
404 | * and unmodified during compression. | ||
405 | * 'dst' buffer should be sized to handle worst case scenarios, using | ||
406 | * LZ4_compressBound(), to ensure operation success. | ||
407 | * If, for any reason, previous data blocks can't be preserved unmodified | ||
408 | * in memory during the next compression block, | ||
409 | * you must save them to a safer memory space, using LZ4_saveDictHC(). | ||
410 | * Return value of LZ4_saveDictHC() is the size of dictionary | ||
411 | * effectively saved into 'safeBuffer'. | ||
412 | * | ||
413 | * Return: Number of bytes written into buffer 'dst' or 0 if compression fails | ||
414 | */ | ||
415 | int LZ4_compress_HC_continue(LZ4_streamHC_t *streamHCPtr, const char *src, | ||
416 | char *dst, int srcSize, int maxDstSize); | ||
417 | |||
418 | /** | ||
419 | * LZ4_saveDictHC() - Save static dictionary from LZ4HC_stream | ||
420 | * @streamHCPtr: pointer to the 'LZ4HC_stream_t' structure | ||
421 | * @safeBuffer: buffer to save dictionary to, must be already allocated | ||
422 | * @maxDictSize: size of 'safeBuffer' | ||
423 | * | ||
424 | * If the previously compressed data block is not guaranteed | ||
425 | * to remain available at its memory location, | ||
426 | * save it into a safer place (char *safeBuffer). | ||
427 | * Note: you don't need to call LZ4_loadDictHC() afterwards; | ||
428 | * the dictionary is immediately usable, you can therefore call | ||
429 | * LZ4_compress_HC_continue(). | ||
430 | * | ||
431 | * Return: saved dictionary size in bytes (necessarily <= maxDictSize), | ||
432 | * or 0 if error. | ||
433 | */ | ||
434 | int LZ4_saveDictHC(LZ4_streamHC_t *streamHCPtr, char *safeBuffer, | ||
435 | int maxDictSize); | ||
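
Taken together, the HC streaming calls compose roughly as in this sketch, which compresses equally sized chunks that stay resident in src for the whole run (block framing, i.e. recording each block's compressed size, is elided; the function name is hypothetical):

    static int example_hc_stream(const char *src, int nr_chunks, int chunk,
                                 char *dst, int dst_capacity)
    {
            LZ4_streamHC_t *ctx = vmalloc(sizeof(*ctx));
            int i, n, off = 0;

            if (!ctx)
                    return -ENOMEM;
            LZ4_resetStreamHC(ctx, LZ4HC_DEFAULT_CLEVEL);
            /* LZ4_loadDictHC(ctx, dict, dict_len) could seed history here. */
            for (i = 0; i < nr_chunks; i++) {
                    n = LZ4_compress_HC_continue(ctx, src + i * chunk,
                                                 dst + off, chunk,
                                                 dst_capacity - off);
                    if (!n)
                            break;      /* out of output space */
                    off += n;
            }
            vfree(ctx);
            return off;
    }
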
436 | |||
437 | /*-********************************************* | ||
438 | * Streaming Compression Functions | ||
439 | ***********************************************/ | ||
440 | |||
441 | /** | ||
442 | * LZ4_resetStream() - Init an allocated 'LZ4_stream_t' structure | ||
443 | * @LZ4_stream: pointer to the 'LZ4_stream_t' structure | ||
444 | * | ||
445 | * An LZ4_stream_t structure can be allocated once | ||
446 | * and re-used multiple times. | ||
447 | * Use this function to init an allocated `LZ4_stream_t` structure | ||
448 | * and start a new compression. | ||
449 | */ | ||
450 | void LZ4_resetStream(LZ4_stream_t *LZ4_stream); | ||
451 | |||
452 | /** | ||
453 | * LZ4_loadDict() - Load a static dictionary into LZ4_stream | ||
454 | * @streamPtr: pointer to the LZ4_stream_t | ||
455 | * @dictionary: dictionary to load | ||
456 | * @dictSize: size of dictionary | ||
457 | * | ||
458 | * Use this function to load a static dictionary into LZ4_stream. | ||
459 | * Any previous data will be forgotten, only 'dictionary' | ||
460 | * will remain in memory. | ||
461 | * Loading a size of 0 is allowed. | ||
462 | * | ||
463 | * Return: dictionary size, in bytes (necessarily <= 64 KB) | ||
464 | */ | ||
465 | int LZ4_loadDict(LZ4_stream_t *streamPtr, const char *dictionary, | ||
466 | int dictSize); | ||
467 | |||
468 | /** | ||
469 | * LZ4_saveDict() - Save static dictionary from LZ4_stream | ||
470 | * @streamPtr: pointer to the 'LZ4_stream_t' structure | ||
471 | * @safeBuffer: buffer to save dictionary to, must be already allocated | ||
472 | * @dictSize: size of 'safeBuffer' | ||
473 | * | ||
474 | * If the previously compressed data block is not guaranteed | ||
475 | * to remain available at its memory location, | ||
476 | * save it into a safer place (char *safeBuffer). | ||
477 | * Note: you don't need to call LZ4_loadDict() afterwards; | ||
478 | * the dictionary is immediately usable, you can therefore call | ||
479 | * LZ4_compress_fast_continue(). | ||
480 | * | ||
481 | * Return: saved dictionary size in bytes (necessarily <= dictSize), | ||
482 | * or 0 if error. | ||
483 | */ | ||
484 | int LZ4_saveDict(LZ4_stream_t *streamPtr, char *safeBuffer, int dictSize); | ||
485 | |||
486 | /** | ||
487 | * LZ4_compress_fast_continue() - Compress 'src' using data from previously | ||
488 | * compressed blocks as a dictionary | ||
489 | * @streamPtr: Pointer to the previous 'LZ4_stream_t' structure | ||
490 | * @src: source address of the original data | ||
491 | * @dst: output buffer address of the compressed data, | ||
492 | * which must be already allocated | ||
493 | * @srcSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE | ||
494 | * @maxDstSize: full or partial size of buffer 'dst' | ||
495 | * which must be already allocated | ||
496 | * @acceleration: acceleration factor | ||
497 | * | ||
498 | * Compress buffer content 'src', using data from previously compressed blocks | ||
499 | * as dictionary to improve compression ratio. | ||
500 | * Important: Previous data blocks are assumed to still | ||
501 | * be present and unmodified! | ||
502 | * If maxDstSize >= LZ4_compressBound(srcSize), | ||
503 | * compression is guaranteed to succeed, and runs faster. | ||
504 | * | ||
505 | * Return: Number of bytes written into buffer 'dst' or 0 if compression fails | ||
506 | */ | ||
507 | int LZ4_compress_fast_continue(LZ4_stream_t *streamPtr, const char *src, | ||
508 | char *dst, int srcSize, int maxDstSize, int acceleration); | ||
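
The fast-mode streaming sequence has the same shape, only with LZ4_resetStream() and an acceleration parameter (fragment, not a complete function; allocation checks elided):

    LZ4_stream_t *stream = vmalloc(sizeof(*stream));

    LZ4_resetStream(stream);
    /* per block, while earlier blocks remain mapped and unmodified: */
    n = LZ4_compress_fast_continue(stream, src, dst, src_len,
                                   LZ4_compressBound(src_len),
                                   LZ4_ACCELERATION_DEFAULT);
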
509 | |||
510 | /** | ||
511 | * LZ4_setStreamDecode() - Instruct where to find dictionary | ||
512 | * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure | ||
513 | * @dictionary: dictionary to use | ||
514 | * @dictSize: size of dictionary | ||
515 | * | ||
516 | * Use this function to instruct where to find the dictionary. | ||
517 | * Setting a size of 0 is allowed (same effect as reset). | ||
518 | * | ||
519 | * Return: 1 if OK, 0 if error | ||
520 | */ | ||
521 | int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode, | ||
522 | const char *dictionary, int dictSize); | ||
523 | |||
524 | /** | ||
525 | * LZ4_decompress_safe_continue() - Decompress blocks in streaming mode | ||
526 | * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure | ||
527 | * @source: source address of the compressed data | ||
528 | * @dest: output buffer address of the uncompressed data | ||
529 | * which must be already allocated | ||
530 | * @compressedSize: is the precise full size of the compressed block | ||
531 | * @maxDecompressedSize: is the size of 'dest' buffer | ||
532 | * | ||
533 | * This decoding function allows decompression of multiple blocks | ||
534 | * in "streaming" mode. | ||
535 | * Previously decoded blocks *must* remain available at the memory position | ||
536 | * where they were decoded (up to 64 KB) | ||
537 | * In the case of a ring buffer, the decoding buffer must be either: | ||
538 | * - Exactly same size as encoding buffer, with same update rule | ||
539 | * (block boundaries at same positions). In which case, | ||
540 | * the decoding & encoding ring buffer can have any size, | ||
541 | * including very small ones ( < 64 KB). | ||
542 | * - Larger than encoding buffer, by a minimum of maxBlockSize more bytes. | ||
543 | * maxBlockSize is implementation dependent. | ||
544 | * It's the maximum size you intend to compress into a single block. | ||
545 | * In which case, encoding and decoding buffers do not need | ||
546 | * to be synchronized, and encoding ring buffer can have any size, | ||
547 | * including small ones ( < 64 KB). | ||
548 | * - _At least_ 64 KB + 8 bytes + maxBlockSize. | ||
549 | * In which case, encoding and decoding buffers do not need to be | ||
550 | * synchronized, and encoding ring buffer can have any size, | ||
551 | * including larger than decoding buffer. | ||
552 | * Whenever these conditions are not possible, save the last 64KB of decoded | ||
553 | * data into a safe buffer, and indicate where it is saved | ||
554 | * using LZ4_setStreamDecode(). | ||
555 | * | ||
556 | * Return: number of bytes decompressed into destination buffer | ||
557 | * (necessarily <= maxDecompressedSize) | ||
558 | * or a negative result in case of error | ||
559 | */ | ||
560 | int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode, | ||
561 | const char *source, char *dest, int compressedSize, | ||
562 | int maxDecompressedSize); | ||
563 | |||
564 | /** | ||
565 | * LZ4_decompress_fast_continue() - Decompress blocks in streaming mode | ||
566 | * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure | ||
567 | * @source: source address of the compressed data | ||
568 | * @dest: output buffer address of the uncompressed data | ||
569 | * which must be already allocated with 'originalSize' bytes | ||
570 | * @originalSize: is the original and therefore uncompressed size | ||
571 | * | ||
572 | * This decoding function allows decompression of multiple blocks | ||
573 | * in "streaming" mode. | ||
574 | * Previously decoded blocks *must* remain available at the memory position | ||
575 | * where they were decoded (up to 64 KB) | ||
576 | * In the case of a ring buffer, the decoding buffer must be either: | ||
577 | * - Exactly same size as encoding buffer, with same update rule | ||
578 | * (block boundaries at same positions). In which case, | ||
579 | * the decoding & encoding ring buffer can have any size, | ||
580 | * including very small ones ( < 64 KB). | ||
581 | * - Larger than encoding buffer, by a minimum of maxBlockSize more bytes. | ||
582 | * maxBlockSize is implementation dependent. | ||
583 | * It's the maximum size you intend to compress into a single block. | ||
584 | * In which case, encoding and decoding buffers do not need | ||
585 | * to be synchronized, and encoding ring buffer can have any size, | ||
586 | * including small ones ( < 64 KB). | ||
587 | * - _At least_ 64 KB + 8 bytes + maxBlockSize. | ||
588 | * In which case, encoding and decoding buffers do not need to be | ||
589 | * synchronized, and encoding ring buffer can have any size, | ||
590 | * including larger than decoding buffer. | ||
591 | * Whenever these conditions are not possible, save the last 64KB of decoded | ||
592 | * data into a safe buffer, and indicate where it is saved | ||
593 | * using LZ4_setStreamDecode(). | ||
594 | * | ||
595 | * Return: number of bytes read from the source buffer | ||
596 | * or a negative result in case of error | ||
598 | */ | ||
599 | int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode, | ||
600 | const char *source, char *dest, int originalSize); | ||
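
On the decode side, the per-block call pairs with one up-front reset (fragment; input/output bookkeeping elided):

    LZ4_streamDecode_t sd;

    LZ4_setStreamDecode(&sd, NULL, 0);    /* fresh stream, no dictionary */
    /* per block, in the order the encoder produced them: */
    n = LZ4_decompress_safe_continue(&sd, in, out_pos, in_len,
                                     out_end - out_pos);
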
601 | |||
602 | /** | ||
603 | * LZ4_decompress_safe_usingDict() - Same as LZ4_setStreamDecode() | ||
604 | * followed by LZ4_decompress_safe_continue() | ||
605 | * @source: source address of the compressed data | ||
606 | * @dest: output buffer address of the uncompressed data | ||
607 | * which must be already allocated | ||
608 | * @compressedSize: is the precise full size of the compressed block | ||
609 | * @maxDecompressedSize: is the size of 'dest' buffer | ||
610 | * @dictStart: pointer to the start of the dictionary in memory | ||
611 | * @dictSize: size of dictionary | ||
612 | * | ||
613 | * This decoding function works the same as | ||
614 | * a combination of LZ4_setStreamDecode() followed by | ||
615 | * LZ4_decompress_safe_continue(). | ||
616 | * It is stand-alone, and doesn't need an LZ4_streamDecode_t structure. | ||
617 | * | ||
618 | * Return: number of bytes decompressed into destination buffer | ||
619 | * (necessarily <= maxDecompressedSize) | ||
620 | * or a negative result in case of error | ||
621 | */ | ||
622 | int LZ4_decompress_safe_usingDict(const char *source, char *dest, | ||
623 | int compressedSize, int maxDecompressedSize, const char *dictStart, | ||
624 | int dictSize); | ||
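
The usingDict variants fold the two-call sequence into one stand-alone call, convenient when each message is decoded independently against a fixed preset dictionary (sketch; names illustrative):

    int n = LZ4_decompress_safe_usingDict(comp, out, comp_len, out_capacity,
                                          preset_dict, preset_dict_len);

    if (n < 0)
            return -EINVAL;   /* reject malformed input */
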
625 | |||
626 | /** | ||
627 | * LZ4_decompress_fast_usingDict() - Same as LZ4_setStreamDecode() | ||
628 | * followed by LZ4_decompress_fast_continue() | ||
629 | * @source: source address of the compressed data | ||
630 | * @dest: output buffer address of the uncompressed data | ||
631 | * which must be already allocated with 'originalSize' bytes | ||
632 | * @originalSize: is the original and therefore uncompressed size | ||
633 | * @dictStart: pointer to the start of the dictionary in memory | ||
634 | * @dictSize: size of dictionary | ||
635 | * | ||
636 | * This decoding function works the same as | ||
637 | * a combination of LZ4_setStreamDecode() followed by | ||
638 | * LZ4_decompress_fast_continue(). | ||
639 | * It is stand-alone, and doesn't need an LZ4_streamDecode_t structure. | ||
640 | * | ||
641 | * Return: number of bytes read from the source buffer | ||
642 | * or a negative result in case of error | ||
644 | */ | ||
645 | int LZ4_decompress_fast_usingDict(const char *source, char *dest, | ||
646 | int originalSize, const char *dictStart, int dictSize); | ||
647 | |||
87 | #endif | 648 | #endif |
diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 38bcf00cbed3..bdfc65af4152 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h | |||
@@ -42,6 +42,7 @@ struct memblock_type { | |||
42 | unsigned long max; /* size of the allocated array */ | 42 | unsigned long max; /* size of the allocated array */ |
43 | phys_addr_t total_size; /* size of all regions */ | 43 | phys_addr_t total_size; /* size of all regions */ |
44 | struct memblock_region *regions; | 44 | struct memblock_region *regions; |
45 | char *name; | ||
45 | }; | 46 | }; |
46 | 47 | ||
47 | struct memblock { | 48 | struct memblock { |
diff --git a/include/linux/memory.h b/include/linux/memory.h index 093607f90b91..b723a686fc10 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h | |||
@@ -109,9 +109,6 @@ extern void unregister_memory_notifier(struct notifier_block *nb); | |||
109 | extern int register_memory_isolate_notifier(struct notifier_block *nb); | 109 | extern int register_memory_isolate_notifier(struct notifier_block *nb); |
110 | extern void unregister_memory_isolate_notifier(struct notifier_block *nb); | 110 | extern void unregister_memory_isolate_notifier(struct notifier_block *nb); |
111 | extern int register_new_memory(int, struct mem_section *); | 111 | extern int register_new_memory(int, struct mem_section *); |
112 | extern int memory_block_change_state(struct memory_block *mem, | ||
113 | unsigned long to_state, | ||
114 | unsigned long from_state_req); | ||
115 | #ifdef CONFIG_MEMORY_HOTREMOVE | 112 | #ifdef CONFIG_MEMORY_HOTREMOVE |
116 | extern int unregister_memory_section(struct mem_section *); | 113 | extern int unregister_memory_section(struct mem_section *); |
117 | #endif | 114 | #endif |
diff --git a/include/linux/migrate.h b/include/linux/migrate.h index ae8d475a9385..fa76b516fa47 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h | |||
@@ -37,7 +37,7 @@ extern int migrate_page(struct address_space *, | |||
37 | struct page *, struct page *, enum migrate_mode); | 37 | struct page *, struct page *, enum migrate_mode); |
38 | extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, | 38 | extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, |
39 | unsigned long private, enum migrate_mode mode, int reason); | 39 | unsigned long private, enum migrate_mode mode, int reason); |
40 | extern bool isolate_movable_page(struct page *page, isolate_mode_t mode); | 40 | extern int isolate_movable_page(struct page *page, isolate_mode_t mode); |
41 | extern void putback_movable_page(struct page *page); | 41 | extern void putback_movable_page(struct page *page); |
42 | 42 | ||
43 | extern int migrate_prep(void); | 43 | extern int migrate_prep(void); |
@@ -56,6 +56,8 @@ static inline int migrate_pages(struct list_head *l, new_page_t new, | |||
56 | free_page_t free, unsigned long private, enum migrate_mode mode, | 56 | free_page_t free, unsigned long private, enum migrate_mode mode, |
57 | int reason) | 57 | int reason) |
58 | { return -ENOSYS; } | 58 | { return -ENOSYS; } |
59 | static inline int isolate_movable_page(struct page *page, isolate_mode_t mode) | ||
60 | { return -EBUSY; } | ||
59 | 61 | ||
60 | static inline int migrate_prep(void) { return -ENOSYS; } | 62 | static inline int migrate_prep(void) { return -ENOSYS; } |
61 | static inline int migrate_prep_local(void) { return -ENOSYS; } | 63 | static inline int migrate_prep_local(void) { return -ENOSYS; } |
diff --git a/include/linux/mm.h b/include/linux/mm.h index 574bc157a27c..0d65dd72c0f4 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -314,6 +314,9 @@ struct vm_fault { | |||
314 | unsigned long address; /* Faulting virtual address */ | 314 | unsigned long address; /* Faulting virtual address */ |
315 | pmd_t *pmd; /* Pointer to pmd entry matching | 315 | pmd_t *pmd; /* Pointer to pmd entry matching |
316 | * the 'address' */ | 316 | * the 'address' */ |
317 | pud_t *pud; /* Pointer to pud entry matching | ||
318 | * the 'address' | ||
319 | */ | ||
317 | pte_t orig_pte; /* Value of PTE at the time of fault */ | 320 | pte_t orig_pte; /* Value of PTE at the time of fault */ |
318 | 321 | ||
319 | struct page *cow_page; /* Page handler may use for COW fault */ | 322 | struct page *cow_page; /* Page handler may use for COW fault */ |
@@ -341,6 +344,13 @@ struct vm_fault { | |||
341 | */ | 344 | */ |
342 | }; | 345 | }; |
343 | 346 | ||
347 | /* page entry size for vm->huge_fault() */ | ||
348 | enum page_entry_size { | ||
349 | PE_SIZE_PTE = 0, | ||
350 | PE_SIZE_PMD, | ||
351 | PE_SIZE_PUD, | ||
352 | }; | ||
353 | |||
344 | /* | 354 | /* |
345 | * These are the virtual MM functions - opening of an area, closing and | 355 | * These are the virtual MM functions - opening of an area, closing and |
346 | * unmapping it (needed to keep files on disk up-to-date etc), pointer | 356 | * unmapping it (needed to keep files on disk up-to-date etc), pointer |
@@ -350,17 +360,17 @@ struct vm_operations_struct { | |||
350 | void (*open)(struct vm_area_struct * area); | 360 | void (*open)(struct vm_area_struct * area); |
351 | void (*close)(struct vm_area_struct * area); | 361 | void (*close)(struct vm_area_struct * area); |
352 | int (*mremap)(struct vm_area_struct * area); | 362 | int (*mremap)(struct vm_area_struct * area); |
353 | int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); | 363 | int (*fault)(struct vm_fault *vmf); |
354 | int (*pmd_fault)(struct vm_fault *vmf); | 364 | int (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size); |
355 | void (*map_pages)(struct vm_fault *vmf, | 365 | void (*map_pages)(struct vm_fault *vmf, |
356 | pgoff_t start_pgoff, pgoff_t end_pgoff); | 366 | pgoff_t start_pgoff, pgoff_t end_pgoff); |
357 | 367 | ||
358 | /* notification that a previously read-only page is about to become | 368 | /* notification that a previously read-only page is about to become |
359 | * writable, if an error is returned it will cause a SIGBUS */ | 369 | * writable, if an error is returned it will cause a SIGBUS */ |
360 | int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf); | 370 | int (*page_mkwrite)(struct vm_fault *vmf); |
361 | 371 | ||
362 | /* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */ | 372 | /* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */ |
363 | int (*pfn_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf); | 373 | int (*pfn_mkwrite)(struct vm_fault *vmf); |
364 | 374 | ||
365 | /* called by access_process_vm when get_user_pages() fails, typically | 375 | /* called by access_process_vm when get_user_pages() fails, typically |
366 | * for use by special VMAs that can switch between memory and hardware | 376 | * for use by special VMAs that can switch between memory and hardware |
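
For out-of-tree fault handlers the conversion is mechanical: the VMA now travels inside struct vm_fault. A hedged sketch of a converted handler (my_fault(), my_dev_page() and MY_DEV_PAGES are hypothetical):

    static int my_fault(struct vm_fault *vmf)
    {
            /* was: my_fault(struct vm_area_struct *vma, struct vm_fault *vmf) */
            struct vm_area_struct *vma = vmf->vma;

            if (vmf->pgoff >= MY_DEV_PAGES)
                    return VM_FAULT_SIGBUS;
            vmf->page = my_dev_page(vma->vm_private_data, vmf->pgoff);
            get_page(vmf->page);
            return 0;
    }
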
@@ -416,6 +426,10 @@ static inline int pmd_devmap(pmd_t pmd) | |||
416 | { | 426 | { |
417 | return 0; | 427 | return 0; |
418 | } | 428 | } |
429 | static inline int pud_devmap(pud_t pud) | ||
430 | { | ||
431 | return 0; | ||
432 | } | ||
419 | #endif | 433 | #endif |
420 | 434 | ||
421 | /* | 435 | /* |
@@ -1154,16 +1168,6 @@ extern void pagefault_out_of_memory(void); | |||
1154 | 1168 | ||
1155 | extern void show_free_areas(unsigned int flags, nodemask_t *nodemask); | 1169 | extern void show_free_areas(unsigned int flags, nodemask_t *nodemask); |
1156 | 1170 | ||
1157 | int shmem_zero_setup(struct vm_area_struct *); | ||
1158 | #ifdef CONFIG_SHMEM | ||
1159 | bool shmem_mapping(struct address_space *mapping); | ||
1160 | #else | ||
1161 | static inline bool shmem_mapping(struct address_space *mapping) | ||
1162 | { | ||
1163 | return false; | ||
1164 | } | ||
1165 | #endif | ||
1166 | |||
1167 | extern bool can_do_mlock(void); | 1171 | extern bool can_do_mlock(void); |
1168 | extern int user_shm_lock(size_t, struct user_struct *); | 1172 | extern int user_shm_lock(size_t, struct user_struct *); |
1169 | extern void user_shm_unlock(size_t, struct user_struct *); | 1173 | extern void user_shm_unlock(size_t, struct user_struct *); |
@@ -1191,6 +1195,10 @@ void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma, | |||
1191 | 1195 | ||
1192 | /** | 1196 | /** |
1193 | * mm_walk - callbacks for walk_page_range | 1197 | * mm_walk - callbacks for walk_page_range |
1198 | * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry | ||
1199 | * this handler should only handle pud_trans_huge() puds. | ||
1200 | * the pmd_entry or pte_entry callbacks will be used for | ||
1201 | * regular PUDs. | ||
1194 | * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry | 1202 | * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry |
1195 | * this handler is required to be able to handle | 1203 | * this handler is required to be able to handle |
1196 | * pmd_trans_huge() pmds. They may simply choose to | 1204 | * pmd_trans_huge() pmds. They may simply choose to |
@@ -1210,6 +1218,8 @@ void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma, | |||
1210 | * (see the comment on walk_page_range() for more details) | 1218 | * (see the comment on walk_page_range() for more details) |
1211 | */ | 1219 | */ |
1212 | struct mm_walk { | 1220 | struct mm_walk { |
1221 | int (*pud_entry)(pud_t *pud, unsigned long addr, | ||
1222 | unsigned long next, struct mm_walk *walk); | ||
1213 | int (*pmd_entry)(pmd_t *pmd, unsigned long addr, | 1223 | int (*pmd_entry)(pmd_t *pmd, unsigned long addr, |
1214 | unsigned long next, struct mm_walk *walk); | 1224 | unsigned long next, struct mm_walk *walk); |
1215 | int (*pte_entry)(pte_t *pte, unsigned long addr, | 1225 | int (*pte_entry)(pte_t *pte, unsigned long addr, |
@@ -1793,8 +1803,26 @@ static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd) | |||
1793 | return ptl; | 1803 | return ptl; |
1794 | } | 1804 | } |
1795 | 1805 | ||
1796 | extern void __init pagecache_init(void); | 1806 | /* |
1807 | * No scalability reason to split PUD locks yet, but follow the same pattern | ||
1808 | * as the PMD locks to make it easier if we decide to. The VM should not be | ||
1809 | * considered ready to switch to split PUD locks yet; there may be places | ||
1810 | * which need to be converted from page_table_lock. | ||
1811 | */ | ||
1812 | static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud) | ||
1813 | { | ||
1814 | return &mm->page_table_lock; | ||
1815 | } | ||
1797 | 1816 | ||
1817 | static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud) | ||
1818 | { | ||
1819 | spinlock_t *ptl = pud_lockptr(mm, pud); | ||
1820 | |||
1821 | spin_lock(ptl); | ||
1822 | return ptl; | ||
1823 | } | ||
1824 | |||
1825 | extern void __init pagecache_init(void); | ||
1798 | extern void free_area_init(unsigned long * zones_size); | 1826 | extern void free_area_init(unsigned long * zones_size); |
1799 | extern void free_area_init_node(int nid, unsigned long * zones_size, | 1827 | extern void free_area_init_node(int nid, unsigned long * zones_size, |
1800 | unsigned long zone_start_pfn, unsigned long *zholes_size); | 1828 | unsigned long zone_start_pfn, unsigned long *zholes_size); |
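
A caller-side sketch of the new helper, following the established pmd_lock() pattern (the pud_trans_huge()/pud_devmap() checks reflect how the PUD THP patches elsewhere in this series use it):

    spinlock_t *ptl = pud_lock(vma->vm_mm, pud);

    if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
            /* operate on the huge PUD entry under the lock */
    }
    spin_unlock(ptl);
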
@@ -2003,8 +2031,10 @@ extern struct vm_area_struct *vma_merge(struct mm_struct *, | |||
2003 | unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t, | 2031 | unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t, |
2004 | struct mempolicy *, struct vm_userfaultfd_ctx); | 2032 | struct mempolicy *, struct vm_userfaultfd_ctx); |
2005 | extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *); | 2033 | extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *); |
2006 | extern int split_vma(struct mm_struct *, | 2034 | extern int __split_vma(struct mm_struct *, struct vm_area_struct *, |
2007 | struct vm_area_struct *, unsigned long addr, int new_below); | 2035 | unsigned long addr, int new_below); |
2036 | extern int split_vma(struct mm_struct *, struct vm_area_struct *, | ||
2037 | unsigned long addr, int new_below); | ||
2008 | extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *); | 2038 | extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *); |
2009 | extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *, | 2039 | extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *, |
2010 | struct rb_node **, struct rb_node *); | 2040 | struct rb_node **, struct rb_node *); |
@@ -2052,18 +2082,22 @@ extern int install_special_mapping(struct mm_struct *mm, | |||
2052 | extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); | 2082 | extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); |
2053 | 2083 | ||
2054 | extern unsigned long mmap_region(struct file *file, unsigned long addr, | 2084 | extern unsigned long mmap_region(struct file *file, unsigned long addr, |
2055 | unsigned long len, vm_flags_t vm_flags, unsigned long pgoff); | 2085 | unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, |
2086 | struct list_head *uf); | ||
2056 | extern unsigned long do_mmap(struct file *file, unsigned long addr, | 2087 | extern unsigned long do_mmap(struct file *file, unsigned long addr, |
2057 | unsigned long len, unsigned long prot, unsigned long flags, | 2088 | unsigned long len, unsigned long prot, unsigned long flags, |
2058 | vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate); | 2089 | vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate, |
2059 | extern int do_munmap(struct mm_struct *, unsigned long, size_t); | 2090 | struct list_head *uf); |
2091 | extern int do_munmap(struct mm_struct *, unsigned long, size_t, | ||
2092 | struct list_head *uf); | ||
2060 | 2093 | ||
2061 | static inline unsigned long | 2094 | static inline unsigned long |
2062 | do_mmap_pgoff(struct file *file, unsigned long addr, | 2095 | do_mmap_pgoff(struct file *file, unsigned long addr, |
2063 | unsigned long len, unsigned long prot, unsigned long flags, | 2096 | unsigned long len, unsigned long prot, unsigned long flags, |
2064 | unsigned long pgoff, unsigned long *populate) | 2097 | unsigned long pgoff, unsigned long *populate, |
2098 | struct list_head *uf) | ||
2065 | { | 2099 | { |
2066 | return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate); | 2100 | return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate, uf); |
2067 | } | 2101 | } |
2068 | 2102 | ||
2069 | #ifdef CONFIG_MMU | 2103 | #ifdef CONFIG_MMU |
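
Callers now own the unmap event list. The expected pattern (mirroring how vm_munmap() is converted elsewhere in this series; userfaultfd_unmap_complete() comes from the companion userfaultfd patches) is to collect events under mmap_sem and deliver them after dropping it:

    LIST_HEAD(uf);    /* userfaultfd unmap events accumulate here */
    int ret;

    if (down_write_killable(&mm->mmap_sem))
            return -EINTR;
    ret = do_munmap(mm, start, len, &uf);
    up_write(&mm->mmap_sem);
    userfaultfd_unmap_complete(mm, &uf);
    return ret;
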
@@ -2124,10 +2158,10 @@ extern void truncate_inode_pages_range(struct address_space *, | |||
2124 | extern void truncate_inode_pages_final(struct address_space *); | 2158 | extern void truncate_inode_pages_final(struct address_space *); |
2125 | 2159 | ||
2126 | /* generic vm_area_ops exported for stackable file systems */ | 2160 | /* generic vm_area_ops exported for stackable file systems */ |
2127 | extern int filemap_fault(struct vm_area_struct *, struct vm_fault *); | 2161 | extern int filemap_fault(struct vm_fault *vmf); |
2128 | extern void filemap_map_pages(struct vm_fault *vmf, | 2162 | extern void filemap_map_pages(struct vm_fault *vmf, |
2129 | pgoff_t start_pgoff, pgoff_t end_pgoff); | 2163 | pgoff_t start_pgoff, pgoff_t end_pgoff); |
2130 | extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); | 2164 | extern int filemap_page_mkwrite(struct vm_fault *vmf); |
2131 | 2165 | ||
2132 | /* mm/page-writeback.c */ | 2166 | /* mm/page-writeback.c */ |
2133 | int write_one_page(struct page *page, int wait); | 2167 | int write_one_page(struct page *page, int wait); |
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 41d376e7116d..e030a68ead7e 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h | |||
@@ -50,6 +50,13 @@ static __always_inline void add_page_to_lru_list(struct page *page, | |||
50 | list_add(&page->lru, &lruvec->lists[lru]); | 50 | list_add(&page->lru, &lruvec->lists[lru]); |
51 | } | 51 | } |
52 | 52 | ||
53 | static __always_inline void add_page_to_lru_list_tail(struct page *page, | ||
54 | struct lruvec *lruvec, enum lru_list lru) | ||
55 | { | ||
56 | update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page)); | ||
57 | list_add_tail(&page->lru, &lruvec->lists[lru]); | ||
58 | } | ||
59 | |||
53 | static __always_inline void del_page_from_lru_list(struct page *page, | 60 | static __always_inline void del_page_from_lru_list(struct page *page, |
54 | struct lruvec *lruvec, enum lru_list lru) | 61 | struct lruvec *lruvec, enum lru_list lru) |
55 | { | 62 | { |
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index a1a210d59961..51891fb0d3ce 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h | |||
@@ -381,6 +381,19 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) | |||
381 | ___pmd; \ | 381 | ___pmd; \ |
382 | }) | 382 | }) |
383 | 383 | ||
384 | #define pudp_huge_clear_flush_notify(__vma, __haddr, __pud) \ | ||
385 | ({ \ | ||
386 | unsigned long ___haddr = __haddr & HPAGE_PUD_MASK; \ | ||
387 | struct mm_struct *___mm = (__vma)->vm_mm; \ | ||
388 | pud_t ___pud; \ | ||
389 | \ | ||
390 | ___pud = pudp_huge_clear_flush(__vma, __haddr, __pud); \ | ||
391 | mmu_notifier_invalidate_range(___mm, ___haddr, \ | ||
392 | ___haddr + HPAGE_PUD_SIZE); \ | ||
393 | \ | ||
394 | ___pud; \ | ||
395 | }) | ||
396 | |||
384 | #define pmdp_huge_get_and_clear_notify(__mm, __haddr, __pmd) \ | 397 | #define pmdp_huge_get_and_clear_notify(__mm, __haddr, __pmd) \ |
385 | ({ \ | 398 | ({ \ |
386 | unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \ | 399 | unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \ |
@@ -475,6 +488,7 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) | |||
475 | #define pmdp_clear_young_notify pmdp_test_and_clear_young | 488 | #define pmdp_clear_young_notify pmdp_test_and_clear_young |
476 | #define ptep_clear_flush_notify ptep_clear_flush | 489 | #define ptep_clear_flush_notify ptep_clear_flush |
477 | #define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush | 490 | #define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush |
491 | #define pudp_huge_clear_flush_notify pudp_huge_clear_flush | ||
478 | #define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear | 492 | #define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear |
479 | #define set_pte_at_notify set_pte_at | 493 | #define set_pte_at_notify set_pte_at |
480 | 494 | ||
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 82fc632fd11d..8e02b3750fe0 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -236,8 +236,6 @@ struct lruvec { | |||
236 | #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON)) | 236 | #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON)) |
237 | #define LRU_ALL ((1 << NR_LRU_LISTS) - 1) | 237 | #define LRU_ALL ((1 << NR_LRU_LISTS) - 1) |
238 | 238 | ||
239 | /* Isolate clean file */ | ||
240 | #define ISOLATE_CLEAN ((__force isolate_mode_t)0x1) | ||
241 | /* Isolate unmapped file */ | 239 | /* Isolate unmapped file */ |
242 | #define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2) | 240 | #define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2) |
243 | /* Isolate for asynchronous migration */ | 241 | /* Isolate for asynchronous migration */ |
diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h index 033fc7bbcefa..a49b3259cad7 100644 --- a/include/linux/pfn_t.h +++ b/include/linux/pfn_t.h | |||
@@ -90,6 +90,13 @@ static inline pmd_t pfn_t_pmd(pfn_t pfn, pgprot_t pgprot) | |||
90 | { | 90 | { |
91 | return pfn_pmd(pfn_t_to_pfn(pfn), pgprot); | 91 | return pfn_pmd(pfn_t_to_pfn(pfn), pgprot); |
92 | } | 92 | } |
93 | |||
94 | #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD | ||
95 | static inline pud_t pfn_t_pud(pfn_t pfn, pgprot_t pgprot) | ||
96 | { | ||
97 | return pfn_pud(pfn_t_to_pfn(pfn), pgprot); | ||
98 | } | ||
99 | #endif | ||
93 | #endif | 100 | #endif |
94 | 101 | ||
95 | #ifdef __HAVE_ARCH_PTE_DEVMAP | 102 | #ifdef __HAVE_ARCH_PTE_DEVMAP |
@@ -106,5 +113,10 @@ static inline bool pfn_t_devmap(pfn_t pfn) | |||
106 | } | 113 | } |
107 | pte_t pte_mkdevmap(pte_t pte); | 114 | pte_t pte_mkdevmap(pte_t pte); |
108 | pmd_t pmd_mkdevmap(pmd_t pmd); | 115 | pmd_t pmd_mkdevmap(pmd_t pmd); |
116 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ | ||
117 | defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) | ||
118 | pud_t pud_mkdevmap(pud_t pud); | ||
109 | #endif | 119 | #endif |
120 | #endif /* __HAVE_ARCH_PTE_DEVMAP */ | ||
121 | |||
110 | #endif /* _LINUX_PFN_T_H_ */ | 122 | #endif /* _LINUX_PFN_T_H_ */ |
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index 34cce96741bc..c2a989dee876 100644 --- a/include/linux/pid_namespace.h +++ b/include/linux/pid_namespace.h | |||
@@ -21,6 +21,12 @@ struct pidmap { | |||
21 | 21 | ||
22 | struct fs_pin; | 22 | struct fs_pin; |
23 | 23 | ||
24 | enum { /* definitions for pid_namespace's hide_pid field */ | ||
25 | HIDEPID_OFF = 0, | ||
26 | HIDEPID_NO_ACCESS = 1, | ||
27 | HIDEPID_INVISIBLE = 2, | ||
28 | }; | ||
29 | |||
24 | struct pid_namespace { | 30 | struct pid_namespace { |
25 | struct kref kref; | 31 | struct kref kref; |
26 | struct pidmap pidmap[PIDMAP_ENTRIES]; | 32 | struct pidmap pidmap[PIDMAP_ENTRIES]; |
diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h index d076183e49be..9702b6e183bc 100644 --- a/include/linux/rbtree_augmented.h +++ b/include/linux/rbtree_augmented.h | |||
@@ -90,7 +90,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \ | |||
90 | old->rbaugmented = rbcompute(old); \ | 90 | old->rbaugmented = rbcompute(old); \ |
91 | } \ | 91 | } \ |
92 | rbstatic const struct rb_augment_callbacks rbname = { \ | 92 | rbstatic const struct rb_augment_callbacks rbname = { \ |
93 | rbname ## _propagate, rbname ## _copy, rbname ## _rotate \ | 93 | .propagate = rbname ## _propagate, \ |
94 | .copy = rbname ## _copy, \ | ||
95 | .rotate = rbname ## _rotate \ | ||
94 | }; | 96 | }; |
95 | 97 | ||
96 | 98 | ||
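Spelling out the rb_augment_callbacks fields as designated initializers keeps the generated struct correct even if the fields are ever reordered, and documents each slot at the use site. The idiom in miniature, with a hypothetical ops struct standing in for the rbtree callbacks:

    struct ops {
        void (*propagate)(void);
        void (*copy)(void);
        void (*rotate)(void);
    };

    static void my_propagate(void) { /* ... */ }
    static void my_copy(void)      { /* ... */ }
    static void my_rotate(void)    { /* ... */ }

    /* Positional form: silently wrong if fields are reordered. */
    static const struct ops pos = { my_propagate, my_copy, my_rotate };

    /* Designated form: each function is bound to its named slot. */
    static const struct ops des = {
        .propagate = my_propagate,
        .copy      = my_copy,
        .rotate    = my_rotate,
    };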
diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 15321fb1df6b..8c89e902df3e 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/mm.h> | 9 | #include <linux/mm.h> |
10 | #include <linux/rwsem.h> | 10 | #include <linux/rwsem.h> |
11 | #include <linux/memcontrol.h> | 11 | #include <linux/memcontrol.h> |
12 | #include <linux/highmem.h> | ||
12 | 13 | ||
13 | /* | 14 | /* |
14 | * The anon_vma heads a list of private "related" vmas, to scan if | 15 | * The anon_vma heads a list of private "related" vmas, to scan if |
@@ -196,41 +197,30 @@ int page_referenced(struct page *, int is_locked, | |||
196 | 197 | ||
197 | int try_to_unmap(struct page *, enum ttu_flags flags); | 198 | int try_to_unmap(struct page *, enum ttu_flags flags); |
198 | 199 | ||
199 | /* | 200 | /* Avoid racy checks */ |
200 | * Used by uprobes to replace a userspace page safely | 201 | #define PVMW_SYNC (1 << 0) |
201 | */ | 202 | /* Look for migration entries rather than present PTEs */
202 | pte_t *__page_check_address(struct page *, struct mm_struct *, | 203 | #define PVMW_MIGRATION (1 << 1) |
203 | unsigned long, spinlock_t **, int); | ||
204 | |||
205 | static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm, | ||
206 | unsigned long address, | ||
207 | spinlock_t **ptlp, int sync) | ||
208 | { | ||
209 | pte_t *ptep; | ||
210 | 204 | ||
211 | __cond_lock(*ptlp, ptep = __page_check_address(page, mm, address, | 205 | struct page_vma_mapped_walk { |
212 | ptlp, sync)); | 206 | struct page *page; |
213 | return ptep; | 207 | struct vm_area_struct *vma; |
214 | } | 208 | unsigned long address; |
209 | pmd_t *pmd; | ||
210 | pte_t *pte; | ||
211 | spinlock_t *ptl; | ||
212 | unsigned int flags; | ||
213 | }; | ||
215 | 214 | ||
216 | /* | 215 | static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw) |
217 | * Used by idle page tracking to check if a page was referenced via page | ||
218 | * tables. | ||
219 | */ | ||
220 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
221 | bool page_check_address_transhuge(struct page *page, struct mm_struct *mm, | ||
222 | unsigned long address, pmd_t **pmdp, | ||
223 | pte_t **ptep, spinlock_t **ptlp); | ||
224 | #else | ||
225 | static inline bool page_check_address_transhuge(struct page *page, | ||
226 | struct mm_struct *mm, unsigned long address, | ||
227 | pmd_t **pmdp, pte_t **ptep, spinlock_t **ptlp) | ||
228 | { | 216 | { |
229 | *ptep = page_check_address(page, mm, address, ptlp, 0); | 217 | if (pvmw->pte) |
230 | *pmdp = NULL; | 218 | pte_unmap(pvmw->pte); |
231 | return !!*ptep; | 219 | if (pvmw->ptl) |
220 | spin_unlock(pvmw->ptl); | ||
232 | } | 221 | } |
233 | #endif | 222 | |
223 | bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw); | ||
234 | 224 | ||
235 | /* | 225 | /* |
236 | * Used by swapoff to help locate where page is expected in vma. | 226 | * Used by swapoff to help locate where page is expected in vma. |
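page_vma_mapped_walk() replaces the old page_check_address()/page_check_address_transhuge() pair with an iterator: the caller fills in .page, .vma and .address, and every successful iteration leaves either pvmw.pte or (for a huge mapping) pvmw.pmd pointing at a mapping of the page, with pvmw.ptl held. Returning false means the walk has already dropped its map and lock; bailing out early requires page_vma_mapped_walk_done(). A sketch of the calling convention, modeled on the rmap walkers this series converts (page, vma, address and the loop body are illustrative):

    struct page_vma_mapped_walk pvmw = {
        .page = page,
        .vma = vma,
        .address = address,
    };
    bool referenced = false;

    while (page_vma_mapped_walk(&pvmw)) {
        /* pvmw.pte/pvmw.pmd are valid here and pvmw.ptl is locked. */
        if (pvmw.pte && pte_young(*pvmw.pte))
            referenced = true;

        if (referenced) {
            /* Early exit: release the current entry ourselves. */
            page_vma_mapped_walk_done(&pvmw);
            break;
        }
        /* Otherwise the next iteration releases this entry. */
    }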
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index fdaac9d4d46d..a7d6bd2a918f 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h | |||
@@ -57,7 +57,14 @@ extern int shmem_zero_setup(struct vm_area_struct *); | |||
57 | extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr, | 57 | extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr, |
58 | unsigned long len, unsigned long pgoff, unsigned long flags); | 58 | unsigned long len, unsigned long pgoff, unsigned long flags); |
59 | extern int shmem_lock(struct file *file, int lock, struct user_struct *user); | 59 | extern int shmem_lock(struct file *file, int lock, struct user_struct *user); |
60 | #ifdef CONFIG_SHMEM | ||
60 | extern bool shmem_mapping(struct address_space *mapping); | 61 | extern bool shmem_mapping(struct address_space *mapping); |
62 | #else | ||
63 | static inline bool shmem_mapping(struct address_space *mapping) | ||
64 | { | ||
65 | return false; | ||
66 | } | ||
67 | #endif /* CONFIG_SHMEM */ | ||
61 | extern void shmem_unlock_mapping(struct address_space *mapping); | 68 | extern void shmem_unlock_mapping(struct address_space *mapping); |
62 | extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, | 69 | extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, |
63 | pgoff_t index, gfp_t gfp_mask); | 70 | pgoff_t index, gfp_t gfp_mask); |
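Giving shmem_mapping() a stub that returns false when CONFIG_SHMEM is off lets generic callers test for shmem mappings without #ifdefs; the branch simply constant-folds away. For instance (handle_shmem_page() is a placeholder for whatever the caller does):

    if (shmem_mapping(page->mapping))
        handle_shmem_page(page);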
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index f431861f22f1..0468548acebf 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h | |||
@@ -61,10 +61,18 @@ extern void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *, | |||
61 | unsigned long from, unsigned long to, | 61 | unsigned long from, unsigned long to, |
62 | unsigned long len); | 62 | unsigned long len); |
63 | 63 | ||
64 | extern void madvise_userfault_dontneed(struct vm_area_struct *vma, | 64 | extern void userfaultfd_remove(struct vm_area_struct *vma, |
65 | struct vm_area_struct **prev, | 65 | struct vm_area_struct **prev, |
66 | unsigned long start, | 66 | unsigned long start, |
67 | unsigned long end); | 67 | unsigned long end); |
68 | |||
69 | extern int userfaultfd_unmap_prep(struct vm_area_struct *vma, | ||
70 | unsigned long start, unsigned long end, | ||
71 | struct list_head *uf); | ||
72 | extern void userfaultfd_unmap_complete(struct mm_struct *mm, | ||
73 | struct list_head *uf); | ||
74 | |||
75 | extern void userfaultfd_exit(struct mm_struct *mm); | ||
68 | 76 | ||
69 | #else /* CONFIG_USERFAULTFD */ | 77 | #else /* CONFIG_USERFAULTFD */ |
70 | 78 | ||
@@ -112,12 +120,29 @@ static inline void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *ctx, | |||
112 | { | 120 | { |
113 | } | 121 | } |
114 | 122 | ||
115 | static inline void madvise_userfault_dontneed(struct vm_area_struct *vma, | 123 | static inline void userfaultfd_remove(struct vm_area_struct *vma, |
116 | struct vm_area_struct **prev, | 124 | struct vm_area_struct **prev, |
117 | unsigned long start, | 125 | unsigned long start, |
118 | unsigned long end) | 126 | unsigned long end) |
119 | { | 127 | { |
120 | } | 128 | } |
129 | |||
130 | static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma, | ||
131 | unsigned long start, unsigned long end, | ||
132 | struct list_head *uf) | ||
133 | { | ||
134 | return 0; | ||
135 | } | ||
136 | |||
137 | static inline void userfaultfd_unmap_complete(struct mm_struct *mm, | ||
138 | struct list_head *uf) | ||
139 | { | ||
140 | } | ||
141 | |||
142 | static inline void userfaultfd_exit(struct mm_struct *mm) | ||
143 | { | ||
144 | } | ||
145 | |||
121 | #endif /* CONFIG_USERFAULTFD */ | 146 | #endif /* CONFIG_USERFAULTFD */ |
122 | 147 | ||
123 | #endif /* _LINUX_USERFAULTFD_K_H */ | 148 | #endif /* _LINUX_USERFAULTFD_K_H */ |
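The new pair brackets an unmap in two phases: userfaultfd_unmap_prep() runs under mmap_sem and records, on a caller-supplied list, which userfaultfd contexts cover the range, while userfaultfd_unmap_complete() delivers the UFFD_EVENT_UNMAP messages only after the lock is dropped, so the monitor can touch the mm without deadlocking. Callers that don't need notification pass NULL, as the ipc/shm.c hunks further down do. A sketch of the shape a notifying caller takes, assuming the reworked do_munmap() that now threads the list through:

    LIST_HEAD(uf);
    int ret;

    down_write(&mm->mmap_sem);
    ret = do_munmap(mm, start, len, &uf);  /* calls userfaultfd_unmap_prep() */
    up_write(&mm->mmap_sem);

    /* Deliver UFFD_EVENT_UNMAP outside mmap_sem. */
    userfaultfd_unmap_complete(mm, &uf);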
diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 5527d910ba3d..a3c0cbd7c888 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h | |||
@@ -46,7 +46,7 @@ enum writeback_sync_modes { | |||
46 | */ | 46 | */ |
47 | enum wb_reason { | 47 | enum wb_reason { |
48 | WB_REASON_BACKGROUND, | 48 | WB_REASON_BACKGROUND, |
49 | WB_REASON_TRY_TO_FREE_PAGES, | 49 | WB_REASON_VMSCAN, |
50 | WB_REASON_SYNC, | 50 | WB_REASON_SYNC, |
51 | WB_REASON_PERIODIC, | 51 | WB_REASON_PERIODIC, |
52 | WB_REASON_LAPTOP_TIMER, | 52 | WB_REASON_LAPTOP_TIMER, |
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 2ccd9ccbf9ef..7bd8783a590f 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h | |||
@@ -31,7 +31,7 @@ | |||
31 | 31 | ||
32 | #define WB_WORK_REASON \ | 32 | #define WB_WORK_REASON \ |
33 | EM( WB_REASON_BACKGROUND, "background") \ | 33 | EM( WB_REASON_BACKGROUND, "background") \ |
34 | EM( WB_REASON_TRY_TO_FREE_PAGES, "try_to_free_pages") \ | 34 | EM( WB_REASON_VMSCAN, "vmscan") \ |
35 | EM( WB_REASON_SYNC, "sync") \ | 35 | EM( WB_REASON_SYNC, "sync") \ |
36 | EM( WB_REASON_PERIODIC, "periodic") \ | 36 | EM( WB_REASON_PERIODIC, "periodic") \ |
37 | EM( WB_REASON_LAPTOP_TIMER, "laptop_timer") \ | 37 | EM( WB_REASON_LAPTOP_TIMER, "laptop_timer") \ |
diff --git a/include/uapi/linux/mqueue.h b/include/uapi/linux/mqueue.h index d0a2b8e89813..bbd5116ea739 100644 --- a/include/uapi/linux/mqueue.h +++ b/include/uapi/linux/mqueue.h | |||
@@ -18,6 +18,8 @@ | |||
18 | #ifndef _LINUX_MQUEUE_H | 18 | #ifndef _LINUX_MQUEUE_H |
19 | #define _LINUX_MQUEUE_H | 19 | #define _LINUX_MQUEUE_H |
20 | 20 | ||
21 | #include <linux/types.h> | ||
22 | |||
21 | #define MQ_PRIO_MAX 32768 | 23 | #define MQ_PRIO_MAX 32768 |
22 | /* per-uid limit of kernel memory used by mqueue, in bytes */ | 24 | /* per-uid limit of kernel memory used by mqueue, in bytes */ |
23 | #define MQ_BYTES_MAX 819200 | 25 | #define MQ_BYTES_MAX 819200 |
diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h index 9ac4b68c54d1..c055947c5c98 100644 --- a/include/uapi/linux/userfaultfd.h +++ b/include/uapi/linux/userfaultfd.h | |||
@@ -18,9 +18,11 @@ | |||
18 | * means the userland is reading). | 18 | * means the userland is reading). |
19 | */ | 19 | */ |
20 | #define UFFD_API ((__u64)0xAA) | 20 | #define UFFD_API ((__u64)0xAA) |
21 | #define UFFD_API_FEATURES (UFFD_FEATURE_EVENT_FORK | \ | 21 | #define UFFD_API_FEATURES (UFFD_FEATURE_EVENT_EXIT | \ |
22 | UFFD_FEATURE_EVENT_FORK | \ | ||
22 | UFFD_FEATURE_EVENT_REMAP | \ | 23 | UFFD_FEATURE_EVENT_REMAP | \ |
23 | UFFD_FEATURE_EVENT_MADVDONTNEED | \ | 24 | UFFD_FEATURE_EVENT_REMOVE | \ |
25 | UFFD_FEATURE_EVENT_UNMAP | \ | ||
24 | UFFD_FEATURE_MISSING_HUGETLBFS | \ | 26 | UFFD_FEATURE_MISSING_HUGETLBFS | \ |
25 | UFFD_FEATURE_MISSING_SHMEM) | 27 | UFFD_FEATURE_MISSING_SHMEM) |
26 | #define UFFD_API_IOCTLS \ | 28 | #define UFFD_API_IOCTLS \ |
@@ -92,7 +94,7 @@ struct uffd_msg { | |||
92 | struct { | 94 | struct { |
93 | __u64 start; | 95 | __u64 start; |
94 | __u64 end; | 96 | __u64 end; |
95 | } madv_dn; | 97 | } remove; |
96 | 98 | ||
97 | struct { | 99 | struct { |
98 | /* unused reserved fields */ | 100 | /* unused reserved fields */ |
@@ -109,7 +111,9 @@ struct uffd_msg { | |||
109 | #define UFFD_EVENT_PAGEFAULT 0x12 | 111 | #define UFFD_EVENT_PAGEFAULT 0x12 |
110 | #define UFFD_EVENT_FORK 0x13 | 112 | #define UFFD_EVENT_FORK 0x13 |
111 | #define UFFD_EVENT_REMAP 0x14 | 113 | #define UFFD_EVENT_REMAP 0x14 |
112 | #define UFFD_EVENT_MADVDONTNEED 0x15 | 114 | #define UFFD_EVENT_REMOVE 0x15 |
115 | #define UFFD_EVENT_UNMAP 0x16 | ||
116 | #define UFFD_EVENT_EXIT 0x17 | ||
113 | 117 | ||
114 | /* flags for UFFD_EVENT_PAGEFAULT */ | 118 | /* flags for UFFD_EVENT_PAGEFAULT */ |
115 | #define UFFD_PAGEFAULT_FLAG_WRITE (1<<0) /* If this was a write fault */ | 119 | #define UFFD_PAGEFAULT_FLAG_WRITE (1<<0) /* If this was a write fault */ |
@@ -155,9 +159,11 @@ struct uffdio_api { | |||
155 | #define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0) | 159 | #define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0) |
156 | #define UFFD_FEATURE_EVENT_FORK (1<<1) | 160 | #define UFFD_FEATURE_EVENT_FORK (1<<1) |
157 | #define UFFD_FEATURE_EVENT_REMAP (1<<2) | 161 | #define UFFD_FEATURE_EVENT_REMAP (1<<2) |
158 | #define UFFD_FEATURE_EVENT_MADVDONTNEED (1<<3) | 162 | #define UFFD_FEATURE_EVENT_REMOVE (1<<3) |
159 | #define UFFD_FEATURE_MISSING_HUGETLBFS (1<<4) | 163 | #define UFFD_FEATURE_MISSING_HUGETLBFS (1<<4) |
160 | #define UFFD_FEATURE_MISSING_SHMEM (1<<5) | 164 | #define UFFD_FEATURE_MISSING_SHMEM (1<<5) |
165 | #define UFFD_FEATURE_EVENT_UNMAP (1<<6) | ||
166 | #define UFFD_FEATURE_EVENT_EXIT (1<<7) | ||
161 | __u64 features; | 167 | __u64 features; |
162 | 168 | ||
163 | __u64 ioctls; | 169 | __u64 ioctls; |
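With REMOVE (the renamed MADVDONTNEED), UNMAP and EXIT in place, a monitor's event loop gains a few cases; REMOVE and UNMAP both report their range through msg.arg.remove. A sketch of the dispatch, where uffd is the userfaultfd descriptor and the handle_fault()/drop_range()/teardown() helpers are placeholders:

    struct uffd_msg msg;

    if (read(uffd, &msg, sizeof(msg)) != sizeof(msg))
        return -1;

    switch (msg.event) {
    case UFFD_EVENT_PAGEFAULT:
        handle_fault(msg.arg.pagefault.address, msg.arg.pagefault.flags);
        break;
    case UFFD_EVENT_REMOVE:   /* was UFFD_EVENT_MADVDONTNEED */
    case UFFD_EVENT_UNMAP:
        drop_range(msg.arg.remove.start, msg.arg.remove.end);
        break;
    case UFFD_EVENT_EXIT:
        teardown();           /* the monitored mm is going away */
        break;
    }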
diff --git a/ipc/shm.c b/ipc/shm.c --- a/ipc/shm.c +++ b/ipc/shm.c | |||
@@ -374,12 +374,12 @@ void exit_shm(struct task_struct *task) | |||
374 | up_write(&shm_ids(ns).rwsem); | 374 | up_write(&shm_ids(ns).rwsem); |
375 | } | 375 | } |
376 | 376 | ||
377 | static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 377 | static int shm_fault(struct vm_fault *vmf) |
378 | { | 378 | { |
379 | struct file *file = vma->vm_file; | 379 | struct file *file = vmf->vma->vm_file; |
380 | struct shm_file_data *sfd = shm_file_data(file); | 380 | struct shm_file_data *sfd = shm_file_data(file); |
381 | 381 | ||
382 | return sfd->vm_ops->fault(vma, vmf); | 382 | return sfd->vm_ops->fault(vmf); |
383 | } | 383 | } |
384 | 384 | ||
385 | #ifdef CONFIG_NUMA | 385 | #ifdef CONFIG_NUMA |
@@ -1222,7 +1222,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr, | |||
1222 | goto invalid; | 1222 | goto invalid; |
1223 | } | 1223 | } |
1224 | 1224 | ||
1225 | addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate); | 1225 | addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate, NULL); |
1226 | *raddr = addr; | 1226 | *raddr = addr; |
1227 | err = 0; | 1227 | err = 0; |
1228 | if (IS_ERR_VALUE(addr)) | 1228 | if (IS_ERR_VALUE(addr)) |
@@ -1329,7 +1329,7 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr) | |||
1329 | */ | 1329 | */ |
1330 | file = vma->vm_file; | 1330 | file = vma->vm_file; |
1331 | size = i_size_read(file_inode(vma->vm_file)); | 1331 | size = i_size_read(file_inode(vma->vm_file)); |
1332 | do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); | 1332 | do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL); |
1333 | /* | 1333 | /* |
1334 | * We discovered the size of the shm segment, so | 1334 | * We discovered the size of the shm segment, so |
1335 | * break out of here and fall through to the next | 1335 | * break out of here and fall through to the next |
@@ -1356,7 +1356,7 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr) | |||
1356 | if ((vma->vm_ops == &shm_vm_ops) && | 1356 | if ((vma->vm_ops == &shm_vm_ops) && |
1357 | ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) && | 1357 | ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) && |
1358 | (vma->vm_file == file)) | 1358 | (vma->vm_file == file)) |
1359 | do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); | 1359 | do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL); |
1360 | vma = next; | 1360 | vma = next; |
1361 | } | 1361 | } |
1362 | 1362 | ||
@@ -1365,7 +1365,7 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr) | |||
1365 | * given | 1365 | * given |
1366 | */ | 1366 | */ |
1367 | if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) { | 1367 | if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) { |
1368 | do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); | 1368 | do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL); |
1369 | retval = 0; | 1369 | retval = 0; |
1370 | } | 1370 | } |
1371 | 1371 | ||
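The shm_fault() hunk above follows the tree-wide ->fault() signature change in this series: the callback loses its vm_area_struct argument and reaches the VMA through the new vmf->vma member instead (the perf and relay hunks below are the same conversion). Updating any remaining handler is mechanical; foo_fault() here is a stand-in:

    /* Before: */
    static int foo_fault(struct vm_area_struct *vma, struct vm_fault *vmf);

    /* After: recover the VMA from the fault descriptor. */
    static int foo_fault(struct vm_fault *vmf)
    {
        struct vm_area_struct *vma = vmf->vma;

        /* ... body unchanged, using vma and vmf ... */
        return VM_FAULT_SIGBUS;
    }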
diff --git a/kernel/events/core.c b/kernel/events/core.c index 77a932b54a64..b2eb3542e829 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -4925,9 +4925,9 @@ unlock: | |||
4925 | rcu_read_unlock(); | 4925 | rcu_read_unlock(); |
4926 | } | 4926 | } |
4927 | 4927 | ||
4928 | static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 4928 | static int perf_mmap_fault(struct vm_fault *vmf) |
4929 | { | 4929 | { |
4930 | struct perf_event *event = vma->vm_file->private_data; | 4930 | struct perf_event *event = vmf->vma->vm_file->private_data; |
4931 | struct ring_buffer *rb; | 4931 | struct ring_buffer *rb; |
4932 | int ret = VM_FAULT_SIGBUS; | 4932 | int ret = VM_FAULT_SIGBUS; |
4933 | 4933 | ||
@@ -4950,7 +4950,7 @@ static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
4950 | goto unlock; | 4950 | goto unlock; |
4951 | 4951 | ||
4952 | get_page(vmf->page); | 4952 | get_page(vmf->page); |
4953 | vmf->page->mapping = vma->vm_file->f_mapping; | 4953 | vmf->page->mapping = vmf->vma->vm_file->f_mapping; |
4954 | vmf->page->index = vmf->pgoff; | 4954 | vmf->page->index = vmf->pgoff; |
4955 | 4955 | ||
4956 | ret = 0; | 4956 | ret = 0; |
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index d416f3baf392..18c6b23edd3c 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c | |||
@@ -153,14 +153,19 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, | |||
153 | struct page *old_page, struct page *new_page) | 153 | struct page *old_page, struct page *new_page) |
154 | { | 154 | { |
155 | struct mm_struct *mm = vma->vm_mm; | 155 | struct mm_struct *mm = vma->vm_mm; |
156 | spinlock_t *ptl; | 156 | struct page_vma_mapped_walk pvmw = { |
157 | pte_t *ptep; | 157 | .page = old_page, |
158 | .vma = vma, | ||
159 | .address = addr, | ||
160 | }; | ||
158 | int err; | 161 | int err; |
159 | /* For mmu_notifiers */ | 162 | /* For mmu_notifiers */ |
160 | const unsigned long mmun_start = addr; | 163 | const unsigned long mmun_start = addr; |
161 | const unsigned long mmun_end = addr + PAGE_SIZE; | 164 | const unsigned long mmun_end = addr + PAGE_SIZE; |
162 | struct mem_cgroup *memcg; | 165 | struct mem_cgroup *memcg; |
163 | 166 | ||
167 | VM_BUG_ON_PAGE(PageTransHuge(old_page), old_page); | ||
168 | |||
164 | err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL, &memcg, | 169 | err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL, &memcg, |
165 | false); | 170 | false); |
166 | if (err) | 171 | if (err) |
@@ -171,11 +176,11 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, | |||
171 | 176 | ||
172 | mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); | 177 | mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); |
173 | err = -EAGAIN; | 178 | err = -EAGAIN; |
174 | ptep = page_check_address(old_page, mm, addr, &ptl, 0); | 179 | if (!page_vma_mapped_walk(&pvmw)) { |
175 | if (!ptep) { | ||
176 | mem_cgroup_cancel_charge(new_page, memcg, false); | 180 | mem_cgroup_cancel_charge(new_page, memcg, false); |
177 | goto unlock; | 181 | goto unlock; |
178 | } | 182 | } |
183 | VM_BUG_ON_PAGE(addr != pvmw.address, old_page); | ||
179 | 184 | ||
180 | get_page(new_page); | 185 | get_page(new_page); |
181 | page_add_new_anon_rmap(new_page, vma, addr, false); | 186 | page_add_new_anon_rmap(new_page, vma, addr, false); |
@@ -187,14 +192,15 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, | |||
187 | inc_mm_counter(mm, MM_ANONPAGES); | 192 | inc_mm_counter(mm, MM_ANONPAGES); |
188 | } | 193 | } |
189 | 194 | ||
190 | flush_cache_page(vma, addr, pte_pfn(*ptep)); | 195 | flush_cache_page(vma, addr, pte_pfn(*pvmw.pte)); |
191 | ptep_clear_flush_notify(vma, addr, ptep); | 196 | ptep_clear_flush_notify(vma, addr, pvmw.pte); |
192 | set_pte_at_notify(mm, addr, ptep, mk_pte(new_page, vma->vm_page_prot)); | 197 | set_pte_at_notify(mm, addr, pvmw.pte, |
198 | mk_pte(new_page, vma->vm_page_prot)); | ||
193 | 199 | ||
194 | page_remove_rmap(old_page, false); | 200 | page_remove_rmap(old_page, false); |
195 | if (!page_mapped(old_page)) | 201 | if (!page_mapped(old_page)) |
196 | try_to_free_swap(old_page); | 202 | try_to_free_swap(old_page); |
197 | pte_unmap_unlock(ptep, ptl); | 203 | page_vma_mapped_walk_done(&pvmw); |
198 | 204 | ||
199 | if (vma->vm_flags & VM_LOCKED) | 205 | if (vma->vm_flags & VM_LOCKED) |
200 | munlock_vma_page(old_page); | 206 | munlock_vma_page(old_page); |
@@ -300,8 +306,8 @@ int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, | |||
300 | 306 | ||
301 | retry: | 307 | retry: |
302 | /* Read the page with vaddr into memory */ | 308 | /* Read the page with vaddr into memory */ |
303 | ret = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &old_page, | 309 | ret = get_user_pages_remote(NULL, mm, vaddr, 1, |
304 | &vma, NULL); | 310 | FOLL_FORCE | FOLL_SPLIT, &old_page, &vma, NULL); |
305 | if (ret <= 0) | 311 | if (ret <= 0) |
306 | return ret; | 312 | return ret; |
307 | 313 | ||
diff --git a/kernel/exit.c b/kernel/exit.c index 9960accbf2ab..90b09ca35c84 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -45,6 +45,7 @@ | |||
45 | #include <linux/task_io_accounting_ops.h> | 45 | #include <linux/task_io_accounting_ops.h> |
46 | #include <linux/tracehook.h> | 46 | #include <linux/tracehook.h> |
47 | #include <linux/fs_struct.h> | 47 | #include <linux/fs_struct.h> |
48 | #include <linux/userfaultfd_k.h> | ||
48 | #include <linux/init_task.h> | 49 | #include <linux/init_task.h> |
49 | #include <linux/perf_event.h> | 50 | #include <linux/perf_event.h> |
50 | #include <trace/events/sched.h> | 51 | #include <trace/events/sched.h> |
@@ -547,6 +548,7 @@ static void exit_mm(void) | |||
547 | enter_lazy_tlb(mm, current); | 548 | enter_lazy_tlb(mm, current); |
548 | task_unlock(current); | 549 | task_unlock(current); |
549 | mm_update_next_owner(mm); | 550 | mm_update_next_owner(mm); |
551 | userfaultfd_exit(mm); | ||
550 | mmput(mm); | 552 | mmput(mm); |
551 | if (test_thread_flag(TIF_MEMDIE)) | 553 | if (test_thread_flag(TIF_MEMDIE)) |
552 | exit_oom_victim(); | 554 | exit_oom_victim(); |
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c index ee1bc1bb8feb..0999679d6f26 100644 --- a/kernel/ksysfs.c +++ b/kernel/ksysfs.c | |||
@@ -195,7 +195,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj, | |||
195 | return count; | 195 | return count; |
196 | } | 196 | } |
197 | 197 | ||
198 | static struct bin_attribute notes_attr = { | 198 | static struct bin_attribute notes_attr __ro_after_init = { |
199 | .attr = { | 199 | .attr = { |
200 | .name = "notes", | 200 | .name = "notes", |
201 | .mode = S_IRUGO, | 201 | .mode = S_IRUGO, |
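__ro_after_init places an object in a section that is write-protected once boot finishes, which fits notes_attr: ksysfs_init() patches it once (filling in the size and address of the kernel's .notes section) and it is never written again. The general pattern, sketched:

    /* Written exactly once from an __init function... */
    static unsigned long boot_value __ro_after_init;

    static int __init record_boot_value(void)
    {
        boot_value = 42;   /* fine: protection is applied after init */
        return 0;
    }
    core_initcall(record_boot_value);

    /* ...once mark_rodata_ro() has run, any further store faults. */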
diff --git a/kernel/memremap.c b/kernel/memremap.c index 9ecedc28b928..06123234f118 100644 --- a/kernel/memremap.c +++ b/kernel/memremap.c | |||
@@ -246,9 +246,13 @@ static void devm_memremap_pages_release(struct device *dev, void *data) | |||
246 | /* pages are dead and unused, undo the arch mapping */ | 246 | /* pages are dead and unused, undo the arch mapping */ |
247 | align_start = res->start & ~(SECTION_SIZE - 1); | 247 | align_start = res->start & ~(SECTION_SIZE - 1); |
248 | align_size = ALIGN(resource_size(res), SECTION_SIZE); | 248 | align_size = ALIGN(resource_size(res), SECTION_SIZE); |
249 | |||
250 | lock_device_hotplug(); | ||
249 | mem_hotplug_begin(); | 251 | mem_hotplug_begin(); |
250 | arch_remove_memory(align_start, align_size); | 252 | arch_remove_memory(align_start, align_size); |
251 | mem_hotplug_done(); | 253 | mem_hotplug_done(); |
254 | unlock_device_hotplug(); | ||
255 | |||
252 | untrack_pfn(NULL, PHYS_PFN(align_start), align_size); | 256 | untrack_pfn(NULL, PHYS_PFN(align_start), align_size); |
253 | pgmap_radix_release(res); | 257 | pgmap_radix_release(res); |
254 | dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc, | 258 | dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc, |
@@ -360,9 +364,11 @@ void *devm_memremap_pages(struct device *dev, struct resource *res, | |||
360 | if (error) | 364 | if (error) |
361 | goto err_pfn_remap; | 365 | goto err_pfn_remap; |
362 | 366 | ||
367 | lock_device_hotplug(); | ||
363 | mem_hotplug_begin(); | 368 | mem_hotplug_begin(); |
364 | error = arch_add_memory(nid, align_start, align_size, true); | 369 | error = arch_add_memory(nid, align_start, align_size, true); |
365 | mem_hotplug_done(); | 370 | mem_hotplug_done(); |
371 | unlock_device_hotplug(); | ||
366 | if (error) | 372 | if (error) |
367 | goto err_add_memory; | 373 | goto err_add_memory; |
368 | 374 | ||
diff --git a/kernel/notifier.c b/kernel/notifier.c index fd2c9acbcc19..6196af8a8223 100644 --- a/kernel/notifier.c +++ b/kernel/notifier.c | |||
@@ -95,7 +95,7 @@ static int notifier_call_chain(struct notifier_block **nl, | |||
95 | if (nr_calls) | 95 | if (nr_calls) |
96 | (*nr_calls)++; | 96 | (*nr_calls)++; |
97 | 97 | ||
98 | if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK) | 98 | if (ret & NOTIFY_STOP_MASK) |
99 | break; | 99 | break; |
100 | nb = next_nb; | 100 | nb = next_nb; |
101 | nr_to_call--; | 101 | nr_to_call--; |
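This simplification is safe because NOTIFY_STOP_MASK is a single bit, so testing the bit is equivalent to comparing against the full mask, and both stop-style return values carry it. The relevant definitions from include/linux/notifier.h:

    #define NOTIFY_DONE       0x0000    /* Don't care */
    #define NOTIFY_OK         0x0001    /* Suits me */
    #define NOTIFY_STOP_MASK  0x8000    /* Don't call further */
    #define NOTIFY_BAD        (NOTIFY_STOP_MASK|0x0002)
    #define NOTIFY_STOP       (NOTIFY_STOP_MASK|NOTIFY_OK)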
diff --git a/kernel/relay.c b/kernel/relay.c index 8f18d314a96a..8f8dc91db680 100644 --- a/kernel/relay.c +++ b/kernel/relay.c | |||
@@ -39,10 +39,10 @@ static void relay_file_mmap_close(struct vm_area_struct *vma) | |||
39 | /* | 39 | /* |
40 | * fault() vm_op implementation for relay file mapping. | 40 | * fault() vm_op implementation for relay file mapping. |
41 | */ | 41 | */ |
42 | static int relay_buf_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 42 | static int relay_buf_fault(struct vm_fault *vmf) |
43 | { | 43 | { |
44 | struct page *page; | 44 | struct page *page; |
45 | struct rchan_buf *buf = vma->vm_private_data; | 45 | struct rchan_buf *buf = vmf->vma->vm_private_data; |
46 | pgoff_t pgoff = vmf->pgoff; | 46 | pgoff_t pgoff = vmf->pgoff; |
47 | 47 | ||
48 | if (!buf) | 48 | if (!buf) |
diff --git a/lib/Kconfig b/lib/Kconfig index 87ecd41031bd..fe7e8e175db8 100644 --- a/lib/Kconfig +++ b/lib/Kconfig | |||
@@ -103,8 +103,7 @@ config CRC32 | |||
103 | functions require M here. | 103 | functions require M here. |
104 | 104 | ||
105 | config CRC32_SELFTEST | 105 | config CRC32_SELFTEST |
106 | bool "CRC32 perform self test on init" | 106 | tristate "CRC32 perform self test on init" |
107 | default n | ||
108 | depends on CRC32 | 107 | depends on CRC32 |
109 | help | 108 | help |
110 | This option enables the CRC32 library functions to perform a | 109 | This option enables the CRC32 library functions to perform a |
@@ -432,8 +431,7 @@ config GLOB | |||
432 | depends on this. | 431 | depends on this. |
433 | 432 | ||
434 | config GLOB_SELFTEST | 433 | config GLOB_SELFTEST |
435 | bool "glob self-test on init" | 434 | tristate "glob self-test on init" |
436 | default n | ||
437 | depends on GLOB | 435 | depends on GLOB |
438 | help | 436 | help |
439 | This option enables a simple self-test of the glob_match | 437 | This option enables a simple self-test of the glob_match |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 66fb4389f05c..55735c9bdb75 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -1739,6 +1739,14 @@ config TEST_LIST_SORT | |||
1739 | 1739 | ||
1740 | If unsure, say N. | 1740 | If unsure, say N. |
1741 | 1741 | ||
1742 | config TEST_SORT | ||
1743 | bool "Array-based sort test" | ||
1744 | depends on DEBUG_KERNEL | ||
1745 | help | ||
1746 | This option enables the self-test function of 'sort()' at boot. | ||
1747 | |||
1748 | If unsure, say N. | ||
1749 | |||
1742 | config KPROBES_SANITY_TEST | 1750 | config KPROBES_SANITY_TEST |
1743 | bool "Kprobes sanity tests" | 1751 | bool "Kprobes sanity tests" |
1744 | depends on DEBUG_KERNEL | 1752 | depends on DEBUG_KERNEL |
@@ -1790,9 +1798,10 @@ config PERCPU_TEST | |||
1790 | If unsure, say N. | 1798 | If unsure, say N. |
1791 | 1799 | ||
1792 | config ATOMIC64_SELFTEST | 1800 | config ATOMIC64_SELFTEST |
1793 | bool "Perform an atomic64_t self-test at boot" | 1801 | tristate "Perform an atomic64_t self-test" |
1794 | help | 1802 | help |
1795 | Enable this option to test the atomic64_t functions at boot. | 1803 | Enable this option to test the atomic64_t functions at boot or |
1804 | at module load time. | ||
1796 | 1805 | ||
1797 | If unsure, say N. | 1806 | If unsure, say N. |
1798 | 1807 | ||
diff --git a/lib/Makefile b/lib/Makefile index f1a0364af377..445a39c21f46 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
@@ -50,6 +50,7 @@ obj-$(CONFIG_TEST_KASAN) += test_kasan.o | |||
50 | obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o | 50 | obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o |
51 | obj-$(CONFIG_TEST_LKM) += test_module.o | 51 | obj-$(CONFIG_TEST_LKM) += test_module.o |
52 | obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o | 52 | obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o |
53 | obj-$(CONFIG_TEST_SORT) += test_sort.o | ||
53 | obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o | 54 | obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o |
54 | obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o | 55 | obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o |
55 | obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o | 56 | obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o |
@@ -92,6 +93,7 @@ obj-$(CONFIG_CRC16) += crc16.o | |||
92 | obj-$(CONFIG_CRC_T10DIF)+= crc-t10dif.o | 93 | obj-$(CONFIG_CRC_T10DIF)+= crc-t10dif.o |
93 | obj-$(CONFIG_CRC_ITU_T) += crc-itu-t.o | 94 | obj-$(CONFIG_CRC_ITU_T) += crc-itu-t.o |
94 | obj-$(CONFIG_CRC32) += crc32.o | 95 | obj-$(CONFIG_CRC32) += crc32.o |
96 | obj-$(CONFIG_CRC32_SELFTEST) += crc32test.o | ||
95 | obj-$(CONFIG_CRC7) += crc7.o | 97 | obj-$(CONFIG_CRC7) += crc7.o |
96 | obj-$(CONFIG_LIBCRC32C) += libcrc32c.o | 98 | obj-$(CONFIG_LIBCRC32C) += libcrc32c.o |
97 | obj-$(CONFIG_CRC8) += crc8.o | 99 | obj-$(CONFIG_CRC8) += crc8.o |
@@ -161,6 +163,7 @@ obj-$(CONFIG_CORDIC) += cordic.o | |||
161 | obj-$(CONFIG_DQL) += dynamic_queue_limits.o | 163 | obj-$(CONFIG_DQL) += dynamic_queue_limits.o |
162 | 164 | ||
163 | obj-$(CONFIG_GLOB) += glob.o | 165 | obj-$(CONFIG_GLOB) += glob.o |
166 | obj-$(CONFIG_GLOB_SELFTEST) += globtest.o | ||
164 | 167 | ||
165 | obj-$(CONFIG_MPILIB) += mpi/ | 168 | obj-$(CONFIG_MPILIB) += mpi/ |
166 | obj-$(CONFIG_SIGNATURE) += digsig.o | 169 | obj-$(CONFIG_SIGNATURE) += digsig.o |
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c index 46042901130f..fd70c0e0e673 100644 --- a/lib/atomic64_test.c +++ b/lib/atomic64_test.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/bug.h> | 15 | #include <linux/bug.h> |
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/atomic.h> | 17 | #include <linux/atomic.h> |
18 | #include <linux/module.h> | ||
18 | 19 | ||
19 | #ifdef CONFIG_X86 | 20 | #ifdef CONFIG_X86 |
20 | #include <asm/cpufeature.h> /* for boot_cpu_has below */ | 21 | #include <asm/cpufeature.h> /* for boot_cpu_has below */ |
@@ -241,7 +242,7 @@ static __init void test_atomic64(void) | |||
241 | BUG_ON(v.counter != r); | 242 | BUG_ON(v.counter != r); |
242 | } | 243 | } |
243 | 244 | ||
244 | static __init int test_atomics(void) | 245 | static __init int test_atomics_init(void) |
245 | { | 246 | { |
246 | test_atomic(); | 247 | test_atomic(); |
247 | test_atomic64(); | 248 | test_atomic64(); |
@@ -264,4 +265,9 @@ static __init int test_atomics(void) | |||
264 | return 0; | 265 | return 0; |
265 | } | 266 | } |
266 | 267 | ||
267 | core_initcall(test_atomics); | 268 | static __exit void test_atomics_exit(void) {} |
269 | |||
270 | module_init(test_atomics_init); | ||
271 | module_exit(test_atomics_exit); | ||
272 | |||
273 | MODULE_LICENSE("GPL"); | ||
diff --git a/lib/crc32.c b/lib/crc32.c index 7fbd1a112b9d..6ddc92bc1460 100644 --- a/lib/crc32.c +++ b/lib/crc32.c | |||
@@ -340,827 +340,3 @@ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) | |||
340 | } | 340 | } |
341 | #endif | 341 | #endif |
342 | EXPORT_SYMBOL(crc32_be); | 342 | EXPORT_SYMBOL(crc32_be); |
343 | |||
344 | #ifdef CONFIG_CRC32_SELFTEST | ||
345 | |||
346 | /* 4096 random bytes */ | ||
347 | static u8 const __aligned(8) test_buf[] __initconst = | ||
348 | { | ||
349 | 0x5b, 0x85, 0x21, 0xcb, 0x09, 0x68, 0x7d, 0x30, | ||
350 | 0xc7, 0x69, 0xd7, 0x30, 0x92, 0xde, 0x59, 0xe4, | ||
351 | 0xc9, 0x6e, 0x8b, 0xdb, 0x98, 0x6b, 0xaa, 0x60, | ||
352 | 0xa8, 0xb5, 0xbc, 0x6c, 0xa9, 0xb1, 0x5b, 0x2c, | ||
353 | 0xea, 0xb4, 0x92, 0x6a, 0x3f, 0x79, 0x91, 0xe4, | ||
354 | 0xe9, 0x70, 0x51, 0x8c, 0x7f, 0x95, 0x6f, 0x1a, | ||
355 | 0x56, 0xa1, 0x5c, 0x27, 0x03, 0x67, 0x9f, 0x3a, | ||
356 | 0xe2, 0x31, 0x11, 0x29, 0x6b, 0x98, 0xfc, 0xc4, | ||
357 | 0x53, 0x24, 0xc5, 0x8b, 0xce, 0x47, 0xb2, 0xb9, | ||
358 | 0x32, 0xcb, 0xc1, 0xd0, 0x03, 0x57, 0x4e, 0xd4, | ||
359 | 0xe9, 0x3c, 0xa1, 0x63, 0xcf, 0x12, 0x0e, 0xca, | ||
360 | 0xe1, 0x13, 0xd1, 0x93, 0xa6, 0x88, 0x5c, 0x61, | ||
361 | 0x5b, 0xbb, 0xf0, 0x19, 0x46, 0xb4, 0xcf, 0x9e, | ||
362 | 0xb6, 0x6b, 0x4c, 0x3a, 0xcf, 0x60, 0xf9, 0x7a, | ||
363 | 0x8d, 0x07, 0x63, 0xdb, 0x40, 0xe9, 0x0b, 0x6f, | ||
364 | 0xad, 0x97, 0xf1, 0xed, 0xd0, 0x1e, 0x26, 0xfd, | ||
365 | 0xbf, 0xb7, 0xc8, 0x04, 0x94, 0xf8, 0x8b, 0x8c, | ||
366 | 0xf1, 0xab, 0x7a, 0xd4, 0xdd, 0xf3, 0xe8, 0x88, | ||
367 | 0xc3, 0xed, 0x17, 0x8a, 0x9b, 0x40, 0x0d, 0x53, | ||
368 | 0x62, 0x12, 0x03, 0x5f, 0x1b, 0x35, 0x32, 0x1f, | ||
369 | 0xb4, 0x7b, 0x93, 0x78, 0x0d, 0xdb, 0xce, 0xa4, | ||
370 | 0xc0, 0x47, 0xd5, 0xbf, 0x68, 0xe8, 0x5d, 0x74, | ||
371 | 0x8f, 0x8e, 0x75, 0x1c, 0xb2, 0x4f, 0x9a, 0x60, | ||
372 | 0xd1, 0xbe, 0x10, 0xf4, 0x5c, 0xa1, 0x53, 0x09, | ||
373 | 0xa5, 0xe0, 0x09, 0x54, 0x85, 0x5c, 0xdc, 0x07, | ||
374 | 0xe7, 0x21, 0x69, 0x7b, 0x8a, 0xfd, 0x90, 0xf1, | ||
375 | 0x22, 0xd0, 0xb4, 0x36, 0x28, 0xe6, 0xb8, 0x0f, | ||
376 | 0x39, 0xde, 0xc8, 0xf3, 0x86, 0x60, 0x34, 0xd2, | ||
377 | 0x5e, 0xdf, 0xfd, 0xcf, 0x0f, 0xa9, 0x65, 0xf0, | ||
378 | 0xd5, 0x4d, 0x96, 0x40, 0xe3, 0xdf, 0x3f, 0x95, | ||
379 | 0x5a, 0x39, 0x19, 0x93, 0xf4, 0x75, 0xce, 0x22, | ||
380 | 0x00, 0x1c, 0x93, 0xe2, 0x03, 0x66, 0xf4, 0x93, | ||
381 | 0x73, 0x86, 0x81, 0x8e, 0x29, 0x44, 0x48, 0x86, | ||
382 | 0x61, 0x7c, 0x48, 0xa3, 0x43, 0xd2, 0x9c, 0x8d, | ||
383 | 0xd4, 0x95, 0xdd, 0xe1, 0x22, 0x89, 0x3a, 0x40, | ||
384 | 0x4c, 0x1b, 0x8a, 0x04, 0xa8, 0x09, 0x69, 0x8b, | ||
385 | 0xea, 0xc6, 0x55, 0x8e, 0x57, 0xe6, 0x64, 0x35, | ||
386 | 0xf0, 0xc7, 0x16, 0x9f, 0x5d, 0x5e, 0x86, 0x40, | ||
387 | 0x46, 0xbb, 0xe5, 0x45, 0x88, 0xfe, 0xc9, 0x63, | ||
388 | 0x15, 0xfb, 0xf5, 0xbd, 0x71, 0x61, 0xeb, 0x7b, | ||
389 | 0x78, 0x70, 0x07, 0x31, 0x03, 0x9f, 0xb2, 0xc8, | ||
390 | 0xa7, 0xab, 0x47, 0xfd, 0xdf, 0xa0, 0x78, 0x72, | ||
391 | 0xa4, 0x2a, 0xe4, 0xb6, 0xba, 0xc0, 0x1e, 0x86, | ||
392 | 0x71, 0xe6, 0x3d, 0x18, 0x37, 0x70, 0xe6, 0xff, | ||
393 | 0xe0, 0xbc, 0x0b, 0x22, 0xa0, 0x1f, 0xd3, 0xed, | ||
394 | 0xa2, 0x55, 0x39, 0xab, 0xa8, 0x13, 0x73, 0x7c, | ||
395 | 0x3f, 0xb2, 0xd6, 0x19, 0xac, 0xff, 0x99, 0xed, | ||
396 | 0xe8, 0xe6, 0xa6, 0x22, 0xe3, 0x9c, 0xf1, 0x30, | ||
397 | 0xdc, 0x01, 0x0a, 0x56, 0xfa, 0xe4, 0xc9, 0x99, | ||
398 | 0xdd, 0xa8, 0xd8, 0xda, 0x35, 0x51, 0x73, 0xb4, | ||
399 | 0x40, 0x86, 0x85, 0xdb, 0x5c, 0xd5, 0x85, 0x80, | ||
400 | 0x14, 0x9c, 0xfd, 0x98, 0xa9, 0x82, 0xc5, 0x37, | ||
401 | 0xff, 0x32, 0x5d, 0xd0, 0x0b, 0xfa, 0xdc, 0x04, | ||
402 | 0x5e, 0x09, 0xd2, 0xca, 0x17, 0x4b, 0x1a, 0x8e, | ||
403 | 0x15, 0xe1, 0xcc, 0x4e, 0x52, 0x88, 0x35, 0xbd, | ||
404 | 0x48, 0xfe, 0x15, 0xa0, 0x91, 0xfd, 0x7e, 0x6c, | ||
405 | 0x0e, 0x5d, 0x79, 0x1b, 0x81, 0x79, 0xd2, 0x09, | ||
406 | 0x34, 0x70, 0x3d, 0x81, 0xec, 0xf6, 0x24, 0xbb, | ||
407 | 0xfb, 0xf1, 0x7b, 0xdf, 0x54, 0xea, 0x80, 0x9b, | ||
408 | 0xc7, 0x99, 0x9e, 0xbd, 0x16, 0x78, 0x12, 0x53, | ||
409 | 0x5e, 0x01, 0xa7, 0x4e, 0xbd, 0x67, 0xe1, 0x9b, | ||
410 | 0x4c, 0x0e, 0x61, 0x45, 0x97, 0xd2, 0xf0, 0x0f, | ||
411 | 0xfe, 0x15, 0x08, 0xb7, 0x11, 0x4c, 0xe7, 0xff, | ||
412 | 0x81, 0x53, 0xff, 0x91, 0x25, 0x38, 0x7e, 0x40, | ||
413 | 0x94, 0xe5, 0xe0, 0xad, 0xe6, 0xd9, 0x79, 0xb6, | ||
414 | 0x92, 0xc9, 0xfc, 0xde, 0xc3, 0x1a, 0x23, 0xbb, | ||
415 | 0xdd, 0xc8, 0x51, 0x0c, 0x3a, 0x72, 0xfa, 0x73, | ||
416 | 0x6f, 0xb7, 0xee, 0x61, 0x39, 0x03, 0x01, 0x3f, | ||
417 | 0x7f, 0x94, 0x2e, 0x2e, 0xba, 0x3a, 0xbb, 0xb4, | ||
418 | 0xfa, 0x6a, 0x17, 0xfe, 0xea, 0xef, 0x5e, 0x66, | ||
419 | 0x97, 0x3f, 0x32, 0x3d, 0xd7, 0x3e, 0xb1, 0xf1, | ||
420 | 0x6c, 0x14, 0x4c, 0xfd, 0x37, 0xd3, 0x38, 0x80, | ||
421 | 0xfb, 0xde, 0xa6, 0x24, 0x1e, 0xc8, 0xca, 0x7f, | ||
422 | 0x3a, 0x93, 0xd8, 0x8b, 0x18, 0x13, 0xb2, 0xe5, | ||
423 | 0xe4, 0x93, 0x05, 0x53, 0x4f, 0x84, 0x66, 0xa7, | ||
424 | 0x58, 0x5c, 0x7b, 0x86, 0x52, 0x6d, 0x0d, 0xce, | ||
425 | 0xa4, 0x30, 0x7d, 0xb6, 0x18, 0x9f, 0xeb, 0xff, | ||
426 | 0x22, 0xbb, 0x72, 0x29, 0xb9, 0x44, 0x0b, 0x48, | ||
427 | 0x1e, 0x84, 0x71, 0x81, 0xe3, 0x6d, 0x73, 0x26, | ||
428 | 0x92, 0xb4, 0x4d, 0x2a, 0x29, 0xb8, 0x1f, 0x72, | ||
429 | 0xed, 0xd0, 0xe1, 0x64, 0x77, 0xea, 0x8e, 0x88, | ||
430 | 0x0f, 0xef, 0x3f, 0xb1, 0x3b, 0xad, 0xf9, 0xc9, | ||
431 | 0x8b, 0xd0, 0xac, 0xc6, 0xcc, 0xa9, 0x40, 0xcc, | ||
432 | 0x76, 0xf6, 0x3b, 0x53, 0xb5, 0x88, 0xcb, 0xc8, | ||
433 | 0x37, 0xf1, 0xa2, 0xba, 0x23, 0x15, 0x99, 0x09, | ||
434 | 0xcc, 0xe7, 0x7a, 0x3b, 0x37, 0xf7, 0x58, 0xc8, | ||
435 | 0x46, 0x8c, 0x2b, 0x2f, 0x4e, 0x0e, 0xa6, 0x5c, | ||
436 | 0xea, 0x85, 0x55, 0xba, 0x02, 0x0e, 0x0e, 0x48, | ||
437 | 0xbc, 0xe1, 0xb1, 0x01, 0x35, 0x79, 0x13, 0x3d, | ||
438 | 0x1b, 0xc0, 0x53, 0x68, 0x11, 0xe7, 0x95, 0x0f, | ||
439 | 0x9d, 0x3f, 0x4c, 0x47, 0x7b, 0x4d, 0x1c, 0xae, | ||
440 | 0x50, 0x9b, 0xcb, 0xdd, 0x05, 0x8d, 0x9a, 0x97, | ||
441 | 0xfd, 0x8c, 0xef, 0x0c, 0x1d, 0x67, 0x73, 0xa8, | ||
442 | 0x28, 0x36, 0xd5, 0xb6, 0x92, 0x33, 0x40, 0x75, | ||
443 | 0x0b, 0x51, 0xc3, 0x64, 0xba, 0x1d, 0xc2, 0xcc, | ||
444 | 0xee, 0x7d, 0x54, 0x0f, 0x27, 0x69, 0xa7, 0x27, | ||
445 | 0x63, 0x30, 0x29, 0xd9, 0xc8, 0x84, 0xd8, 0xdf, | ||
446 | 0x9f, 0x68, 0x8d, 0x04, 0xca, 0xa6, 0xc5, 0xc7, | ||
447 | 0x7a, 0x5c, 0xc8, 0xd1, 0xcb, 0x4a, 0xec, 0xd0, | ||
448 | 0xd8, 0x20, 0x69, 0xc5, 0x17, 0xcd, 0x78, 0xc8, | ||
449 | 0x75, 0x23, 0x30, 0x69, 0xc9, 0xd4, 0xea, 0x5c, | ||
450 | 0x4f, 0x6b, 0x86, 0x3f, 0x8b, 0xfe, 0xee, 0x44, | ||
451 | 0xc9, 0x7c, 0xb7, 0xdd, 0x3e, 0xe5, 0xec, 0x54, | ||
452 | 0x03, 0x3e, 0xaa, 0x82, 0xc6, 0xdf, 0xb2, 0x38, | ||
453 | 0x0e, 0x5d, 0xb3, 0x88, 0xd9, 0xd3, 0x69, 0x5f, | ||
454 | 0x8f, 0x70, 0x8a, 0x7e, 0x11, 0xd9, 0x1e, 0x7b, | ||
455 | 0x38, 0xf1, 0x42, 0x1a, 0xc0, 0x35, 0xf5, 0xc7, | ||
456 | 0x36, 0x85, 0xf5, 0xf7, 0xb8, 0x7e, 0xc7, 0xef, | ||
457 | 0x18, 0xf1, 0x63, 0xd6, 0x7a, 0xc6, 0xc9, 0x0e, | ||
458 | 0x4d, 0x69, 0x4f, 0x84, 0xef, 0x26, 0x41, 0x0c, | ||
459 | 0xec, 0xc7, 0xe0, 0x7e, 0x3c, 0x67, 0x01, 0x4c, | ||
460 | 0x62, 0x1a, 0x20, 0x6f, 0xee, 0x47, 0x4d, 0xc0, | ||
461 | 0x99, 0x13, 0x8d, 0x91, 0x4a, 0x26, 0xd4, 0x37, | ||
462 | 0x28, 0x90, 0x58, 0x75, 0x66, 0x2b, 0x0a, 0xdf, | ||
463 | 0xda, 0xee, 0x92, 0x25, 0x90, 0x62, 0x39, 0x9e, | ||
464 | 0x44, 0x98, 0xad, 0xc1, 0x88, 0xed, 0xe4, 0xb4, | ||
465 | 0xaf, 0xf5, 0x8c, 0x9b, 0x48, 0x4d, 0x56, 0x60, | ||
466 | 0x97, 0x0f, 0x61, 0x59, 0x9e, 0xa6, 0x27, 0xfe, | ||
467 | 0xc1, 0x91, 0x15, 0x38, 0xb8, 0x0f, 0xae, 0x61, | ||
468 | 0x7d, 0x26, 0x13, 0x5a, 0x73, 0xff, 0x1c, 0xa3, | ||
469 | 0x61, 0x04, 0x58, 0x48, 0x55, 0x44, 0x11, 0xfe, | ||
470 | 0x15, 0xca, 0xc3, 0xbd, 0xca, 0xc5, 0xb4, 0x40, | ||
471 | 0x5d, 0x1b, 0x7f, 0x39, 0xb5, 0x9c, 0x35, 0xec, | ||
472 | 0x61, 0x15, 0x32, 0x32, 0xb8, 0x4e, 0x40, 0x9f, | ||
473 | 0x17, 0x1f, 0x0a, 0x4d, 0xa9, 0x91, 0xef, 0xb7, | ||
474 | 0xb0, 0xeb, 0xc2, 0x83, 0x9a, 0x6c, 0xd2, 0x79, | ||
475 | 0x43, 0x78, 0x5e, 0x2f, 0xe5, 0xdd, 0x1a, 0x3c, | ||
476 | 0x45, 0xab, 0x29, 0x40, 0x3a, 0x37, 0x5b, 0x6f, | ||
477 | 0xd7, 0xfc, 0x48, 0x64, 0x3c, 0x49, 0xfb, 0x21, | ||
478 | 0xbe, 0xc3, 0xff, 0x07, 0xfb, 0x17, 0xe9, 0xc9, | ||
479 | 0x0c, 0x4c, 0x5c, 0x15, 0x9e, 0x8e, 0x22, 0x30, | ||
480 | 0x0a, 0xde, 0x48, 0x7f, 0xdb, 0x0d, 0xd1, 0x2b, | ||
481 | 0x87, 0x38, 0x9e, 0xcc, 0x5a, 0x01, 0x16, 0xee, | ||
482 | 0x75, 0x49, 0x0d, 0x30, 0x01, 0x34, 0x6a, 0xb6, | ||
483 | 0x9a, 0x5a, 0x2a, 0xec, 0xbb, 0x48, 0xac, 0xd3, | ||
484 | 0x77, 0x83, 0xd8, 0x08, 0x86, 0x4f, 0x48, 0x09, | ||
485 | 0x29, 0x41, 0x79, 0xa1, 0x03, 0x12, 0xc4, 0xcd, | ||
486 | 0x90, 0x55, 0x47, 0x66, 0x74, 0x9a, 0xcc, 0x4f, | ||
487 | 0x35, 0x8c, 0xd6, 0x98, 0xef, 0xeb, 0x45, 0xb9, | ||
488 | 0x9a, 0x26, 0x2f, 0x39, 0xa5, 0x70, 0x6d, 0xfc, | ||
489 | 0xb4, 0x51, 0xee, 0xf4, 0x9c, 0xe7, 0x38, 0x59, | ||
490 | 0xad, 0xf4, 0xbc, 0x46, 0xff, 0x46, 0x8e, 0x60, | ||
491 | 0x9c, 0xa3, 0x60, 0x1d, 0xf8, 0x26, 0x72, 0xf5, | ||
492 | 0x72, 0x9d, 0x68, 0x80, 0x04, 0xf6, 0x0b, 0xa1, | ||
493 | 0x0a, 0xd5, 0xa7, 0x82, 0x3a, 0x3e, 0x47, 0xa8, | ||
494 | 0x5a, 0xde, 0x59, 0x4f, 0x7b, 0x07, 0xb3, 0xe9, | ||
495 | 0x24, 0x19, 0x3d, 0x34, 0x05, 0xec, 0xf1, 0xab, | ||
496 | 0x6e, 0x64, 0x8f, 0xd3, 0xe6, 0x41, 0x86, 0x80, | ||
497 | 0x70, 0xe3, 0x8d, 0x60, 0x9c, 0x34, 0x25, 0x01, | ||
498 | 0x07, 0x4d, 0x19, 0x41, 0x4e, 0x3d, 0x5c, 0x7e, | ||
499 | 0xa8, 0xf5, 0xcc, 0xd5, 0x7b, 0xe2, 0x7d, 0x3d, | ||
500 | 0x49, 0x86, 0x7d, 0x07, 0xb7, 0x10, 0xe3, 0x35, | ||
501 | 0xb8, 0x84, 0x6d, 0x76, 0xab, 0x17, 0xc6, 0x38, | ||
502 | 0xb4, 0xd3, 0x28, 0x57, 0xad, 0xd3, 0x88, 0x5a, | ||
503 | 0xda, 0xea, 0xc8, 0x94, 0xcc, 0x37, 0x19, 0xac, | ||
504 | 0x9c, 0x9f, 0x4b, 0x00, 0x15, 0xc0, 0xc8, 0xca, | ||
505 | 0x1f, 0x15, 0xaa, 0xe0, 0xdb, 0xf9, 0x2f, 0x57, | ||
506 | 0x1b, 0x24, 0xc7, 0x6f, 0x76, 0x29, 0xfb, 0xed, | ||
507 | 0x25, 0x0d, 0xc0, 0xfe, 0xbd, 0x5a, 0xbf, 0x20, | ||
508 | 0x08, 0x51, 0x05, 0xec, 0x71, 0xa3, 0xbf, 0xef, | ||
509 | 0x5e, 0x99, 0x75, 0xdb, 0x3c, 0x5f, 0x9a, 0x8c, | ||
510 | 0xbb, 0x19, 0x5c, 0x0e, 0x93, 0x19, 0xf8, 0x6a, | ||
511 | 0xbc, 0xf2, 0x12, 0x54, 0x2f, 0xcb, 0x28, 0x64, | ||
512 | 0x88, 0xb3, 0x92, 0x0d, 0x96, 0xd1, 0xa6, 0xe4, | ||
513 | 0x1f, 0xf1, 0x4d, 0xa4, 0xab, 0x1c, 0xee, 0x54, | ||
514 | 0xf2, 0xad, 0x29, 0x6d, 0x32, 0x37, 0xb2, 0x16, | ||
515 | 0x77, 0x5c, 0xdc, 0x2e, 0x54, 0xec, 0x75, 0x26, | ||
516 | 0xc6, 0x36, 0xd9, 0x17, 0x2c, 0xf1, 0x7a, 0xdc, | ||
517 | 0x4b, 0xf1, 0xe2, 0xd9, 0x95, 0xba, 0xac, 0x87, | ||
518 | 0xc1, 0xf3, 0x8e, 0x58, 0x08, 0xd8, 0x87, 0x60, | ||
519 | 0xc9, 0xee, 0x6a, 0xde, 0xa4, 0xd2, 0xfc, 0x0d, | ||
520 | 0xe5, 0x36, 0xc4, 0x5c, 0x52, 0xb3, 0x07, 0x54, | ||
521 | 0x65, 0x24, 0xc1, 0xb1, 0xd1, 0xb1, 0x53, 0x13, | ||
522 | 0x31, 0x79, 0x7f, 0x05, 0x76, 0xeb, 0x37, 0x59, | ||
523 | 0x15, 0x2b, 0xd1, 0x3f, 0xac, 0x08, 0x97, 0xeb, | ||
524 | 0x91, 0x98, 0xdf, 0x6c, 0x09, 0x0d, 0x04, 0x9f, | ||
525 | 0xdc, 0x3b, 0x0e, 0x60, 0x68, 0x47, 0x23, 0x15, | ||
526 | 0x16, 0xc6, 0x0b, 0x35, 0xf8, 0x77, 0xa2, 0x78, | ||
527 | 0x50, 0xd4, 0x64, 0x22, 0x33, 0xff, 0xfb, 0x93, | ||
528 | 0x71, 0x46, 0x50, 0x39, 0x1b, 0x9c, 0xea, 0x4e, | ||
529 | 0x8d, 0x0c, 0x37, 0xe5, 0x5c, 0x51, 0x3a, 0x31, | ||
530 | 0xb2, 0x85, 0x84, 0x3f, 0x41, 0xee, 0xa2, 0xc1, | ||
531 | 0xc6, 0x13, 0x3b, 0x54, 0x28, 0xd2, 0x18, 0x37, | ||
532 | 0xcc, 0x46, 0x9f, 0x6a, 0x91, 0x3d, 0x5a, 0x15, | ||
533 | 0x3c, 0x89, 0xa3, 0x61, 0x06, 0x7d, 0x2e, 0x78, | ||
534 | 0xbe, 0x7d, 0x40, 0xba, 0x2f, 0x95, 0xb1, 0x2f, | ||
535 | 0x87, 0x3b, 0x8a, 0xbe, 0x6a, 0xf4, 0xc2, 0x31, | ||
536 | 0x74, 0xee, 0x91, 0xe0, 0x23, 0xaa, 0x5d, 0x7f, | ||
537 | 0xdd, 0xf0, 0x44, 0x8c, 0x0b, 0x59, 0x2b, 0xfc, | ||
538 | 0x48, 0x3a, 0xdf, 0x07, 0x05, 0x38, 0x6c, 0xc9, | ||
539 | 0xeb, 0x18, 0x24, 0x68, 0x8d, 0x58, 0x98, 0xd3, | ||
540 | 0x31, 0xa3, 0xe4, 0x70, 0x59, 0xb1, 0x21, 0xbe, | ||
541 | 0x7e, 0x65, 0x7d, 0xb8, 0x04, 0xab, 0xf6, 0xe4, | ||
542 | 0xd7, 0xda, 0xec, 0x09, 0x8f, 0xda, 0x6d, 0x24, | ||
543 | 0x07, 0xcc, 0x29, 0x17, 0x05, 0x78, 0x1a, 0xc1, | ||
544 | 0xb1, 0xce, 0xfc, 0xaa, 0x2d, 0xe7, 0xcc, 0x85, | ||
545 | 0x84, 0x84, 0x03, 0x2a, 0x0c, 0x3f, 0xa9, 0xf8, | ||
546 | 0xfd, 0x84, 0x53, 0x59, 0x5c, 0xf0, 0xd4, 0x09, | ||
547 | 0xf0, 0xd2, 0x6c, 0x32, 0x03, 0xb0, 0xa0, 0x8c, | ||
548 | 0x52, 0xeb, 0x23, 0x91, 0x88, 0x43, 0x13, 0x46, | ||
549 | 0xf6, 0x1e, 0xb4, 0x1b, 0xf5, 0x8e, 0x3a, 0xb5, | ||
550 | 0x3d, 0x00, 0xf6, 0xe5, 0x08, 0x3d, 0x5f, 0x39, | ||
551 | 0xd3, 0x21, 0x69, 0xbc, 0x03, 0x22, 0x3a, 0xd2, | ||
552 | 0x5c, 0x84, 0xf8, 0x15, 0xc4, 0x80, 0x0b, 0xbc, | ||
553 | 0x29, 0x3c, 0xf3, 0x95, 0x98, 0xcd, 0x8f, 0x35, | ||
554 | 0xbc, 0xa5, 0x3e, 0xfc, 0xd4, 0x13, 0x9e, 0xde, | ||
555 | 0x4f, 0xce, 0x71, 0x9d, 0x09, 0xad, 0xf2, 0x80, | ||
556 | 0x6b, 0x65, 0x7f, 0x03, 0x00, 0x14, 0x7c, 0x15, | ||
557 | 0x85, 0x40, 0x6d, 0x70, 0xea, 0xdc, 0xb3, 0x63, | ||
558 | 0x35, 0x4f, 0x4d, 0xe0, 0xd9, 0xd5, 0x3c, 0x58, | ||
559 | 0x56, 0x23, 0x80, 0xe2, 0x36, 0xdd, 0x75, 0x1d, | ||
560 | 0x94, 0x11, 0x41, 0x8e, 0xe0, 0x81, 0x8e, 0xcf, | ||
561 | 0xe0, 0xe5, 0xf6, 0xde, 0xd1, 0xe7, 0x04, 0x12, | ||
562 | 0x79, 0x92, 0x2b, 0x71, 0x2a, 0x79, 0x8b, 0x7c, | ||
563 | 0x44, 0x79, 0x16, 0x30, 0x4e, 0xf4, 0xf6, 0x9b, | ||
564 | 0xb7, 0x40, 0xa3, 0x5a, 0xa7, 0x69, 0x3e, 0xc1, | ||
565 | 0x3a, 0x04, 0xd0, 0x88, 0xa0, 0x3b, 0xdd, 0xc6, | ||
566 | 0x9e, 0x7e, 0x1e, 0x1e, 0x8f, 0x44, 0xf7, 0x73, | ||
567 | 0x67, 0x1e, 0x1a, 0x78, 0xfa, 0x62, 0xf4, 0xa9, | ||
568 | 0xa8, 0xc6, 0x5b, 0xb8, 0xfa, 0x06, 0x7d, 0x5e, | ||
569 | 0x38, 0x1c, 0x9a, 0x39, 0xe9, 0x39, 0x98, 0x22, | ||
570 | 0x0b, 0xa7, 0xac, 0x0b, 0xf3, 0xbc, 0xf1, 0xeb, | ||
571 | 0x8c, 0x81, 0xe3, 0x48, 0x8a, 0xed, 0x42, 0xc2, | ||
572 | 0x38, 0xcf, 0x3e, 0xda, 0xd2, 0x89, 0x8d, 0x9c, | ||
573 | 0x53, 0xb5, 0x2f, 0x41, 0x01, 0x26, 0x84, 0x9c, | ||
574 | 0xa3, 0x56, 0xf6, 0x49, 0xc7, 0xd4, 0x9f, 0x93, | ||
575 | 0x1b, 0x96, 0x49, 0x5e, 0xad, 0xb3, 0x84, 0x1f, | ||
576 | 0x3c, 0xa4, 0xe0, 0x9b, 0xd1, 0x90, 0xbc, 0x38, | ||
577 | 0x6c, 0xdd, 0x95, 0x4d, 0x9d, 0xb1, 0x71, 0x57, | ||
578 | 0x2d, 0x34, 0xe8, 0xb8, 0x42, 0xc7, 0x99, 0x03, | ||
579 | 0xc7, 0x07, 0x30, 0x65, 0x91, 0x55, 0xd5, 0x90, | ||
580 | 0x70, 0x97, 0x37, 0x68, 0xd4, 0x11, 0xf9, 0xe8, | ||
581 | 0xce, 0xec, 0xdc, 0x34, 0xd5, 0xd3, 0xb7, 0xc4, | ||
582 | 0xb8, 0x97, 0x05, 0x92, 0xad, 0xf8, 0xe2, 0x36, | ||
583 | 0x64, 0x41, 0xc9, 0xc5, 0x41, 0x77, 0x52, 0xd7, | ||
584 | 0x2c, 0xa5, 0x24, 0x2f, 0xd9, 0x34, 0x0b, 0x47, | ||
585 | 0x35, 0xa7, 0x28, 0x8b, 0xc5, 0xcd, 0xe9, 0x46, | ||
586 | 0xac, 0x39, 0x94, 0x3c, 0x10, 0xc6, 0x29, 0x73, | ||
587 | 0x0e, 0x0e, 0x5d, 0xe0, 0x71, 0x03, 0x8a, 0x72, | ||
588 | 0x0e, 0x26, 0xb0, 0x7d, 0x84, 0xed, 0x95, 0x23, | ||
589 | 0x49, 0x5a, 0x45, 0x83, 0x45, 0x60, 0x11, 0x4a, | ||
590 | 0x46, 0x31, 0xd4, 0xd8, 0x16, 0x54, 0x98, 0x58, | ||
591 | 0xed, 0x6d, 0xcc, 0x5d, 0xd6, 0x50, 0x61, 0x9f, | ||
592 | 0x9d, 0xc5, 0x3e, 0x9d, 0x32, 0x47, 0xde, 0x96, | ||
593 | 0xe1, 0x5d, 0xd8, 0xf8, 0xb4, 0x69, 0x6f, 0xb9, | ||
594 | 0x15, 0x90, 0x57, 0x7a, 0xf6, 0xad, 0xb0, 0x5b, | ||
595 | 0xf5, 0xa6, 0x36, 0x94, 0xfd, 0x84, 0xce, 0x1c, | ||
596 | 0x0f, 0x4b, 0xd0, 0xc2, 0x5b, 0x6b, 0x56, 0xef, | ||
597 | 0x73, 0x93, 0x0b, 0xc3, 0xee, 0xd9, 0xcf, 0xd3, | ||
598 | 0xa4, 0x22, 0x58, 0xcd, 0x50, 0x6e, 0x65, 0xf4, | ||
599 | 0xe9, 0xb7, 0x71, 0xaf, 0x4b, 0xb3, 0xb6, 0x2f, | ||
600 | 0x0f, 0x0e, 0x3b, 0xc9, 0x85, 0x14, 0xf5, 0x17, | ||
601 | 0xe8, 0x7a, 0x3a, 0xbf, 0x5f, 0x5e, 0xf8, 0x18, | ||
602 | 0x48, 0xa6, 0x72, 0xab, 0x06, 0x95, 0xe9, 0xc8, | ||
603 | 0xa7, 0xf4, 0x32, 0x44, 0x04, 0x0c, 0x84, 0x98, | ||
604 | 0x73, 0xe3, 0x89, 0x8d, 0x5f, 0x7e, 0x4a, 0x42, | ||
605 | 0x8f, 0xc5, 0x28, 0xb1, 0x82, 0xef, 0x1c, 0x97, | ||
606 | 0x31, 0x3b, 0x4d, 0xe0, 0x0e, 0x10, 0x10, 0x97, | ||
607 | 0x93, 0x49, 0x78, 0x2f, 0x0d, 0x86, 0x8b, 0xa1, | ||
608 | 0x53, 0xa9, 0x81, 0x20, 0x79, 0xe7, 0x07, 0x77, | ||
609 | 0xb6, 0xac, 0x5e, 0xd2, 0x05, 0xcd, 0xe9, 0xdb, | ||
610 | 0x8a, 0x94, 0x82, 0x8a, 0x23, 0xb9, 0x3d, 0x1c, | ||
611 | 0xa9, 0x7d, 0x72, 0x4a, 0xed, 0x33, 0xa3, 0xdb, | ||
612 | 0x21, 0xa7, 0x86, 0x33, 0x45, 0xa5, 0xaa, 0x56, | ||
613 | 0x45, 0xb5, 0x83, 0x29, 0x40, 0x47, 0x79, 0x04, | ||
614 | 0x6e, 0xb9, 0x95, 0xd0, 0x81, 0x77, 0x2d, 0x48, | ||
615 | 0x1e, 0xfe, 0xc3, 0xc2, 0x1e, 0xe5, 0xf2, 0xbe, | ||
616 | 0xfd, 0x3b, 0x94, 0x9f, 0xc4, 0xc4, 0x26, 0x9d, | ||
617 | 0xe4, 0x66, 0x1e, 0x19, 0xee, 0x6c, 0x79, 0x97, | ||
618 | 0x11, 0x31, 0x4b, 0x0d, 0x01, 0xcb, 0xde, 0xa8, | ||
619 | 0xf6, 0x6d, 0x7c, 0x39, 0x46, 0x4e, 0x7e, 0x3f, | ||
620 | 0x94, 0x17, 0xdf, 0xa1, 0x7d, 0xd9, 0x1c, 0x8e, | ||
621 | 0xbc, 0x7d, 0x33, 0x7d, 0xe3, 0x12, 0x40, 0xca, | ||
622 | 0xab, 0x37, 0x11, 0x46, 0xd4, 0xae, 0xef, 0x44, | ||
623 | 0xa2, 0xb3, 0x6a, 0x66, 0x0e, 0x0c, 0x90, 0x7f, | ||
624 | 0xdf, 0x5c, 0x66, 0x5f, 0xf2, 0x94, 0x9f, 0xa6, | ||
625 | 0x73, 0x4f, 0xeb, 0x0d, 0xad, 0xbf, 0xc0, 0x63, | ||
626 | 0x5c, 0xdc, 0x46, 0x51, 0xe8, 0x8e, 0x90, 0x19, | ||
627 | 0xa8, 0xa4, 0x3c, 0x91, 0x79, 0xfa, 0x7e, 0x58, | ||
628 | 0x85, 0x13, 0x55, 0xc5, 0x19, 0x82, 0x37, 0x1b, | ||
629 | 0x0a, 0x02, 0x1f, 0x99, 0x6b, 0x18, 0xf1, 0x28, | ||
630 | 0x08, 0xa2, 0x73, 0xb8, 0x0f, 0x2e, 0xcd, 0xbf, | ||
631 | 0xf3, 0x86, 0x7f, 0xea, 0xef, 0xd0, 0xbb, 0xa6, | ||
632 | 0x21, 0xdf, 0x49, 0x73, 0x51, 0xcc, 0x36, 0xd3, | ||
633 | 0x3e, 0xa0, 0xf8, 0x44, 0xdf, 0xd3, 0xa6, 0xbe, | ||
634 | 0x8a, 0xd4, 0x57, 0xdd, 0x72, 0x94, 0x61, 0x0f, | ||
635 | 0x82, 0xd1, 0x07, 0xb8, 0x7c, 0x18, 0x83, 0xdf, | ||
636 | 0x3a, 0xe5, 0x50, 0x6a, 0x82, 0x20, 0xac, 0xa9, | ||
637 | 0xa8, 0xff, 0xd9, 0xf3, 0x77, 0x33, 0x5a, 0x9e, | ||
638 | 0x7f, 0x6d, 0xfe, 0x5d, 0x33, 0x41, 0x42, 0xe7, | ||
639 | 0x6c, 0x19, 0xe0, 0x44, 0x8a, 0x15, 0xf6, 0x70, | ||
640 | 0x98, 0xb7, 0x68, 0x4d, 0xfa, 0x97, 0x39, 0xb0, | ||
641 | 0x8e, 0xe8, 0x84, 0x8b, 0x75, 0x30, 0xb7, 0x7d, | ||
642 | 0x92, 0x69, 0x20, 0x9c, 0x81, 0xfb, 0x4b, 0xf4, | ||
643 | 0x01, 0x50, 0xeb, 0xce, 0x0c, 0x1c, 0x6c, 0xb5, | ||
644 | 0x4a, 0xd7, 0x27, 0x0c, 0xce, 0xbb, 0xe5, 0x85, | ||
645 | 0xf0, 0xb6, 0xee, 0xd5, 0x70, 0xdd, 0x3b, 0xfc, | ||
646 | 0xd4, 0x99, 0xf1, 0x33, 0xdd, 0x8b, 0xc4, 0x2f, | ||
647 | 0xae, 0xab, 0x74, 0x96, 0x32, 0xc7, 0x4c, 0x56, | ||
648 | 0x3c, 0x89, 0x0f, 0x96, 0x0b, 0x42, 0xc0, 0xcb, | ||
649 | 0xee, 0x0f, 0x0b, 0x8c, 0xfb, 0x7e, 0x47, 0x7b, | ||
650 | 0x64, 0x48, 0xfd, 0xb2, 0x00, 0x80, 0x89, 0xa5, | ||
651 | 0x13, 0x55, 0x62, 0xfc, 0x8f, 0xe2, 0x42, 0x03, | ||
652 | 0xb7, 0x4e, 0x2a, 0x79, 0xb4, 0x82, 0xea, 0x23, | ||
653 | 0x49, 0xda, 0xaf, 0x52, 0x63, 0x1e, 0x60, 0x03, | ||
654 | 0x89, 0x06, 0x44, 0x46, 0x08, 0xc3, 0xc4, 0x87, | ||
655 | 0x70, 0x2e, 0xda, 0x94, 0xad, 0x6b, 0xe0, 0xe4, | ||
656 | 0xd1, 0x8a, 0x06, 0xc2, 0xa8, 0xc0, 0xa7, 0x43, | ||
657 | 0x3c, 0x47, 0x52, 0x0e, 0xc3, 0x77, 0x81, 0x11, | ||
658 | 0x67, 0x0e, 0xa0, 0x70, 0x04, 0x47, 0x29, 0x40, | ||
659 | 0x86, 0x0d, 0x34, 0x56, 0xa7, 0xc9, 0x35, 0x59, | ||
660 | 0x68, 0xdc, 0x93, 0x81, 0x70, 0xee, 0x86, 0xd9, | ||
661 | 0x80, 0x06, 0x40, 0x4f, 0x1a, 0x0d, 0x40, 0x30, | ||
662 | 0x0b, 0xcb, 0x96, 0x47, 0xc1, 0xb7, 0x52, 0xfd, | ||
663 | 0x56, 0xe0, 0x72, 0x4b, 0xfb, 0xbd, 0x92, 0x45, | ||
664 | 0x61, 0x71, 0xc2, 0x33, 0x11, 0xbf, 0x52, 0x83, | ||
665 | 0x79, 0x26, 0xe0, 0x49, 0x6b, 0xb7, 0x05, 0x8b, | ||
666 | 0xe8, 0x0e, 0x87, 0x31, 0xd7, 0x9d, 0x8a, 0xf5, | ||
667 | 0xc0, 0x5f, 0x2e, 0x58, 0x4a, 0xdb, 0x11, 0xb3, | ||
668 | 0x6c, 0x30, 0x2a, 0x46, 0x19, 0xe3, 0x27, 0x84, | ||
669 | 0x1f, 0x63, 0x6e, 0xf6, 0x57, 0xc7, 0xc9, 0xd8, | ||
670 | 0x5e, 0xba, 0xb3, 0x87, 0xd5, 0x83, 0x26, 0x34, | ||
671 | 0x21, 0x9e, 0x65, 0xde, 0x42, 0xd3, 0xbe, 0x7b, | ||
672 | 0xbc, 0x91, 0x71, 0x44, 0x4d, 0x99, 0x3b, 0x31, | ||
673 | 0xe5, 0x3f, 0x11, 0x4e, 0x7f, 0x13, 0x51, 0x3b, | ||
674 | 0xae, 0x79, 0xc9, 0xd3, 0x81, 0x8e, 0x25, 0x40, | ||
675 | 0x10, 0xfc, 0x07, 0x1e, 0xf9, 0x7b, 0x9a, 0x4b, | ||
676 | 0x6c, 0xe3, 0xb3, 0xad, 0x1a, 0x0a, 0xdd, 0x9e, | ||
677 | 0x59, 0x0c, 0xa2, 0xcd, 0xae, 0x48, 0x4a, 0x38, | ||
678 | 0x5b, 0x47, 0x41, 0x94, 0x65, 0x6b, 0xbb, 0xeb, | ||
679 | 0x5b, 0xe3, 0xaf, 0x07, 0x5b, 0xd4, 0x4a, 0xa2, | ||
680 | 0xc9, 0x5d, 0x2f, 0x64, 0x03, 0xd7, 0x3a, 0x2c, | ||
681 | 0x6e, 0xce, 0x76, 0x95, 0xb4, 0xb3, 0xc0, 0xf1, | ||
682 | 0xe2, 0x45, 0x73, 0x7a, 0x5c, 0xab, 0xc1, 0xfc, | ||
683 | 0x02, 0x8d, 0x81, 0x29, 0xb3, 0xac, 0x07, 0xec, | ||
684 | 0x40, 0x7d, 0x45, 0xd9, 0x7a, 0x59, 0xee, 0x34, | ||
685 | 0xf0, 0xe9, 0xd5, 0x7b, 0x96, 0xb1, 0x3d, 0x95, | ||
686 | 0xcc, 0x86, 0xb5, 0xb6, 0x04, 0x2d, 0xb5, 0x92, | ||
687 | 0x7e, 0x76, 0xf4, 0x06, 0xa9, 0xa3, 0x12, 0x0f, | ||
688 | 0xb1, 0xaf, 0x26, 0xba, 0x7c, 0xfc, 0x7e, 0x1c, | ||
689 | 0xbc, 0x2c, 0x49, 0x97, 0x53, 0x60, 0x13, 0x0b, | ||
690 | 0xa6, 0x61, 0x83, 0x89, 0x42, 0xd4, 0x17, 0x0c, | ||
691 | 0x6c, 0x26, 0x52, 0xc3, 0xb3, 0xd4, 0x67, 0xf5, | ||
692 | 0xe3, 0x04, 0xb7, 0xf4, 0xcb, 0x80, 0xb8, 0xcb, | ||
693 | 0x77, 0x56, 0x3e, 0xaa, 0x57, 0x54, 0xee, 0xb4, | ||
694 | 0x2c, 0x67, 0xcf, 0xf2, 0xdc, 0xbe, 0x55, 0xf9, | ||
695 | 0x43, 0x1f, 0x6e, 0x22, 0x97, 0x67, 0x7f, 0xc4, | ||
696 | 0xef, 0xb1, 0x26, 0x31, 0x1e, 0x27, 0xdf, 0x41, | ||
697 | 0x80, 0x47, 0x6c, 0xe2, 0xfa, 0xa9, 0x8c, 0x2a, | ||
698 | 0xf6, 0xf2, 0xab, 0xf0, 0x15, 0xda, 0x6c, 0xc8, | ||
699 | 0xfe, 0xb5, 0x23, 0xde, 0xa9, 0x05, 0x3f, 0x06, | ||
700 | 0x54, 0x4c, 0xcd, 0xe1, 0xab, 0xfc, 0x0e, 0x62, | ||
701 | 0x33, 0x31, 0x73, 0x2c, 0x76, 0xcb, 0xb4, 0x47, | ||
702 | 0x1e, 0x20, 0xad, 0xd8, 0xf2, 0x31, 0xdd, 0xc4, | ||
703 | 0x8b, 0x0c, 0x77, 0xbe, 0xe1, 0x8b, 0x26, 0x00, | ||
704 | 0x02, 0x58, 0xd6, 0x8d, 0xef, 0xad, 0x74, 0x67, | ||
705 | 0xab, 0x3f, 0xef, 0xcb, 0x6f, 0xb0, 0xcc, 0x81, | ||
706 | 0x44, 0x4c, 0xaf, 0xe9, 0x49, 0x4f, 0xdb, 0xa0, | ||
707 | 0x25, 0xa4, 0xf0, 0x89, 0xf1, 0xbe, 0xd8, 0x10, | ||
708 | 0xff, 0xb1, 0x3b, 0x4b, 0xfa, 0x98, 0xf5, 0x79, | ||
709 | 0x6d, 0x1e, 0x69, 0x4d, 0x57, 0xb1, 0xc8, 0x19, | ||
710 | 0x1b, 0xbd, 0x1e, 0x8c, 0x84, 0xb7, 0x7b, 0xe8, | ||
711 | 0xd2, 0x2d, 0x09, 0x41, 0x41, 0x37, 0x3d, 0xb1, | ||
712 | 0x6f, 0x26, 0x5d, 0x71, 0x16, 0x3d, 0xb7, 0x83, | ||
713 | 0x27, 0x2c, 0xa7, 0xb6, 0x50, 0xbd, 0x91, 0x86, | ||
714 | 0xab, 0x24, 0xa1, 0x38, 0xfd, 0xea, 0x71, 0x55, | ||
715 | 0x7e, 0x9a, 0x07, 0x77, 0x4b, 0xfa, 0x61, 0x66, | ||
716 | 0x20, 0x1e, 0x28, 0x95, 0x18, 0x1b, 0xa4, 0xa0, | ||
717 | 0xfd, 0xc0, 0x89, 0x72, 0x43, 0xd9, 0x3b, 0x49, | ||
718 | 0x5a, 0x3f, 0x9d, 0xbf, 0xdb, 0xb4, 0x46, 0xea, | ||
719 | 0x42, 0x01, 0x77, 0x23, 0x68, 0x95, 0xb6, 0x24, | ||
720 | 0xb3, 0xa8, 0x6c, 0x28, 0x3b, 0x11, 0x40, 0x7e, | ||
721 | 0x18, 0x65, 0x6d, 0xd8, 0x24, 0x42, 0x7d, 0x88, | ||
722 | 0xc0, 0x52, 0xd9, 0x05, 0xe4, 0x95, 0x90, 0x87, | ||
723 | 0x8c, 0xf4, 0xd0, 0x6b, 0xb9, 0x83, 0x99, 0x34, | ||
724 | 0x6d, 0xfe, 0x54, 0x40, 0x94, 0x52, 0x21, 0x4f, | ||
725 | 0x14, 0x25, 0xc5, 0xd6, 0x5e, 0x95, 0xdc, 0x0a, | ||
726 | 0x2b, 0x89, 0x20, 0x11, 0x84, 0x48, 0xd6, 0x3a, | ||
727 | 0xcd, 0x5c, 0x24, 0xad, 0x62, 0xe3, 0xb1, 0x93, | ||
728 | 0x25, 0x8d, 0xcd, 0x7e, 0xfc, 0x27, 0xa3, 0x37, | ||
729 | 0xfd, 0x84, 0xfc, 0x1b, 0xb2, 0xf1, 0x27, 0x38, | ||
730 | 0x5a, 0xb7, 0xfc, 0xf2, 0xfa, 0x95, 0x66, 0xd4, | ||
731 | 0xfb, 0xba, 0xa7, 0xd7, 0xa3, 0x72, 0x69, 0x48, | ||
732 | 0x48, 0x8c, 0xeb, 0x28, 0x89, 0xfe, 0x33, 0x65, | ||
733 | 0x5a, 0x36, 0x01, 0x7e, 0x06, 0x79, 0x0a, 0x09, | ||
734 | 0x3b, 0x74, 0x11, 0x9a, 0x6e, 0xbf, 0xd4, 0x9e, | ||
735 | 0x58, 0x90, 0x49, 0x4f, 0x4d, 0x08, 0xd4, 0xe5, | ||
736 | 0x4a, 0x09, 0x21, 0xef, 0x8b, 0xb8, 0x74, 0x3b, | ||
737 | 0x91, 0xdd, 0x36, 0x85, 0x60, 0x2d, 0xfa, 0xd4, | ||
738 | 0x45, 0x7b, 0x45, 0x53, 0xf5, 0x47, 0x87, 0x7e, | ||
739 | 0xa6, 0x37, 0xc8, 0x78, 0x7a, 0x68, 0x9d, 0x8d, | ||
740 | 0x65, 0x2c, 0x0e, 0x91, 0x5c, 0xa2, 0x60, 0xf0, | ||
741 | 0x8e, 0x3f, 0xe9, 0x1a, 0xcd, 0xaa, 0xe7, 0xd5, | ||
742 | 0x77, 0x18, 0xaf, 0xc9, 0xbc, 0x18, 0xea, 0x48, | ||
743 | 0x1b, 0xfb, 0x22, 0x48, 0x70, 0x16, 0x29, 0x9e, | ||
744 | 0x5b, 0xc1, 0x2c, 0x66, 0x23, 0xbc, 0xf0, 0x1f, | ||
745 | 0xef, 0xaf, 0xe4, 0xd6, 0x04, 0x19, 0x82, 0x7a, | ||
746 | 0x0b, 0xba, 0x4b, 0x46, 0xb1, 0x6a, 0x85, 0x5d, | ||
747 | 0xb4, 0x73, 0xd6, 0x21, 0xa1, 0x71, 0x60, 0x14, | ||
748 | 0xee, 0x0a, 0x77, 0xc4, 0x66, 0x2e, 0xf9, 0x69, | ||
749 | 0x30, 0xaf, 0x41, 0x0b, 0xc8, 0x83, 0x3c, 0x53, | ||
750 | 0x99, 0x19, 0x27, 0x46, 0xf7, 0x41, 0x6e, 0x56, | ||
751 | 0xdc, 0x94, 0x28, 0x67, 0x4e, 0xb7, 0x25, 0x48, | ||
752 | 0x8a, 0xc2, 0xe0, 0x60, 0x96, 0xcc, 0x18, 0xf4, | ||
753 | 0x84, 0xdd, 0xa7, 0x5e, 0x3e, 0x05, 0x0b, 0x26, | ||
754 | 0x26, 0xb2, 0x5c, 0x1f, 0x57, 0x1a, 0x04, 0x7e, | ||
755 | 0x6a, 0xe3, 0x2f, 0xb4, 0x35, 0xb6, 0x38, 0x40, | ||
756 | 0x40, 0xcd, 0x6f, 0x87, 0x2e, 0xef, 0xa3, 0xd7, | ||
757 | 0xa9, 0xc2, 0xe8, 0x0d, 0x27, 0xdf, 0x44, 0x62, | ||
758 | 0x99, 0xa0, 0xfc, 0xcf, 0x81, 0x78, 0xcb, 0xfe, | ||
759 | 0xe5, 0xa0, 0x03, 0x4e, 0x6c, 0xd7, 0xf4, 0xaf, | ||
760 | 0x7a, 0xbb, 0x61, 0x82, 0xfe, 0x71, 0x89, 0xb2, | ||
761 | 0x22, 0x7c, 0x8e, 0x83, 0x04, 0xce, 0xf6, 0x5d, | ||
762 | 0x84, 0x8f, 0x95, 0x6a, 0x7f, 0xad, 0xfd, 0x32, | ||
763 | 0x9c, 0x5e, 0xe4, 0x9c, 0x89, 0x60, 0x54, 0xaa, | ||
764 | 0x96, 0x72, 0xd2, 0xd7, 0x36, 0x85, 0xa9, 0x45, | ||
765 | 0xd2, 0x2a, 0xa1, 0x81, 0x49, 0x6f, 0x7e, 0x04, | ||
766 | 0xfa, 0xe2, 0xfe, 0x90, 0x26, 0x77, 0x5a, 0x33, | ||
767 | 0xb8, 0x04, 0x9a, 0x7a, 0xe6, 0x4c, 0x4f, 0xad, | ||
768 | 0x72, 0x96, 0x08, 0x28, 0x58, 0x13, 0xf8, 0xc4, | ||
769 | 0x1c, 0xf0, 0xc3, 0x45, 0x95, 0x49, 0x20, 0x8c, | ||
770 | 0x9f, 0x39, 0x70, 0xe1, 0x77, 0xfe, 0xd5, 0x4b, | ||
771 | 0xaf, 0x86, 0xda, 0xef, 0x22, 0x06, 0x83, 0x36, | ||
772 | 0x29, 0x12, 0x11, 0x40, 0xbc, 0x3b, 0x86, 0xaa, | ||
773 | 0xaa, 0x65, 0x60, 0xc3, 0x80, 0xca, 0xed, 0xa9, | ||
774 | 0xf3, 0xb0, 0x79, 0x96, 0xa2, 0x55, 0x27, 0x28, | ||
775 | 0x55, 0x73, 0x26, 0xa5, 0x50, 0xea, 0x92, 0x4b, | ||
776 | 0x3c, 0x5c, 0x82, 0x33, 0xf0, 0x01, 0x3f, 0x03, | ||
777 | 0xc1, 0x08, 0x05, 0xbf, 0x98, 0xf4, 0x9b, 0x6d, | ||
778 | 0xa5, 0xa8, 0xb4, 0x82, 0x0c, 0x06, 0xfa, 0xff, | ||
779 | 0x2d, 0x08, 0xf3, 0x05, 0x4f, 0x57, 0x2a, 0x39, | ||
780 | 0xd4, 0x83, 0x0d, 0x75, 0x51, 0xd8, 0x5b, 0x1b, | ||
781 | 0xd3, 0x51, 0x5a, 0x32, 0x2a, 0x9b, 0x32, 0xb2, | ||
782 | 0xf2, 0xa4, 0x96, 0x12, 0xf2, 0xae, 0x40, 0x34, | ||
783 | 0x67, 0xa8, 0xf5, 0x44, 0xd5, 0x35, 0x53, 0xfe, | ||
784 | 0xa3, 0x60, 0x96, 0x63, 0x0f, 0x1f, 0x6e, 0xb0, | ||
785 | 0x5a, 0x42, 0xa6, 0xfc, 0x51, 0x0b, 0x60, 0x27, | ||
786 | 0xbc, 0x06, 0x71, 0xed, 0x65, 0x5b, 0x23, 0x86, | ||
787 | 0x4a, 0x07, 0x3b, 0x22, 0x07, 0x46, 0xe6, 0x90, | ||
788 | 0x3e, 0xf3, 0x25, 0x50, 0x1b, 0x4c, 0x7f, 0x03, | ||
789 | 0x08, 0xa8, 0x36, 0x6b, 0x87, 0xe5, 0xe3, 0xdb, | ||
790 | 0x9a, 0x38, 0x83, 0xff, 0x9f, 0x1a, 0x9f, 0x57, | ||
791 | 0xa4, 0x2a, 0xf6, 0x37, 0xbc, 0x1a, 0xff, 0xc9, | ||
792 | 0x1e, 0x35, 0x0c, 0xc3, 0x7c, 0xa3, 0xb2, 0xe5, | ||
793 | 0xd2, 0xc6, 0xb4, 0x57, 0x47, 0xe4, 0x32, 0x16, | ||
794 | 0x6d, 0xa9, 0xae, 0x64, 0xe6, 0x2d, 0x8d, 0xc5, | ||
795 | 0x8d, 0x50, 0x8e, 0xe8, 0x1a, 0x22, 0x34, 0x2a, | ||
796 | 0xd9, 0xeb, 0x51, 0x90, 0x4a, 0xb1, 0x41, 0x7d, | ||
797 | 0x64, 0xf9, 0xb9, 0x0d, 0xf6, 0x23, 0x33, 0xb0, | ||
798 | 0x33, 0xf4, 0xf7, 0x3f, 0x27, 0x84, 0xc6, 0x0f, | ||
799 | 0x54, 0xa5, 0xc0, 0x2e, 0xec, 0x0b, 0x3a, 0x48, | ||
800 | 0x6e, 0x80, 0x35, 0x81, 0x43, 0x9b, 0x90, 0xb1, | ||
801 | 0xd0, 0x2b, 0xea, 0x21, 0xdc, 0xda, 0x5b, 0x09, | ||
802 | 0xf4, 0xcc, 0x10, 0xb4, 0xc7, 0xfe, 0x79, 0x51, | ||
803 | 0xc3, 0xc5, 0xac, 0x88, 0x74, 0x84, 0x0b, 0x4b, | ||
804 | 0xca, 0x79, 0x16, 0x29, 0xfb, 0x69, 0x54, 0xdf, | ||
805 | 0x41, 0x7e, 0xe9, 0xc7, 0x8e, 0xea, 0xa5, 0xfe, | ||
806 | 0xfc, 0x76, 0x0e, 0x90, 0xc4, 0x92, 0x38, 0xad, | ||
807 | 0x7b, 0x48, 0xe6, 0x6e, 0xf7, 0x21, 0xfd, 0x4e, | ||
808 | 0x93, 0x0a, 0x7b, 0x41, 0x83, 0x68, 0xfb, 0x57, | ||
809 | 0x51, 0x76, 0x34, 0xa9, 0x6c, 0x00, 0xaa, 0x4f, | ||
810 | 0x66, 0x65, 0x98, 0x4a, 0x4f, 0xa3, 0xa0, 0xef, | ||
811 | 0x69, 0x3f, 0xe3, 0x1c, 0x92, 0x8c, 0xfd, 0xd8, | ||
812 | 0xe8, 0xde, 0x7c, 0x7f, 0x3e, 0x84, 0x8e, 0x69, | ||
813 | 0x3c, 0xf1, 0xf2, 0x05, 0x46, 0xdc, 0x2f, 0x9d, | ||
814 | 0x5e, 0x6e, 0x4c, 0xfb, 0xb5, 0x99, 0x2a, 0x59, | ||
815 | 0x63, 0xc1, 0x34, 0xbc, 0x57, 0xc0, 0x0d, 0xb9, | ||
816 | 0x61, 0x25, 0xf3, 0x33, 0x23, 0x51, 0xb6, 0x0d, | ||
817 | 0x07, 0xa6, 0xab, 0x94, 0x4a, 0xb7, 0x2a, 0xea, | ||
818 | 0xee, 0xac, 0xa3, 0xc3, 0x04, 0x8b, 0x0e, 0x56, | ||
819 | 0xfe, 0x44, 0xa7, 0x39, 0xe2, 0xed, 0xed, 0xb4, | ||
820 | 0x22, 0x2b, 0xac, 0x12, 0x32, 0x28, 0x91, 0xd8, | ||
821 | 0xa5, 0xab, 0xff, 0x5f, 0xe0, 0x4b, 0xda, 0x78, | ||
822 | 0x17, 0xda, 0xf1, 0x01, 0x5b, 0xcd, 0xe2, 0x5f, | ||
823 | 0x50, 0x45, 0x73, 0x2b, 0xe4, 0x76, 0x77, 0xf4, | ||
824 | 0x64, 0x1d, 0x43, 0xfb, 0x84, 0x7a, 0xea, 0x91, | ||
825 | 0xae, 0xf9, 0x9e, 0xb7, 0xb4, 0xb0, 0x91, 0x5f, | ||
826 | 0x16, 0x35, 0x9a, 0x11, 0xb8, 0xc7, 0xc1, 0x8c, | ||
827 | 0xc6, 0x10, 0x8d, 0x2f, 0x63, 0x4a, 0xa7, 0x57, | ||
828 | 0x3a, 0x51, 0xd6, 0x32, 0x2d, 0x64, 0x72, 0xd4, | ||
829 | 0x66, 0xdc, 0x10, 0xa6, 0x67, 0xd6, 0x04, 0x23, | ||
830 | 0x9d, 0x0a, 0x11, 0x77, 0xdd, 0x37, 0x94, 0x17, | ||
831 | 0x3c, 0xbf, 0x8b, 0x65, 0xb0, 0x2e, 0x5e, 0x66, | ||
832 | 0x47, 0x64, 0xac, 0xdd, 0xf0, 0x84, 0xfd, 0x39, | ||
833 | 0xfa, 0x15, 0x5d, 0xef, 0xae, 0xca, 0xc1, 0x36, | ||
834 | 0xa7, 0x5c, 0xbf, 0xc7, 0x08, 0xc2, 0x66, 0x00, | ||
835 | 0x74, 0x74, 0x4e, 0x27, 0x3f, 0x55, 0x8a, 0xb7, | ||
836 | 0x38, 0x66, 0x83, 0x6d, 0xcf, 0x99, 0x9e, 0x60, | ||
837 | 0x8f, 0xdd, 0x2e, 0x62, 0x22, 0x0e, 0xef, 0x0c, | ||
838 | 0x98, 0xa7, 0x85, 0x74, 0x3b, 0x9d, 0xec, 0x9e, | ||
839 | 0xa9, 0x19, 0x72, 0xa5, 0x7f, 0x2c, 0x39, 0xb7, | ||
840 | 0x7d, 0xb7, 0xf1, 0x12, 0x65, 0x27, 0x4b, 0x5a, | ||
841 | 0xde, 0x17, 0xfe, 0xad, 0x44, 0xf3, 0x20, 0x4d, | ||
842 | 0xfd, 0xe4, 0x1f, 0xb5, 0x81, 0xb0, 0x36, 0x37, | ||
843 | 0x08, 0x6f, 0xc3, 0x0c, 0xe9, 0x85, 0x98, 0x82, | ||
844 | 0xa9, 0x62, 0x0c, 0xc4, 0x97, 0xc0, 0x50, 0xc8, | ||
845 | 0xa7, 0x3c, 0x50, 0x9f, 0x43, 0xb9, 0xcd, 0x5e, | ||
846 | 0x4d, 0xfa, 0x1c, 0x4b, 0x0b, 0xa9, 0x98, 0x85, | ||
847 | 0x38, 0x92, 0xac, 0x8d, 0xe4, 0xad, 0x9b, 0x98, | ||
848 | 0xab, 0xd9, 0x38, 0xac, 0x62, 0x52, 0xa3, 0x22, | ||
849 | 0x63, 0x0f, 0xbf, 0x95, 0x48, 0xdf, 0x69, 0xe7, | ||
850 | 0x8b, 0x33, 0xd5, 0xb2, 0xbd, 0x05, 0x49, 0x49, | ||
851 | 0x9d, 0x57, 0x73, 0x19, 0x33, 0xae, 0xfa, 0x33, | ||
852 | 0xf1, 0x19, 0xa8, 0x80, 0xce, 0x04, 0x9f, 0xbc, | ||
853 | 0x1d, 0x65, 0x82, 0x1b, 0xe5, 0x3a, 0x51, 0xc8, | ||
854 | 0x1c, 0x21, 0xe3, 0x5d, 0xf3, 0x7d, 0x9b, 0x2f, | ||
855 | 0x2c, 0x1d, 0x4a, 0x7f, 0x9b, 0x68, 0x35, 0xa3, | ||
856 | 0xb2, 0x50, 0xf7, 0x62, 0x79, 0xcd, 0xf4, 0x98, | ||
857 | 0x4f, 0xe5, 0x63, 0x7c, 0x3e, 0x45, 0x31, 0x8c, | ||
858 | 0x16, 0xa0, 0x12, 0xc8, 0x58, 0xce, 0x39, 0xa6, | ||
859 | 0xbc, 0x54, 0xdb, 0xc5, 0xe0, 0xd5, 0xba, 0xbc, | ||
860 | 0xb9, 0x04, 0xf4, 0x8d, 0xe8, 0x2f, 0x15, 0x9d, | ||
861 | }; | ||
862 | |||
863 | /* 100 test cases */ | ||
864 | static struct crc_test { | ||
865 | u32 crc; /* random starting crc */ | ||
866 | u32 start; /* random 6 bit offset in buf */ | ||
867 | u32 length; /* random 11 bit length of test */ | ||
868 | u32 crc_le; /* expected crc32_le result */ | ||
869 | u32 crc_be; /* expected crc32_be result */ | ||
870 | u32 crc32c_le; /* expected crc32c_le result */ | ||
871 | } const test[] __initconst = | ||
872 | { | ||
873 | {0x674bf11d, 0x00000038, 0x00000542, 0x0af6d466, 0xd8b6e4c1, 0xf6e93d6c}, | ||
874 | {0x35c672c6, 0x0000003a, 0x000001aa, 0xc6d3dfba, 0x28aaf3ad, 0x0fe92aca}, | ||
875 | {0x496da28e, 0x00000039, 0x000005af, 0xd933660f, 0x5d57e81f, 0x52e1ebb8}, | ||
876 | {0x09a9b90e, 0x00000027, 0x000001f8, 0xb45fe007, 0xf45fca9a, 0x0798af9a}, | ||
877 | {0xdc97e5a9, 0x00000025, 0x000003b6, 0xf81a3562, 0xe0126ba2, 0x18eb3152}, | ||
878 | {0x47c58900, 0x0000000a, 0x000000b9, 0x8e58eccf, 0xf3afc793, 0xd00d08c7}, | ||
879 | {0x292561e8, 0x0000000c, 0x00000403, 0xa2ba8aaf, 0x0b797aed, 0x8ba966bc}, | ||
880 | {0x415037f6, 0x00000003, 0x00000676, 0xa17d52e8, 0x7f0fdf35, 0x11d694a2}, | ||
881 | {0x3466e707, 0x00000026, 0x00000042, 0x258319be, 0x75c484a2, 0x6ab3208d}, | ||
882 | {0xafd1281b, 0x00000023, 0x000002ee, 0x4428eaf8, 0x06c7ad10, 0xba4603c5}, | ||
883 | {0xd3857b18, 0x00000028, 0x000004a2, 0x5c430821, 0xb062b7cb, 0xe6071c6f}, | ||
884 | {0x1d825a8f, 0x0000002b, 0x0000050b, 0xd2c45f0c, 0xd68634e0, 0x179ec30a}, | ||
885 | {0x5033e3bc, 0x0000000b, 0x00000078, 0xa3ea4113, 0xac6d31fb, 0x0903beb8}, | ||
886 | {0x94f1fb5e, 0x0000000f, 0x000003a2, 0xfbfc50b1, 0x3cfe50ed, 0x6a7cb4fa}, | ||
887 | {0xc9a0fe14, 0x00000009, 0x00000473, 0x5fb61894, 0x87070591, 0xdb535801}, | ||
888 | {0x88a034b1, 0x0000001c, 0x000005ad, 0xc1b16053, 0x46f95c67, 0x92bed597}, | ||
889 | {0xf0f72239, 0x00000020, 0x0000026d, 0xa6fa58f3, 0xf8c2c1dd, 0x192a3f1b}, | ||
890 | {0xcc20a5e3, 0x0000003b, 0x0000067a, 0x7740185a, 0x308b979a, 0xccbaec1a}, | ||
891 | {0xce589c95, 0x0000002b, 0x00000641, 0xd055e987, 0x40aae25b, 0x7eabae4d}, | ||
892 | {0x78edc885, 0x00000035, 0x000005be, 0xa39cb14b, 0x035b0d1f, 0x28c72982}, | ||
893 | {0x9d40a377, 0x0000003b, 0x00000038, 0x1f47ccd2, 0x197fbc9d, 0xc3cd4d18}, | ||
894 | {0x703d0e01, 0x0000003c, 0x000006f1, 0x88735e7c, 0xfed57c5a, 0xbca8f0e7}, | ||
895 | {0x776bf505, 0x0000000f, 0x000005b2, 0x5cc4fc01, 0xf32efb97, 0x713f60b3}, | ||
896 | {0x4a3e7854, 0x00000027, 0x000004b8, 0x8d923c82, 0x0cbfb4a2, 0xebd08fd5}, | ||
897 | {0x209172dd, 0x0000003b, 0x00000356, 0xb89e9c2b, 0xd7868138, 0x64406c59}, | ||
898 | {0x3ba4cc5b, 0x0000002f, 0x00000203, 0xe51601a9, 0x5b2a1032, 0x7421890e}, | ||
899 | {0xfc62f297, 0x00000000, 0x00000079, 0x71a8e1a2, 0x5d88685f, 0xe9347603}, | ||
900 | {0x64280b8b, 0x00000016, 0x000007ab, 0x0fa7a30c, 0xda3a455f, 0x1bef9060}, | ||
901 | {0x97dd724b, 0x00000033, 0x000007ad, 0x5788b2f4, 0xd7326d32, 0x34720072}, | ||
902 | {0x61394b52, 0x00000035, 0x00000571, 0xc66525f1, 0xcabe7fef, 0x48310f59}, | ||
903 | {0x29b4faff, 0x00000024, 0x0000006e, 0xca13751e, 0x993648e0, 0x783a4213}, | ||
904 | {0x29bfb1dc, 0x0000000b, 0x00000244, 0x436c43f7, 0x429f7a59, 0x9e8efd41}, | ||
905 | {0x86ae934b, 0x00000035, 0x00000104, 0x0760ec93, 0x9cf7d0f4, 0xfc3d34a5}, | ||
906 | {0xc4c1024e, 0x0000002e, 0x000006b1, 0x6516a3ec, 0x19321f9c, 0x17a52ae2}, | ||
907 | {0x3287a80a, 0x00000026, 0x00000496, 0x0b257eb1, 0x754ebd51, 0x886d935a}, | ||
908 | {0xa4db423e, 0x00000023, 0x0000045d, 0x9b3a66dc, 0x873e9f11, 0xeaaeaeb2}, | ||
909 | {0x7a1078df, 0x00000015, 0x0000014a, 0x8c2484c5, 0x6a628659, 0x8e900a4b}, | ||
910 | {0x6048bd5b, 0x00000006, 0x0000006a, 0x897e3559, 0xac9961af, 0xd74662b1}, | ||
911 | {0xd8f9ea20, 0x0000003d, 0x00000277, 0x60eb905b, 0xed2aaf99, 0xd26752ba}, | ||
912 | {0xea5ec3b4, 0x0000002a, 0x000004fe, 0x869965dc, 0x6c1f833b, 0x8b1fcd62}, | ||
913 | {0x2dfb005d, 0x00000016, 0x00000345, 0x6a3b117e, 0xf05e8521, 0xf54342fe}, | ||
914 | {0x5a214ade, 0x00000020, 0x000005b6, 0x467f70be, 0xcb22ccd3, 0x5b95b988}, | ||
915 | {0xf0ab9cca, 0x00000032, 0x00000515, 0xed223df3, 0x7f3ef01d, 0x2e1176be}, | ||
916 | {0x91b444f9, 0x0000002e, 0x000007f8, 0x84e9a983, 0x5676756f, 0x66120546}, | ||
917 | {0x1b5d2ddb, 0x0000002e, 0x0000012c, 0xba638c4c, 0x3f42047b, 0xf256a5cc}, | ||
918 | {0xd824d1bb, 0x0000003a, 0x000007b5, 0x6288653b, 0x3a3ebea0, 0x4af1dd69}, | ||
919 | {0x0470180c, 0x00000034, 0x000001f0, 0x9d5b80d6, 0x3de08195, 0x56f0a04a}, | ||
920 | {0xffaa3a3f, 0x00000036, 0x00000299, 0xf3a82ab8, 0x53e0c13d, 0x74f6b6b2}, | ||
921 | {0x6406cfeb, 0x00000023, 0x00000600, 0xa920b8e8, 0xe4e2acf4, 0x085951fd}, | ||
922 | {0xb24aaa38, 0x0000003e, 0x000004a1, 0x657cc328, 0x5077b2c3, 0xc65387eb}, | ||
923 | {0x58b2ab7c, 0x00000039, 0x000002b4, 0x3a17ee7e, 0x9dcb3643, 0x1ca9257b}, | ||
924 | {0x3db85970, 0x00000006, 0x000002b6, 0x95268b59, 0xb9812c10, 0xfd196d76}, | ||
925 | {0x857830c5, 0x00000003, 0x00000590, 0x4ef439d5, 0xf042161d, 0x5ef88339}, | ||
926 | {0xe1fcd978, 0x0000003e, 0x000007d8, 0xae8d8699, 0xce0a1ef5, 0x2c3714d9}, | ||
927 | {0xb982a768, 0x00000016, 0x000006e0, 0x62fad3df, 0x5f8a067b, 0x58576548}, | ||
928 | {0x1d581ce8, 0x0000001e, 0x0000058b, 0xf0f5da53, 0x26e39eee, 0xfd7c57de}, | ||
929 | {0x2456719b, 0x00000025, 0x00000503, 0x4296ac64, 0xd50e4c14, 0xd5fedd59}, | ||
930 | {0xfae6d8f2, 0x00000000, 0x0000055d, 0x057fdf2e, 0x2a31391a, 0x1cc3b17b}, | ||
931 | {0xcba828e3, 0x00000039, 0x000002ce, 0xe3f22351, 0x8f00877b, 0x270eed73}, | ||
932 | {0x13d25952, 0x0000000a, 0x0000072d, 0x76d4b4cc, 0x5eb67ec3, 0x91ecbb11}, | ||
933 | {0x0342be3f, 0x00000015, 0x00000599, 0xec75d9f1, 0x9d4d2826, 0x05ed8d0c}, | ||
934 | {0xeaa344e0, 0x00000014, 0x000004d8, 0x72a4c981, 0x2064ea06, 0x0b09ad5b}, | ||
935 | {0xbbb52021, 0x0000003b, 0x00000272, 0x04af99fc, 0xaf042d35, 0xf8d511fb}, | ||
936 | {0xb66384dc, 0x0000001d, 0x000007fc, 0xd7629116, 0x782bd801, 0x5ad832cc}, | ||
937 | {0x616c01b6, 0x00000022, 0x000002c8, 0x5b1dab30, 0x783ce7d2, 0x1214d196}, | ||
938 | {0xce2bdaad, 0x00000016, 0x0000062a, 0x932535c8, 0x3f02926d, 0x5747218a}, | ||
939 | {0x00fe84d7, 0x00000005, 0x00000205, 0x850e50aa, 0x753d649c, 0xde8f14de}, | ||
940 | {0xbebdcb4c, 0x00000006, 0x0000055d, 0xbeaa37a2, 0x2d8c9eba, 0x3563b7b9}, | ||
941 | {0xd8b1a02a, 0x00000010, 0x00000387, 0x5017d2fc, 0x503541a5, 0x071475d0}, | ||
942 | {0x3b96cad2, 0x00000036, 0x00000347, 0x1d2372ae, 0x926cd90b, 0x54c79d60}, | ||
943 | {0xc94c1ed7, 0x00000005, 0x0000038b, 0x9e9fdb22, 0x144a9178, 0x4c53eee6}, | ||
944 | {0x1aad454e, 0x00000025, 0x000002b2, 0xc3f6315c, 0x5c7a35b3, 0x10137a3c}, | ||
945 | {0xa4fec9a6, 0x00000000, 0x000006d6, 0x90be5080, 0xa4107605, 0xaa9d6c73}, | ||
946 | {0x1bbe71e2, 0x0000001f, 0x000002fd, 0x4e504c3b, 0x284ccaf1, 0xb63d23e7}, | ||
947 | {0x4201c7e4, 0x00000002, 0x000002b7, 0x7822e3f9, 0x0cc912a9, 0x7f53e9cf}, | ||
948 | {0x23fddc96, 0x00000003, 0x00000627, 0x8a385125, 0x07767e78, 0x13c1cd83}, | ||
949 | {0xd82ba25c, 0x00000016, 0x0000063e, 0x98e4148a, 0x283330c9, 0x49ff5867}, | ||
950 | {0x786f2032, 0x0000002d, 0x0000060f, 0xf201600a, 0xf561bfcd, 0x8467f211}, | ||
951 | {0xfebe4e1f, 0x0000002a, 0x000004f2, 0x95e51961, 0xfd80dcab, 0x3f9683b2}, | ||
952 | {0x1a6e0a39, 0x00000008, 0x00000672, 0x8af6c2a5, 0x78dd84cb, 0x76a3f874}, | ||
953 | {0x56000ab8, 0x0000000e, 0x000000e5, 0x36bacb8f, 0x22ee1f77, 0x863b702f}, | ||
954 | {0x4717fe0c, 0x00000000, 0x000006ec, 0x8439f342, 0x5c8e03da, 0xdc6c58ff}, | ||
955 | {0xd5d5d68e, 0x0000003c, 0x000003a3, 0x46fff083, 0x177d1b39, 0x0622cc95}, | ||
956 | {0xc25dd6c6, 0x00000024, 0x000006c0, 0x5ceb8eb4, 0x892b0d16, 0xe85605cd}, | ||
957 | {0xe9b11300, 0x00000023, 0x00000683, 0x07a5d59a, 0x6c6a3208, 0x31da5f06}, | ||
958 | {0x95cd285e, 0x00000001, 0x00000047, 0x7b3a4368, 0x0202c07e, 0xa1f2e784}, | ||
959 | {0xd9245a25, 0x0000001e, 0x000003a6, 0xd33c1841, 0x1936c0d5, 0xb07cc616}, | ||
960 | {0x103279db, 0x00000006, 0x0000039b, 0xca09b8a0, 0x77d62892, 0xbf943b6c}, | ||
961 | {0x1cba3172, 0x00000027, 0x000001c8, 0xcb377194, 0xebe682db, 0x2c01af1c}, | ||
962 | {0x8f613739, 0x0000000c, 0x000001df, 0xb4b0bc87, 0x7710bd43, 0x0fe5f56d}, | ||
963 | {0x1c6aa90d, 0x0000001b, 0x0000053c, 0x70559245, 0xda7894ac, 0xf8943b2d}, | ||
964 | {0xaabe5b93, 0x0000003d, 0x00000715, 0xcdbf42fa, 0x0c3b99e7, 0xe4d89272}, | ||
965 | {0xf15dd038, 0x00000006, 0x000006db, 0x6e104aea, 0x8d5967f2, 0x7c2f6bbb}, | ||
966 | {0x584dd49c, 0x00000020, 0x000007bc, 0x36b6cfd6, 0xad4e23b2, 0xabbf388b}, | ||
967 | {0x5d8c9506, 0x00000020, 0x00000470, 0x4c62378e, 0x31d92640, 0x1dca1f4e}, | ||
968 | {0xb80d17b0, 0x00000032, 0x00000346, 0x22a5bb88, 0x9a7ec89f, 0x5c170e23}, | ||
969 | {0xdaf0592e, 0x00000023, 0x000007b0, 0x3cab3f99, 0x9b1fdd99, 0xc0e9d672}, | ||
970 | {0x4793cc85, 0x0000000d, 0x00000706, 0xe82e04f6, 0xed3db6b7, 0xc18bdc86}, | ||
971 | {0x82ebf64e, 0x00000009, 0x000007c3, 0x69d590a9, 0x9efa8499, 0xa874fcdd}, | ||
972 | {0xb18a0319, 0x00000026, 0x000007db, 0x1cf98dcc, 0x8fa9ad6a, 0x9dc0bb48}, | ||
973 | }; | ||
974 | |||
975 | #include <linux/time.h> | ||
976 | |||
977 | static int __init crc32c_test(void) | ||
978 | { | ||
979 | int i; | ||
980 | int errors = 0; | ||
981 | int bytes = 0; | ||
982 | u64 nsec; | ||
983 | unsigned long flags; | ||
984 | |||
985 | /* keep static to prevent cache warming code from | ||
986 | * getting eliminated by the compiler */ | ||
987 | static u32 crc; | ||
988 | |||
989 | /* pre-warm the cache */ | ||
990 | for (i = 0; i < 100; i++) { | ||
991 | bytes += test[i].length; | ||
992 | |||
993 | crc ^= __crc32c_le(test[i].crc, test_buf + | ||
994 | test[i].start, test[i].length); | ||
995 | } | ||
996 | |||
997 | /* reduce OS noise */ | ||
998 | local_irq_save(flags); | ||
999 | local_irq_disable(); | ||
1000 | |||
1001 | nsec = ktime_get_ns(); | ||
1002 | for (i = 0; i < 100; i++) { | ||
1003 | if (test[i].crc32c_le != __crc32c_le(test[i].crc, test_buf + | ||
1004 | test[i].start, test[i].length)) | ||
1005 | errors++; | ||
1006 | } | ||
1007 | nsec = ktime_get_ns() - nsec; | ||
1008 | |||
1009 | local_irq_restore(flags); | ||
1010 | local_irq_enable(); | ||
1011 | |||
1012 | pr_info("crc32c: CRC_LE_BITS = %d\n", CRC_LE_BITS); | ||
1013 | |||
1014 | if (errors) | ||
1015 | pr_warn("crc32c: %d self tests failed\n", errors); | ||
1016 | else { | ||
1017 | pr_info("crc32c: self tests passed, processed %d bytes in %lld nsec\n", | ||
1018 | bytes, nsec); | ||
1019 | } | ||
1020 | |||
1021 | return 0; | ||
1022 | } | ||
1023 | |||
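The selftest above follows a standard microbenchmarking pattern: run every case once to warm the caches (accumulating into a static variable so the compiler cannot discard the work), disable interrupts to cut scheduler and IRQ noise, then time only the second pass with a monotonic nanosecond clock. A minimal userspace sketch of the same pattern; checksum() here is an illustrative stand-in workload, not the kernel's __crc32c_le():

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define N_CASES 100

/* stand-in workload; illustrative only, not the kernel's __crc32c_le() */
static uint32_t checksum(uint32_t x)
{
	x ^= x >> 16;
	x *= 0x45d9f3bu;
	x ^= x >> 13;
	return x;
}

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

int main(void)
{
	volatile uint32_t sink = 0;	/* plays the role of the static crc */
	uint64_t t0, t1;
	int i;

	for (i = 0; i < N_CASES; i++)	/* pre-warm the cache */
		sink ^= checksum(i);

	t0 = now_ns();			/* time only the second pass */
	for (i = 0; i < N_CASES; i++)
		sink ^= checksum(i);
	t1 = now_ns();

	printf("processed %d calls in %llu ns\n",
	       N_CASES, (unsigned long long)(t1 - t0));
	return 0;
}

Userspace cannot disable interrupts; pinning the thread to one CPU and raising its priority is the closest analogue to the local_irq_save() used above.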
1024 | static int __init crc32c_combine_test(void) | ||
1025 | { | ||
1026 | int i, j; | ||
1027 | int errors = 0, runs = 0; | ||
1028 | |||
1029 | for (i = 0; i < 10; i++) { | ||
1030 | u32 crc_full; | ||
1031 | |||
1032 | crc_full = __crc32c_le(test[i].crc, test_buf + test[i].start, | ||
1033 | test[i].length); | ||
1034 | for (j = 0; j <= test[i].length; ++j) { | ||
1035 | u32 crc1, crc2; | ||
1036 | u32 len1 = j, len2 = test[i].length - j; | ||
1037 | |||
1038 | crc1 = __crc32c_le(test[i].crc, test_buf + | ||
1039 | test[i].start, len1); | ||
1040 | crc2 = __crc32c_le(0, test_buf + test[i].start + | ||
1041 | len1, len2); | ||
1042 | |||
1043 | if (!(crc_full == __crc32c_le_combine(crc1, crc2, len2) && | ||
1044 | crc_full == test[i].crc32c_le)) | ||
1045 | errors++; | ||
1046 | runs++; | ||
1047 | cond_resched(); | ||
1048 | } | ||
1049 | } | ||
1050 | |||
1051 | if (errors) | ||
1052 | pr_warn("crc32c_combine: %d/%d self tests failed\n", errors, runs); | ||
1053 | else | ||
1054 | pr_info("crc32c_combine: %d self tests passed\n", runs); | ||
1055 | |||
1056 | return 0; | ||
1057 | } | ||
1058 | |||
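crc32c_combine_test() above splits each test buffer at every possible point j and verifies that __crc32c_le_combine(crc1, crc2, len2) reproduces the CRC of the whole buffer. The identity it checks follows from the CRC register update being linear over GF(2): the CRC of A || B equals the CRC of A continued through len(B) zero bytes, xored with the CRC of B computed from seed 0. The kernel performs that zero-extension in logarithmic time; the byte-at-a-time version below is a naive, self-contained userspace sketch (crc32_le_bitwise() and crc32_combine_naive() are assumed helper names, and the crc32_le polynomial 0xedb88320 stands in for crc32c's Castagnoli polynomial, for which the same identity holds):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* minimal bitwise CRC-32, reflected poly 0xedb88320, no final inversion;
 * a stand-in for the kernel's table-driven crc32_le() */
static uint32_t crc32_le_bitwise(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
	}
	return crc;
}

/* naive combine: run crc1 through len2 zero bytes, then xor in crc2 */
static uint32_t crc32_combine_naive(uint32_t crc1, uint32_t crc2, size_t len2)
{
	static const uint8_t zero;

	while (len2--)
		crc1 = crc32_le_bitwise(crc1, &zero, 1);
	return crc1 ^ crc2;
}

int main(void)
{
	const uint8_t buf[] = "a buffer split at an arbitrary point";
	size_t len = sizeof(buf) - 1, j;
	uint32_t seed = 0x674bf11d;	/* any starting crc works */
	uint32_t full = crc32_le_bitwise(seed, buf, len);

	for (j = 0; j <= len; j++) {	/* every split point, as in the test */
		uint32_t crc1 = crc32_le_bitwise(seed, buf, j);
		uint32_t crc2 = crc32_le_bitwise(0, buf + j, len - j);

		if (crc32_combine_naive(crc1, crc2, len - j) != full)
			printf("mismatch at split %zu\n", j);
	}
	printf("full crc: %08x\n", full);
	return 0;
}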
1059 | static int __init crc32_test(void) | ||
1060 | { | ||
1061 | int i; | ||
1062 | int errors = 0; | ||
1063 | int bytes = 0; | ||
1064 | u64 nsec; | ||
1065 | unsigned long flags; | ||
1066 | |||
1067 | /* keep static to prevent cache warming code from | ||
1068 | * getting eliminated by the compiler */ | ||
1069 | static u32 crc; | ||
1070 | |||
1071 | /* pre-warm the cache */ | ||
1072 | for (i = 0; i < 100; i++) { | ||
1073 | bytes += 2*test[i].length; | ||
1074 | |||
1075 | crc ^= crc32_le(test[i].crc, test_buf + | ||
1076 | test[i].start, test[i].length); | ||
1077 | |||
1078 | crc ^= crc32_be(test[i].crc, test_buf + | ||
1079 | test[i].start, test[i].length); | ||
1080 | } | ||
1081 | |||
1082 | /* reduce OS noise */ | ||
1083 | local_irq_save(flags); | ||
1084 | local_irq_disable(); | ||
1085 | |||
1086 | nsec = ktime_get_ns(); | ||
1087 | for (i = 0; i < 100; i++) { | ||
1088 | if (test[i].crc_le != crc32_le(test[i].crc, test_buf + | ||
1089 | test[i].start, test[i].length)) | ||
1090 | errors++; | ||
1091 | |||
1092 | if (test[i].crc_be != crc32_be(test[i].crc, test_buf + | ||
1093 | test[i].start, test[i].length)) | ||
1094 | errors++; | ||
1095 | } | ||
1096 | nsec = ktime_get_ns() - nsec; | ||
1097 | |||
1098 | local_irq_restore(flags); | ||
1099 | local_irq_enable(); | ||
1100 | |||
1101 | pr_info("crc32: CRC_LE_BITS = %d, CRC_BE_BITS = %d\n", | ||
1102 | CRC_LE_BITS, CRC_BE_BITS); | ||
1103 | |||
1104 | if (errors) | ||
1105 | pr_warn("crc32: %d self tests failed\n", errors); | ||
1106 | else { | ||
1107 | pr_info("crc32: self tests passed, processed %d bytes in %lld nsec\n", | ||
1108 | bytes, nsec); | ||
1109 | } | ||
1110 | |||
1111 | return 0; | ||
1112 | } | ||
1113 | |||
1114 | static int __init crc32_combine_test(void) | ||
1115 | { | ||
1116 | int i, j; | ||
1117 | int errors = 0, runs = 0; | ||
1118 | |||
1119 | for (i = 0; i < 10; i++) { | ||
1120 | u32 crc_full; | ||
1121 | |||
1122 | crc_full = crc32_le(test[i].crc, test_buf + test[i].start, | ||
1123 | test[i].length); | ||
1124 | for (j = 0; j <= test[i].length; ++j) { | ||
1125 | u32 crc1, crc2; | ||
1126 | u32 len1 = j, len2 = test[i].length - j; | ||
1127 | |||
1128 | crc1 = crc32_le(test[i].crc, test_buf + | ||
1129 | test[i].start, len1); | ||
1130 | crc2 = crc32_le(0, test_buf + test[i].start + | ||
1131 | len1, len2); | ||
1132 | |||
1133 | if (!(crc_full == crc32_le_combine(crc1, crc2, len2) && | ||
1134 | crc_full == test[i].crc_le)) | ||
1135 | errors++; | ||
1136 | runs++; | ||
1137 | cond_resched(); | ||
1138 | } | ||
1139 | } | ||
1140 | |||
1141 | if (errors) | ||
1142 | pr_warn("crc32_combine: %d/%d self tests failed\n", errors, runs); | ||
1143 | else | ||
1144 | pr_info("crc32_combine: %d self tests passed\n", runs); | ||
1145 | |||
1146 | return 0; | ||
1147 | } | ||
1148 | |||
1149 | static int __init crc32test_init(void) | ||
1150 | { | ||
1151 | crc32_test(); | ||
1152 | crc32c_test(); | ||
1153 | |||
1154 | crc32_combine_test(); | ||
1155 | crc32c_combine_test(); | ||
1156 | |||
1157 | return 0; | ||
1158 | } | ||
1159 | |||
1160 | static void __exit crc32_exit(void) | ||
1161 | { | ||
1162 | } | ||
1163 | |||
1164 | module_init(crc32test_init); | ||
1165 | module_exit(crc32_exit); | ||
1166 | #endif /* CONFIG_CRC32_SELFTEST */ | ||
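The empty exit handler above is what lets the selftest be loaded and unloaded as a module rather than running only as built-in init code. A minimal skeleton of the same pattern, under assumed mytest_* names rather than anything from this patch:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>

static int __init mytest_init(void)
{
	pr_info("mytest: running self tests\n");
	/* report failures via pr_warn() but still return 0, as
	 * crc32test_init() does, so loading never fails outright */
	return 0;
}

static void __exit mytest_exit(void)
{
	/* nothing to tear down; present so the module can be removed */
}

module_init(mytest_init);
module_exit(mytest_exit);
MODULE_LICENSE("GPL");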
diff --git a/lib/crc32test.c b/lib/crc32test.c new file mode 100644 index 000000000000..97d6a57cefcc --- /dev/null +++ b/lib/crc32test.c | |||
@@ -0,0 +1,856 @@ | |||
1 | /* | ||
2 | * Aug 8, 2011 Bob Pearson with help from Joakim Tjernlund and George Spelvin | ||
3 | * cleaned up code to current version of sparse and added the slicing-by-8 | ||
4 | * algorithm to the closely similar existing slicing-by-4 algorithm. | ||
5 | * | ||
6 | * Oct 15, 2000 Matt Domsch <Matt_Domsch@dell.com> | ||
7 | * Nicer crc32 functions/docs submitted by linux@horizon.com. Thanks! | ||
8 | * Code was from the public domain, copyright abandoned. Code was | ||
9 | * subsequently included in the kernel, thus was re-licensed under the | ||
10 | * GNU GPL v2. | ||
11 | * | ||
12 | * Oct 12, 2000 Matt Domsch <Matt_Domsch@dell.com> | ||
13 | * Same crc32 function was used in 5 other places in the kernel. | ||
14 | * I made one version, and deleted the others. | ||
15 | * There are various incantations of crc32(). Some use a seed of 0 or ~0. | ||
16 | * Some xor at the end with ~0. The generic crc32() function takes | ||
17 | * seed as an argument, and doesn't xor at the end. Then individual | ||
18 | * users can do whatever they need. | ||
19 | * drivers/net/smc9194.c uses seed ~0, doesn't xor with ~0. | ||
20 | * fs/jffs2 uses seed 0, doesn't xor with ~0. | ||
21 | * fs/partitions/efi.c uses seed ~0, xor's with ~0. | ||
22 | * | ||
23 | * This source code is licensed under the GNU General Public License, | ||
24 | * Version 2. See the file COPYING for more details. | ||
25 | */ | ||
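The three call-site conventions listed above differ only in seed and final inversion; the underlying CRC is the same. A hedged userspace sketch making the variants concrete, with crc32_le_bitwise() as an assumed minimal stand-in for the kernel's table-driven crc32_le():

#include <stdint.h>
#include <stdio.h>

static uint32_t crc32_le_bitwise(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
	}
	return crc;
}

int main(void)
{
	const uint8_t *s = (const uint8_t *)"123456789";
	size_t n = 9;

	/* fs/jffs2 style: seed 0, no final xor */
	printf("seed 0,  no xor : %08x\n", crc32_le_bitwise(0, s, n));
	/* drivers/net/smc9194.c style: seed ~0, no final xor */
	printf("seed ~0, no xor : %08x\n", crc32_le_bitwise(~0u, s, n));
	/* fs/partitions/efi.c style: seed ~0, xor with ~0; for "123456789"
	 * this yields 0xcbf43926, the customary CRC-32 check value */
	printf("seed ~0, xor ~0 : %08x\n", ~crc32_le_bitwise(~0u, s, n));
	return 0;
}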
26 | |||
27 | #include <linux/crc32.h> | ||
28 | #include <linux/module.h> | ||
29 | #include <linux/sched.h> | ||
30 | |||
31 | #include "crc32defs.h" | ||
32 | |||
33 | /* 4096 random bytes */ | ||
34 | static u8 const __aligned(8) test_buf[] __initconst = | ||
35 | { | ||
36 | 0x5b, 0x85, 0x21, 0xcb, 0x09, 0x68, 0x7d, 0x30, | ||
37 | 0xc7, 0x69, 0xd7, 0x30, 0x92, 0xde, 0x59, 0xe4, | ||
38 | 0xc9, 0x6e, 0x8b, 0xdb, 0x98, 0x6b, 0xaa, 0x60, | ||
39 | 0xa8, 0xb5, 0xbc, 0x6c, 0xa9, 0xb1, 0x5b, 0x2c, | ||
40 | 0xea, 0xb4, 0x92, 0x6a, 0x3f, 0x79, 0x91, 0xe4, | ||
41 | 0xe9, 0x70, 0x51, 0x8c, 0x7f, 0x95, 0x6f, 0x1a, | ||
42 | 0x56, 0xa1, 0x5c, 0x27, 0x03, 0x67, 0x9f, 0x3a, | ||
43 | 0xe2, 0x31, 0x11, 0x29, 0x6b, 0x98, 0xfc, 0xc4, | ||
44 | 0x53, 0x24, 0xc5, 0x8b, 0xce, 0x47, 0xb2, 0xb9, | ||
45 | 0x32, 0xcb, 0xc1, 0xd0, 0x03, 0x57, 0x4e, 0xd4, | ||
46 | 0xe9, 0x3c, 0xa1, 0x63, 0xcf, 0x12, 0x0e, 0xca, | ||
47 | 0xe1, 0x13, 0xd1, 0x93, 0xa6, 0x88, 0x5c, 0x61, | ||
48 | 0x5b, 0xbb, 0xf0, 0x19, 0x46, 0xb4, 0xcf, 0x9e, | ||
49 | 0xb6, 0x6b, 0x4c, 0x3a, 0xcf, 0x60, 0xf9, 0x7a, | ||
50 | 0x8d, 0x07, 0x63, 0xdb, 0x40, 0xe9, 0x0b, 0x6f, | ||
51 | 0xad, 0x97, 0xf1, 0xed, 0xd0, 0x1e, 0x26, 0xfd, | ||
52 | 0xbf, 0xb7, 0xc8, 0x04, 0x94, 0xf8, 0x8b, 0x8c, | ||
53 | 0xf1, 0xab, 0x7a, 0xd4, 0xdd, 0xf3, 0xe8, 0x88, | ||
54 | 0xc3, 0xed, 0x17, 0x8a, 0x9b, 0x40, 0x0d, 0x53, | ||
55 | 0x62, 0x12, 0x03, 0x5f, 0x1b, 0x35, 0x32, 0x1f, | ||
56 | 0xb4, 0x7b, 0x93, 0x78, 0x0d, 0xdb, 0xce, 0xa4, | ||
57 | 0xc0, 0x47, 0xd5, 0xbf, 0x68, 0xe8, 0x5d, 0x74, | ||
58 | 0x8f, 0x8e, 0x75, 0x1c, 0xb2, 0x4f, 0x9a, 0x60, | ||
59 | 0xd1, 0xbe, 0x10, 0xf4, 0x5c, 0xa1, 0x53, 0x09, | ||
60 | 0xa5, 0xe0, 0x09, 0x54, 0x85, 0x5c, 0xdc, 0x07, | ||
61 | 0xe7, 0x21, 0x69, 0x7b, 0x8a, 0xfd, 0x90, 0xf1, | ||
62 | 0x22, 0xd0, 0xb4, 0x36, 0x28, 0xe6, 0xb8, 0x0f, | ||
63 | 0x39, 0xde, 0xc8, 0xf3, 0x86, 0x60, 0x34, 0xd2, | ||
64 | 0x5e, 0xdf, 0xfd, 0xcf, 0x0f, 0xa9, 0x65, 0xf0, | ||
65 | 0xd5, 0x4d, 0x96, 0x40, 0xe3, 0xdf, 0x3f, 0x95, | ||
66 | 0x5a, 0x39, 0x19, 0x93, 0xf4, 0x75, 0xce, 0x22, | ||
67 | 0x00, 0x1c, 0x93, 0xe2, 0x03, 0x66, 0xf4, 0x93, | ||
68 | 0x73, 0x86, 0x81, 0x8e, 0x29, 0x44, 0x48, 0x86, | ||
69 | 0x61, 0x7c, 0x48, 0xa3, 0x43, 0xd2, 0x9c, 0x8d, | ||
70 | 0xd4, 0x95, 0xdd, 0xe1, 0x22, 0x89, 0x3a, 0x40, | ||
71 | 0x4c, 0x1b, 0x8a, 0x04, 0xa8, 0x09, 0x69, 0x8b, | ||
72 | 0xea, 0xc6, 0x55, 0x8e, 0x57, 0xe6, 0x64, 0x35, | ||
73 | 0xf0, 0xc7, 0x16, 0x9f, 0x5d, 0x5e, 0x86, 0x40, | ||
74 | 0x46, 0xbb, 0xe5, 0x45, 0x88, 0xfe, 0xc9, 0x63, | ||
75 | 0x15, 0xfb, 0xf5, 0xbd, 0x71, 0x61, 0xeb, 0x7b, | ||
76 | 0x78, 0x70, 0x07, 0x31, 0x03, 0x9f, 0xb2, 0xc8, | ||
77 | 0xa7, 0xab, 0x47, 0xfd, 0xdf, 0xa0, 0x78, 0x72, | ||
78 | 0xa4, 0x2a, 0xe4, 0xb6, 0xba, 0xc0, 0x1e, 0x86, | ||
79 | 0x71, 0xe6, 0x3d, 0x18, 0x37, 0x70, 0xe6, 0xff, | ||
80 | 0xe0, 0xbc, 0x0b, 0x22, 0xa0, 0x1f, 0xd3, 0xed, | ||
81 | 0xa2, 0x55, 0x39, 0xab, 0xa8, 0x13, 0x73, 0x7c, | ||
82 | 0x3f, 0xb2, 0xd6, 0x19, 0xac, 0xff, 0x99, 0xed, | ||
83 | 0xe8, 0xe6, 0xa6, 0x22, 0xe3, 0x9c, 0xf1, 0x30, | ||
84 | 0xdc, 0x01, 0x0a, 0x56, 0xfa, 0xe4, 0xc9, 0x99, | ||
85 | 0xdd, 0xa8, 0xd8, 0xda, 0x35, 0x51, 0x73, 0xb4, | ||
86 | 0x40, 0x86, 0x85, 0xdb, 0x5c, 0xd5, 0x85, 0x80, | ||
87 | 0x14, 0x9c, 0xfd, 0x98, 0xa9, 0x82, 0xc5, 0x37, | ||
88 | 0xff, 0x32, 0x5d, 0xd0, 0x0b, 0xfa, 0xdc, 0x04, | ||
89 | 0x5e, 0x09, 0xd2, 0xca, 0x17, 0x4b, 0x1a, 0x8e, | ||
90 | 0x15, 0xe1, 0xcc, 0x4e, 0x52, 0x88, 0x35, 0xbd, | ||
91 | 0x48, 0xfe, 0x15, 0xa0, 0x91, 0xfd, 0x7e, 0x6c, | ||
92 | 0x0e, 0x5d, 0x79, 0x1b, 0x81, 0x79, 0xd2, 0x09, | ||
93 | 0x34, 0x70, 0x3d, 0x81, 0xec, 0xf6, 0x24, 0xbb, | ||
94 | 0xfb, 0xf1, 0x7b, 0xdf, 0x54, 0xea, 0x80, 0x9b, | ||
95 | 0xc7, 0x99, 0x9e, 0xbd, 0x16, 0x78, 0x12, 0x53, | ||
96 | 0x5e, 0x01, 0xa7, 0x4e, 0xbd, 0x67, 0xe1, 0x9b, | ||
97 | 0x4c, 0x0e, 0x61, 0x45, 0x97, 0xd2, 0xf0, 0x0f, | ||
98 | 0xfe, 0x15, 0x08, 0xb7, 0x11, 0x4c, 0xe7, 0xff, | ||
99 | 0x81, 0x53, 0xff, 0x91, 0x25, 0x38, 0x7e, 0x40, | ||
100 | 0x94, 0xe5, 0xe0, 0xad, 0xe6, 0xd9, 0x79, 0xb6, | ||
101 | 0x92, 0xc9, 0xfc, 0xde, 0xc3, 0x1a, 0x23, 0xbb, | ||
102 | 0xdd, 0xc8, 0x51, 0x0c, 0x3a, 0x72, 0xfa, 0x73, | ||
103 | 0x6f, 0xb7, 0xee, 0x61, 0x39, 0x03, 0x01, 0x3f, | ||
104 | 0x7f, 0x94, 0x2e, 0x2e, 0xba, 0x3a, 0xbb, 0xb4, | ||
105 | 0xfa, 0x6a, 0x17, 0xfe, 0xea, 0xef, 0x5e, 0x66, | ||
106 | 0x97, 0x3f, 0x32, 0x3d, 0xd7, 0x3e, 0xb1, 0xf1, | ||
107 | 0x6c, 0x14, 0x4c, 0xfd, 0x37, 0xd3, 0x38, 0x80, | ||
108 | 0xfb, 0xde, 0xa6, 0x24, 0x1e, 0xc8, 0xca, 0x7f, | ||
109 | 0x3a, 0x93, 0xd8, 0x8b, 0x18, 0x13, 0xb2, 0xe5, | ||
110 | 0xe4, 0x93, 0x05, 0x53, 0x4f, 0x84, 0x66, 0xa7, | ||
111 | 0x58, 0x5c, 0x7b, 0x86, 0x52, 0x6d, 0x0d, 0xce, | ||
112 | 0xa4, 0x30, 0x7d, 0xb6, 0x18, 0x9f, 0xeb, 0xff, | ||
113 | 0x22, 0xbb, 0x72, 0x29, 0xb9, 0x44, 0x0b, 0x48, | ||
114 | 0x1e, 0x84, 0x71, 0x81, 0xe3, 0x6d, 0x73, 0x26, | ||
115 | 0x92, 0xb4, 0x4d, 0x2a, 0x29, 0xb8, 0x1f, 0x72, | ||
116 | 0xed, 0xd0, 0xe1, 0x64, 0x77, 0xea, 0x8e, 0x88, | ||
117 | 0x0f, 0xef, 0x3f, 0xb1, 0x3b, 0xad, 0xf9, 0xc9, | ||
118 | 0x8b, 0xd0, 0xac, 0xc6, 0xcc, 0xa9, 0x40, 0xcc, | ||
119 | 0x76, 0xf6, 0x3b, 0x53, 0xb5, 0x88, 0xcb, 0xc8, | ||
120 | 0x37, 0xf1, 0xa2, 0xba, 0x23, 0x15, 0x99, 0x09, | ||
121 | 0xcc, 0xe7, 0x7a, 0x3b, 0x37, 0xf7, 0x58, 0xc8, | ||
122 | 0x46, 0x8c, 0x2b, 0x2f, 0x4e, 0x0e, 0xa6, 0x5c, | ||
123 | 0xea, 0x85, 0x55, 0xba, 0x02, 0x0e, 0x0e, 0x48, | ||
124 | 0xbc, 0xe1, 0xb1, 0x01, 0x35, 0x79, 0x13, 0x3d, | ||
125 | 0x1b, 0xc0, 0x53, 0x68, 0x11, 0xe7, 0x95, 0x0f, | ||
126 | 0x9d, 0x3f, 0x4c, 0x47, 0x7b, 0x4d, 0x1c, 0xae, | ||
127 | 0x50, 0x9b, 0xcb, 0xdd, 0x05, 0x8d, 0x9a, 0x97, | ||
128 | 0xfd, 0x8c, 0xef, 0x0c, 0x1d, 0x67, 0x73, 0xa8, | ||
129 | 0x28, 0x36, 0xd5, 0xb6, 0x92, 0x33, 0x40, 0x75, | ||
130 | 0x0b, 0x51, 0xc3, 0x64, 0xba, 0x1d, 0xc2, 0xcc, | ||
131 | 0xee, 0x7d, 0x54, 0x0f, 0x27, 0x69, 0xa7, 0x27, | ||
132 | 0x63, 0x30, 0x29, 0xd9, 0xc8, 0x84, 0xd8, 0xdf, | ||
133 | 0x9f, 0x68, 0x8d, 0x04, 0xca, 0xa6, 0xc5, 0xc7, | ||
134 | 0x7a, 0x5c, 0xc8, 0xd1, 0xcb, 0x4a, 0xec, 0xd0, | ||
135 | 0xd8, 0x20, 0x69, 0xc5, 0x17, 0xcd, 0x78, 0xc8, | ||
136 | 0x75, 0x23, 0x30, 0x69, 0xc9, 0xd4, 0xea, 0x5c, | ||
137 | 0x4f, 0x6b, 0x86, 0x3f, 0x8b, 0xfe, 0xee, 0x44, | ||
138 | 0xc9, 0x7c, 0xb7, 0xdd, 0x3e, 0xe5, 0xec, 0x54, | ||
139 | 0x03, 0x3e, 0xaa, 0x82, 0xc6, 0xdf, 0xb2, 0x38, | ||
140 | 0x0e, 0x5d, 0xb3, 0x88, 0xd9, 0xd3, 0x69, 0x5f, | ||
141 | 0x8f, 0x70, 0x8a, 0x7e, 0x11, 0xd9, 0x1e, 0x7b, | ||
142 | 0x38, 0xf1, 0x42, 0x1a, 0xc0, 0x35, 0xf5, 0xc7, | ||
143 | 0x36, 0x85, 0xf5, 0xf7, 0xb8, 0x7e, 0xc7, 0xef, | ||
144 | 0x18, 0xf1, 0x63, 0xd6, 0x7a, 0xc6, 0xc9, 0x0e, | ||
145 | 0x4d, 0x69, 0x4f, 0x84, 0xef, 0x26, 0x41, 0x0c, | ||
146 | 0xec, 0xc7, 0xe0, 0x7e, 0x3c, 0x67, 0x01, 0x4c, | ||
147 | 0x62, 0x1a, 0x20, 0x6f, 0xee, 0x47, 0x4d, 0xc0, | ||
148 | 0x99, 0x13, 0x8d, 0x91, 0x4a, 0x26, 0xd4, 0x37, | ||
149 | 0x28, 0x90, 0x58, 0x75, 0x66, 0x2b, 0x0a, 0xdf, | ||
150 | 0xda, 0xee, 0x92, 0x25, 0x90, 0x62, 0x39, 0x9e, | ||
151 | 0x44, 0x98, 0xad, 0xc1, 0x88, 0xed, 0xe4, 0xb4, | ||
152 | 0xaf, 0xf5, 0x8c, 0x9b, 0x48, 0x4d, 0x56, 0x60, | ||
153 | 0x97, 0x0f, 0x61, 0x59, 0x9e, 0xa6, 0x27, 0xfe, | ||
154 | 0xc1, 0x91, 0x15, 0x38, 0xb8, 0x0f, 0xae, 0x61, | ||
155 | 0x7d, 0x26, 0x13, 0x5a, 0x73, 0xff, 0x1c, 0xa3, | ||
156 | 0x61, 0x04, 0x58, 0x48, 0x55, 0x44, 0x11, 0xfe, | ||
157 | 0x15, 0xca, 0xc3, 0xbd, 0xca, 0xc5, 0xb4, 0x40, | ||
158 | 0x5d, 0x1b, 0x7f, 0x39, 0xb5, 0x9c, 0x35, 0xec, | ||
159 | 0x61, 0x15, 0x32, 0x32, 0xb8, 0x4e, 0x40, 0x9f, | ||
160 | 0x17, 0x1f, 0x0a, 0x4d, 0xa9, 0x91, 0xef, 0xb7, | ||
161 | 0xb0, 0xeb, 0xc2, 0x83, 0x9a, 0x6c, 0xd2, 0x79, | ||
162 | 0x43, 0x78, 0x5e, 0x2f, 0xe5, 0xdd, 0x1a, 0x3c, | ||
163 | 0x45, 0xab, 0x29, 0x40, 0x3a, 0x37, 0x5b, 0x6f, | ||
164 | 0xd7, 0xfc, 0x48, 0x64, 0x3c, 0x49, 0xfb, 0x21, | ||
165 | 0xbe, 0xc3, 0xff, 0x07, 0xfb, 0x17, 0xe9, 0xc9, | ||
166 | 0x0c, 0x4c, 0x5c, 0x15, 0x9e, 0x8e, 0x22, 0x30, | ||
167 | 0x0a, 0xde, 0x48, 0x7f, 0xdb, 0x0d, 0xd1, 0x2b, | ||
168 | 0x87, 0x38, 0x9e, 0xcc, 0x5a, 0x01, 0x16, 0xee, | ||
169 | 0x75, 0x49, 0x0d, 0x30, 0x01, 0x34, 0x6a, 0xb6, | ||
170 | 0x9a, 0x5a, 0x2a, 0xec, 0xbb, 0x48, 0xac, 0xd3, | ||
171 | 0x77, 0x83, 0xd8, 0x08, 0x86, 0x4f, 0x48, 0x09, | ||
172 | 0x29, 0x41, 0x79, 0xa1, 0x03, 0x12, 0xc4, 0xcd, | ||
173 | 0x90, 0x55, 0x47, 0x66, 0x74, 0x9a, 0xcc, 0x4f, | ||
174 | 0x35, 0x8c, 0xd6, 0x98, 0xef, 0xeb, 0x45, 0xb9, | ||
175 | 0x9a, 0x26, 0x2f, 0x39, 0xa5, 0x70, 0x6d, 0xfc, | ||
176 | 0xb4, 0x51, 0xee, 0xf4, 0x9c, 0xe7, 0x38, 0x59, | ||
177 | 0xad, 0xf4, 0xbc, 0x46, 0xff, 0x46, 0x8e, 0x60, | ||
178 | 0x9c, 0xa3, 0x60, 0x1d, 0xf8, 0x26, 0x72, 0xf5, | ||
179 | 0x72, 0x9d, 0x68, 0x80, 0x04, 0xf6, 0x0b, 0xa1, | ||
180 | 0x0a, 0xd5, 0xa7, 0x82, 0x3a, 0x3e, 0x47, 0xa8, | ||
181 | 0x5a, 0xde, 0x59, 0x4f, 0x7b, 0x07, 0xb3, 0xe9, | ||
182 | 0x24, 0x19, 0x3d, 0x34, 0x05, 0xec, 0xf1, 0xab, | ||
183 | 0x6e, 0x64, 0x8f, 0xd3, 0xe6, 0x41, 0x86, 0x80, | ||
184 | 0x70, 0xe3, 0x8d, 0x60, 0x9c, 0x34, 0x25, 0x01, | ||
185 | 0x07, 0x4d, 0x19, 0x41, 0x4e, 0x3d, 0x5c, 0x7e, | ||
186 | 0xa8, 0xf5, 0xcc, 0xd5, 0x7b, 0xe2, 0x7d, 0x3d, | ||
187 | 0x49, 0x86, 0x7d, 0x07, 0xb7, 0x10, 0xe3, 0x35, | ||
188 | 0xb8, 0x84, 0x6d, 0x76, 0xab, 0x17, 0xc6, 0x38, | ||
189 | 0xb4, 0xd3, 0x28, 0x57, 0xad, 0xd3, 0x88, 0x5a, | ||
190 | 0xda, 0xea, 0xc8, 0x94, 0xcc, 0x37, 0x19, 0xac, | ||
191 | 0x9c, 0x9f, 0x4b, 0x00, 0x15, 0xc0, 0xc8, 0xca, | ||
192 | 0x1f, 0x15, 0xaa, 0xe0, 0xdb, 0xf9, 0x2f, 0x57, | ||
193 | 0x1b, 0x24, 0xc7, 0x6f, 0x76, 0x29, 0xfb, 0xed, | ||
194 | 0x25, 0x0d, 0xc0, 0xfe, 0xbd, 0x5a, 0xbf, 0x20, | ||
195 | 0x08, 0x51, 0x05, 0xec, 0x71, 0xa3, 0xbf, 0xef, | ||
196 | 0x5e, 0x99, 0x75, 0xdb, 0x3c, 0x5f, 0x9a, 0x8c, | ||
197 | 0xbb, 0x19, 0x5c, 0x0e, 0x93, 0x19, 0xf8, 0x6a, | ||
198 | 0xbc, 0xf2, 0x12, 0x54, 0x2f, 0xcb, 0x28, 0x64, | ||
199 | 0x88, 0xb3, 0x92, 0x0d, 0x96, 0xd1, 0xa6, 0xe4, | ||
200 | 0x1f, 0xf1, 0x4d, 0xa4, 0xab, 0x1c, 0xee, 0x54, | ||
201 | 0xf2, 0xad, 0x29, 0x6d, 0x32, 0x37, 0xb2, 0x16, | ||
202 | 0x77, 0x5c, 0xdc, 0x2e, 0x54, 0xec, 0x75, 0x26, | ||
203 | 0xc6, 0x36, 0xd9, 0x17, 0x2c, 0xf1, 0x7a, 0xdc, | ||
204 | 0x4b, 0xf1, 0xe2, 0xd9, 0x95, 0xba, 0xac, 0x87, | ||
205 | 0xc1, 0xf3, 0x8e, 0x58, 0x08, 0xd8, 0x87, 0x60, | ||
206 | 0xc9, 0xee, 0x6a, 0xde, 0xa4, 0xd2, 0xfc, 0x0d, | ||
207 | 0xe5, 0x36, 0xc4, 0x5c, 0x52, 0xb3, 0x07, 0x54, | ||
208 | 0x65, 0x24, 0xc1, 0xb1, 0xd1, 0xb1, 0x53, 0x13, | ||
209 | 0x31, 0x79, 0x7f, 0x05, 0x76, 0xeb, 0x37, 0x59, | ||
210 | 0x15, 0x2b, 0xd1, 0x3f, 0xac, 0x08, 0x97, 0xeb, | ||
211 | 0x91, 0x98, 0xdf, 0x6c, 0x09, 0x0d, 0x04, 0x9f, | ||
212 | 0xdc, 0x3b, 0x0e, 0x60, 0x68, 0x47, 0x23, 0x15, | ||
213 | 0x16, 0xc6, 0x0b, 0x35, 0xf8, 0x77, 0xa2, 0x78, | ||
214 | 0x50, 0xd4, 0x64, 0x22, 0x33, 0xff, 0xfb, 0x93, | ||
215 | 0x71, 0x46, 0x50, 0x39, 0x1b, 0x9c, 0xea, 0x4e, | ||
216 | 0x8d, 0x0c, 0x37, 0xe5, 0x5c, 0x51, 0x3a, 0x31, | ||
217 | 0xb2, 0x85, 0x84, 0x3f, 0x41, 0xee, 0xa2, 0xc1, | ||
218 | 0xc6, 0x13, 0x3b, 0x54, 0x28, 0xd2, 0x18, 0x37, | ||
219 | 0xcc, 0x46, 0x9f, 0x6a, 0x91, 0x3d, 0x5a, 0x15, | ||
220 | 0x3c, 0x89, 0xa3, 0x61, 0x06, 0x7d, 0x2e, 0x78, | ||
221 | 0xbe, 0x7d, 0x40, 0xba, 0x2f, 0x95, 0xb1, 0x2f, | ||
222 | 0x87, 0x3b, 0x8a, 0xbe, 0x6a, 0xf4, 0xc2, 0x31, | ||
223 | 0x74, 0xee, 0x91, 0xe0, 0x23, 0xaa, 0x5d, 0x7f, | ||
224 | 0xdd, 0xf0, 0x44, 0x8c, 0x0b, 0x59, 0x2b, 0xfc, | ||
225 | 0x48, 0x3a, 0xdf, 0x07, 0x05, 0x38, 0x6c, 0xc9, | ||
226 | 0xeb, 0x18, 0x24, 0x68, 0x8d, 0x58, 0x98, 0xd3, | ||
227 | 0x31, 0xa3, 0xe4, 0x70, 0x59, 0xb1, 0x21, 0xbe, | ||
228 | 0x7e, 0x65, 0x7d, 0xb8, 0x04, 0xab, 0xf6, 0xe4, | ||
229 | 0xd7, 0xda, 0xec, 0x09, 0x8f, 0xda, 0x6d, 0x24, | ||
230 | 0x07, 0xcc, 0x29, 0x17, 0x05, 0x78, 0x1a, 0xc1, | ||
231 | 0xb1, 0xce, 0xfc, 0xaa, 0x2d, 0xe7, 0xcc, 0x85, | ||
232 | 0x84, 0x84, 0x03, 0x2a, 0x0c, 0x3f, 0xa9, 0xf8, | ||
233 | 0xfd, 0x84, 0x53, 0x59, 0x5c, 0xf0, 0xd4, 0x09, | ||
234 | 0xf0, 0xd2, 0x6c, 0x32, 0x03, 0xb0, 0xa0, 0x8c, | ||
235 | 0x52, 0xeb, 0x23, 0x91, 0x88, 0x43, 0x13, 0x46, | ||
236 | 0xf6, 0x1e, 0xb4, 0x1b, 0xf5, 0x8e, 0x3a, 0xb5, | ||
237 | 0x3d, 0x00, 0xf6, 0xe5, 0x08, 0x3d, 0x5f, 0x39, | ||
238 | 0xd3, 0x21, 0x69, 0xbc, 0x03, 0x22, 0x3a, 0xd2, | ||
239 | 0x5c, 0x84, 0xf8, 0x15, 0xc4, 0x80, 0x0b, 0xbc, | ||
240 | 0x29, 0x3c, 0xf3, 0x95, 0x98, 0xcd, 0x8f, 0x35, | ||
241 | 0xbc, 0xa5, 0x3e, 0xfc, 0xd4, 0x13, 0x9e, 0xde, | ||
242 | 0x4f, 0xce, 0x71, 0x9d, 0x09, 0xad, 0xf2, 0x80, | ||
243 | 0x6b, 0x65, 0x7f, 0x03, 0x00, 0x14, 0x7c, 0x15, | ||
244 | 0x85, 0x40, 0x6d, 0x70, 0xea, 0xdc, 0xb3, 0x63, | ||
245 | 0x35, 0x4f, 0x4d, 0xe0, 0xd9, 0xd5, 0x3c, 0x58, | ||
246 | 0x56, 0x23, 0x80, 0xe2, 0x36, 0xdd, 0x75, 0x1d, | ||
247 | 0x94, 0x11, 0x41, 0x8e, 0xe0, 0x81, 0x8e, 0xcf, | ||
248 | 0xe0, 0xe5, 0xf6, 0xde, 0xd1, 0xe7, 0x04, 0x12, | ||
249 | 0x79, 0x92, 0x2b, 0x71, 0x2a, 0x79, 0x8b, 0x7c, | ||
250 | 0x44, 0x79, 0x16, 0x30, 0x4e, 0xf4, 0xf6, 0x9b, | ||
251 | 0xb7, 0x40, 0xa3, 0x5a, 0xa7, 0x69, 0x3e, 0xc1, | ||
252 | 0x3a, 0x04, 0xd0, 0x88, 0xa0, 0x3b, 0xdd, 0xc6, | ||
253 | 0x9e, 0x7e, 0x1e, 0x1e, 0x8f, 0x44, 0xf7, 0x73, | ||
254 | 0x67, 0x1e, 0x1a, 0x78, 0xfa, 0x62, 0xf4, 0xa9, | ||
255 | 0xa8, 0xc6, 0x5b, 0xb8, 0xfa, 0x06, 0x7d, 0x5e, | ||
256 | 0x38, 0x1c, 0x9a, 0x39, 0xe9, 0x39, 0x98, 0x22, | ||
257 | 0x0b, 0xa7, 0xac, 0x0b, 0xf3, 0xbc, 0xf1, 0xeb, | ||
258 | 0x8c, 0x81, 0xe3, 0x48, 0x8a, 0xed, 0x42, 0xc2, | ||
259 | 0x38, 0xcf, 0x3e, 0xda, 0xd2, 0x89, 0x8d, 0x9c, | ||
260 | 0x53, 0xb5, 0x2f, 0x41, 0x01, 0x26, 0x84, 0x9c, | ||
261 | 0xa3, 0x56, 0xf6, 0x49, 0xc7, 0xd4, 0x9f, 0x93, | ||
262 | 0x1b, 0x96, 0x49, 0x5e, 0xad, 0xb3, 0x84, 0x1f, | ||
263 | 0x3c, 0xa4, 0xe0, 0x9b, 0xd1, 0x90, 0xbc, 0x38, | ||
264 | 0x6c, 0xdd, 0x95, 0x4d, 0x9d, 0xb1, 0x71, 0x57, | ||
265 | 0x2d, 0x34, 0xe8, 0xb8, 0x42, 0xc7, 0x99, 0x03, | ||
266 | 0xc7, 0x07, 0x30, 0x65, 0x91, 0x55, 0xd5, 0x90, | ||
267 | 0x70, 0x97, 0x37, 0x68, 0xd4, 0x11, 0xf9, 0xe8, | ||
268 | 0xce, 0xec, 0xdc, 0x34, 0xd5, 0xd3, 0xb7, 0xc4, | ||
269 | 0xb8, 0x97, 0x05, 0x92, 0xad, 0xf8, 0xe2, 0x36, | ||
270 | 0x64, 0x41, 0xc9, 0xc5, 0x41, 0x77, 0x52, 0xd7, | ||
271 | 0x2c, 0xa5, 0x24, 0x2f, 0xd9, 0x34, 0x0b, 0x47, | ||
272 | 0x35, 0xa7, 0x28, 0x8b, 0xc5, 0xcd, 0xe9, 0x46, | ||
273 | 0xac, 0x39, 0x94, 0x3c, 0x10, 0xc6, 0x29, 0x73, | ||
274 | 0x0e, 0x0e, 0x5d, 0xe0, 0x71, 0x03, 0x8a, 0x72, | ||
275 | 0x0e, 0x26, 0xb0, 0x7d, 0x84, 0xed, 0x95, 0x23, | ||
276 | 0x49, 0x5a, 0x45, 0x83, 0x45, 0x60, 0x11, 0x4a, | ||
277 | 0x46, 0x31, 0xd4, 0xd8, 0x16, 0x54, 0x98, 0x58, | ||
278 | 0xed, 0x6d, 0xcc, 0x5d, 0xd6, 0x50, 0x61, 0x9f, | ||
279 | 0x9d, 0xc5, 0x3e, 0x9d, 0x32, 0x47, 0xde, 0x96, | ||
280 | 0xe1, 0x5d, 0xd8, 0xf8, 0xb4, 0x69, 0x6f, 0xb9, | ||
281 | 0x15, 0x90, 0x57, 0x7a, 0xf6, 0xad, 0xb0, 0x5b, | ||
282 | 0xf5, 0xa6, 0x36, 0x94, 0xfd, 0x84, 0xce, 0x1c, | ||
283 | 0x0f, 0x4b, 0xd0, 0xc2, 0x5b, 0x6b, 0x56, 0xef, | ||
284 | 0x73, 0x93, 0x0b, 0xc3, 0xee, 0xd9, 0xcf, 0xd3, | ||
285 | 0xa4, 0x22, 0x58, 0xcd, 0x50, 0x6e, 0x65, 0xf4, | ||
286 | 0xe9, 0xb7, 0x71, 0xaf, 0x4b, 0xb3, 0xb6, 0x2f, | ||
287 | 0x0f, 0x0e, 0x3b, 0xc9, 0x85, 0x14, 0xf5, 0x17, | ||
288 | 0xe8, 0x7a, 0x3a, 0xbf, 0x5f, 0x5e, 0xf8, 0x18, | ||
289 | 0x48, 0xa6, 0x72, 0xab, 0x06, 0x95, 0xe9, 0xc8, | ||
290 | 0xa7, 0xf4, 0x32, 0x44, 0x04, 0x0c, 0x84, 0x98, | ||
291 | 0x73, 0xe3, 0x89, 0x8d, 0x5f, 0x7e, 0x4a, 0x42, | ||
292 | 0x8f, 0xc5, 0x28, 0xb1, 0x82, 0xef, 0x1c, 0x97, | ||
293 | 0x31, 0x3b, 0x4d, 0xe0, 0x0e, 0x10, 0x10, 0x97, | ||
294 | 0x93, 0x49, 0x78, 0x2f, 0x0d, 0x86, 0x8b, 0xa1, | ||
295 | 0x53, 0xa9, 0x81, 0x20, 0x79, 0xe7, 0x07, 0x77, | ||
296 | 0xb6, 0xac, 0x5e, 0xd2, 0x05, 0xcd, 0xe9, 0xdb, | ||
297 | 0x8a, 0x94, 0x82, 0x8a, 0x23, 0xb9, 0x3d, 0x1c, | ||
298 | 0xa9, 0x7d, 0x72, 0x4a, 0xed, 0x33, 0xa3, 0xdb, | ||
299 | 0x21, 0xa7, 0x86, 0x33, 0x45, 0xa5, 0xaa, 0x56, | ||
300 | 0x45, 0xb5, 0x83, 0x29, 0x40, 0x47, 0x79, 0x04, | ||
301 | 0x6e, 0xb9, 0x95, 0xd0, 0x81, 0x77, 0x2d, 0x48, | ||
302 | 0x1e, 0xfe, 0xc3, 0xc2, 0x1e, 0xe5, 0xf2, 0xbe, | ||
303 | 0xfd, 0x3b, 0x94, 0x9f, 0xc4, 0xc4, 0x26, 0x9d, | ||
304 | 0xe4, 0x66, 0x1e, 0x19, 0xee, 0x6c, 0x79, 0x97, | ||
305 | 0x11, 0x31, 0x4b, 0x0d, 0x01, 0xcb, 0xde, 0xa8, | ||
306 | 0xf6, 0x6d, 0x7c, 0x39, 0x46, 0x4e, 0x7e, 0x3f, | ||
307 | 0x94, 0x17, 0xdf, 0xa1, 0x7d, 0xd9, 0x1c, 0x8e, | ||
308 | 0xbc, 0x7d, 0x33, 0x7d, 0xe3, 0x12, 0x40, 0xca, | ||
309 | 0xab, 0x37, 0x11, 0x46, 0xd4, 0xae, 0xef, 0x44, | ||
310 | 0xa2, 0xb3, 0x6a, 0x66, 0x0e, 0x0c, 0x90, 0x7f, | ||
311 | 0xdf, 0x5c, 0x66, 0x5f, 0xf2, 0x94, 0x9f, 0xa6, | ||
312 | 0x73, 0x4f, 0xeb, 0x0d, 0xad, 0xbf, 0xc0, 0x63, | ||
313 | 0x5c, 0xdc, 0x46, 0x51, 0xe8, 0x8e, 0x90, 0x19, | ||
314 | 0xa8, 0xa4, 0x3c, 0x91, 0x79, 0xfa, 0x7e, 0x58, | ||
315 | 0x85, 0x13, 0x55, 0xc5, 0x19, 0x82, 0x37, 0x1b, | ||
316 | 0x0a, 0x02, 0x1f, 0x99, 0x6b, 0x18, 0xf1, 0x28, | ||
317 | 0x08, 0xa2, 0x73, 0xb8, 0x0f, 0x2e, 0xcd, 0xbf, | ||
318 | 0xf3, 0x86, 0x7f, 0xea, 0xef, 0xd0, 0xbb, 0xa6, | ||
319 | 0x21, 0xdf, 0x49, 0x73, 0x51, 0xcc, 0x36, 0xd3, | ||
320 | 0x3e, 0xa0, 0xf8, 0x44, 0xdf, 0xd3, 0xa6, 0xbe, | ||
321 | 0x8a, 0xd4, 0x57, 0xdd, 0x72, 0x94, 0x61, 0x0f, | ||
322 | 0x82, 0xd1, 0x07, 0xb8, 0x7c, 0x18, 0x83, 0xdf, | ||
323 | 0x3a, 0xe5, 0x50, 0x6a, 0x82, 0x20, 0xac, 0xa9, | ||
324 | 0xa8, 0xff, 0xd9, 0xf3, 0x77, 0x33, 0x5a, 0x9e, | ||
325 | 0x7f, 0x6d, 0xfe, 0x5d, 0x33, 0x41, 0x42, 0xe7, | ||
326 | 0x6c, 0x19, 0xe0, 0x44, 0x8a, 0x15, 0xf6, 0x70, | ||
327 | 0x98, 0xb7, 0x68, 0x4d, 0xfa, 0x97, 0x39, 0xb0, | ||
328 | 0x8e, 0xe8, 0x84, 0x8b, 0x75, 0x30, 0xb7, 0x7d, | ||
329 | 0x92, 0x69, 0x20, 0x9c, 0x81, 0xfb, 0x4b, 0xf4, | ||
330 | 0x01, 0x50, 0xeb, 0xce, 0x0c, 0x1c, 0x6c, 0xb5, | ||
331 | 0x4a, 0xd7, 0x27, 0x0c, 0xce, 0xbb, 0xe5, 0x85, | ||
332 | 0xf0, 0xb6, 0xee, 0xd5, 0x70, 0xdd, 0x3b, 0xfc, | ||
333 | 0xd4, 0x99, 0xf1, 0x33, 0xdd, 0x8b, 0xc4, 0x2f, | ||
334 | 0xae, 0xab, 0x74, 0x96, 0x32, 0xc7, 0x4c, 0x56, | ||
335 | 0x3c, 0x89, 0x0f, 0x96, 0x0b, 0x42, 0xc0, 0xcb, | ||
336 | 0xee, 0x0f, 0x0b, 0x8c, 0xfb, 0x7e, 0x47, 0x7b, | ||
337 | 0x64, 0x48, 0xfd, 0xb2, 0x00, 0x80, 0x89, 0xa5, | ||
338 | 0x13, 0x55, 0x62, 0xfc, 0x8f, 0xe2, 0x42, 0x03, | ||
339 | 0xb7, 0x4e, 0x2a, 0x79, 0xb4, 0x82, 0xea, 0x23, | ||
340 | 0x49, 0xda, 0xaf, 0x52, 0x63, 0x1e, 0x60, 0x03, | ||
341 | 0x89, 0x06, 0x44, 0x46, 0x08, 0xc3, 0xc4, 0x87, | ||
342 | 0x70, 0x2e, 0xda, 0x94, 0xad, 0x6b, 0xe0, 0xe4, | ||
343 | 0xd1, 0x8a, 0x06, 0xc2, 0xa8, 0xc0, 0xa7, 0x43, | ||
344 | 0x3c, 0x47, 0x52, 0x0e, 0xc3, 0x77, 0x81, 0x11, | ||
345 | 0x67, 0x0e, 0xa0, 0x70, 0x04, 0x47, 0x29, 0x40, | ||
346 | 0x86, 0x0d, 0x34, 0x56, 0xa7, 0xc9, 0x35, 0x59, | ||
347 | 0x68, 0xdc, 0x93, 0x81, 0x70, 0xee, 0x86, 0xd9, | ||
348 | 0x80, 0x06, 0x40, 0x4f, 0x1a, 0x0d, 0x40, 0x30, | ||
349 | 0x0b, 0xcb, 0x96, 0x47, 0xc1, 0xb7, 0x52, 0xfd, | ||
350 | 0x56, 0xe0, 0x72, 0x4b, 0xfb, 0xbd, 0x92, 0x45, | ||
351 | 0x61, 0x71, 0xc2, 0x33, 0x11, 0xbf, 0x52, 0x83, | ||
352 | 0x79, 0x26, 0xe0, 0x49, 0x6b, 0xb7, 0x05, 0x8b, | ||
353 | 0xe8, 0x0e, 0x87, 0x31, 0xd7, 0x9d, 0x8a, 0xf5, | ||
354 | 0xc0, 0x5f, 0x2e, 0x58, 0x4a, 0xdb, 0x11, 0xb3, | ||
355 | 0x6c, 0x30, 0x2a, 0x46, 0x19, 0xe3, 0x27, 0x84, | ||
356 | 0x1f, 0x63, 0x6e, 0xf6, 0x57, 0xc7, 0xc9, 0xd8, | ||
357 | 0x5e, 0xba, 0xb3, 0x87, 0xd5, 0x83, 0x26, 0x34, | ||
358 | 0x21, 0x9e, 0x65, 0xde, 0x42, 0xd3, 0xbe, 0x7b, | ||
359 | 0xbc, 0x91, 0x71, 0x44, 0x4d, 0x99, 0x3b, 0x31, | ||
360 | 0xe5, 0x3f, 0x11, 0x4e, 0x7f, 0x13, 0x51, 0x3b, | ||
361 | 0xae, 0x79, 0xc9, 0xd3, 0x81, 0x8e, 0x25, 0x40, | ||
362 | 0x10, 0xfc, 0x07, 0x1e, 0xf9, 0x7b, 0x9a, 0x4b, | ||
363 | 0x6c, 0xe3, 0xb3, 0xad, 0x1a, 0x0a, 0xdd, 0x9e, | ||
364 | 0x59, 0x0c, 0xa2, 0xcd, 0xae, 0x48, 0x4a, 0x38, | ||
365 | 0x5b, 0x47, 0x41, 0x94, 0x65, 0x6b, 0xbb, 0xeb, | ||
366 | 0x5b, 0xe3, 0xaf, 0x07, 0x5b, 0xd4, 0x4a, 0xa2, | ||
367 | 0xc9, 0x5d, 0x2f, 0x64, 0x03, 0xd7, 0x3a, 0x2c, | ||
368 | 0x6e, 0xce, 0x76, 0x95, 0xb4, 0xb3, 0xc0, 0xf1, | ||
369 | 0xe2, 0x45, 0x73, 0x7a, 0x5c, 0xab, 0xc1, 0xfc, | ||
370 | 0x02, 0x8d, 0x81, 0x29, 0xb3, 0xac, 0x07, 0xec, | ||
371 | 0x40, 0x7d, 0x45, 0xd9, 0x7a, 0x59, 0xee, 0x34, | ||
372 | 0xf0, 0xe9, 0xd5, 0x7b, 0x96, 0xb1, 0x3d, 0x95, | ||
373 | 0xcc, 0x86, 0xb5, 0xb6, 0x04, 0x2d, 0xb5, 0x92, | ||
374 | 0x7e, 0x76, 0xf4, 0x06, 0xa9, 0xa3, 0x12, 0x0f, | ||
375 | 0xb1, 0xaf, 0x26, 0xba, 0x7c, 0xfc, 0x7e, 0x1c, | ||
376 | 0xbc, 0x2c, 0x49, 0x97, 0x53, 0x60, 0x13, 0x0b, | ||
377 | 0xa6, 0x61, 0x83, 0x89, 0x42, 0xd4, 0x17, 0x0c, | ||
378 | 0x6c, 0x26, 0x52, 0xc3, 0xb3, 0xd4, 0x67, 0xf5, | ||
379 | 0xe3, 0x04, 0xb7, 0xf4, 0xcb, 0x80, 0xb8, 0xcb, | ||
380 | 0x77, 0x56, 0x3e, 0xaa, 0x57, 0x54, 0xee, 0xb4, | ||
381 | 0x2c, 0x67, 0xcf, 0xf2, 0xdc, 0xbe, 0x55, 0xf9, | ||
382 | 0x43, 0x1f, 0x6e, 0x22, 0x97, 0x67, 0x7f, 0xc4, | ||
383 | 0xef, 0xb1, 0x26, 0x31, 0x1e, 0x27, 0xdf, 0x41, | ||
384 | 0x80, 0x47, 0x6c, 0xe2, 0xfa, 0xa9, 0x8c, 0x2a, | ||
385 | 0xf6, 0xf2, 0xab, 0xf0, 0x15, 0xda, 0x6c, 0xc8, | ||
386 | 0xfe, 0xb5, 0x23, 0xde, 0xa9, 0x05, 0x3f, 0x06, | ||
387 | 0x54, 0x4c, 0xcd, 0xe1, 0xab, 0xfc, 0x0e, 0x62, | ||
388 | 0x33, 0x31, 0x73, 0x2c, 0x76, 0xcb, 0xb4, 0x47, | ||
389 | 0x1e, 0x20, 0xad, 0xd8, 0xf2, 0x31, 0xdd, 0xc4, | ||
390 | 0x8b, 0x0c, 0x77, 0xbe, 0xe1, 0x8b, 0x26, 0x00, | ||
391 | 0x02, 0x58, 0xd6, 0x8d, 0xef, 0xad, 0x74, 0x67, | ||
392 | 0xab, 0x3f, 0xef, 0xcb, 0x6f, 0xb0, 0xcc, 0x81, | ||
393 | 0x44, 0x4c, 0xaf, 0xe9, 0x49, 0x4f, 0xdb, 0xa0, | ||
394 | 0x25, 0xa4, 0xf0, 0x89, 0xf1, 0xbe, 0xd8, 0x10, | ||
395 | 0xff, 0xb1, 0x3b, 0x4b, 0xfa, 0x98, 0xf5, 0x79, | ||
396 | 0x6d, 0x1e, 0x69, 0x4d, 0x57, 0xb1, 0xc8, 0x19, | ||
397 | 0x1b, 0xbd, 0x1e, 0x8c, 0x84, 0xb7, 0x7b, 0xe8, | ||
398 | 0xd2, 0x2d, 0x09, 0x41, 0x41, 0x37, 0x3d, 0xb1, | ||
399 | 0x6f, 0x26, 0x5d, 0x71, 0x16, 0x3d, 0xb7, 0x83, | ||
400 | 0x27, 0x2c, 0xa7, 0xb6, 0x50, 0xbd, 0x91, 0x86, | ||
401 | 0xab, 0x24, 0xa1, 0x38, 0xfd, 0xea, 0x71, 0x55, | ||
402 | 0x7e, 0x9a, 0x07, 0x77, 0x4b, 0xfa, 0x61, 0x66, | ||
403 | 0x20, 0x1e, 0x28, 0x95, 0x18, 0x1b, 0xa4, 0xa0, | ||
404 | 0xfd, 0xc0, 0x89, 0x72, 0x43, 0xd9, 0x3b, 0x49, | ||
405 | 0x5a, 0x3f, 0x9d, 0xbf, 0xdb, 0xb4, 0x46, 0xea, | ||
406 | 0x42, 0x01, 0x77, 0x23, 0x68, 0x95, 0xb6, 0x24, | ||
407 | 0xb3, 0xa8, 0x6c, 0x28, 0x3b, 0x11, 0x40, 0x7e, | ||
408 | 0x18, 0x65, 0x6d, 0xd8, 0x24, 0x42, 0x7d, 0x88, | ||
409 | 0xc0, 0x52, 0xd9, 0x05, 0xe4, 0x95, 0x90, 0x87, | ||
410 | 0x8c, 0xf4, 0xd0, 0x6b, 0xb9, 0x83, 0x99, 0x34, | ||
411 | 0x6d, 0xfe, 0x54, 0x40, 0x94, 0x52, 0x21, 0x4f, | ||
412 | 0x14, 0x25, 0xc5, 0xd6, 0x5e, 0x95, 0xdc, 0x0a, | ||
413 | 0x2b, 0x89, 0x20, 0x11, 0x84, 0x48, 0xd6, 0x3a, | ||
414 | 0xcd, 0x5c, 0x24, 0xad, 0x62, 0xe3, 0xb1, 0x93, | ||
415 | 0x25, 0x8d, 0xcd, 0x7e, 0xfc, 0x27, 0xa3, 0x37, | ||
416 | 0xfd, 0x84, 0xfc, 0x1b, 0xb2, 0xf1, 0x27, 0x38, | ||
417 | 0x5a, 0xb7, 0xfc, 0xf2, 0xfa, 0x95, 0x66, 0xd4, | ||
418 | 0xfb, 0xba, 0xa7, 0xd7, 0xa3, 0x72, 0x69, 0x48, | ||
419 | 0x48, 0x8c, 0xeb, 0x28, 0x89, 0xfe, 0x33, 0x65, | ||
420 | 0x5a, 0x36, 0x01, 0x7e, 0x06, 0x79, 0x0a, 0x09, | ||
421 | 0x3b, 0x74, 0x11, 0x9a, 0x6e, 0xbf, 0xd4, 0x9e, | ||
422 | 0x58, 0x90, 0x49, 0x4f, 0x4d, 0x08, 0xd4, 0xe5, | ||
423 | 0x4a, 0x09, 0x21, 0xef, 0x8b, 0xb8, 0x74, 0x3b, | ||
424 | 0x91, 0xdd, 0x36, 0x85, 0x60, 0x2d, 0xfa, 0xd4, | ||
425 | 0x45, 0x7b, 0x45, 0x53, 0xf5, 0x47, 0x87, 0x7e, | ||
426 | 0xa6, 0x37, 0xc8, 0x78, 0x7a, 0x68, 0x9d, 0x8d, | ||
427 | 0x65, 0x2c, 0x0e, 0x91, 0x5c, 0xa2, 0x60, 0xf0, | ||
428 | 0x8e, 0x3f, 0xe9, 0x1a, 0xcd, 0xaa, 0xe7, 0xd5, | ||
429 | 0x77, 0x18, 0xaf, 0xc9, 0xbc, 0x18, 0xea, 0x48, | ||
430 | 0x1b, 0xfb, 0x22, 0x48, 0x70, 0x16, 0x29, 0x9e, | ||
431 | 0x5b, 0xc1, 0x2c, 0x66, 0x23, 0xbc, 0xf0, 0x1f, | ||
432 | 0xef, 0xaf, 0xe4, 0xd6, 0x04, 0x19, 0x82, 0x7a, | ||
433 | 0x0b, 0xba, 0x4b, 0x46, 0xb1, 0x6a, 0x85, 0x5d, | ||
434 | 0xb4, 0x73, 0xd6, 0x21, 0xa1, 0x71, 0x60, 0x14, | ||
435 | 0xee, 0x0a, 0x77, 0xc4, 0x66, 0x2e, 0xf9, 0x69, | ||
436 | 0x30, 0xaf, 0x41, 0x0b, 0xc8, 0x83, 0x3c, 0x53, | ||
437 | 0x99, 0x19, 0x27, 0x46, 0xf7, 0x41, 0x6e, 0x56, | ||
438 | 0xdc, 0x94, 0x28, 0x67, 0x4e, 0xb7, 0x25, 0x48, | ||
439 | 0x8a, 0xc2, 0xe0, 0x60, 0x96, 0xcc, 0x18, 0xf4, | ||
440 | 0x84, 0xdd, 0xa7, 0x5e, 0x3e, 0x05, 0x0b, 0x26, | ||
441 | 0x26, 0xb2, 0x5c, 0x1f, 0x57, 0x1a, 0x04, 0x7e, | ||
442 | 0x6a, 0xe3, 0x2f, 0xb4, 0x35, 0xb6, 0x38, 0x40, | ||
443 | 0x40, 0xcd, 0x6f, 0x87, 0x2e, 0xef, 0xa3, 0xd7, | ||
444 | 0xa9, 0xc2, 0xe8, 0x0d, 0x27, 0xdf, 0x44, 0x62, | ||
445 | 0x99, 0xa0, 0xfc, 0xcf, 0x81, 0x78, 0xcb, 0xfe, | ||
446 | 0xe5, 0xa0, 0x03, 0x4e, 0x6c, 0xd7, 0xf4, 0xaf, | ||
447 | 0x7a, 0xbb, 0x61, 0x82, 0xfe, 0x71, 0x89, 0xb2, | ||
448 | 0x22, 0x7c, 0x8e, 0x83, 0x04, 0xce, 0xf6, 0x5d, | ||
449 | 0x84, 0x8f, 0x95, 0x6a, 0x7f, 0xad, 0xfd, 0x32, | ||
450 | 0x9c, 0x5e, 0xe4, 0x9c, 0x89, 0x60, 0x54, 0xaa, | ||
451 | 0x96, 0x72, 0xd2, 0xd7, 0x36, 0x85, 0xa9, 0x45, | ||
452 | 0xd2, 0x2a, 0xa1, 0x81, 0x49, 0x6f, 0x7e, 0x04, | ||
453 | 0xfa, 0xe2, 0xfe, 0x90, 0x26, 0x77, 0x5a, 0x33, | ||
454 | 0xb8, 0x04, 0x9a, 0x7a, 0xe6, 0x4c, 0x4f, 0xad, | ||
455 | 0x72, 0x96, 0x08, 0x28, 0x58, 0x13, 0xf8, 0xc4, | ||
456 | 0x1c, 0xf0, 0xc3, 0x45, 0x95, 0x49, 0x20, 0x8c, | ||
457 | 0x9f, 0x39, 0x70, 0xe1, 0x77, 0xfe, 0xd5, 0x4b, | ||
458 | 0xaf, 0x86, 0xda, 0xef, 0x22, 0x06, 0x83, 0x36, | ||
459 | 0x29, 0x12, 0x11, 0x40, 0xbc, 0x3b, 0x86, 0xaa, | ||
460 | 0xaa, 0x65, 0x60, 0xc3, 0x80, 0xca, 0xed, 0xa9, | ||
461 | 0xf3, 0xb0, 0x79, 0x96, 0xa2, 0x55, 0x27, 0x28, | ||
462 | 0x55, 0x73, 0x26, 0xa5, 0x50, 0xea, 0x92, 0x4b, | ||
463 | 0x3c, 0x5c, 0x82, 0x33, 0xf0, 0x01, 0x3f, 0x03, | ||
464 | 0xc1, 0x08, 0x05, 0xbf, 0x98, 0xf4, 0x9b, 0x6d, | ||
465 | 0xa5, 0xa8, 0xb4, 0x82, 0x0c, 0x06, 0xfa, 0xff, | ||
466 | 0x2d, 0x08, 0xf3, 0x05, 0x4f, 0x57, 0x2a, 0x39, | ||
467 | 0xd4, 0x83, 0x0d, 0x75, 0x51, 0xd8, 0x5b, 0x1b, | ||
468 | 0xd3, 0x51, 0x5a, 0x32, 0x2a, 0x9b, 0x32, 0xb2, | ||
469 | 0xf2, 0xa4, 0x96, 0x12, 0xf2, 0xae, 0x40, 0x34, | ||
470 | 0x67, 0xa8, 0xf5, 0x44, 0xd5, 0x35, 0x53, 0xfe, | ||
471 | 0xa3, 0x60, 0x96, 0x63, 0x0f, 0x1f, 0x6e, 0xb0, | ||
472 | 0x5a, 0x42, 0xa6, 0xfc, 0x51, 0x0b, 0x60, 0x27, | ||
473 | 0xbc, 0x06, 0x71, 0xed, 0x65, 0x5b, 0x23, 0x86, | ||
474 | 0x4a, 0x07, 0x3b, 0x22, 0x07, 0x46, 0xe6, 0x90, | ||
475 | 0x3e, 0xf3, 0x25, 0x50, 0x1b, 0x4c, 0x7f, 0x03, | ||
476 | 0x08, 0xa8, 0x36, 0x6b, 0x87, 0xe5, 0xe3, 0xdb, | ||
477 | 0x9a, 0x38, 0x83, 0xff, 0x9f, 0x1a, 0x9f, 0x57, | ||
478 | 0xa4, 0x2a, 0xf6, 0x37, 0xbc, 0x1a, 0xff, 0xc9, | ||
479 | 0x1e, 0x35, 0x0c, 0xc3, 0x7c, 0xa3, 0xb2, 0xe5, | ||
480 | 0xd2, 0xc6, 0xb4, 0x57, 0x47, 0xe4, 0x32, 0x16, | ||
481 | 0x6d, 0xa9, 0xae, 0x64, 0xe6, 0x2d, 0x8d, 0xc5, | ||
482 | 0x8d, 0x50, 0x8e, 0xe8, 0x1a, 0x22, 0x34, 0x2a, | ||
483 | 0xd9, 0xeb, 0x51, 0x90, 0x4a, 0xb1, 0x41, 0x7d, | ||
484 | 0x64, 0xf9, 0xb9, 0x0d, 0xf6, 0x23, 0x33, 0xb0, | ||
485 | 0x33, 0xf4, 0xf7, 0x3f, 0x27, 0x84, 0xc6, 0x0f, | ||
486 | 0x54, 0xa5, 0xc0, 0x2e, 0xec, 0x0b, 0x3a, 0x48, | ||
487 | 0x6e, 0x80, 0x35, 0x81, 0x43, 0x9b, 0x90, 0xb1, | ||
488 | 0xd0, 0x2b, 0xea, 0x21, 0xdc, 0xda, 0x5b, 0x09, | ||
489 | 0xf4, 0xcc, 0x10, 0xb4, 0xc7, 0xfe, 0x79, 0x51, | ||
490 | 0xc3, 0xc5, 0xac, 0x88, 0x74, 0x84, 0x0b, 0x4b, | ||
491 | 0xca, 0x79, 0x16, 0x29, 0xfb, 0x69, 0x54, 0xdf, | ||
492 | 0x41, 0x7e, 0xe9, 0xc7, 0x8e, 0xea, 0xa5, 0xfe, | ||
493 | 0xfc, 0x76, 0x0e, 0x90, 0xc4, 0x92, 0x38, 0xad, | ||
494 | 0x7b, 0x48, 0xe6, 0x6e, 0xf7, 0x21, 0xfd, 0x4e, | ||
495 | 0x93, 0x0a, 0x7b, 0x41, 0x83, 0x68, 0xfb, 0x57, | ||
496 | 0x51, 0x76, 0x34, 0xa9, 0x6c, 0x00, 0xaa, 0x4f, | ||
497 | 0x66, 0x65, 0x98, 0x4a, 0x4f, 0xa3, 0xa0, 0xef, | ||
498 | 0x69, 0x3f, 0xe3, 0x1c, 0x92, 0x8c, 0xfd, 0xd8, | ||
499 | 0xe8, 0xde, 0x7c, 0x7f, 0x3e, 0x84, 0x8e, 0x69, | ||
500 | 0x3c, 0xf1, 0xf2, 0x05, 0x46, 0xdc, 0x2f, 0x9d, | ||
501 | 0x5e, 0x6e, 0x4c, 0xfb, 0xb5, 0x99, 0x2a, 0x59, | ||
502 | 0x63, 0xc1, 0x34, 0xbc, 0x57, 0xc0, 0x0d, 0xb9, | ||
503 | 0x61, 0x25, 0xf3, 0x33, 0x23, 0x51, 0xb6, 0x0d, | ||
504 | 0x07, 0xa6, 0xab, 0x94, 0x4a, 0xb7, 0x2a, 0xea, | ||
505 | 0xee, 0xac, 0xa3, 0xc3, 0x04, 0x8b, 0x0e, 0x56, | ||
506 | 0xfe, 0x44, 0xa7, 0x39, 0xe2, 0xed, 0xed, 0xb4, | ||
507 | 0x22, 0x2b, 0xac, 0x12, 0x32, 0x28, 0x91, 0xd8, | ||
508 | 0xa5, 0xab, 0xff, 0x5f, 0xe0, 0x4b, 0xda, 0x78, | ||
509 | 0x17, 0xda, 0xf1, 0x01, 0x5b, 0xcd, 0xe2, 0x5f, | ||
510 | 0x50, 0x45, 0x73, 0x2b, 0xe4, 0x76, 0x77, 0xf4, | ||
511 | 0x64, 0x1d, 0x43, 0xfb, 0x84, 0x7a, 0xea, 0x91, | ||
512 | 0xae, 0xf9, 0x9e, 0xb7, 0xb4, 0xb0, 0x91, 0x5f, | ||
513 | 0x16, 0x35, 0x9a, 0x11, 0xb8, 0xc7, 0xc1, 0x8c, | ||
514 | 0xc6, 0x10, 0x8d, 0x2f, 0x63, 0x4a, 0xa7, 0x57, | ||
515 | 0x3a, 0x51, 0xd6, 0x32, 0x2d, 0x64, 0x72, 0xd4, | ||
516 | 0x66, 0xdc, 0x10, 0xa6, 0x67, 0xd6, 0x04, 0x23, | ||
517 | 0x9d, 0x0a, 0x11, 0x77, 0xdd, 0x37, 0x94, 0x17, | ||
518 | 0x3c, 0xbf, 0x8b, 0x65, 0xb0, 0x2e, 0x5e, 0x66, | ||
519 | 0x47, 0x64, 0xac, 0xdd, 0xf0, 0x84, 0xfd, 0x39, | ||
520 | 0xfa, 0x15, 0x5d, 0xef, 0xae, 0xca, 0xc1, 0x36, | ||
521 | 0xa7, 0x5c, 0xbf, 0xc7, 0x08, 0xc2, 0x66, 0x00, | ||
522 | 0x74, 0x74, 0x4e, 0x27, 0x3f, 0x55, 0x8a, 0xb7, | ||
523 | 0x38, 0x66, 0x83, 0x6d, 0xcf, 0x99, 0x9e, 0x60, | ||
524 | 0x8f, 0xdd, 0x2e, 0x62, 0x22, 0x0e, 0xef, 0x0c, | ||
525 | 0x98, 0xa7, 0x85, 0x74, 0x3b, 0x9d, 0xec, 0x9e, | ||
526 | 0xa9, 0x19, 0x72, 0xa5, 0x7f, 0x2c, 0x39, 0xb7, | ||
527 | 0x7d, 0xb7, 0xf1, 0x12, 0x65, 0x27, 0x4b, 0x5a, | ||
528 | 0xde, 0x17, 0xfe, 0xad, 0x44, 0xf3, 0x20, 0x4d, | ||
529 | 0xfd, 0xe4, 0x1f, 0xb5, 0x81, 0xb0, 0x36, 0x37, | ||
530 | 0x08, 0x6f, 0xc3, 0x0c, 0xe9, 0x85, 0x98, 0x82, | ||
531 | 0xa9, 0x62, 0x0c, 0xc4, 0x97, 0xc0, 0x50, 0xc8, | ||
532 | 0xa7, 0x3c, 0x50, 0x9f, 0x43, 0xb9, 0xcd, 0x5e, | ||
533 | 0x4d, 0xfa, 0x1c, 0x4b, 0x0b, 0xa9, 0x98, 0x85, | ||
534 | 0x38, 0x92, 0xac, 0x8d, 0xe4, 0xad, 0x9b, 0x98, | ||
535 | 0xab, 0xd9, 0x38, 0xac, 0x62, 0x52, 0xa3, 0x22, | ||
536 | 0x63, 0x0f, 0xbf, 0x95, 0x48, 0xdf, 0x69, 0xe7, | ||
537 | 0x8b, 0x33, 0xd5, 0xb2, 0xbd, 0x05, 0x49, 0x49, | ||
538 | 0x9d, 0x57, 0x73, 0x19, 0x33, 0xae, 0xfa, 0x33, | ||
539 | 0xf1, 0x19, 0xa8, 0x80, 0xce, 0x04, 0x9f, 0xbc, | ||
540 | 0x1d, 0x65, 0x82, 0x1b, 0xe5, 0x3a, 0x51, 0xc8, | ||
541 | 0x1c, 0x21, 0xe3, 0x5d, 0xf3, 0x7d, 0x9b, 0x2f, | ||
542 | 0x2c, 0x1d, 0x4a, 0x7f, 0x9b, 0x68, 0x35, 0xa3, | ||
543 | 0xb2, 0x50, 0xf7, 0x62, 0x79, 0xcd, 0xf4, 0x98, | ||
544 | 0x4f, 0xe5, 0x63, 0x7c, 0x3e, 0x45, 0x31, 0x8c, | ||
545 | 0x16, 0xa0, 0x12, 0xc8, 0x58, 0xce, 0x39, 0xa6, | ||
546 | 0xbc, 0x54, 0xdb, 0xc5, 0xe0, 0xd5, 0xba, 0xbc, | ||
547 | 0xb9, 0x04, 0xf4, 0x8d, 0xe8, 0x2f, 0x15, 0x9d, | ||
548 | }; | ||
549 | |||
550 | /* 100 test cases */ | ||
551 | static struct crc_test { | ||
552 | u32 crc; /* random starting crc */ | ||
553 | u32 start; /* random 6 bit offset in buf */ | ||
554 | u32 length; /* random 11 bit length of test */ | ||
555 | u32 crc_le; /* expected crc32_le result */ | ||
556 | u32 crc_be; /* expected crc32_be result */ | ||
557 | u32 crc32c_le; /* expected crc32c_le result */ | ||
558 | } const test[] __initconst = | ||
559 | { | ||
560 | {0x674bf11d, 0x00000038, 0x00000542, 0x0af6d466, 0xd8b6e4c1, 0xf6e93d6c}, | ||
561 | {0x35c672c6, 0x0000003a, 0x000001aa, 0xc6d3dfba, 0x28aaf3ad, 0x0fe92aca}, | ||
562 | {0x496da28e, 0x00000039, 0x000005af, 0xd933660f, 0x5d57e81f, 0x52e1ebb8}, | ||
563 | {0x09a9b90e, 0x00000027, 0x000001f8, 0xb45fe007, 0xf45fca9a, 0x0798af9a}, | ||
564 | {0xdc97e5a9, 0x00000025, 0x000003b6, 0xf81a3562, 0xe0126ba2, 0x18eb3152}, | ||
565 | {0x47c58900, 0x0000000a, 0x000000b9, 0x8e58eccf, 0xf3afc793, 0xd00d08c7}, | ||
566 | {0x292561e8, 0x0000000c, 0x00000403, 0xa2ba8aaf, 0x0b797aed, 0x8ba966bc}, | ||
567 | {0x415037f6, 0x00000003, 0x00000676, 0xa17d52e8, 0x7f0fdf35, 0x11d694a2}, | ||
568 | {0x3466e707, 0x00000026, 0x00000042, 0x258319be, 0x75c484a2, 0x6ab3208d}, | ||
569 | {0xafd1281b, 0x00000023, 0x000002ee, 0x4428eaf8, 0x06c7ad10, 0xba4603c5}, | ||
570 | {0xd3857b18, 0x00000028, 0x000004a2, 0x5c430821, 0xb062b7cb, 0xe6071c6f}, | ||
571 | {0x1d825a8f, 0x0000002b, 0x0000050b, 0xd2c45f0c, 0xd68634e0, 0x179ec30a}, | ||
572 | {0x5033e3bc, 0x0000000b, 0x00000078, 0xa3ea4113, 0xac6d31fb, 0x0903beb8}, | ||
573 | {0x94f1fb5e, 0x0000000f, 0x000003a2, 0xfbfc50b1, 0x3cfe50ed, 0x6a7cb4fa}, | ||
574 | {0xc9a0fe14, 0x00000009, 0x00000473, 0x5fb61894, 0x87070591, 0xdb535801}, | ||
575 | {0x88a034b1, 0x0000001c, 0x000005ad, 0xc1b16053, 0x46f95c67, 0x92bed597}, | ||
576 | {0xf0f72239, 0x00000020, 0x0000026d, 0xa6fa58f3, 0xf8c2c1dd, 0x192a3f1b}, | ||
577 | {0xcc20a5e3, 0x0000003b, 0x0000067a, 0x7740185a, 0x308b979a, 0xccbaec1a}, | ||
578 | {0xce589c95, 0x0000002b, 0x00000641, 0xd055e987, 0x40aae25b, 0x7eabae4d}, | ||
579 | {0x78edc885, 0x00000035, 0x000005be, 0xa39cb14b, 0x035b0d1f, 0x28c72982}, | ||
580 | {0x9d40a377, 0x0000003b, 0x00000038, 0x1f47ccd2, 0x197fbc9d, 0xc3cd4d18}, | ||
581 | {0x703d0e01, 0x0000003c, 0x000006f1, 0x88735e7c, 0xfed57c5a, 0xbca8f0e7}, | ||
582 | {0x776bf505, 0x0000000f, 0x000005b2, 0x5cc4fc01, 0xf32efb97, 0x713f60b3}, | ||
583 | {0x4a3e7854, 0x00000027, 0x000004b8, 0x8d923c82, 0x0cbfb4a2, 0xebd08fd5}, | ||
584 | {0x209172dd, 0x0000003b, 0x00000356, 0xb89e9c2b, 0xd7868138, 0x64406c59}, | ||
585 | {0x3ba4cc5b, 0x0000002f, 0x00000203, 0xe51601a9, 0x5b2a1032, 0x7421890e}, | ||
586 | {0xfc62f297, 0x00000000, 0x00000079, 0x71a8e1a2, 0x5d88685f, 0xe9347603}, | ||
587 | {0x64280b8b, 0x00000016, 0x000007ab, 0x0fa7a30c, 0xda3a455f, 0x1bef9060}, | ||
588 | {0x97dd724b, 0x00000033, 0x000007ad, 0x5788b2f4, 0xd7326d32, 0x34720072}, | ||
589 | {0x61394b52, 0x00000035, 0x00000571, 0xc66525f1, 0xcabe7fef, 0x48310f59}, | ||
590 | {0x29b4faff, 0x00000024, 0x0000006e, 0xca13751e, 0x993648e0, 0x783a4213}, | ||
591 | {0x29bfb1dc, 0x0000000b, 0x00000244, 0x436c43f7, 0x429f7a59, 0x9e8efd41}, | ||
592 | {0x86ae934b, 0x00000035, 0x00000104, 0x0760ec93, 0x9cf7d0f4, 0xfc3d34a5}, | ||
593 | {0xc4c1024e, 0x0000002e, 0x000006b1, 0x6516a3ec, 0x19321f9c, 0x17a52ae2}, | ||
594 | {0x3287a80a, 0x00000026, 0x00000496, 0x0b257eb1, 0x754ebd51, 0x886d935a}, | ||
595 | {0xa4db423e, 0x00000023, 0x0000045d, 0x9b3a66dc, 0x873e9f11, 0xeaaeaeb2}, | ||
596 | {0x7a1078df, 0x00000015, 0x0000014a, 0x8c2484c5, 0x6a628659, 0x8e900a4b}, | ||
597 | {0x6048bd5b, 0x00000006, 0x0000006a, 0x897e3559, 0xac9961af, 0xd74662b1}, | ||
598 | {0xd8f9ea20, 0x0000003d, 0x00000277, 0x60eb905b, 0xed2aaf99, 0xd26752ba}, | ||
599 | {0xea5ec3b4, 0x0000002a, 0x000004fe, 0x869965dc, 0x6c1f833b, 0x8b1fcd62}, | ||
600 | {0x2dfb005d, 0x00000016, 0x00000345, 0x6a3b117e, 0xf05e8521, 0xf54342fe}, | ||
601 | {0x5a214ade, 0x00000020, 0x000005b6, 0x467f70be, 0xcb22ccd3, 0x5b95b988}, | ||
602 | {0xf0ab9cca, 0x00000032, 0x00000515, 0xed223df3, 0x7f3ef01d, 0x2e1176be}, | ||
603 | {0x91b444f9, 0x0000002e, 0x000007f8, 0x84e9a983, 0x5676756f, 0x66120546}, | ||
604 | {0x1b5d2ddb, 0x0000002e, 0x0000012c, 0xba638c4c, 0x3f42047b, 0xf256a5cc}, | ||
605 | {0xd824d1bb, 0x0000003a, 0x000007b5, 0x6288653b, 0x3a3ebea0, 0x4af1dd69}, | ||
606 | {0x0470180c, 0x00000034, 0x000001f0, 0x9d5b80d6, 0x3de08195, 0x56f0a04a}, | ||
607 | {0xffaa3a3f, 0x00000036, 0x00000299, 0xf3a82ab8, 0x53e0c13d, 0x74f6b6b2}, | ||
608 | {0x6406cfeb, 0x00000023, 0x00000600, 0xa920b8e8, 0xe4e2acf4, 0x085951fd}, | ||
609 | {0xb24aaa38, 0x0000003e, 0x000004a1, 0x657cc328, 0x5077b2c3, 0xc65387eb}, | ||
610 | {0x58b2ab7c, 0x00000039, 0x000002b4, 0x3a17ee7e, 0x9dcb3643, 0x1ca9257b}, | ||
611 | {0x3db85970, 0x00000006, 0x000002b6, 0x95268b59, 0xb9812c10, 0xfd196d76}, | ||
612 | {0x857830c5, 0x00000003, 0x00000590, 0x4ef439d5, 0xf042161d, 0x5ef88339}, | ||
613 | {0xe1fcd978, 0x0000003e, 0x000007d8, 0xae8d8699, 0xce0a1ef5, 0x2c3714d9}, | ||
614 | {0xb982a768, 0x00000016, 0x000006e0, 0x62fad3df, 0x5f8a067b, 0x58576548}, | ||
615 | {0x1d581ce8, 0x0000001e, 0x0000058b, 0xf0f5da53, 0x26e39eee, 0xfd7c57de}, | ||
616 | {0x2456719b, 0x00000025, 0x00000503, 0x4296ac64, 0xd50e4c14, 0xd5fedd59}, | ||
617 | {0xfae6d8f2, 0x00000000, 0x0000055d, 0x057fdf2e, 0x2a31391a, 0x1cc3b17b}, | ||
618 | {0xcba828e3, 0x00000039, 0x000002ce, 0xe3f22351, 0x8f00877b, 0x270eed73}, | ||
619 | {0x13d25952, 0x0000000a, 0x0000072d, 0x76d4b4cc, 0x5eb67ec3, 0x91ecbb11}, | ||
620 | {0x0342be3f, 0x00000015, 0x00000599, 0xec75d9f1, 0x9d4d2826, 0x05ed8d0c}, | ||
621 | {0xeaa344e0, 0x00000014, 0x000004d8, 0x72a4c981, 0x2064ea06, 0x0b09ad5b}, | ||
622 | {0xbbb52021, 0x0000003b, 0x00000272, 0x04af99fc, 0xaf042d35, 0xf8d511fb}, | ||
623 | {0xb66384dc, 0x0000001d, 0x000007fc, 0xd7629116, 0x782bd801, 0x5ad832cc}, | ||
624 | {0x616c01b6, 0x00000022, 0x000002c8, 0x5b1dab30, 0x783ce7d2, 0x1214d196}, | ||
625 | {0xce2bdaad, 0x00000016, 0x0000062a, 0x932535c8, 0x3f02926d, 0x5747218a}, | ||
626 | {0x00fe84d7, 0x00000005, 0x00000205, 0x850e50aa, 0x753d649c, 0xde8f14de}, | ||
627 | {0xbebdcb4c, 0x00000006, 0x0000055d, 0xbeaa37a2, 0x2d8c9eba, 0x3563b7b9}, | ||
628 | {0xd8b1a02a, 0x00000010, 0x00000387, 0x5017d2fc, 0x503541a5, 0x071475d0}, | ||
629 | {0x3b96cad2, 0x00000036, 0x00000347, 0x1d2372ae, 0x926cd90b, 0x54c79d60}, | ||
630 | {0xc94c1ed7, 0x00000005, 0x0000038b, 0x9e9fdb22, 0x144a9178, 0x4c53eee6}, | ||
631 | {0x1aad454e, 0x00000025, 0x000002b2, 0xc3f6315c, 0x5c7a35b3, 0x10137a3c}, | ||
632 | {0xa4fec9a6, 0x00000000, 0x000006d6, 0x90be5080, 0xa4107605, 0xaa9d6c73}, | ||
633 | {0x1bbe71e2, 0x0000001f, 0x000002fd, 0x4e504c3b, 0x284ccaf1, 0xb63d23e7}, | ||
634 | {0x4201c7e4, 0x00000002, 0x000002b7, 0x7822e3f9, 0x0cc912a9, 0x7f53e9cf}, | ||
635 | {0x23fddc96, 0x00000003, 0x00000627, 0x8a385125, 0x07767e78, 0x13c1cd83}, | ||
636 | {0xd82ba25c, 0x00000016, 0x0000063e, 0x98e4148a, 0x283330c9, 0x49ff5867}, | ||
637 | {0x786f2032, 0x0000002d, 0x0000060f, 0xf201600a, 0xf561bfcd, 0x8467f211}, | ||
638 | {0xfebe4e1f, 0x0000002a, 0x000004f2, 0x95e51961, 0xfd80dcab, 0x3f9683b2}, | ||
639 | {0x1a6e0a39, 0x00000008, 0x00000672, 0x8af6c2a5, 0x78dd84cb, 0x76a3f874}, | ||
640 | {0x56000ab8, 0x0000000e, 0x000000e5, 0x36bacb8f, 0x22ee1f77, 0x863b702f}, | ||
641 | {0x4717fe0c, 0x00000000, 0x000006ec, 0x8439f342, 0x5c8e03da, 0xdc6c58ff}, | ||
642 | {0xd5d5d68e, 0x0000003c, 0x000003a3, 0x46fff083, 0x177d1b39, 0x0622cc95}, | ||
643 | {0xc25dd6c6, 0x00000024, 0x000006c0, 0x5ceb8eb4, 0x892b0d16, 0xe85605cd}, | ||
644 | {0xe9b11300, 0x00000023, 0x00000683, 0x07a5d59a, 0x6c6a3208, 0x31da5f06}, | ||
645 | {0x95cd285e, 0x00000001, 0x00000047, 0x7b3a4368, 0x0202c07e, 0xa1f2e784}, | ||
646 | {0xd9245a25, 0x0000001e, 0x000003a6, 0xd33c1841, 0x1936c0d5, 0xb07cc616}, | ||
647 | {0x103279db, 0x00000006, 0x0000039b, 0xca09b8a0, 0x77d62892, 0xbf943b6c}, | ||
648 | {0x1cba3172, 0x00000027, 0x000001c8, 0xcb377194, 0xebe682db, 0x2c01af1c}, | ||
649 | {0x8f613739, 0x0000000c, 0x000001df, 0xb4b0bc87, 0x7710bd43, 0x0fe5f56d}, | ||
650 | {0x1c6aa90d, 0x0000001b, 0x0000053c, 0x70559245, 0xda7894ac, 0xf8943b2d}, | ||
651 | {0xaabe5b93, 0x0000003d, 0x00000715, 0xcdbf42fa, 0x0c3b99e7, 0xe4d89272}, | ||
652 | {0xf15dd038, 0x00000006, 0x000006db, 0x6e104aea, 0x8d5967f2, 0x7c2f6bbb}, | ||
653 | {0x584dd49c, 0x00000020, 0x000007bc, 0x36b6cfd6, 0xad4e23b2, 0xabbf388b}, | ||
654 | {0x5d8c9506, 0x00000020, 0x00000470, 0x4c62378e, 0x31d92640, 0x1dca1f4e}, | ||
655 | {0xb80d17b0, 0x00000032, 0x00000346, 0x22a5bb88, 0x9a7ec89f, 0x5c170e23}, | ||
656 | {0xdaf0592e, 0x00000023, 0x000007b0, 0x3cab3f99, 0x9b1fdd99, 0xc0e9d672}, | ||
657 | {0x4793cc85, 0x0000000d, 0x00000706, 0xe82e04f6, 0xed3db6b7, 0xc18bdc86}, | ||
658 | {0x82ebf64e, 0x00000009, 0x000007c3, 0x69d590a9, 0x9efa8499, 0xa874fcdd}, | ||
659 | {0xb18a0319, 0x00000026, 0x000007db, 0x1cf98dcc, 0x8fa9ad6a, 0x9dc0bb48}, | ||
660 | }; | ||
661 | |||
662 | #include <linux/time.h> | ||
663 | |||
664 | static int __init crc32c_test(void) | ||
665 | { | ||
666 | int i; | ||
667 | int errors = 0; | ||
668 | int bytes = 0; | ||
669 | u64 nsec; | ||
670 | unsigned long flags; | ||
671 | |||
672 | /* keep static to prevent cache warming code from | ||
673 | * getting eliminated by the compiler */ | ||
674 | static u32 crc; | ||
675 | |||
676 | /* pre-warm the cache */ | ||
677 | for (i = 0; i < 100; i++) { | ||
678 | bytes += test[i].length; | ||
679 | |||
680 | crc ^= __crc32c_le(test[i].crc, test_buf + | ||
681 | test[i].start, test[i].length); | ||
682 | } | ||
683 | |||
684 | /* reduce OS noise */ | ||
685 | local_irq_save(flags); | ||
686 | local_irq_disable(); | ||
687 | |||
688 | nsec = ktime_get_ns(); | ||
689 | for (i = 0; i < 100; i++) { | ||
690 | if (test[i].crc32c_le != __crc32c_le(test[i].crc, test_buf + | ||
691 | test[i].start, test[i].length)) | ||
692 | errors++; | ||
693 | } | ||
694 | nsec = ktime_get_ns() - nsec; | ||
695 | |||
696 | local_irq_restore(flags); | ||
697 | local_irq_enable(); | ||
698 | |||
699 | pr_info("crc32c: CRC_LE_BITS = %d\n", CRC_LE_BITS); | ||
700 | |||
701 | if (errors) | ||
702 | pr_warn("crc32c: %d self tests failed\n", errors); | ||
703 | else { | ||
704 | pr_info("crc32c: self tests passed, processed %d bytes in %lld nsec\n", | ||
705 | bytes, nsec); | ||
706 | } | ||
707 | |||
708 | return 0; | ||
709 | } | ||
710 | |||
711 | static int __init crc32c_combine_test(void) | ||
712 | { | ||
713 | int i, j; | ||
714 | int errors = 0, runs = 0; | ||
715 | |||
716 | for (i = 0; i < 10; i++) { | ||
717 | u32 crc_full; | ||
718 | |||
719 | crc_full = __crc32c_le(test[i].crc, test_buf + test[i].start, | ||
720 | test[i].length); | ||
721 | for (j = 0; j <= test[i].length; ++j) { | ||
722 | u32 crc1, crc2; | ||
723 | u32 len1 = j, len2 = test[i].length - j; | ||
724 | |||
725 | crc1 = __crc32c_le(test[i].crc, test_buf + | ||
726 | test[i].start, len1); | ||
727 | crc2 = __crc32c_le(0, test_buf + test[i].start + | ||
728 | len1, len2); | ||
729 | |||
730 | if (!(crc_full == __crc32c_le_combine(crc1, crc2, len2) && | ||
731 | crc_full == test[i].crc32c_le)) | ||
732 | errors++; | ||
733 | runs++; | ||
734 | cond_resched(); | ||
735 | } | ||
736 | } | ||
737 | |||
738 | if (errors) | ||
739 | pr_warn("crc32c_combine: %d/%d self tests failed\n", errors, runs); | ||
740 | else | ||
741 | pr_info("crc32c_combine: %d self tests passed\n", runs); | ||
742 | |||
743 | return 0; | ||
744 | } | ||
745 | |||
746 | static int __init crc32_test(void) | ||
747 | { | ||
748 | int i; | ||
749 | int errors = 0; | ||
750 | int bytes = 0; | ||
751 | u64 nsec; | ||
752 | unsigned long flags; | ||
753 | |||
754 | /* keep static to prevent cache warming code from | ||
755 | * getting eliminated by the compiler */ | ||
756 | static u32 crc; | ||
757 | |||
758 | /* pre-warm the cache */ | ||
759 | for (i = 0; i < 100; i++) { | ||
760 | bytes += 2*test[i].length; | ||
761 | |||
762 | crc ^= crc32_le(test[i].crc, test_buf + | ||
763 | test[i].start, test[i].length); | ||
764 | |||
765 | crc ^= crc32_be(test[i].crc, test_buf + | ||
766 | test[i].start, test[i].length); | ||
767 | } | ||
768 | |||
769 | /* reduce OS noise */ | ||
770 | local_irq_save(flags); | ||
771 | local_irq_disable(); | ||
772 | |||
773 | nsec = ktime_get_ns(); | ||
774 | for (i = 0; i < 100; i++) { | ||
775 | if (test[i].crc_le != crc32_le(test[i].crc, test_buf + | ||
776 | test[i].start, test[i].length)) | ||
777 | errors++; | ||
778 | |||
779 | if (test[i].crc_be != crc32_be(test[i].crc, test_buf + | ||
780 | test[i].start, test[i].length)) | ||
781 | errors++; | ||
782 | } | ||
783 | nsec = ktime_get_ns() - nsec; | ||
784 | |||
785 | local_irq_restore(flags); | ||
786 | local_irq_enable(); | ||
787 | |||
788 | pr_info("crc32: CRC_LE_BITS = %d, CRC_BE_BITS = %d\n", | ||
789 | CRC_LE_BITS, CRC_BE_BITS); | ||
790 | |||
791 | if (errors) | ||
792 | pr_warn("crc32: %d self tests failed\n", errors); | ||
793 | else { | ||
794 | pr_info("crc32: self tests passed, processed %d bytes in %lld nsec\n", | ||
795 | bytes, nsec); | ||
796 | } | ||
797 | |||
798 | return 0; | ||
799 | } | ||
800 | |||
801 | static int __init crc32_combine_test(void) | ||
802 | { | ||
803 | int i, j; | ||
804 | int errors = 0, runs = 0; | ||
805 | |||
806 | for (i = 0; i < 10; i++) { | ||
807 | u32 crc_full; | ||
808 | |||
809 | crc_full = crc32_le(test[i].crc, test_buf + test[i].start, | ||
810 | test[i].length); | ||
811 | for (j = 0; j <= test[i].length; ++j) { | ||
812 | u32 crc1, crc2; | ||
813 | u32 len1 = j, len2 = test[i].length - j; | ||
814 | |||
815 | crc1 = crc32_le(test[i].crc, test_buf + | ||
816 | test[i].start, len1); | ||
817 | crc2 = crc32_le(0, test_buf + test[i].start + | ||
818 | len1, len2); | ||
819 | |||
820 | if (!(crc_full == crc32_le_combine(crc1, crc2, len2) && | ||
821 | crc_full == test[i].crc_le)) | ||
822 | errors++; | ||
823 | runs++; | ||
824 | cond_resched(); | ||
825 | } | ||
826 | } | ||
827 | |||
828 | if (errors) | ||
829 | pr_warn("crc32_combine: %d/%d self tests failed\n", errors, runs); | ||
830 | else | ||
831 | pr_info("crc32_combine: %d self tests passed\n", runs); | ||
832 | |||
833 | return 0; | ||
834 | } | ||
835 | |||
836 | static int __init crc32test_init(void) | ||
837 | { | ||
838 | crc32_test(); | ||
839 | crc32c_test(); | ||
840 | |||
841 | crc32_combine_test(); | ||
842 | crc32c_combine_test(); | ||
843 | |||
844 | return 0; | ||
845 | } | ||
846 | |||
847 | static void __exit crc32_exit(void) | ||
848 | { | ||
849 | } | ||
850 | |||
851 | module_init(crc32test_init); | ||
852 | module_exit(crc32_exit); | ||
853 | |||
854 | MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>"); | ||
855 | MODULE_DESCRIPTION("CRC32 selftest"); | ||
856 | MODULE_LICENSE("GPL"); | ||
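
Editor's note: the combine tests above check, for every split point j, that the CRC of the first j bytes combined with the CRC of the remaining bytes reproduces the whole-buffer CRC. A minimal userspace sketch of the same property, using zlib (link with -lz) as a stand-in for the kernel API: zlib's crc32_combine() plays the role of crc32_le_combine(), and the seeding idiom differs from the test vectors' per-entry crc seed.

```c
/*
 * Split-and-combine property sketch, assuming zlib (-lz).
 * crc32_combine(crc1, crc2, len2) folds the CRC of a second
 * buffer of len2 bytes onto the CRC of a first buffer.
 */
#include <assert.h>
#include <stddef.h>
#include <zlib.h>

int main(void)
{
	static const unsigned char buf[] = "an example buffer for crc32";
	const size_t len = sizeof(buf) - 1;
	uLong full = crc32(crc32(0L, Z_NULL, 0), buf, (uInt)len);
	size_t j;

	/* every split point must combine back to the full CRC */
	for (j = 0; j <= len; j++) {
		uLong crc1 = crc32(crc32(0L, Z_NULL, 0), buf, (uInt)j);
		uLong crc2 = crc32(crc32(0L, Z_NULL, 0), buf + j,
				   (uInt)(len - j));

		assert(crc32_combine(crc1, crc2,
				     (z_off_t)(len - j)) == full);
	}
	return 0;
}
```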
diff --git a/lib/decompress_unlz4.c b/lib/decompress_unlz4.c index 036fc882cd72..1b0baf3008ea 100644 --- a/lib/decompress_unlz4.c +++ b/lib/decompress_unlz4.c | |||
@@ -72,7 +72,7 @@ STATIC inline int INIT unlz4(u8 *input, long in_len, | |||
72 | error("NULL input pointer and missing fill function"); | 72 | error("NULL input pointer and missing fill function"); |
73 | goto exit_1; | 73 | goto exit_1; |
74 | } else { | 74 | } else { |
75 | inp = large_malloc(lz4_compressbound(uncomp_chunksize)); | 75 | inp = large_malloc(LZ4_compressBound(uncomp_chunksize)); |
76 | if (!inp) { | 76 | if (!inp) { |
77 | error("Could not allocate input buffer"); | 77 | error("Could not allocate input buffer"); |
78 | goto exit_1; | 78 | goto exit_1; |
@@ -136,7 +136,7 @@ STATIC inline int INIT unlz4(u8 *input, long in_len, | |||
136 | inp += 4; | 136 | inp += 4; |
137 | size -= 4; | 137 | size -= 4; |
138 | } else { | 138 | } else { |
139 | if (chunksize > lz4_compressbound(uncomp_chunksize)) { | 139 | if (chunksize > LZ4_compressBound(uncomp_chunksize)) { |
140 | error("chunk length is longer than allocated"); | 140 | error("chunk length is longer than allocated"); |
141 | goto exit_2; | 141 | goto exit_2; |
142 | } | 142 | } |
@@ -152,11 +152,14 @@ STATIC inline int INIT unlz4(u8 *input, long in_len, | |||
152 | out_len -= dest_len; | 152 | out_len -= dest_len; |
153 | } else | 153 | } else |
154 | dest_len = out_len; | 154 | dest_len = out_len; |
155 | ret = lz4_decompress(inp, &chunksize, outp, dest_len); | 155 | |
156 | ret = LZ4_decompress_fast(inp, outp, dest_len); | ||
157 | chunksize = ret; | ||
156 | #else | 158 | #else |
157 | dest_len = uncomp_chunksize; | 159 | dest_len = uncomp_chunksize; |
158 | ret = lz4_decompress_unknownoutputsize(inp, chunksize, outp, | 160 | |
159 | &dest_len); | 161 | ret = LZ4_decompress_safe(inp, outp, chunksize, dest_len); |
162 | dest_len = ret; | ||
160 | #endif | 163 | #endif |
161 | if (ret < 0) { | 164 | if (ret < 0) { |
162 | error("Decoding failed"); | 165 | error("Decoding failed"); |
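
Editor's note: the hunk above replaces the old wrappers, which returned the size through a pointer argument, with the upstream-style calls that return it: LZ4_decompress_fast() returns the number of compressed bytes consumed (the caller knows the decompressed size), while LZ4_decompress_safe() returns the number of bytes written, which is why the new code assigns the return value to chunksize and dest_len respectively. A hedged userspace round-trip with liblz4 (link with -llz4; the userspace signatures lack the in-kernel wrkmem argument on the compression side):

```c
/*
 * LZ4 round-trip sketch, assuming userspace liblz4 (-llz4).
 * LZ4_decompress_safe() returns bytes written; a negative
 * return means malformed input, matching the "if (ret < 0)
 * error(...)" check in the updated decompressor.
 */
#include <assert.h>
#include <string.h>
#include <lz4.h>

int main(void)
{
	static const char src[] = "compress me, compress me, compress me";
	char comp[LZ4_COMPRESSBOUND(sizeof(src))];
	char dec[sizeof(src)];

	int csize = LZ4_compress_default(src, comp, sizeof(src),
					 sizeof(comp));
	assert(csize > 0);

	int dsize = LZ4_decompress_safe(comp, dec, csize, sizeof(dec));
	assert(dsize == (int)sizeof(src));
	assert(memcmp(src, dec, sizeof(src)) == 0);
	return 0;
}
```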
diff --git a/lib/find_bit.c b/lib/find_bit.c index 18072ea9c20e..6ed74f78380c 100644 --- a/lib/find_bit.c +++ b/lib/find_bit.c | |||
@@ -33,7 +33,7 @@ static unsigned long _find_next_bit(const unsigned long *addr, | |||
33 | { | 33 | { |
34 | unsigned long tmp; | 34 | unsigned long tmp; |
35 | 35 | ||
36 | if (!nbits || start >= nbits) | 36 | if (unlikely(start >= nbits)) |
37 | return nbits; | 37 | return nbits; |
38 | 38 | ||
39 | tmp = addr[start / BITS_PER_LONG] ^ invert; | 39 | tmp = addr[start / BITS_PER_LONG] ^ invert; |
@@ -151,7 +151,7 @@ static unsigned long _find_next_bit_le(const unsigned long *addr, | |||
151 | { | 151 | { |
152 | unsigned long tmp; | 152 | unsigned long tmp; |
153 | 153 | ||
154 | if (!nbits || start >= nbits) | 154 | if (unlikely(start >= nbits)) |
155 | return nbits; | 155 | return nbits; |
156 | 156 | ||
157 | tmp = addr[start / BITS_PER_LONG] ^ invert; | 157 | tmp = addr[start / BITS_PER_LONG] ^ invert; |
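
Editor's note: the micro-optimisation above drops the separate `!nbits` test because `start >= nbits` already covers it for any start when nbits is 0. A stand-alone sketch of the word-at-a-time scan shape (hypothetical simplified version; the kernel helper also XORs each word with an `invert` mask so one body serves both find_next_bit and find_next_zero_bit, which is omitted here):

```c
/*
 * Simplified _find_next_bit() shape: one guard, then a
 * word-at-a-time scan. start >= nbits subsumes nbits == 0,
 * so the extra test was dead weight on the hot path.
 */
#include <limits.h>
#include <stddef.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static size_t find_next_bit_sketch(const unsigned long *addr,
				   size_t nbits, size_t start)
{
	unsigned long tmp;

	if (start >= nbits)	/* also covers nbits == 0 */
		return nbits;

	tmp = addr[start / BITS_PER_LONG];
	/* mask off bits below the starting position */
	tmp &= ~0UL << (start % BITS_PER_LONG);
	start = start / BITS_PER_LONG * BITS_PER_LONG;

	while (!tmp) {
		start += BITS_PER_LONG;
		if (start >= nbits)
			return nbits;
		tmp = addr[start / BITS_PER_LONG];
	}

	start += __builtin_ctzl(tmp);	/* lowest set bit in the word */
	return start < nbits ? start : nbits;
}
```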
diff --git a/lib/glob.c b/lib/glob.c index 500fc80d23e1..0ba3ea86b546 100644 --- a/lib/glob.c +++ b/lib/glob.c | |||
@@ -121,167 +121,3 @@ backtrack: | |||
121 | } | 121 | } |
122 | } | 122 | } |
123 | EXPORT_SYMBOL(glob_match); | 123 | EXPORT_SYMBOL(glob_match); |
124 | |||
125 | |||
126 | #ifdef CONFIG_GLOB_SELFTEST | ||
127 | |||
128 | #include <linux/printk.h> | ||
129 | #include <linux/moduleparam.h> | ||
130 | |||
131 | /* Boot with "glob.verbose=1" to show successful tests, too */ | ||
132 | static bool verbose = false; | ||
133 | module_param(verbose, bool, 0); | ||
134 | |||
135 | struct glob_test { | ||
136 | char const *pat, *str; | ||
137 | bool expected; | ||
138 | }; | ||
139 | |||
140 | static bool __pure __init test(char const *pat, char const *str, bool expected) | ||
141 | { | ||
142 | bool match = glob_match(pat, str); | ||
143 | bool success = match == expected; | ||
144 | |||
145 | /* Can't get string literals into a particular section, so... */ | ||
146 | static char const msg_error[] __initconst = | ||
147 | KERN_ERR "glob: \"%s\" vs. \"%s\": %s *** ERROR ***\n"; | ||
148 | static char const msg_ok[] __initconst = | ||
149 | KERN_DEBUG "glob: \"%s\" vs. \"%s\": %s OK\n"; | ||
150 | static char const mismatch[] __initconst = "mismatch"; | ||
151 | char const *message; | ||
152 | |||
153 | if (!success) | ||
154 | message = msg_error; | ||
155 | else if (verbose) | ||
156 | message = msg_ok; | ||
157 | else | ||
158 | return success; | ||
159 | |||
160 | printk(message, pat, str, mismatch + 3*match); | ||
161 | return success; | ||
162 | } | ||
163 | |||
164 | /* | ||
165 | * The tests are all jammed together in one array to make it simpler | ||
166 | * to place that array in the .init.rodata section. The obvious | ||
167 | * "array of structures containing char *" has no way to force the | ||
168 | * pointed-to strings to be in a particular section. | ||
169 | * | ||
170 | * Anyway, a test consists of: | ||
171 | * 1. Expected glob_match result: '1' or '0'. | ||
172 | * 2. Pattern to match: null-terminated string | ||
173 | * 3. String to match against: null-terminated string | ||
174 | * | ||
175 | * The list of tests is terminated with a final '\0' instead of | ||
176 | * a glob_match result character. | ||
177 | */ | ||
178 | static char const glob_tests[] __initconst = | ||
179 | /* Some basic tests */ | ||
180 | "1" "a\0" "a\0" | ||
181 | "0" "a\0" "b\0" | ||
182 | "0" "a\0" "aa\0" | ||
183 | "0" "a\0" "\0" | ||
184 | "1" "\0" "\0" | ||
185 | "0" "\0" "a\0" | ||
186 | /* Simple character class tests */ | ||
187 | "1" "[a]\0" "a\0" | ||
188 | "0" "[a]\0" "b\0" | ||
189 | "0" "[!a]\0" "a\0" | ||
190 | "1" "[!a]\0" "b\0" | ||
191 | "1" "[ab]\0" "a\0" | ||
192 | "1" "[ab]\0" "b\0" | ||
193 | "0" "[ab]\0" "c\0" | ||
194 | "1" "[!ab]\0" "c\0" | ||
195 | "1" "[a-c]\0" "b\0" | ||
196 | "0" "[a-c]\0" "d\0" | ||
197 | /* Corner cases in character class parsing */ | ||
198 | "1" "[a-c-e-g]\0" "-\0" | ||
199 | "0" "[a-c-e-g]\0" "d\0" | ||
200 | "1" "[a-c-e-g]\0" "f\0" | ||
201 | "1" "[]a-ceg-ik[]\0" "a\0" | ||
202 | "1" "[]a-ceg-ik[]\0" "]\0" | ||
203 | "1" "[]a-ceg-ik[]\0" "[\0" | ||
204 | "1" "[]a-ceg-ik[]\0" "h\0" | ||
205 | "0" "[]a-ceg-ik[]\0" "f\0" | ||
206 | "0" "[!]a-ceg-ik[]\0" "h\0" | ||
207 | "0" "[!]a-ceg-ik[]\0" "]\0" | ||
208 | "1" "[!]a-ceg-ik[]\0" "f\0" | ||
209 | /* Simple wild cards */ | ||
210 | "1" "?\0" "a\0" | ||
211 | "0" "?\0" "aa\0" | ||
212 | "0" "??\0" "a\0" | ||
213 | "1" "?x?\0" "axb\0" | ||
214 | "0" "?x?\0" "abx\0" | ||
215 | "0" "?x?\0" "xab\0" | ||
216 | /* Asterisk wild cards (backtracking) */ | ||
217 | "0" "*??\0" "a\0" | ||
218 | "1" "*??\0" "ab\0" | ||
219 | "1" "*??\0" "abc\0" | ||
220 | "1" "*??\0" "abcd\0" | ||
221 | "0" "??*\0" "a\0" | ||
222 | "1" "??*\0" "ab\0" | ||
223 | "1" "??*\0" "abc\0" | ||
224 | "1" "??*\0" "abcd\0" | ||
225 | "0" "?*?\0" "a\0" | ||
226 | "1" "?*?\0" "ab\0" | ||
227 | "1" "?*?\0" "abc\0" | ||
228 | "1" "?*?\0" "abcd\0" | ||
229 | "1" "*b\0" "b\0" | ||
230 | "1" "*b\0" "ab\0" | ||
231 | "0" "*b\0" "ba\0" | ||
232 | "1" "*b\0" "bb\0" | ||
233 | "1" "*b\0" "abb\0" | ||
234 | "1" "*b\0" "bab\0" | ||
235 | "1" "*bc\0" "abbc\0" | ||
236 | "1" "*bc\0" "bc\0" | ||
237 | "1" "*bc\0" "bbc\0" | ||
238 | "1" "*bc\0" "bcbc\0" | ||
239 | /* Multiple asterisks (complex backtracking) */ | ||
240 | "1" "*ac*\0" "abacadaeafag\0" | ||
241 | "1" "*ac*ae*ag*\0" "abacadaeafag\0" | ||
242 | "1" "*a*b*[bc]*[ef]*g*\0" "abacadaeafag\0" | ||
243 | "0" "*a*b*[ef]*[cd]*g*\0" "abacadaeafag\0" | ||
244 | "1" "*abcd*\0" "abcabcabcabcdefg\0" | ||
245 | "1" "*ab*cd*\0" "abcabcabcabcdefg\0" | ||
246 | "1" "*abcd*abcdef*\0" "abcabcdabcdeabcdefg\0" | ||
247 | "0" "*abcd*\0" "abcabcabcabcefg\0" | ||
248 | "0" "*ab*cd*\0" "abcabcabcabcefg\0"; | ||
249 | |||
250 | static int __init glob_init(void) | ||
251 | { | ||
252 | unsigned successes = 0; | ||
253 | unsigned n = 0; | ||
254 | char const *p = glob_tests; | ||
255 | static char const message[] __initconst = | ||
256 | KERN_INFO "glob: %u self-tests passed, %u failed\n"; | ||
257 | |||
258 | /* | ||
259 | * Tests are jammed together in a string. The first byte is '1' | ||
260 | * or '0' to indicate the expected outcome, or '\0' to indicate the | ||
261 | * end of the tests. Then come two null-terminated strings: the | ||
262 | * pattern and the string to match it against. | ||
263 | */ | ||
264 | while (*p) { | ||
265 | bool expected = *p++ & 1; | ||
266 | char const *pat = p; | ||
267 | |||
268 | p += strlen(p) + 1; | ||
269 | successes += test(pat, p, expected); | ||
270 | p += strlen(p) + 1; | ||
271 | n++; | ||
272 | } | ||
273 | |||
274 | n -= successes; | ||
275 | printk(message, successes, n); | ||
276 | |||
277 | /* What's the errno for "kernel bug detected"? Guess... */ | ||
278 | return n ? -ECANCELED : 0; | ||
279 | } | ||
280 | |||
281 | /* We need a dummy exit function to allow unload */ | ||
282 | static void __exit glob_fini(void) { } | ||
283 | |||
284 | module_init(glob_init); | ||
285 | module_exit(glob_fini); | ||
286 | |||
287 | #endif /* CONFIG_GLOB_SELFTEST */ | ||
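
Editor's note: the hunk above removes the in-file selftest (it reappears as lib/globtest.c in the next diff), leaving glob.c to export only glob_match(). For a feel of the `*`/`?`/`[...]` semantics the removed tests exercise, POSIX fnmatch() with no flags is a close userspace analogue (assumption: glob_match, like flag-less fnmatch, gives `/` and leading dots no special treatment; corner cases of character-class parsing may still differ between implementations):

```c
/*
 * A few of the table's "expected / pattern / string" triples,
 * checked with POSIX fnmatch() as a userspace stand-in for
 * glob_match(). fnmatch() returns 0 on a match.
 */
#include <assert.h>
#include <fnmatch.h>

int main(void)
{
	assert(fnmatch("a", "a", 0) == 0);	/* "1" "a" "a"      */
	assert(fnmatch("a", "aa", 0) != 0);	/* "0" "a" "aa"     */
	assert(fnmatch("[!a]", "b", 0) == 0);	/* "1" "[!a]" "b"   */
	assert(fnmatch("?x?", "axb", 0) == 0);	/* "1" "?x?" "axb"  */
	assert(fnmatch("*bc", "abbc", 0) == 0);	/* "1" "*bc" "abbc" */
	assert(fnmatch("*ab*cd*", "abcabcabcabcefg", 0) != 0);
	return 0;
}
```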
diff --git a/lib/globtest.c b/lib/globtest.c new file mode 100644 index 000000000000..d8e97d43b905 --- /dev/null +++ b/lib/globtest.c | |||
@@ -0,0 +1,167 @@ | |||
1 | /* | ||
2 | * Extracted from glob.c | ||
3 | */ | ||
4 | |||
5 | #include <linux/module.h> | ||
6 | #include <linux/moduleparam.h> | ||
7 | #include <linux/glob.h> | ||
8 | #include <linux/printk.h> | ||
9 | |||
10 | /* Boot with "glob.verbose=1" to show successful tests, too */ | ||
11 | static bool verbose = false; | ||
12 | module_param(verbose, bool, 0); | ||
13 | |||
14 | struct glob_test { | ||
15 | char const *pat, *str; | ||
16 | bool expected; | ||
17 | }; | ||
18 | |||
19 | static bool __pure __init test(char const *pat, char const *str, bool expected) | ||
20 | { | ||
21 | bool match = glob_match(pat, str); | ||
22 | bool success = match == expected; | ||
23 | |||
24 | /* Can't get string literals into a particular section, so... */ | ||
25 | static char const msg_error[] __initconst = | ||
26 | KERN_ERR "glob: \"%s\" vs. \"%s\": %s *** ERROR ***\n"; | ||
27 | static char const msg_ok[] __initconst = | ||
28 | KERN_DEBUG "glob: \"%s\" vs. \"%s\": %s OK\n"; | ||
29 | static char const mismatch[] __initconst = "mismatch"; | ||
30 | char const *message; | ||
31 | |||
32 | if (!success) | ||
33 | message = msg_error; | ||
34 | else if (verbose) | ||
35 | message = msg_ok; | ||
36 | else | ||
37 | return success; | ||
38 | |||
39 | printk(message, pat, str, mismatch + 3*match); | ||
40 | return success; | ||
41 | } | ||
42 | |||
43 | /* | ||
44 | * The tests are all jammed together in one array to make it simpler | ||
45 | * to place that array in the .init.rodata section. The obvious | ||
46 | * "array of structures containing char *" has no way to force the | ||
47 | * pointed-to strings to be in a particular section. | ||
48 | * | ||
49 | * Anyway, a test consists of: | ||
50 | * 1. Expected glob_match result: '1' or '0'. | ||
51 | * 2. Pattern to match: null-terminated string | ||
52 | * 3. String to match against: null-terminated string | ||
53 | * | ||
54 | * The list of tests is terminated with a final '\0' instead of | ||
55 | * a glob_match result character. | ||
56 | */ | ||
57 | static char const glob_tests[] __initconst = | ||
58 | /* Some basic tests */ | ||
59 | "1" "a\0" "a\0" | ||
60 | "0" "a\0" "b\0" | ||
61 | "0" "a\0" "aa\0" | ||
62 | "0" "a\0" "\0" | ||
63 | "1" "\0" "\0" | ||
64 | "0" "\0" "a\0" | ||
65 | /* Simple character class tests */ | ||
66 | "1" "[a]\0" "a\0" | ||
67 | "0" "[a]\0" "b\0" | ||
68 | "0" "[!a]\0" "a\0" | ||
69 | "1" "[!a]\0" "b\0" | ||
70 | "1" "[ab]\0" "a\0" | ||
71 | "1" "[ab]\0" "b\0" | ||
72 | "0" "[ab]\0" "c\0" | ||
73 | "1" "[!ab]\0" "c\0" | ||
74 | "1" "[a-c]\0" "b\0" | ||
75 | "0" "[a-c]\0" "d\0" | ||
76 | /* Corner cases in character class parsing */ | ||
77 | "1" "[a-c-e-g]\0" "-\0" | ||
78 | "0" "[a-c-e-g]\0" "d\0" | ||
79 | "1" "[a-c-e-g]\0" "f\0" | ||
80 | "1" "[]a-ceg-ik[]\0" "a\0" | ||
81 | "1" "[]a-ceg-ik[]\0" "]\0" | ||
82 | "1" "[]a-ceg-ik[]\0" "[\0" | ||
83 | "1" "[]a-ceg-ik[]\0" "h\0" | ||
84 | "0" "[]a-ceg-ik[]\0" "f\0" | ||
85 | "0" "[!]a-ceg-ik[]\0" "h\0" | ||
86 | "0" "[!]a-ceg-ik[]\0" "]\0" | ||
87 | "1" "[!]a-ceg-ik[]\0" "f\0" | ||
88 | /* Simple wild cards */ | ||
89 | "1" "?\0" "a\0" | ||
90 | "0" "?\0" "aa\0" | ||
91 | "0" "??\0" "a\0" | ||
92 | "1" "?x?\0" "axb\0" | ||
93 | "0" "?x?\0" "abx\0" | ||
94 | "0" "?x?\0" "xab\0" | ||
95 | /* Asterisk wild cards (backtracking) */ | ||
96 | "0" "*??\0" "a\0" | ||
97 | "1" "*??\0" "ab\0" | ||
98 | "1" "*??\0" "abc\0" | ||
99 | "1" "*??\0" "abcd\0" | ||
100 | "0" "??*\0" "a\0" | ||
101 | "1" "??*\0" "ab\0" | ||
102 | "1" "??*\0" "abc\0" | ||
103 | "1" "??*\0" "abcd\0" | ||
104 | "0" "?*?\0" "a\0" | ||
105 | "1" "?*?\0" "ab\0" | ||
106 | "1" "?*?\0" "abc\0" | ||
107 | "1" "?*?\0" "abcd\0" | ||
108 | "1" "*b\0" "b\0" | ||
109 | "1" "*b\0" "ab\0" | ||
110 | "0" "*b\0" "ba\0" | ||
111 | "1" "*b\0" "bb\0" | ||
112 | "1" "*b\0" "abb\0" | ||
113 | "1" "*b\0" "bab\0" | ||
114 | "1" "*bc\0" "abbc\0" | ||
115 | "1" "*bc\0" "bc\0" | ||
116 | "1" "*bc\0" "bbc\0" | ||
117 | "1" "*bc\0" "bcbc\0" | ||
118 | /* Multiple asterisks (complex backtracking) */ | ||
119 | "1" "*ac*\0" "abacadaeafag\0" | ||
120 | "1" "*ac*ae*ag*\0" "abacadaeafag\0" | ||
121 | "1" "*a*b*[bc]*[ef]*g*\0" "abacadaeafag\0" | ||
122 | "0" "*a*b*[ef]*[cd]*g*\0" "abacadaeafag\0" | ||
123 | "1" "*abcd*\0" "abcabcabcabcdefg\0" | ||
124 | "1" "*ab*cd*\0" "abcabcabcabcdefg\0" | ||
125 | "1" "*abcd*abcdef*\0" "abcabcdabcdeabcdefg\0" | ||
126 | "0" "*abcd*\0" "abcabcabcabcefg\0" | ||
127 | "0" "*ab*cd*\0" "abcabcabcabcefg\0"; | ||
128 | |||
129 | static int __init glob_init(void) | ||
130 | { | ||
131 | unsigned successes = 0; | ||
132 | unsigned n = 0; | ||
133 | char const *p = glob_tests; | ||
134 | static char const message[] __initconst = | ||
135 | KERN_INFO "glob: %u self-tests passed, %u failed\n"; | ||
136 | |||
137 | /* | ||
138 | * Tests are jammed together in a string. The first byte is '1' | ||
139 | * or '0' to indicate the expected outcome, or '\0' to indicate the | ||
140 | * end of the tests. Then come two null-terminated strings: the | ||
141 | * pattern and the string to match it against. | ||
142 | */ | ||
143 | while (*p) { | ||
144 | bool expected = *p++ & 1; | ||
145 | char const *pat = p; | ||
146 | |||
147 | p += strlen(p) + 1; | ||
148 | successes += test(pat, p, expected); | ||
149 | p += strlen(p) + 1; | ||
150 | n++; | ||
151 | } | ||
152 | |||
153 | n -= successes; | ||
154 | printk(message, successes, n); | ||
155 | |||
156 | /* What's the errno for "kernel bug detected"? Guess... */ | ||
157 | return n ? -ECANCELED : 0; | ||
158 | } | ||
159 | |||
160 | /* We need a dummy exit function to allow unload */ | ||
161 | static void __exit glob_fini(void) { } | ||
162 | |||
163 | module_init(glob_init); | ||
164 | module_exit(glob_fini); | ||
165 | |||
166 | MODULE_DESCRIPTION("glob(7) matching tests"); | ||
167 | MODULE_LICENSE("Dual MIT/GPL"); | ||
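
Editor's note: as the comments in the file explain, the tests are packed into a single char array so the strings land in .init.rodata, and glob_init() walks the array with strlen(). A stand-alone sketch of that walk over the same format (hypothetical, printing each triple instead of running the match):

```c
/*
 * Walk of the packed "result, pattern, string" format used by
 * glob_tests: one expectation byte, then two NUL-terminated
 * strings; the string literal's own trailing '\0' ends the table.
 */
#include <stdio.h>
#include <string.h>

static const char table[] =
	"1" "a\0"    "a\0"
	"0" "a\0"    "b\0"
	"1" "[ab]\0" "b\0";

int main(void)
{
	const char *p = table;

	while (*p) {
		int expected = *p++ & 1;	/* '1' -> 1, '0' -> 0 */
		const char *pat = p;

		p += strlen(p) + 1;		/* skip past pattern */
		printf("expect %d: \"%s\" vs \"%s\"\n", expected, pat, p);
		p += strlen(p) + 1;		/* skip past string  */
	}
	return 0;
}
```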
diff --git a/lib/list_debug.c b/lib/list_debug.c index 7f7bfa55eb6d..a34db8d27667 100644 --- a/lib/list_debug.c +++ b/lib/list_debug.c | |||
@@ -20,15 +20,16 @@ | |||
20 | bool __list_add_valid(struct list_head *new, struct list_head *prev, | 20 | bool __list_add_valid(struct list_head *new, struct list_head *prev, |
21 | struct list_head *next) | 21 | struct list_head *next) |
22 | { | 22 | { |
23 | CHECK_DATA_CORRUPTION(next->prev != prev, | 23 | if (CHECK_DATA_CORRUPTION(next->prev != prev, |
24 | "list_add corruption. next->prev should be prev (%p), but was %p. (next=%p).\n", | 24 | "list_add corruption. next->prev should be prev (%p), but was %p. (next=%p).\n", |
25 | prev, next->prev, next); | 25 | prev, next->prev, next) || |
26 | CHECK_DATA_CORRUPTION(prev->next != next, | 26 | CHECK_DATA_CORRUPTION(prev->next != next, |
27 | "list_add corruption. prev->next should be next (%p), but was %p. (prev=%p).\n", | 27 | "list_add corruption. prev->next should be next (%p), but was %p. (prev=%p).\n", |
28 | next, prev->next, prev); | 28 | next, prev->next, prev) || |
29 | CHECK_DATA_CORRUPTION(new == prev || new == next, | 29 | CHECK_DATA_CORRUPTION(new == prev || new == next, |
30 | "list_add double add: new=%p, prev=%p, next=%p.\n", | 30 | "list_add double add: new=%p, prev=%p, next=%p.\n", |
31 | new, prev, next); | 31 | new, prev, next)) |
32 | return false; | ||
32 | 33 | ||
33 | return true; | 34 | return true; |
34 | } | 35 | } |
@@ -41,18 +42,20 @@ bool __list_del_entry_valid(struct list_head *entry) | |||
41 | prev = entry->prev; | 42 | prev = entry->prev; |
42 | next = entry->next; | 43 | next = entry->next; |
43 | 44 | ||
44 | CHECK_DATA_CORRUPTION(next == LIST_POISON1, | 45 | if (CHECK_DATA_CORRUPTION(next == LIST_POISON1, |
45 | "list_del corruption, %p->next is LIST_POISON1 (%p)\n", | 46 | "list_del corruption, %p->next is LIST_POISON1 (%p)\n", |
46 | entry, LIST_POISON1); | 47 | entry, LIST_POISON1) || |
47 | CHECK_DATA_CORRUPTION(prev == LIST_POISON2, | 48 | CHECK_DATA_CORRUPTION(prev == LIST_POISON2, |
48 | "list_del corruption, %p->prev is LIST_POISON2 (%p)\n", | 49 | "list_del corruption, %p->prev is LIST_POISON2 (%p)\n", |
49 | entry, LIST_POISON2); | 50 | entry, LIST_POISON2) || |
50 | CHECK_DATA_CORRUPTION(prev->next != entry, | 51 | CHECK_DATA_CORRUPTION(prev->next != entry, |
51 | "list_del corruption. prev->next should be %p, but was %p\n", | 52 | "list_del corruption. prev->next should be %p, but was %p\n", |
52 | entry, prev->next); | 53 | entry, prev->next) || |
53 | CHECK_DATA_CORRUPTION(next->prev != entry, | 54 | CHECK_DATA_CORRUPTION(next->prev != entry, |
54 | "list_del corruption. next->prev should be %p, but was %p\n", | 55 | "list_del corruption. next->prev should be %p, but was %p\n", |
55 | entry, next->prev); | 56 | entry, next->prev)) |
57 | return false; | ||
58 | |||
56 | return true; | 59 | return true; |
57 | 60 | ||
58 | } | 61 | } |
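
Editor's note: the list_debug change above turns independent corruption checks into one short-circuiting `||` chain, so the validator returns false at the first failing check instead of falling through to `return true` regardless. A minimal sketch of that shape, with a hypothetical check_corrupt() macro standing in for the kernel's CHECK_DATA_CORRUPTION() (which can additionally be configured to BUG(); GNU statement expressions assumed, as in kernel code):

```c
/*
 * "Warn and report" pattern: the macro evaluates to true when the
 * condition fires, so callers can chain checks with || and bail
 * out on the first corruption, as __list_add_valid() now does.
 */
#include <stdbool.h>
#include <stdio.h>

#define check_corrupt(cond, fmt, ...) ({		\
	bool _bad = (cond);				\
	if (_bad)					\
		fprintf(stderr, fmt, ##__VA_ARGS__);	\
	_bad;						\
})

struct node { struct node *prev, *next; };

static bool add_valid(struct node *new, struct node *prev,
		      struct node *next)
{
	if (check_corrupt(next->prev != prev,
			  "corruption: next->prev is not prev\n") ||
	    check_corrupt(new == prev || new == next,
			  "double add: new already linked\n"))
		return false;	/* first failing check wins */

	return true;
}

int main(void)
{
	struct node a, b, n;

	a.next = &b; a.prev = &b;
	b.next = &a; b.prev = &a;

	printf("valid insert: %d\n", add_valid(&n, &a, &b)); /* 1 */
	printf("double add:   %d\n", add_valid(&a, &a, &b)); /* 0 */
	return 0;
}
```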
diff --git a/lib/lz4/Makefile b/lib/lz4/Makefile index 8085d04e9309..f7b113271d13 100644 --- a/lib/lz4/Makefile +++ b/lib/lz4/Makefile | |||
@@ -1,3 +1,5 @@ | |||
1 | ccflags-y += -O3 | ||
2 | |||
1 | obj-$(CONFIG_LZ4_COMPRESS) += lz4_compress.o | 3 | obj-$(CONFIG_LZ4_COMPRESS) += lz4_compress.o |
2 | obj-$(CONFIG_LZ4HC_COMPRESS) += lz4hc_compress.o | 4 | obj-$(CONFIG_LZ4HC_COMPRESS) += lz4hc_compress.o |
3 | obj-$(CONFIG_LZ4_DECOMPRESS) += lz4_decompress.o | 5 | obj-$(CONFIG_LZ4_DECOMPRESS) += lz4_decompress.o |
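
Editor's note: besides building the LZ4 objects with -O3, this update gives the compressor (next diff) an acceleration parameter that trades ratio for speed by skipping match search more aggressively. A userspace sketch of the knob with liblz4 (link with -llz4; the in-kernel LZ4_compress_fast() additionally takes a wrkmem argument for the hash table):

```c
/*
 * Acceleration sweep, assuming userspace liblz4 (-llz4).
 * Higher acceleration values compress faster but worse.
 */
#include <stdio.h>
#include <string.h>
#include <lz4.h>

int main(void)
{
	static char src[64 * 1024];
	static char dst[LZ4_COMPRESSBOUND(sizeof(src))];
	int accel;

	memset(src, 'x', sizeof(src));	/* trivially compressible */

	for (accel = 1; accel <= 8; accel *= 2) {
		int n = LZ4_compress_fast(src, dst, sizeof(src),
					  sizeof(dst), accel);
		printf("acceleration %d -> %d bytes\n", accel, n);
	}
	return 0;
}
```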
diff --git a/lib/lz4/lz4_compress.c b/lib/lz4/lz4_compress.c index 28321d8f75ef..cc7b6d4cc7c7 100644 --- a/lib/lz4/lz4_compress.c +++ b/lib/lz4/lz4_compress.c | |||
@@ -1,19 +1,16 @@ | |||
1 | /* | 1 | /* |
2 | * LZ4 - Fast LZ compression algorithm | 2 | * LZ4 - Fast LZ compression algorithm |
3 | * Copyright (C) 2011-2012, Yann Collet. | 3 | * Copyright (C) 2011 - 2016, Yann Collet. |
4 | * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) | 4 | * BSD 2 - Clause License (http://www.opensource.org/licenses/bsd - license.php) |
5 | |||
6 | * Redistribution and use in source and binary forms, with or without | 5 | * Redistribution and use in source and binary forms, with or without |
7 | * modification, are permitted provided that the following conditions are | 6 | * modification, are permitted provided that the following conditions are |
8 | * met: | 7 | * met: |
9 | * | 8 | * * Redistributions of source code must retain the above copyright |
10 | * * Redistributions of source code must retain the above copyright | 9 | * notice, this list of conditions and the following disclaimer. |
11 | * notice, this list of conditions and the following disclaimer. | 10 | * * Redistributions in binary form must reproduce the above |
12 | * * Redistributions in binary form must reproduce the above | ||
13 | * copyright notice, this list of conditions and the following disclaimer | 11 | * copyright notice, this list of conditions and the following disclaimer |
14 | * in the documentation and/or other materials provided with the | 12 | * in the documentation and/or other materials provided with the |
15 | * distribution. | 13 | * distribution. |
16 | * | ||
17 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | 14 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
18 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | 15 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
19 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | 16 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
@@ -25,419 +22,919 @@ | |||
25 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 22 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
26 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 23 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
27 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 24 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
28 | * | ||
29 | * You can contact the author at : | 25 | * You can contact the author at : |
30 | * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html | 26 | * - LZ4 homepage : http://www.lz4.org |
31 | * - LZ4 source repository : http://code.google.com/p/lz4/ | 27 | * - LZ4 source repository : https://github.com/lz4/lz4 |
32 | * | 28 | * |
33 | * Changed for kernel use by: | 29 | * Changed for kernel usage by: |
34 | * Chanho Min <chanho.min@lge.com> | 30 | * Sven Schmidt <4sschmid@informatik.uni-hamburg.de> |
35 | */ | 31 | */ |
36 | 32 | ||
33 | /*-************************************ | ||
34 | * Dependencies | ||
35 | **************************************/ | ||
36 | #include <linux/lz4.h> | ||
37 | #include "lz4defs.h" | ||
37 | #include <linux/module.h> | 38 | #include <linux/module.h> |
38 | #include <linux/kernel.h> | 39 | #include <linux/kernel.h> |
39 | #include <linux/lz4.h> | ||
40 | #include <asm/unaligned.h> | 40 | #include <asm/unaligned.h> |
41 | #include "lz4defs.h" | ||
42 | 41 | ||
43 | /* | 42 | static const int LZ4_minLength = (MFLIMIT + 1); |
44 | * LZ4_compressCtx : | 43 | static const int LZ4_64Klimit = ((64 * KB) + (MFLIMIT - 1)); |
45 | * ----------------- | 44 | |
46 | * Compress 'isize' bytes from 'source' into an output buffer 'dest' of | 45 | /*-****************************** |
47 | * maximum size 'maxOutputSize'. * If it cannot achieve it, compression | 46 | * Compression functions |
48 | * will stop, and result of the function will be zero. | 47 | ********************************/ |
49 | * return : the number of bytes written in buffer 'dest', or 0 if the | 48 | static FORCE_INLINE U32 LZ4_hash4( |
50 | * compression fails | 49 | U32 sequence, |
51 | */ | 50 | tableType_t const tableType) |
52 | static inline int lz4_compressctx(void *ctx, | ||
53 | const char *source, | ||
54 | char *dest, | ||
55 | int isize, | ||
56 | int maxoutputsize) | ||
57 | { | 51 | { |
58 | HTYPE *hashtable = (HTYPE *)ctx; | 52 | if (tableType == byU16) |
59 | const u8 *ip = (u8 *)source; | 53 | return ((sequence * 2654435761U) |
60 | #if LZ4_ARCH64 | 54 | >> ((MINMATCH * 8) - (LZ4_HASHLOG + 1))); |
61 | const BYTE * const base = ip; | 55 | else |
56 | return ((sequence * 2654435761U) | ||
57 | >> ((MINMATCH * 8) - LZ4_HASHLOG)); | ||
58 | } | ||
59 | |||
60 | static FORCE_INLINE U32 LZ4_hash5( | ||
61 | U64 sequence, | ||
62 | tableType_t const tableType) | ||
63 | { | ||
64 | const U32 hashLog = (tableType == byU16) | ||
65 | ? LZ4_HASHLOG + 1 | ||
66 | : LZ4_HASHLOG; | ||
67 | |||
68 | #if LZ4_LITTLE_ENDIAN | ||
69 | static const U64 prime5bytes = 889523592379ULL; | ||
70 | |||
71 | return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog)); | ||
62 | #else | 72 | #else |
63 | const int base = 0; | 73 | static const U64 prime8bytes = 11400714785074694791ULL; |
74 | |||
75 | return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog)); | ||
64 | #endif | 76 | #endif |
65 | const u8 *anchor = ip; | 77 | } |
66 | const u8 *const iend = ip + isize; | 78 | |
67 | const u8 *const mflimit = iend - MFLIMIT; | 79 | static FORCE_INLINE U32 LZ4_hashPosition( |
68 | #define MATCHLIMIT (iend - LASTLITERALS) | 80 | const void *p, |
69 | 81 | tableType_t const tableType) | |
70 | u8 *op = (u8 *) dest; | 82 | { |
71 | u8 *const oend = op + maxoutputsize; | 83 | #if LZ4_ARCH64 |
72 | int length; | 84 | if (tableType == byU32) |
73 | const int skipstrength = SKIPSTRENGTH; | 85 | return LZ4_hash5(LZ4_read_ARCH(p), tableType); |
74 | u32 forwardh; | 86 | #endif |
75 | int lastrun; | 87 | |
76 | 88 | return LZ4_hash4(LZ4_read32(p), tableType); | |
77 | /* Init */ | 89 | } |
78 | if (isize < MINLENGTH) | 90 | |
79 | goto _last_literals; | 91 | static void LZ4_putPositionOnHash( |
92 | const BYTE *p, | ||
93 | U32 h, | ||
94 | void *tableBase, | ||
95 | tableType_t const tableType, | ||
96 | const BYTE *srcBase) | ||
97 | { | ||
98 | switch (tableType) { | ||
99 | case byPtr: | ||
100 | { | ||
101 | const BYTE **hashTable = (const BYTE **)tableBase; | ||
102 | |||
103 | hashTable[h] = p; | ||
104 | return; | ||
105 | } | ||
106 | case byU32: | ||
107 | { | ||
108 | U32 *hashTable = (U32 *) tableBase; | ||
109 | |||
110 | hashTable[h] = (U32)(p - srcBase); | ||
111 | return; | ||
112 | } | ||
113 | case byU16: | ||
114 | { | ||
115 | U16 *hashTable = (U16 *) tableBase; | ||
116 | |||
117 | hashTable[h] = (U16)(p - srcBase); | ||
118 | return; | ||
119 | } | ||
120 | } | ||
121 | } | ||
122 | |||
123 | static FORCE_INLINE void LZ4_putPosition( | ||
124 | const BYTE *p, | ||
125 | void *tableBase, | ||
126 | tableType_t tableType, | ||
127 | const BYTE *srcBase) | ||
128 | { | ||
129 | U32 const h = LZ4_hashPosition(p, tableType); | ||
130 | |||
131 | LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase); | ||
132 | } | ||
133 | |||
134 | static const BYTE *LZ4_getPositionOnHash( | ||
135 | U32 h, | ||
136 | void *tableBase, | ||
137 | tableType_t tableType, | ||
138 | const BYTE *srcBase) | ||
139 | { | ||
140 | if (tableType == byPtr) { | ||
141 | const BYTE **hashTable = (const BYTE **) tableBase; | ||
142 | |||
143 | return hashTable[h]; | ||
144 | } | ||
145 | |||
146 | if (tableType == byU32) { | ||
147 | const U32 * const hashTable = (U32 *) tableBase; | ||
148 | |||
149 | return hashTable[h] + srcBase; | ||
150 | } | ||
151 | |||
152 | { | ||
153 | /* default, to ensure a return */ | ||
154 | const U16 * const hashTable = (U16 *) tableBase; | ||
155 | |||
156 | return hashTable[h] + srcBase; | ||
157 | } | ||
158 | } | ||
159 | |||
160 | static FORCE_INLINE const BYTE *LZ4_getPosition( | ||
161 | const BYTE *p, | ||
162 | void *tableBase, | ||
163 | tableType_t tableType, | ||
164 | const BYTE *srcBase) | ||
165 | { | ||
166 | U32 const h = LZ4_hashPosition(p, tableType); | ||
167 | |||
168 | return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase); | ||
169 | } | ||
80 | 170 | ||
81 | memset((void *)hashtable, 0, LZ4_MEM_COMPRESS); | 171 | |
172 | /* | ||
173 | * LZ4_compress_generic() : | ||
174 | * inlined, to ensure branches are decided at compilation time | ||
175 | */ | ||
176 | static FORCE_INLINE int LZ4_compress_generic( | ||
177 | LZ4_stream_t_internal * const dictPtr, | ||
178 | const char * const source, | ||
179 | char * const dest, | ||
180 | const int inputSize, | ||
181 | const int maxOutputSize, | ||
182 | const limitedOutput_directive outputLimited, | ||
183 | const tableType_t tableType, | ||
184 | const dict_directive dict, | ||
185 | const dictIssue_directive dictIssue, | ||
186 | const U32 acceleration) | ||
187 | { | ||
188 | const BYTE *ip = (const BYTE *) source; | ||
189 | const BYTE *base; | ||
190 | const BYTE *lowLimit; | ||
191 | const BYTE * const lowRefLimit = ip - dictPtr->dictSize; | ||
192 | const BYTE * const dictionary = dictPtr->dictionary; | ||
193 | const BYTE * const dictEnd = dictionary + dictPtr->dictSize; | ||
194 | const size_t dictDelta = dictEnd - (const BYTE *)source; | ||
195 | const BYTE *anchor = (const BYTE *) source; | ||
196 | const BYTE * const iend = ip + inputSize; | ||
197 | const BYTE * const mflimit = iend - MFLIMIT; | ||
198 | const BYTE * const matchlimit = iend - LASTLITERALS; | ||
199 | |||
200 | BYTE *op = (BYTE *) dest; | ||
201 | BYTE * const olimit = op + maxOutputSize; | ||
202 | |||
203 | U32 forwardH; | ||
204 | size_t refDelta = 0; | ||
205 | |||
206 | /* Init conditions */ | ||
207 | if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) { | ||
208 | /* Unsupported inputSize, too large (or negative) */ | ||
209 | return 0; | ||
210 | } | ||
211 | |||
212 | switch (dict) { | ||
213 | case noDict: | ||
214 | default: | ||
215 | base = (const BYTE *)source; | ||
216 | lowLimit = (const BYTE *)source; | ||
217 | break; | ||
218 | case withPrefix64k: | ||
219 | base = (const BYTE *)source - dictPtr->currentOffset; | ||
220 | lowLimit = (const BYTE *)source - dictPtr->dictSize; | ||
221 | break; | ||
222 | case usingExtDict: | ||
223 | base = (const BYTE *)source - dictPtr->currentOffset; | ||
224 | lowLimit = (const BYTE *)source; | ||
225 | break; | ||
226 | } | ||
227 | |||
228 | if ((tableType == byU16) | ||
229 | && (inputSize >= LZ4_64Klimit)) { | ||
230 | /* Size too large (not within 64K limit) */ | ||
231 | return 0; | ||
232 | } | ||
233 | |||
234 | if (inputSize < LZ4_minLength) { | ||
235 | /* Input too small, no compression (all literals) */ | ||
236 | goto _last_literals; | ||
237 | } | ||
82 | 238 | ||
83 | /* First Byte */ | 239 | /* First Byte */ |
84 | hashtable[LZ4_HASH_VALUE(ip)] = ip - base; | 240 | LZ4_putPosition(ip, dictPtr->hashTable, tableType, base); |
85 | ip++; | 241 | ip++; |
86 | forwardh = LZ4_HASH_VALUE(ip); | 242 | forwardH = LZ4_hashPosition(ip, tableType); |
87 | 243 | ||
88 | /* Main Loop */ | 244 | /* Main Loop */ |
89 | for (;;) { | 245 | for ( ; ; ) { |
90 | int findmatchattempts = (1U << skipstrength) + 3; | 246 | const BYTE *match; |
91 | const u8 *forwardip = ip; | 247 | BYTE *token; |
92 | const u8 *ref; | ||
93 | u8 *token; | ||
94 | 248 | ||
95 | /* Find a match */ | 249 | /* Find a match */ |
96 | do { | 250 | { |
97 | u32 h = forwardh; | 251 | const BYTE *forwardIp = ip; |
98 | int step = findmatchattempts++ >> skipstrength; | 252 | unsigned int step = 1; |
99 | ip = forwardip; | 253 | unsigned int searchMatchNb = acceleration << LZ4_SKIPTRIGGER; |
100 | forwardip = ip + step; | 254 | |
101 | 255 | do { | |
102 | if (unlikely(forwardip > mflimit)) | 256 | U32 const h = forwardH; |
103 | goto _last_literals; | 257 | |
104 | 258 | ip = forwardIp; | |
105 | forwardh = LZ4_HASH_VALUE(forwardip); | 259 | forwardIp += step; |
106 | ref = base + hashtable[h]; | 260 | step = (searchMatchNb++ >> LZ4_SKIPTRIGGER); |
107 | hashtable[h] = ip - base; | 261 | |
108 | } while ((ref < ip - MAX_DISTANCE) || (A32(ref) != A32(ip))); | 262 | if (unlikely(forwardIp > mflimit)) |
263 | goto _last_literals; | ||
264 | |||
265 | match = LZ4_getPositionOnHash(h, | ||
266 | dictPtr->hashTable, | ||
267 | tableType, base); | ||
268 | |||
269 | if (dict == usingExtDict) { | ||
270 | if (match < (const BYTE *)source) { | ||
271 | refDelta = dictDelta; | ||
272 | lowLimit = dictionary; | ||
273 | } else { | ||
274 | refDelta = 0; | ||
275 | lowLimit = (const BYTE *)source; | ||
276 | } } | ||
277 | |||
278 | forwardH = LZ4_hashPosition(forwardIp, | ||
279 | tableType); | ||
280 | |||
281 | LZ4_putPositionOnHash(ip, h, dictPtr->hashTable, | ||
282 | tableType, base); | ||
283 | } while (((dictIssue == dictSmall) | ||
284 | ? (match < lowRefLimit) | ||
285 | : 0) | ||
286 | || ((tableType == byU16) | ||
287 | ? 0 | ||
288 | : (match + MAX_DISTANCE < ip)) | ||
289 | || (LZ4_read32(match + refDelta) | ||
290 | != LZ4_read32(ip))); | ||
291 | } | ||
109 | 292 | ||
110 | /* Catch up */ | 293 | /* Catch up */ |
111 | while ((ip > anchor) && (ref > (u8 *)source) && | 294 | while (((ip > anchor) & (match + refDelta > lowLimit)) |
112 | unlikely(ip[-1] == ref[-1])) { | 295 | && (unlikely(ip[-1] == match[refDelta - 1]))) { |
113 | ip--; | 296 | ip--; |
114 | ref--; | 297 | match--; |
115 | } | 298 | } |
116 | 299 | ||
117 | /* Encode Literal length */ | 300 | /* Encode Literals */ |
118 | length = (int)(ip - anchor); | 301 | { |
119 | token = op++; | 302 | unsigned const int litLength = (unsigned int)(ip - anchor); |
120 | /* check output limit */ | ||
121 | if (unlikely(op + length + (2 + 1 + LASTLITERALS) + | ||
122 | (length >> 8) > oend)) | ||
123 | return 0; | ||
124 | 303 | ||
125 | if (length >= (int)RUN_MASK) { | 304 | token = op++; |
126 | int len; | 305 | |
127 | *token = (RUN_MASK << ML_BITS); | 306 | if ((outputLimited) && |
128 | len = length - RUN_MASK; | 307 | /* Check output buffer overflow */ |
129 | for (; len > 254 ; len -= 255) | 308 | (unlikely(op + litLength + |
130 | *op++ = 255; | 309 | (2 + 1 + LASTLITERALS) + |
131 | *op++ = (u8)len; | 310 | (litLength / 255) > olimit))) |
132 | } else | 311 | return 0; |
133 | *token = (length << ML_BITS); | 312 | |
313 | if (litLength >= RUN_MASK) { | ||
314 | int len = (int)litLength - RUN_MASK; | ||
315 | |||
316 | *token = (RUN_MASK << ML_BITS); | ||
317 | |||
318 | for (; len >= 255; len -= 255) | ||
319 | *op++ = 255; | ||
320 | *op++ = (BYTE)len; | ||
321 | } else | ||
322 | *token = (BYTE)(litLength << ML_BITS); | ||
323 | |||
324 | /* Copy Literals */ | ||
325 | LZ4_wildCopy(op, anchor, op + litLength); | ||
326 | op += litLength; | ||
327 | } | ||
134 | 328 | ||
135 | /* Copy Literals */ | ||
136 | LZ4_BLINDCOPY(anchor, op, length); | ||
137 | _next_match: | 329 | _next_match: |
138 | /* Encode Offset */ | 330 | /* Encode Offset */ |
139 | LZ4_WRITE_LITTLEENDIAN_16(op, (u16)(ip - ref)); | 331 | LZ4_writeLE16(op, (U16)(ip - match)); |
332 | op += 2; | ||
140 | 333 | ||
141 | /* Start Counting */ | ||
142 | ip += MINMATCH; | ||
143 | /* MinMatch verified */ | ||
144 | ref += MINMATCH; | ||
145 | anchor = ip; | ||
146 | while (likely(ip < MATCHLIMIT - (STEPSIZE - 1))) { | ||
147 | #if LZ4_ARCH64 | ||
148 | u64 diff = A64(ref) ^ A64(ip); | ||
149 | #else | ||
150 | u32 diff = A32(ref) ^ A32(ip); | ||
151 | #endif | ||
152 | if (!diff) { | ||
153 | ip += STEPSIZE; | ||
154 | ref += STEPSIZE; | ||
155 | continue; | ||
156 | } | ||
157 | ip += LZ4_NBCOMMONBYTES(diff); | ||
158 | goto _endcount; | ||
159 | } | ||
160 | #if LZ4_ARCH64 | ||
161 | if ((ip < (MATCHLIMIT - 3)) && (A32(ref) == A32(ip))) { | ||
162 | ip += 4; | ||
163 | ref += 4; | ||
164 | } | ||
165 | #endif | ||
166 | if ((ip < (MATCHLIMIT - 1)) && (A16(ref) == A16(ip))) { | ||
167 | ip += 2; | ||
168 | ref += 2; | ||
169 | } | ||
170 | if ((ip < MATCHLIMIT) && (*ref == *ip)) | ||
171 | ip++; | ||
172 | _endcount: | ||
173 | /* Encode MatchLength */ | 334 | /* Encode MatchLength */ |
174 | length = (int)(ip - anchor); | 335 | { |
175 | /* Check output limit */ | 336 | unsigned int matchCode; |
176 | if (unlikely(op + (1 + LASTLITERALS) + (length >> 8) > oend)) | 337 | |
177 | return 0; | 338 | if ((dict == usingExtDict) |
178 | if (length >= (int)ML_MASK) { | 339 | && (lowLimit == dictionary)) { |
179 | *token += ML_MASK; | 340 | const BYTE *limit; |
180 | length -= ML_MASK; | 341 | |
181 | for (; length > 509 ; length -= 510) { | 342 | match += refDelta; |
182 | *op++ = 255; | 343 | limit = ip + (dictEnd - match); |
183 | *op++ = 255; | 344 | |
184 | } | 345 | if (limit > matchlimit) |
185 | if (length > 254) { | 346 | limit = matchlimit; |
186 | length -= 255; | 347 | |
187 | *op++ = 255; | 348 | matchCode = LZ4_count(ip + MINMATCH, |
349 | match + MINMATCH, limit); | ||
350 | |||
351 | ip += MINMATCH + matchCode; | ||
352 | |||
353 | if (ip == limit) { | ||
354 | unsigned const int more = LZ4_count(ip, | ||
355 | (const BYTE *)source, | ||
356 | matchlimit); | ||
357 | |||
358 | matchCode += more; | ||
359 | ip += more; | ||
360 | } | ||
361 | } else { | ||
362 | matchCode = LZ4_count(ip + MINMATCH, | ||
363 | match + MINMATCH, matchlimit); | ||
364 | ip += MINMATCH + matchCode; | ||
188 | } | 365 | } |
189 | *op++ = (u8)length; | 366 | |
190 | } else | 367 | if (outputLimited && |
191 | *token += length; | 368 | /* Check output buffer overflow */ |
369 | (unlikely(op + | ||
370 | (1 + LASTLITERALS) + | ||
371 | (matchCode >> 8) > olimit))) | ||
372 | return 0; | ||
373 | |||
374 | if (matchCode >= ML_MASK) { | ||
375 | *token += ML_MASK; | ||
376 | matchCode -= ML_MASK; | ||
377 | LZ4_write32(op, 0xFFFFFFFF); | ||
378 | |||
379 | while (matchCode >= 4 * 255) { | ||
380 | op += 4; | ||
381 | LZ4_write32(op, 0xFFFFFFFF); | ||
382 | matchCode -= 4 * 255; | ||
383 | } | ||
384 | |||
385 | op += matchCode / 255; | ||
386 | *op++ = (BYTE)(matchCode % 255); | ||
387 | } else | ||
388 | *token += (BYTE)(matchCode); | ||
389 | } | ||
390 | |||
391 | anchor = ip; | ||
192 | 392 | ||
193 | /* Test end of chunk */ | 393 | /* Test end of chunk */ |
194 | if (ip > mflimit) { | 394 | if (ip > mflimit) |
195 | anchor = ip; | ||
196 | break; | 395 | break; |
197 | } | ||
198 | 396 | ||
199 | /* Fill table */ | 397 | /* Fill table */ |
200 | hashtable[LZ4_HASH_VALUE(ip-2)] = ip - 2 - base; | 398 | LZ4_putPosition(ip - 2, dictPtr->hashTable, tableType, base); |
201 | 399 | ||
202 | /* Test next position */ | 400 | /* Test next position */ |
203 | ref = base + hashtable[LZ4_HASH_VALUE(ip)]; | 401 | match = LZ4_getPosition(ip, dictPtr->hashTable, |
204 | hashtable[LZ4_HASH_VALUE(ip)] = ip - base; | 402 | tableType, base); |
205 | if ((ref > ip - (MAX_DISTANCE + 1)) && (A32(ref) == A32(ip))) { | 403 | |
404 | if (dict == usingExtDict) { | ||
405 | if (match < (const BYTE *)source) { | ||
406 | refDelta = dictDelta; | ||
407 | lowLimit = dictionary; | ||
408 | } else { | ||
409 | refDelta = 0; | ||
410 | lowLimit = (const BYTE *)source; | ||
411 | } | ||
412 | } | ||
413 | |||
414 | LZ4_putPosition(ip, dictPtr->hashTable, tableType, base); | ||
415 | |||
416 | if (((dictIssue == dictSmall) ? (match >= lowRefLimit) : 1) | ||
417 | && (match + MAX_DISTANCE >= ip) | ||
418 | && (LZ4_read32(match + refDelta) == LZ4_read32(ip))) { | ||
206 | token = op++; | 419 | token = op++; |
207 | *token = 0; | 420 | *token = 0; |
208 | goto _next_match; | 421 | goto _next_match; |
209 | } | 422 | } |
210 | 423 | ||
211 | /* Prepare next loop */ | 424 | /* Prepare next loop */ |
212 | anchor = ip++; | 425 | forwardH = LZ4_hashPosition(++ip, tableType); |
213 | forwardh = LZ4_HASH_VALUE(ip); | ||
214 | } | 426 | } |
215 | 427 | ||
216 | _last_literals: | 428 | _last_literals: |
217 | /* Encode Last Literals */ | 429 | /* Encode Last Literals */ |
218 | lastrun = (int)(iend - anchor); | 430 | { |
219 | if (((char *)op - dest) + lastrun + 1 | 431 | size_t const lastRun = (size_t)(iend - anchor); |
220 | + ((lastrun + 255 - RUN_MASK) / 255) > (u32)maxoutputsize) | 432 | |
221 | return 0; | 433 | if ((outputLimited) && |
434 | /* Check output buffer overflow */ | ||
435 | ((op - (BYTE *)dest) + lastRun + 1 + | ||
436 | ((lastRun + 255 - RUN_MASK) / 255) > (U32)maxOutputSize)) | ||
437 | return 0; | ||
438 | |||
439 | if (lastRun >= RUN_MASK) { | ||
440 | size_t accumulator = lastRun - RUN_MASK; | ||
441 | *op++ = RUN_MASK << ML_BITS; | ||
442 | for (; accumulator >= 255; accumulator -= 255) | ||
443 | *op++ = 255; | ||
444 | *op++ = (BYTE) accumulator; | ||
445 | } else { | ||
446 | *op++ = (BYTE)(lastRun << ML_BITS); | ||
447 | } | ||
222 | 448 | ||
223 | if (lastrun >= (int)RUN_MASK) { | 449 | memcpy(op, anchor, lastRun); |
224 | *op++ = (RUN_MASK << ML_BITS); | 450 | |
225 | lastrun -= RUN_MASK; | 451 | op += lastRun; |
226 | for (; lastrun > 254 ; lastrun -= 255) | 452 | } |
227 | *op++ = 255; | ||
228 | *op++ = (u8)lastrun; | ||
229 | } else | ||
230 | *op++ = (lastrun << ML_BITS); | ||
231 | memcpy(op, anchor, iend - anchor); | ||
232 | op += iend - anchor; | ||
233 | 453 | ||
234 | /* End */ | 454 | /* End */ |
235 | return (int)(((char *)op) - dest); | 455 | return (int) (((char *)op) - dest); |
236 | } | 456 | } |
237 | 457 | ||
238 | static inline int lz4_compress64kctx(void *ctx, | 458 | static int LZ4_compress_fast_extState( |
239 | const char *source, | 459 | void *state, |
240 | char *dest, | 460 | const char *source, |
241 | int isize, | 461 | char *dest, |
242 | int maxoutputsize) | 462 | int inputSize, |
463 | int maxOutputSize, | ||
464 | int acceleration) | ||
243 | { | 465 | { |
244 | u16 *hashtable = (u16 *)ctx; | 466 | LZ4_stream_t_internal *ctx = &((LZ4_stream_t *)state)->internal_donotuse; |
245 | const u8 *ip = (u8 *) source; | 467 | #if LZ4_ARCH64 |
246 | const u8 *anchor = ip; | 468 | const tableType_t tableType = byU32; |
247 | const u8 *const base = ip; | 469 | #else |
248 | const u8 *const iend = ip + isize; | 470 | const tableType_t tableType = byPtr; |
249 | const u8 *const mflimit = iend - MFLIMIT; | 471 | #endif |
250 | #define MATCHLIMIT (iend - LASTLITERALS) | 472 | |
251 | 473 | LZ4_resetStream((LZ4_stream_t *)state); | |
252 | u8 *op = (u8 *) dest; | 474 | |
253 | u8 *const oend = op + maxoutputsize; | 475 | if (acceleration < 1) |
254 | int len, length; | 476 | acceleration = LZ4_ACCELERATION_DEFAULT; |
255 | const int skipstrength = SKIPSTRENGTH; | 477 | |
256 | u32 forwardh; | 478 | if (maxOutputSize >= LZ4_COMPRESSBOUND(inputSize)) { |
257 | int lastrun; | 479 | if (inputSize < LZ4_64Klimit) |
258 | 480 | return LZ4_compress_generic(ctx, source, | |
259 | /* Init */ | 481 | dest, inputSize, 0, |
260 | if (isize < MINLENGTH) | 482 | noLimit, byU16, noDict, |
261 | goto _last_literals; | 483 | noDictIssue, acceleration); |
484 | else | ||
485 | return LZ4_compress_generic(ctx, source, | ||
486 | dest, inputSize, 0, | ||
487 | noLimit, tableType, noDict, | ||
488 | noDictIssue, acceleration); | ||
489 | } else { | ||
490 | if (inputSize < LZ4_64Klimit) | ||
491 | return LZ4_compress_generic(ctx, source, | ||
492 | dest, inputSize, | ||
493 | maxOutputSize, limitedOutput, byU16, noDict, | ||
494 | noDictIssue, acceleration); | ||
495 | else | ||
496 | return LZ4_compress_generic(ctx, source, | ||
497 | dest, inputSize, | ||
498 | maxOutputSize, limitedOutput, tableType, noDict, | ||
499 | noDictIssue, acceleration); | ||
500 | } | ||
501 | } | ||
502 | |||
503 | int LZ4_compress_fast(const char *source, char *dest, int inputSize, | ||
504 | int maxOutputSize, int acceleration, void *wrkmem) | ||
505 | { | ||
506 | return LZ4_compress_fast_extState(wrkmem, source, dest, inputSize, | ||
507 | maxOutputSize, acceleration); | ||
508 | } | ||
509 | EXPORT_SYMBOL(LZ4_compress_fast); | ||
262 | 510 | ||
263 | memset((void *)hashtable, 0, LZ4_MEM_COMPRESS); | 511 | int LZ4_compress_default(const char *source, char *dest, int inputSize, |
512 | int maxOutputSize, void *wrkmem) | ||
513 | { | ||
514 | return LZ4_compress_fast(source, dest, inputSize, | ||
515 | maxOutputSize, LZ4_ACCELERATION_DEFAULT, wrkmem); | ||
516 | } | ||
517 | EXPORT_SYMBOL(LZ4_compress_default); | ||
518 | |||
519 | /*-****************************** | ||
520 | * *_destSize() variant | ||
521 | ********************************/ | ||
522 | static int LZ4_compress_destSize_generic( | ||
523 | LZ4_stream_t_internal * const ctx, | ||
524 | const char * const src, | ||
525 | char * const dst, | ||
526 | int * const srcSizePtr, | ||
527 | const int targetDstSize, | ||
528 | const tableType_t tableType) | ||
529 | { | ||
530 | const BYTE *ip = (const BYTE *) src; | ||
531 | const BYTE *base = (const BYTE *) src; | ||
532 | const BYTE *lowLimit = (const BYTE *) src; | ||
533 | const BYTE *anchor = ip; | ||
534 | const BYTE * const iend = ip + *srcSizePtr; | ||
535 | const BYTE * const mflimit = iend - MFLIMIT; | ||
536 | const BYTE * const matchlimit = iend - LASTLITERALS; | ||
537 | |||
538 | BYTE *op = (BYTE *) dst; | ||
539 | BYTE * const oend = op + targetDstSize; | ||
540 | BYTE * const oMaxLit = op + targetDstSize - 2 /* offset */ | ||
541 | - 8 /* because 8 + MINMATCH == MFLIMIT */ - 1 /* token */; | ||
542 | BYTE * const oMaxMatch = op + targetDstSize | ||
543 | - (LASTLITERALS + 1 /* token */); | ||
544 | BYTE * const oMaxSeq = oMaxLit - 1 /* token */; | ||
545 | |||
546 | U32 forwardH; | ||
547 | |||
548 | /* Init conditions */ | ||
549 | /* Impossible to store anything */ | ||
550 | if (targetDstSize < 1) | ||
551 | return 0; | ||
552 | /* Unsupported input size, too large (or negative) */ | ||
553 | if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) | ||
554 | return 0; | ||
555 | /* Size too large (not within 64K limit) */ | ||
556 | if ((tableType == byU16) && (*srcSizePtr >= LZ4_64Klimit)) | ||
557 | return 0; | ||
558 | /* Input too small, no compression (all literals) */ | ||
559 | if (*srcSizePtr < LZ4_minLength) | ||
560 | goto _last_literals; | ||
264 | 561 | ||
265 | /* First Byte */ | 562 | /* First Byte */ |
266 | ip++; | 563 | *srcSizePtr = 0; |
267 | forwardh = LZ4_HASH64K_VALUE(ip); | 564 | LZ4_putPosition(ip, ctx->hashTable, tableType, base); |
565 | ip++; forwardH = LZ4_hashPosition(ip, tableType); | ||
268 | 566 | ||
269 | /* Main Loop */ | 567 | /* Main Loop */ |
270 | for (;;) { | 568 | for ( ; ; ) { |
271 | int findmatchattempts = (1U << skipstrength) + 3; | 569 | const BYTE *match; |
272 | const u8 *forwardip = ip; | 570 | BYTE *token; |
273 | const u8 *ref; | ||
274 | u8 *token; | ||
275 | 571 | ||
276 | /* Find a match */ | 572 | /* Find a match */ |
277 | do { | 573 | { |
278 | u32 h = forwardh; | 574 | const BYTE *forwardIp = ip; |
279 | int step = findmatchattempts++ >> skipstrength; | 575 | unsigned int step = 1; |
280 | ip = forwardip; | 576 | unsigned int searchMatchNb = 1 << LZ4_SKIPTRIGGER; |
281 | forwardip = ip + step; | 577 | |
282 | 578 | do { | |
283 | if (forwardip > mflimit) | 579 | U32 h = forwardH; |
284 | goto _last_literals; | 580 | |
285 | 581 | ip = forwardIp; | |
286 | forwardh = LZ4_HASH64K_VALUE(forwardip); | 582 | forwardIp += step; |
287 | ref = base + hashtable[h]; | 583 | step = (searchMatchNb++ >> LZ4_SKIPTRIGGER); |
288 | hashtable[h] = (u16)(ip - base); | 584 | |
289 | } while (A32(ref) != A32(ip)); | 585 | if (unlikely(forwardIp > mflimit)) |
586 | goto _last_literals; | ||
587 | |||
588 | match = LZ4_getPositionOnHash(h, ctx->hashTable, | ||
589 | tableType, base); | ||
590 | forwardH = LZ4_hashPosition(forwardIp, | ||
591 | tableType); | ||
592 | LZ4_putPositionOnHash(ip, h, | ||
593 | ctx->hashTable, tableType, | ||
594 | base); | ||
595 | |||
596 | } while (((tableType == byU16) | ||
597 | ? 0 | ||
598 | : (match + MAX_DISTANCE < ip)) | ||
599 | || (LZ4_read32(match) != LZ4_read32(ip))); | ||
600 | } | ||
290 | 601 | ||
291 | /* Catch up */ | 602 | /* Catch up */ |
292 | while ((ip > anchor) && (ref > (u8 *)source) | 603 | while ((ip > anchor) |
293 | && (ip[-1] == ref[-1])) { | 604 | && (match > lowLimit) |
605 | && (unlikely(ip[-1] == match[-1]))) { | ||
294 | ip--; | 606 | ip--; |
295 | ref--; | 607 | match--; |
296 | } | 608 | } |
297 | 609 | ||
298 | /* Encode Literal length */ | 610 | /* Encode Literal length */ |
299 | length = (int)(ip - anchor); | 611 | { |
300 | token = op++; | 612 | unsigned int litLength = (unsigned int)(ip - anchor); |
301 | /* Check output limit */ | ||
302 | if (unlikely(op + length + (2 + 1 + LASTLITERALS) | ||
303 | + (length >> 8) > oend)) | ||
304 | return 0; | ||
305 | if (length >= (int)RUN_MASK) { | ||
306 | *token = (RUN_MASK << ML_BITS); | ||
307 | len = length - RUN_MASK; | ||
308 | for (; len > 254 ; len -= 255) | ||
309 | *op++ = 255; | ||
310 | *op++ = (u8)len; | ||
311 | } else | ||
312 | *token = (length << ML_BITS); | ||
313 | 613 | ||
314 | /* Copy Literals */ | 614 | token = op++; |
315 | LZ4_BLINDCOPY(anchor, op, length); | 615 | if (op + ((litLength + 240) / 255) |
616 | + litLength > oMaxLit) { | ||
617 | /* Not enough space for a last match */ | ||
618 | op--; | ||
619 | goto _last_literals; | ||
620 | } | ||
621 | if (litLength >= RUN_MASK) { | ||
622 | unsigned int len = litLength - RUN_MASK; | ||
623 | *token = (RUN_MASK<<ML_BITS); | ||
624 | for (; len >= 255; len -= 255) | ||
625 | *op++ = 255; | ||
626 | *op++ = (BYTE)len; | ||
627 | } else | ||
628 | *token = (BYTE)(litLength << ML_BITS); | ||
629 | |||
630 | /* Copy Literals */ | ||
631 | LZ4_wildCopy(op, anchor, op + litLength); | ||
632 | op += litLength; | ||
633 | } | ||
316 | 634 | ||
317 | _next_match: | 635 | _next_match: |
318 | /* Encode Offset */ | 636 | /* Encode Offset */ |
319 | LZ4_WRITE_LITTLEENDIAN_16(op, (u16)(ip - ref)); | 637 | LZ4_writeLE16(op, (U16)(ip - match)); op += 2; |
320 | 638 | ||
321 | /* Start Counting */ | 639 | /* Encode MatchLength */ |
322 | ip += MINMATCH; | 640 | { |
323 | /* MinMatch verified */ | 641 | size_t matchLength = LZ4_count(ip + MINMATCH, |
324 | ref += MINMATCH; | 642 | match + MINMATCH, matchlimit); |
325 | anchor = ip; | ||
326 | 643 | ||
327 | while (ip < MATCHLIMIT - (STEPSIZE - 1)) { | 644 | if (op + ((matchLength + 240)/255) > oMaxMatch) { |
328 | #if LZ4_ARCH64 | 645 | /* Match description too long : reduce it */ |
329 | u64 diff = A64(ref) ^ A64(ip); | 646 | matchLength = (15 - 1) + (oMaxMatch - op) * 255; |
330 | #else | ||
331 | u32 diff = A32(ref) ^ A32(ip); | ||
332 | #endif | ||
333 | |||
334 | if (!diff) { | ||
335 | ip += STEPSIZE; | ||
336 | ref += STEPSIZE; | ||
337 | continue; | ||
338 | } | 647 | } |
339 | ip += LZ4_NBCOMMONBYTES(diff); | 648 | ip += MINMATCH + matchLength; |
340 | goto _endcount; | 649 | |
341 | } | 650 | if (matchLength >= ML_MASK) { |
342 | #if LZ4_ARCH64 | 651 | *token += ML_MASK; |
343 | if ((ip < (MATCHLIMIT - 3)) && (A32(ref) == A32(ip))) { | 652 | matchLength -= ML_MASK; |
344 | ip += 4; | 653 | while (matchLength >= 255) { |
345 | ref += 4; | 654 | matchLength -= 255; |
655 | *op++ = 255; | ||
656 | } | ||
657 | *op++ = (BYTE)matchLength; | ||
658 | } else | ||
659 | *token += (BYTE)(matchLength); | ||
346 | } | 660 | } |
347 | #endif | ||
348 | if ((ip < (MATCHLIMIT - 1)) && (A16(ref) == A16(ip))) { | ||
349 | ip += 2; | ||
350 | ref += 2; | ||
351 | } | ||
352 | if ((ip < MATCHLIMIT) && (*ref == *ip)) | ||
353 | ip++; | ||
354 | _endcount: | ||
355 | 661 | ||
356 | /* Encode MatchLength */ | 662 | anchor = ip; |
357 | len = (int)(ip - anchor); | ||
358 | /* Check output limit */ | ||
359 | if (unlikely(op + (1 + LASTLITERALS) + (len >> 8) > oend)) | ||
360 | return 0; | ||
361 | if (len >= (int)ML_MASK) { | ||
362 | *token += ML_MASK; | ||
363 | len -= ML_MASK; | ||
364 | for (; len > 509 ; len -= 510) { | ||
365 | *op++ = 255; | ||
366 | *op++ = 255; | ||
367 | } | ||
368 | if (len > 254) { | ||
369 | len -= 255; | ||
370 | *op++ = 255; | ||
371 | } | ||
372 | *op++ = (u8)len; | ||
373 | } else | ||
374 | *token += len; | ||
375 | 663 | ||
376 | /* Test end of chunk */ | 664 | /* Test end of block */ |
377 | if (ip > mflimit) { | 665 | if (ip > mflimit) |
378 | anchor = ip; | 666 | break; |
667 | if (op > oMaxSeq) | ||
379 | break; | 668 | break; |
380 | } | ||
381 | 669 | ||
382 | /* Fill table */ | 670 | /* Fill table */ |
383 | hashtable[LZ4_HASH64K_VALUE(ip-2)] = (u16)(ip - 2 - base); | 671 | LZ4_putPosition(ip - 2, ctx->hashTable, tableType, base); |
384 | 672 | ||
385 | /* Test next position */ | 673 | /* Test next position */ |
386 | ref = base + hashtable[LZ4_HASH64K_VALUE(ip)]; | 674 | match = LZ4_getPosition(ip, ctx->hashTable, tableType, base); |
387 | hashtable[LZ4_HASH64K_VALUE(ip)] = (u16)(ip - base); | 675 | LZ4_putPosition(ip, ctx->hashTable, tableType, base); |
388 | if (A32(ref) == A32(ip)) { | 676 | |
389 | token = op++; | 677 | if ((match + MAX_DISTANCE >= ip) |
390 | *token = 0; | 678 | && (LZ4_read32(match) == LZ4_read32(ip))) { |
679 | token = op++; *token = 0; | ||
391 | goto _next_match; | 680 | goto _next_match; |
392 | } | 681 | } |
393 | 682 | ||
394 | /* Prepare next loop */ | 683 | /* Prepare next loop */ |
395 | anchor = ip++; | 684 | forwardH = LZ4_hashPosition(++ip, tableType); |
396 | forwardh = LZ4_HASH64K_VALUE(ip); | ||
397 | } | 685 | } |
398 | 686 | ||
399 | _last_literals: | 687 | _last_literals: |
400 | /* Encode Last Literals */ | 688 | /* Encode Last Literals */ |
401 | lastrun = (int)(iend - anchor); | 689 | { |
402 | if (op + lastrun + 1 + (lastrun - RUN_MASK + 255) / 255 > oend) | 690 | size_t lastRunSize = (size_t)(iend - anchor); |
403 | return 0; | 691 | |
404 | if (lastrun >= (int)RUN_MASK) { | 692 | if (op + 1 /* token */ |
405 | *op++ = (RUN_MASK << ML_BITS); | 693 | + ((lastRunSize + 240) / 255) /* litLength */ |
406 | lastrun -= RUN_MASK; | 694 | + lastRunSize /* literals */ > oend) { |
407 | for (; lastrun > 254 ; lastrun -= 255) | 695 | /* adapt lastRunSize to fill 'dst' */ |
408 | *op++ = 255; | 696 | lastRunSize = (oend - op) - 1; |
409 | *op++ = (u8)lastrun; | 697 | lastRunSize -= (lastRunSize + 240) / 255; |
410 | } else | 698 | } |
411 | *op++ = (lastrun << ML_BITS); | 699 | ip = anchor + lastRunSize; |
412 | memcpy(op, anchor, iend - anchor); | 700 | |
413 | op += iend - anchor; | 701 | if (lastRunSize >= RUN_MASK) { |
702 | size_t accumulator = lastRunSize - RUN_MASK; | ||
703 | |||
704 | *op++ = RUN_MASK << ML_BITS; | ||
705 | for (; accumulator >= 255; accumulator -= 255) | ||
706 | *op++ = 255; | ||
707 | *op++ = (BYTE) accumulator; | ||
708 | } else { | ||
709 | *op++ = (BYTE)(lastRunSize<<ML_BITS); | ||
710 | } | ||
711 | memcpy(op, anchor, lastRunSize); | ||
712 | op += lastRunSize; | ||
713 | } | ||
714 | |||
414 | /* End */ | 715 | /* End */ |
415 | return (int)(((char *)op) - dest); | 716 | *srcSizePtr = (int) (((const char *)ip) - src); |
717 | return (int) (((char *)op) - dst); | ||
416 | } | 718 | } |
417 | 719 | ||
418 | int lz4_compress(const unsigned char *src, size_t src_len, | 720 | static int LZ4_compress_destSize_extState( |
419 | unsigned char *dst, size_t *dst_len, void *wrkmem) | 721 | LZ4_stream_t *state, |
722 | const char *src, | ||
723 | char *dst, | ||
724 | int *srcSizePtr, | ||
725 | int targetDstSize) | ||
420 | { | 726 | { |
421 | int ret = -1; | 727 | #if LZ4_ARCH64 |
422 | int out_len = 0; | 728 | const tableType_t tableType = byU32; |
729 | #else | ||
730 | const tableType_t tableType = byPtr; | ||
731 | #endif | ||
423 | 732 | ||
424 | if (src_len < LZ4_64KLIMIT) | 733 | LZ4_resetStream(state); |
425 | out_len = lz4_compress64kctx(wrkmem, src, dst, src_len, | 734 | |
426 | lz4_compressbound(src_len)); | 735 | if (targetDstSize >= LZ4_COMPRESSBOUND(*srcSizePtr)) { |
427 | else | 736 | /* compression success is guaranteed */ |
428 | out_len = lz4_compressctx(wrkmem, src, dst, src_len, | 737 | return LZ4_compress_fast_extState( |
429 | lz4_compressbound(src_len)); | 738 | state, src, dst, *srcSizePtr, |
739 | targetDstSize, 1); | ||
740 | } else { | ||
741 | if (*srcSizePtr < LZ4_64Klimit) | ||
742 | return LZ4_compress_destSize_generic( | ||
743 | &state->internal_donotuse, | ||
744 | src, dst, srcSizePtr, | ||
745 | targetDstSize, byU16); | ||
746 | else | ||
747 | return LZ4_compress_destSize_generic( | ||
748 | &state->internal_donotuse, | ||
749 | src, dst, srcSizePtr, | ||
750 | targetDstSize, tableType); | ||
751 | } | ||
752 | } | ||
753 | |||
754 | |||
755 | int LZ4_compress_destSize( | ||
756 | const char *src, | ||
757 | char *dst, | ||
758 | int *srcSizePtr, | ||
759 | int targetDstSize, | ||
760 | void *wrkmem) | ||
761 | { | ||
762 | return LZ4_compress_destSize_extState(wrkmem, src, dst, srcSizePtr, | ||
763 | targetDstSize); | ||
764 | } | ||
765 | EXPORT_SYMBOL(LZ4_compress_destSize); | ||
766 | |||
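A minimal caller-side sketch of the destSize API exported above; this is editorial illustration, not patch content. The name example_fill_page and the error policy are invented here, and wrkmem is assumed to be an LZ4_MEM_COMPRESS-sized workspace as elsewhere in the kernel LZ4 interface. On return, *srcSizePtr holds the number of source bytes actually consumed and the return value is the compressed size (0 on failure):

#include <linux/lz4.h>
#include <linux/errno.h>
#include <linux/printk.h>

/* Illustrative only: compress as much of src as fits into dst_len bytes. */
static int example_fill_page(const char *src, int src_len,
			     char *dst, int dst_len, void *wrkmem)
{
	int consumed = src_len; /* in: bytes available, out: bytes consumed */
	int written = LZ4_compress_destSize(src, dst, &consumed,
					    dst_len, wrkmem);

	if (written == 0)
		return -ENOSPC; /* dst cannot hold even one sequence */

	pr_debug("packed %d of %d source bytes into %d output bytes\n",
		 consumed, src_len, written);
	return written;
}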
767 | /*-****************************** | ||
768 | * Streaming functions | ||
769 | ********************************/ | ||
770 | void LZ4_resetStream(LZ4_stream_t *LZ4_stream) | ||
771 | { | ||
772 | memset(LZ4_stream, 0, sizeof(LZ4_stream_t)); | ||
773 | } | ||
774 | |||
775 | int LZ4_loadDict(LZ4_stream_t *LZ4_dict, | ||
776 | const char *dictionary, int dictSize) | ||
777 | { | ||
778 | LZ4_stream_t_internal *dict = &LZ4_dict->internal_donotuse; | ||
779 | const BYTE *p = (const BYTE *)dictionary; | ||
780 | const BYTE * const dictEnd = p + dictSize; | ||
781 | const BYTE *base; | ||
782 | |||
783 | if ((dict->initCheck) | ||
784 | || (dict->currentOffset > 1 * GB)) { | ||
785 | /* Uninitialized structure, or reuse overflow */ | ||
786 | LZ4_resetStream(LZ4_dict); | ||
787 | } | ||
788 | |||
789 | if (dictSize < (int)HASH_UNIT) { | ||
790 | dict->dictionary = NULL; | ||
791 | dict->dictSize = 0; | ||
792 | return 0; | ||
793 | } | ||
794 | |||
795 | if ((dictEnd - p) > 64 * KB) | ||
796 | p = dictEnd - 64 * KB; | ||
797 | dict->currentOffset += 64 * KB; | ||
798 | base = p - dict->currentOffset; | ||
799 | dict->dictionary = p; | ||
800 | dict->dictSize = (U32)(dictEnd - p); | ||
801 | dict->currentOffset += dict->dictSize; | ||
802 | |||
803 | while (p <= dictEnd - HASH_UNIT) { | ||
804 | LZ4_putPosition(p, dict->hashTable, byU32, base); | ||
805 | p += 3; | ||
806 | } | ||
807 | |||
808 | return dict->dictSize; | ||
809 | } | ||
810 | EXPORT_SYMBOL(LZ4_loadDict); | ||
811 | |||
812 | static void LZ4_renormDictT(LZ4_stream_t_internal *LZ4_dict, | ||
813 | const BYTE *src) | ||
814 | { | ||
815 | if ((LZ4_dict->currentOffset > 0x80000000) || | ||
816 | ((uptrval)LZ4_dict->currentOffset > (uptrval)src)) { | ||
817 | /* address space overflow */ | ||
818 | /* rescale hash table */ | ||
819 | U32 const delta = LZ4_dict->currentOffset - 64 * KB; | ||
820 | const BYTE *dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize; | ||
821 | int i; | ||
822 | |||
823 | for (i = 0; i < LZ4_HASH_SIZE_U32; i++) { | ||
824 | if (LZ4_dict->hashTable[i] < delta) | ||
825 | LZ4_dict->hashTable[i] = 0; | ||
826 | else | ||
827 | LZ4_dict->hashTable[i] -= delta; | ||
828 | } | ||
829 | LZ4_dict->currentOffset = 64 * KB; | ||
830 | if (LZ4_dict->dictSize > 64 * KB) | ||
831 | LZ4_dict->dictSize = 64 * KB; | ||
832 | LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize; | ||
833 | } | ||
834 | } | ||
835 | |||
836 | int LZ4_saveDict(LZ4_stream_t *LZ4_dict, char *safeBuffer, int dictSize) | ||
837 | { | ||
838 | LZ4_stream_t_internal * const dict = &LZ4_dict->internal_donotuse; | ||
839 | const BYTE * const previousDictEnd = dict->dictionary + dict->dictSize; | ||
840 | |||
841 | if ((U32)dictSize > 64 * KB) { | ||
842 | /* useless to define a dictionary > 64 * KB */ | ||
843 | dictSize = 64 * KB; | ||
844 | } | ||
845 | if ((U32)dictSize > dict->dictSize) | ||
846 | dictSize = dict->dictSize; | ||
847 | |||
848 | memmove(safeBuffer, previousDictEnd - dictSize, dictSize); | ||
849 | |||
850 | dict->dictionary = (const BYTE *)safeBuffer; | ||
851 | dict->dictSize = (U32)dictSize; | ||
852 | |||
853 | return dictSize; | ||
854 | } | ||
855 | EXPORT_SYMBOL(LZ4_saveDict); | ||
856 | |||
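The dictionary round trip enabled by LZ4_loadDict()/LZ4_saveDict() looks roughly like the sketch below; it is illustrative rather than patch content, and example_dict_block, dict_save, and the fixed 64 KB save size are invented names (the stream never indexes more than the last 64 KB of dictionary anyway):

#include <linux/lz4.h>
#include <linux/errno.h>

/* Illustrative only: one block compressed against a preset dictionary. */
static int example_dict_block(LZ4_stream_t *stream,
			      const char *dict, int dict_len,
			      const char *block, int block_len,
			      char *out, int out_max, char *dict_save)
{
	int n;

	LZ4_resetStream(stream);
	LZ4_loadDict(stream, dict, dict_len); /* indexes at most 64 KB */

	/* acceleration 1; values < 1 fall back to the default */
	n = LZ4_compress_fast_continue(stream, block, out,
				       block_len, out_max, 1);
	if (n <= 0)
		return -ENOSPC;

	/* relocate the history window before the input buffers are reused */
	LZ4_saveDict(stream, dict_save, 64 * 1024);
	return n;
}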
857 | int LZ4_compress_fast_continue(LZ4_stream_t *LZ4_stream, const char *source, | ||
858 | char *dest, int inputSize, int maxOutputSize, int acceleration) | ||
859 | { | ||
860 | LZ4_stream_t_internal *streamPtr = &LZ4_stream->internal_donotuse; | ||
861 | const BYTE * const dictEnd = streamPtr->dictionary | ||
862 | + streamPtr->dictSize; | ||
430 | 863 | ||
431 | if (out_len < 0) | 864 | const BYTE *smallest = (const BYTE *) source; |
432 | goto exit; | ||
433 | 865 | ||
434 | *dst_len = out_len; | 866 | if (streamPtr->initCheck) { |
867 | /* Uninitialized structure detected */ | ||
868 | return 0; | ||
869 | } | ||
870 | |||
871 | if ((streamPtr->dictSize > 0) && (smallest > dictEnd)) | ||
872 | smallest = dictEnd; | ||
873 | |||
874 | LZ4_renormDictT(streamPtr, smallest); | ||
875 | |||
876 | if (acceleration < 1) | ||
877 | acceleration = LZ4_ACCELERATION_DEFAULT; | ||
878 | |||
879 | /* Check overlapping input/dictionary space */ | ||
880 | { | ||
881 | const BYTE *sourceEnd = (const BYTE *) source + inputSize; | ||
882 | |||
883 | if ((sourceEnd > streamPtr->dictionary) | ||
884 | && (sourceEnd < dictEnd)) { | ||
885 | streamPtr->dictSize = (U32)(dictEnd - sourceEnd); | ||
886 | if (streamPtr->dictSize > 64 * KB) | ||
887 | streamPtr->dictSize = 64 * KB; | ||
888 | if (streamPtr->dictSize < 4) | ||
889 | streamPtr->dictSize = 0; | ||
890 | streamPtr->dictionary = dictEnd - streamPtr->dictSize; | ||
891 | } | ||
892 | } | ||
435 | 893 | ||
436 | return 0; | 894 | /* prefix mode : source data follows dictionary */ |
437 | exit: | 895 | if (dictEnd == (const BYTE *)source) { |
438 | return ret; | 896 | int result; |
897 | |||
898 | if ((streamPtr->dictSize < 64 * KB) && | ||
899 | (streamPtr->dictSize < streamPtr->currentOffset)) { | ||
900 | result = LZ4_compress_generic( | ||
901 | streamPtr, source, dest, inputSize, | ||
902 | maxOutputSize, limitedOutput, byU32, | ||
903 | withPrefix64k, dictSmall, acceleration); | ||
904 | } else { | ||
905 | result = LZ4_compress_generic( | ||
906 | streamPtr, source, dest, inputSize, | ||
907 | maxOutputSize, limitedOutput, byU32, | ||
908 | withPrefix64k, noDictIssue, acceleration); | ||
909 | } | ||
910 | streamPtr->dictSize += (U32)inputSize; | ||
911 | streamPtr->currentOffset += (U32)inputSize; | ||
912 | return result; | ||
913 | } | ||
914 | |||
915 | /* external dictionary mode */ | ||
916 | { | ||
917 | int result; | ||
918 | |||
919 | if ((streamPtr->dictSize < 64 * KB) && | ||
920 | (streamPtr->dictSize < streamPtr->currentOffset)) { | ||
921 | result = LZ4_compress_generic( | ||
922 | streamPtr, source, dest, inputSize, | ||
923 | maxOutputSize, limitedOutput, byU32, | ||
924 | usingExtDict, dictSmall, acceleration); | ||
925 | } else { | ||
926 | result = LZ4_compress_generic( | ||
927 | streamPtr, source, dest, inputSize, | ||
928 | maxOutputSize, limitedOutput, byU32, | ||
929 | usingExtDict, noDictIssue, acceleration); | ||
930 | } | ||
931 | streamPtr->dictionary = (const BYTE *)source; | ||
932 | streamPtr->dictSize = (U32)inputSize; | ||
933 | streamPtr->currentOffset += (U32)inputSize; | ||
934 | return result; | ||
935 | } | ||
439 | } | 936 | } |
440 | EXPORT_SYMBOL(lz4_compress); | 937 | EXPORT_SYMBOL(LZ4_compress_fast_continue); |
441 | 938 | ||
442 | MODULE_LICENSE("Dual BSD/GPL"); | 939 | MODULE_LICENSE("Dual BSD/GPL"); |
443 | MODULE_DESCRIPTION("LZ4 compressor"); | 940 | MODULE_DESCRIPTION("LZ4 compressor"); |
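To make the withPrefix64k/usingExtDict dispatch in LZ4_compress_fast_continue() above concrete, here is an illustrative feeding pattern; fill_block, BLOCK_SZ, out, and out_max are hypothetical. Appending every block to one linear buffer keeps dictEnd == source, so the cheaper prefix path is taken; ping-ponging between two buffers, as below, forces the external-dictionary path while the previous block serves as history:

/* Illustrative only: double-buffered input exercising the extDict path. */
#define BLOCK_SZ 4096 /* hypothetical block size */

static void example_stream(LZ4_stream_t *stream, char *out, int out_max)
{
	static char bufs[2][BLOCK_SZ];
	int idx = 0;

	LZ4_resetStream(stream);
	for (;;) {
		char *cur = bufs[idx];
		int n = fill_block(cur, BLOCK_SZ); /* hypothetical producer */

		if (n <= 0)
			break;
		if (LZ4_compress_fast_continue(stream, cur, out,
					       n, out_max, 1) <= 0)
			break; /* out too small for this block */
		idx ^= 1; /* last block stays resident as the dictionary */
	}
}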
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c index 6d940c72b5fc..bd3574312b82 100644 --- a/lib/lz4/lz4_decompress.c +++ b/lib/lz4/lz4_decompress.c | |||
@@ -1,25 +1,16 @@ | |||
1 | /* | 1 | /* |
2 | * LZ4 Decompressor for Linux kernel | ||
3 | * | ||
4 | * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com> | ||
5 | * | ||
6 | * Based on LZ4 implementation by Yann Collet. | ||
7 | * | ||
8 | * LZ4 - Fast LZ compression algorithm | 2 | * LZ4 - Fast LZ compression algorithm |
9 | * Copyright (C) 2011-2012, Yann Collet. | 3 | * Copyright (C) 2011-2016, Yann Collet.
10 | * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) | 4 | * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or without | 5 | * Redistribution and use in source and binary forms, with or without |
13 | * modification, are permitted provided that the following conditions are | 6 | * modification, are permitted provided that the following conditions are |
14 | * met: | 7 | * met: |
15 | * | 8 | * * Redistributions of source code must retain the above copyright |
16 | * * Redistributions of source code must retain the above copyright | 9 | * notice, this list of conditions and the following disclaimer. |
17 | * notice, this list of conditions and the following disclaimer. | 10 | * * Redistributions in binary form must reproduce the above |
18 | * * Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following disclaimer | 11 | * copyright notice, this list of conditions and the following disclaimer |
20 | * in the documentation and/or other materials provided with the | 12 | * in the documentation and/or other materials provided with the |
21 | * distribution. | 13 | * distribution. |
22 | * | ||
23 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | 14 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
24 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | 15 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
25 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | 16 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
@@ -31,313 +22,487 @@ | |||
31 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 22 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
32 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 23 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
33 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 24 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
25 | * You can contact the author at : | ||
26 | * - LZ4 homepage : http://www.lz4.org | ||
27 | * - LZ4 source repository : https://github.com/lz4/lz4 | ||
34 | * | 28 | * |
35 | * You can contact the author at : | 29 | * Changed for kernel usage by: |
36 | * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html | 30 | * Sven Schmidt <4sschmid@informatik.uni-hamburg.de> |
37 | * - LZ4 source repository : http://code.google.com/p/lz4/ | ||
38 | */ | 31 | */ |
39 | 32 | ||
40 | #ifndef STATIC | 33 | /*-************************************ |
34 | * Dependencies | ||
35 | **************************************/ | ||
36 | #include <linux/lz4.h> | ||
37 | #include "lz4defs.h" | ||
38 | #include <linux/init.h> | ||
41 | #include <linux/module.h> | 39 | #include <linux/module.h> |
42 | #include <linux/kernel.h> | 40 | #include <linux/kernel.h> |
43 | #endif | ||
44 | #include <linux/lz4.h> | ||
45 | |||
46 | #include <asm/unaligned.h> | 41 | #include <asm/unaligned.h> |
47 | 42 | ||
48 | #include "lz4defs.h" | 43 | /*-***************************** |
49 | 44 | * Decompression functions | |
50 | static const int dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0}; | 45 | *******************************/ |
51 | #if LZ4_ARCH64 | 46 | /* LZ4_decompress_generic() : |
52 | static const int dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3}; | 47 | * This generic decompression function covers all use cases.
53 | #endif | 48 | * It shall be instantiated several times, using different sets of directives |
54 | 49 | * Note that it is important that this generic function is really inlined,
55 | static int lz4_uncompress(const char *source, char *dest, int osize) | 50 | * in order to remove useless branches during compilation optimization. |
51 | */ | ||
52 | static FORCE_INLINE int LZ4_decompress_generic( | ||
53 | const char * const source, | ||
54 | char * const dest, | ||
55 | int inputSize, | ||
56 | /* | ||
57 | * If endOnInput == endOnInputSize, | ||
58 | * this value is the max size of Output Buffer. | ||
59 | */ | ||
60 | int outputSize, | ||
61 | /* endOnOutputSize, endOnInputSize */ | ||
62 | int endOnInput, | ||
63 | /* full, partial */ | ||
64 | int partialDecoding, | ||
65 | /* only used if partialDecoding == partial */ | ||
66 | int targetOutputSize, | ||
67 | /* noDict, withPrefix64k, usingExtDict */ | ||
68 | int dict, | ||
69 | /* == dest when no prefix */ | ||
70 | const BYTE * const lowPrefix, | ||
71 | /* only if dict == usingExtDict */ | ||
72 | const BYTE * const dictStart, | ||
73 | /* note : = 0 if noDict */ | ||
74 | const size_t dictSize | ||
75 | ) | ||
56 | { | 76 | { |
77 | /* Local Variables */ | ||
57 | const BYTE *ip = (const BYTE *) source; | 78 | const BYTE *ip = (const BYTE *) source; |
58 | const BYTE *ref; | 79 | const BYTE * const iend = ip + inputSize; |
80 | |||
59 | BYTE *op = (BYTE *) dest; | 81 | BYTE *op = (BYTE *) dest; |
60 | BYTE * const oend = op + osize; | 82 | BYTE * const oend = op + outputSize; |
61 | BYTE *cpy; | 83 | BYTE *cpy; |
62 | unsigned token; | 84 | BYTE *oexit = op + targetOutputSize; |
63 | size_t length; | 85 | const BYTE * const lowLimit = lowPrefix - dictSize; |
64 | 86 | ||
87 | const BYTE * const dictEnd = (const BYTE *)dictStart + dictSize; | ||
88 | const unsigned int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; | ||
89 | const int dec64table[] = { 0, 0, 0, -1, 0, 1, 2, 3 }; | ||
90 | |||
91 | const int safeDecode = (endOnInput == endOnInputSize); | ||
92 | const int checkOffset = ((safeDecode) && (dictSize < (int)(64 * KB))); | ||
93 | |||
94 | /* Special cases */ | ||
95 | /* targetOutputSize too high => decode everything */ | ||
96 | if ((partialDecoding) && (oexit > oend - MFLIMIT)) | ||
97 | oexit = oend - MFLIMIT; | ||
98 | |||
99 | /* Empty output buffer */ | ||
100 | if ((endOnInput) && (unlikely(outputSize == 0))) | ||
101 | return ((inputSize == 1) && (*ip == 0)) ? 0 : -1; | ||
102 | |||
103 | if ((!endOnInput) && (unlikely(outputSize == 0))) | ||
104 | return (*ip == 0 ? 1 : -1); | ||
105 | |||
106 | /* Main Loop : decode sequences */ | ||
65 | while (1) { | 107 | while (1) { |
108 | size_t length; | ||
109 | const BYTE *match; | ||
110 | size_t offset; | ||
111 | |||
112 | /* get literal length */ | ||
113 | unsigned int const token = *ip++; | ||
114 | |||
115 | length = token>>ML_BITS; | ||
66 | 116 | ||
67 | /* get runlength */ | ||
68 | token = *ip++; | ||
69 | length = (token >> ML_BITS); | ||
70 | if (length == RUN_MASK) { | 117 | if (length == RUN_MASK) { |
71 | size_t len; | 118 | unsigned int s; |
72 | 119 | ||
73 | len = *ip++; | 120 | do { |
74 | for (; len == 255; length += 255) | 121 | s = *ip++; |
75 | len = *ip++; | 122 | length += s; |
76 | if (unlikely(length > (size_t)(length + len))) | 123 | } while (likely(endOnInput |
124 | ? ip < iend - RUN_MASK | ||
125 | : 1) & (s == 255)); | ||
126 | |||
127 | if ((safeDecode) | ||
128 | && unlikely( | ||
129 | (size_t)(op + length) < (size_t)(op))) { | ||
130 | /* overflow detection */ | ||
131 | goto _output_error; | ||
132 | } | ||
133 | if ((safeDecode) | ||
134 | && unlikely( | ||
135 | (size_t)(ip + length) < (size_t)(ip))) { | ||
136 | /* overflow detection */ | ||
77 | goto _output_error; | 137 | goto _output_error; |
78 | length += len; | 138 | } |
79 | } | 139 | } |
80 | 140 | ||
81 | /* copy literals */ | 141 | /* copy literals */ |
82 | cpy = op + length; | 142 | cpy = op + length; |
83 | if (unlikely(cpy > oend - COPYLENGTH)) { | 143 | if (((endOnInput) && ((cpy > (partialDecoding ? oexit : oend - MFLIMIT)) |
84 | /* | 144 | || (ip + length > iend - (2 + 1 + LASTLITERALS)))) |
85 | * Error: not enough place for another match | 145 | || ((!endOnInput) && (cpy > oend - WILDCOPYLENGTH))) { |
86 | * (min 4) + 5 literals | 146 | if (partialDecoding) { |
87 | */ | 147 | if (cpy > oend) { |
88 | if (cpy != oend) | 148 | /* |
89 | goto _output_error; | 149 | * Error : |
150 | * write attempt beyond end of output buffer | ||
151 | */ | ||
152 | goto _output_error; | ||
153 | } | ||
154 | if ((endOnInput) | ||
155 | && (ip + length > iend)) { | ||
156 | /* | ||
157 | * Error : | ||
158 | * read attempt beyond | ||
159 | * end of input buffer | ||
160 | */ | ||
161 | goto _output_error; | ||
162 | } | ||
163 | } else { | ||
164 | if ((!endOnInput) | ||
165 | && (cpy != oend)) { | ||
166 | /* | ||
167 | * Error : | ||
168 | * block decoding must | ||
169 | * stop exactly there | ||
170 | */ | ||
171 | goto _output_error; | ||
172 | } | ||
173 | if ((endOnInput) | ||
174 | && ((ip + length != iend) | ||
175 | || (cpy > oend))) { | ||
176 | /* | ||
177 | * Error : | ||
178 | * input must be consumed | ||
179 | */ | ||
180 | goto _output_error; | ||
181 | } | ||
182 | } | ||
90 | 183 | ||
91 | memcpy(op, ip, length); | 184 | memcpy(op, ip, length); |
92 | ip += length; | 185 | ip += length; |
93 | break; /* EOF */ | 186 | op += length; |
187 | /* Necessarily EOF, due to parsing restrictions */ | ||
188 | break; | ||
94 | } | 189 | } |
95 | LZ4_WILDCOPY(ip, op, cpy); | 190 | |
96 | ip -= (op - cpy); | 191 | LZ4_wildCopy(op, ip, cpy); |
192 | ip += length; | ||
97 | op = cpy; | 193 | op = cpy; |
98 | 194 | ||
99 | /* get offset */ | 195 | /* get offset */ |
100 | LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip); | 196 | offset = LZ4_readLE16(ip); |
101 | ip += 2; | 197 | ip += 2; |
198 | match = op - offset; | ||
102 | 199 | ||
103 | /* Error: offset create reference outside destination buffer */ | 200 | if ((checkOffset) && (unlikely(match < lowLimit))) { |
104 | if (unlikely(ref < (BYTE *const) dest)) | 201 | /* Error : offset outside buffers */ |
105 | goto _output_error; | 202 | goto _output_error; |
203 | } | ||
204 | |||
205 | /* costs ~1%; silence an msan warning when offset == 0 */ | ||
206 | LZ4_write32(op, (U32)offset); | ||
106 | 207 | ||
107 | /* get matchlength */ | 208 | /* get matchlength */ |
108 | length = token & ML_MASK; | 209 | length = token & ML_MASK; |
109 | if (length == ML_MASK) { | 210 | if (length == ML_MASK) { |
110 | for (; *ip == 255; length += 255) | 211 | unsigned int s; |
111 | ip++; | 212 | |
112 | if (unlikely(length > (size_t)(length + *ip))) | 213 | do { |
214 | s = *ip++; | ||
215 | |||
216 | if ((endOnInput) && (ip > iend - LASTLITERALS)) | ||
217 | goto _output_error; | ||
218 | |||
219 | length += s; | ||
220 | } while (s == 255); | ||
221 | |||
222 | if ((safeDecode) | ||
223 | && unlikely( | ||
224 | (size_t)(op + length) < (size_t)op)) { | ||
225 | /* overflow detection */ | ||
113 | goto _output_error; | 226 | goto _output_error; |
114 | length += *ip++; | 227 | } |
115 | } | 228 | } |
116 | 229 | ||
117 | /* copy repeated sequence */ | 230 | length += MINMATCH; |
118 | if (unlikely((op - ref) < STEPSIZE)) { | 231 | |
119 | #if LZ4_ARCH64 | 232 | /* check external dictionary */ |
120 | int dec64 = dec64table[op - ref]; | 233 | if ((dict == usingExtDict) && (match < lowPrefix)) { |
121 | #else | 234 | if (unlikely(op + length > oend - LASTLITERALS)) { |
122 | const int dec64 = 0; | 235 | /* doesn't respect parsing restriction */ |
123 | #endif | 236 | goto _output_error; |
124 | op[0] = ref[0]; | 237 | } |
125 | op[1] = ref[1]; | 238 | |
126 | op[2] = ref[2]; | 239 | if (length <= (size_t)(lowPrefix - match)) { |
127 | op[3] = ref[3]; | 240 | /* |
128 | op += 4; | 241 | * match can be copied as a single segment |
129 | ref += 4; | 242 | * from external dictionary |
130 | ref -= dec32table[op-ref]; | 243 | */ |
131 | PUT4(ref, op); | 244 | memmove(op, dictEnd - (lowPrefix - match), |
132 | op += STEPSIZE - 4; | 245 | length); |
133 | ref -= dec64; | 246 | op += length; |
247 | } else { | ||
248 | /* | ||
249 | * match encompasses external | ||
250 | * dictionary and current block | ||
251 | */ | ||
252 | size_t const copySize = (size_t)(lowPrefix - match); | ||
253 | size_t const restSize = length - copySize; | ||
254 | |||
255 | memcpy(op, dictEnd - copySize, copySize); | ||
256 | op += copySize; | ||
257 | |||
258 | if (restSize > (size_t)(op - lowPrefix)) { | ||
259 | /* overlap copy */ | ||
260 | BYTE * const endOfMatch = op + restSize; | ||
261 | const BYTE *copyFrom = lowPrefix; | ||
262 | |||
263 | while (op < endOfMatch) | ||
264 | *op++ = *copyFrom++; | ||
265 | } else { | ||
266 | memcpy(op, lowPrefix, restSize); | ||
267 | op += restSize; | ||
268 | } | ||
269 | } | ||
270 | |||
271 | continue; | ||
272 | } | ||
273 | |||
274 | /* copy match within block */ | ||
275 | cpy = op + length; | ||
276 | |||
277 | if (unlikely(offset < 8)) { | ||
278 | const int dec64 = dec64table[offset]; | ||
279 | |||
280 | op[0] = match[0]; | ||
281 | op[1] = match[1]; | ||
282 | op[2] = match[2]; | ||
283 | op[3] = match[3]; | ||
284 | match += dec32table[offset]; | ||
285 | memcpy(op + 4, match, 4); | ||
286 | match -= dec64; | ||
134 | } else { | 287 | } else { |
135 | LZ4_COPYSTEP(ref, op); | 288 | LZ4_copy8(op, match); |
289 | match += 8; | ||
136 | } | 290 | } |
137 | cpy = op + length - (STEPSIZE - 4); | ||
138 | if (cpy > (oend - COPYLENGTH)) { | ||
139 | 291 | ||
140 | /* Error: request to write beyond destination buffer */ | 292 | op += 8; |
141 | if (cpy > oend) | 293 | |
142 | goto _output_error; | 294 | if (unlikely(cpy > oend - 12)) { |
143 | #if LZ4_ARCH64 | 295 | BYTE * const oCopyLimit = oend - (WILDCOPYLENGTH - 1); |
144 | if ((ref + COPYLENGTH) > oend) | 296 | |
145 | #else | 297 | if (cpy > oend - LASTLITERALS) { |
146 | if ((ref + COPYLENGTH) > oend || | 298 | /* |
147 | (op + COPYLENGTH) > oend) | 299 | * Error : last LASTLITERALS bytes |
148 | #endif | 300 | * must be literals (uncompressed) |
301 | */ | ||
149 | goto _output_error; | 302 | goto _output_error; |
150 | LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH)); | 303 | } |
304 | |||
305 | if (op < oCopyLimit) { | ||
306 | LZ4_wildCopy(op, match, oCopyLimit); | ||
307 | match += oCopyLimit - op; | ||
308 | op = oCopyLimit; | ||
309 | } | ||
310 | |||
151 | while (op < cpy) | 311 | while (op < cpy) |
152 | *op++ = *ref++; | 312 | *op++ = *match++; |
153 | op = cpy; | 313 | } else { |
154 | /* | 314 | LZ4_copy8(op, match); |
155 | * Check EOF (should never happen, since last 5 bytes | 315 | |
156 | * are supposed to be literals) | 316 | if (length > 16) |
157 | */ | 317 | LZ4_wildCopy(op + 8, match + 8, cpy); |
158 | if (op == oend) | ||
159 | goto _output_error; | ||
160 | continue; | ||
161 | } | 318 | } |
162 | LZ4_SECURECOPY(ref, op, cpy); | 319 | |
163 | op = cpy; /* correction */ | 320 | op = cpy; /* correction */ |
164 | } | 321 | } |
322 | |||
165 | /* end of decoding */ | 323 | /* end of decoding */ |
166 | return (int) (((char *)ip) - source); | 324 | if (endOnInput) { |
325 | /* Nb of output bytes decoded */ | ||
326 | return (int) (((char *)op) - dest); | ||
327 | } else { | ||
328 | /* Nb of input bytes read */ | ||
329 | return (int) (((const char *)ip) - source); | ||
330 | } | ||
167 | 331 | ||
168 | /* write overflow error detected */ | 332 | /* Overflow error detected */ |
169 | _output_error: | 333 | _output_error: |
170 | return -1; | 334 | return -1; |
171 | } | 335 | } |
172 | 336 | ||
173 | static int lz4_uncompress_unknownoutputsize(const char *source, char *dest, | 337 | int LZ4_decompress_safe(const char *source, char *dest, |
174 | int isize, size_t maxoutputsize) | 338 | int compressedSize, int maxDecompressedSize) |
175 | { | 339 | { |
176 | const BYTE *ip = (const BYTE *) source; | 340 | return LZ4_decompress_generic(source, dest, compressedSize, |
177 | const BYTE *const iend = ip + isize; | 341 | maxDecompressedSize, endOnInputSize, full, 0, |
178 | const BYTE *ref; | 342 | noDict, (BYTE *)dest, NULL, 0); |
179 | 343 | } | |
180 | 344 | ||
181 | BYTE *op = (BYTE *) dest; | 345 | int LZ4_decompress_safe_partial(const char *source, char *dest, |
182 | BYTE * const oend = op + maxoutputsize; | 346 | int compressedSize, int targetOutputSize, int maxDecompressedSize) |
183 | BYTE *cpy; | 347 | { |
348 | return LZ4_decompress_generic(source, dest, compressedSize, | ||
349 | maxDecompressedSize, endOnInputSize, partial, | ||
350 | targetOutputSize, noDict, (BYTE *)dest, NULL, 0); | ||
351 | } | ||
184 | 352 | ||
185 | /* Main Loop */ | 353 | int LZ4_decompress_fast(const char *source, char *dest, int originalSize) |
186 | while (ip < iend) { | 354 | { |
355 | return LZ4_decompress_generic(source, dest, 0, originalSize, | ||
356 | endOnOutputSize, full, 0, withPrefix64k, | ||
357 | (BYTE *)(dest - 64 * KB), NULL, 64 * KB); | ||
358 | } | ||
187 | 359 | ||
188 | unsigned token; | 360 | int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode, |
189 | size_t length; | 361 | const char *dictionary, int dictSize) |
362 | { | ||
363 | LZ4_streamDecode_t_internal *lz4sd = (LZ4_streamDecode_t_internal *) LZ4_streamDecode; | ||
190 | 364 | ||
191 | /* get runlength */ | 365 | lz4sd->prefixSize = (size_t) dictSize; |
192 | token = *ip++; | 366 | lz4sd->prefixEnd = (const BYTE *) dictionary + dictSize; |
193 | length = (token >> ML_BITS); | 367 | lz4sd->externalDict = NULL; |
194 | if (length == RUN_MASK) { | 368 | lz4sd->extDictSize = 0; |
195 | int s = 255; | 369 | return 1; |
196 | while ((ip < iend) && (s == 255)) { | 370 | } |
197 | s = *ip++; | ||
198 | if (unlikely(length > (size_t)(length + s))) | ||
199 | goto _output_error; | ||
200 | length += s; | ||
201 | } | ||
202 | } | ||
203 | /* copy literals */ | ||
204 | cpy = op + length; | ||
205 | if ((cpy > oend - COPYLENGTH) || | ||
206 | (ip + length > iend - COPYLENGTH)) { | ||
207 | |||
208 | if (cpy > oend) | ||
209 | goto _output_error;/* writes beyond buffer */ | ||
210 | |||
211 | if (ip + length != iend) | ||
212 | goto _output_error;/* | ||
213 | * Error: LZ4 format requires | ||
214 | * to consume all input | ||
215 | * at this stage | ||
216 | */ | ||
217 | memcpy(op, ip, length); | ||
218 | op += length; | ||
219 | break;/* Necessarily EOF, due to parsing restrictions */ | ||
220 | } | ||
221 | LZ4_WILDCOPY(ip, op, cpy); | ||
222 | ip -= (op - cpy); | ||
223 | op = cpy; | ||
224 | 371 | ||
225 | /* get offset */ | 372 | /* |
226 | LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip); | 373 | * *_continue() : |
227 | ip += 2; | 374 | * These decoding functions allow decompression of multiple blocks |
228 | if (ref < (BYTE * const) dest) | 375 | * in "streaming" mode. |
229 | goto _output_error; | 376 | * Previously decoded blocks must still be available at the memory |
230 | /* | 377 | * position where they were decoded. |
231 | * Error : offset creates reference | 378 | * If it's not possible, save the relevant part of |
232 | * outside of destination buffer | 379 | * decoded data into a safe buffer, |
233 | */ | 380 | * and indicate where it stands using LZ4_setStreamDecode() |
381 | */ | ||
382 | int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode, | ||
383 | const char *source, char *dest, int compressedSize, int maxOutputSize) | ||
384 | { | ||
385 | LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse; | ||
386 | int result; | ||
387 | |||
388 | if (lz4sd->prefixEnd == (BYTE *)dest) { | ||
389 | result = LZ4_decompress_generic(source, dest, | ||
390 | compressedSize, | ||
391 | maxOutputSize, | ||
392 | endOnInputSize, full, 0, | ||
393 | usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, | ||
394 | lz4sd->externalDict, | ||
395 | lz4sd->extDictSize); | ||
396 | |||
397 | if (result <= 0) | ||
398 | return result; | ||
399 | |||
400 | lz4sd->prefixSize += result; | ||
401 | lz4sd->prefixEnd += result; | ||
402 | } else { | ||
403 | lz4sd->extDictSize = lz4sd->prefixSize; | ||
404 | lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize; | ||
405 | result = LZ4_decompress_generic(source, dest, | ||
406 | compressedSize, maxOutputSize, | ||
407 | endOnInputSize, full, 0, | ||
408 | usingExtDict, (BYTE *)dest, | ||
409 | lz4sd->externalDict, lz4sd->extDictSize); | ||
410 | if (result <= 0) | ||
411 | return result; | ||
412 | lz4sd->prefixSize = result; | ||
413 | lz4sd->prefixEnd = (BYTE *)dest + result; | ||
414 | } | ||
234 | 415 | ||
235 | /* get matchlength */ | 416 | return result; |
236 | length = (token & ML_MASK); | 417 | } |
237 | if (length == ML_MASK) { | ||
238 | while (ip < iend) { | ||
239 | int s = *ip++; | ||
240 | if (unlikely(length > (size_t)(length + s))) | ||
241 | goto _output_error; | ||
242 | length += s; | ||
243 | if (s == 255) | ||
244 | continue; | ||
245 | break; | ||
246 | } | ||
247 | } | ||
248 | 418 | ||
249 | /* copy repeated sequence */ | 419 | int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode, |
250 | if (unlikely((op - ref) < STEPSIZE)) { | 420 | const char *source, char *dest, int originalSize) |
251 | #if LZ4_ARCH64 | 421 | { |
252 | int dec64 = dec64table[op - ref]; | 422 | LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse; |
253 | #else | 423 | int result; |
254 | const int dec64 = 0; | 424 | |
255 | #endif | 425 | if (lz4sd->prefixEnd == (BYTE *)dest) { |
256 | op[0] = ref[0]; | 426 | result = LZ4_decompress_generic(source, dest, 0, originalSize, |
257 | op[1] = ref[1]; | 427 | endOnOutputSize, full, 0, |
258 | op[2] = ref[2]; | 428 | usingExtDict, |
259 | op[3] = ref[3]; | 429 | lz4sd->prefixEnd - lz4sd->prefixSize, |
260 | op += 4; | 430 | lz4sd->externalDict, lz4sd->extDictSize); |
261 | ref += 4; | 431 | |
262 | ref -= dec32table[op - ref]; | 432 | if (result <= 0) |
263 | PUT4(ref, op); | 433 | return result; |
264 | op += STEPSIZE - 4; | 434 | |
265 | ref -= dec64; | 435 | lz4sd->prefixSize += originalSize; |
266 | } else { | 436 | lz4sd->prefixEnd += originalSize; |
267 | LZ4_COPYSTEP(ref, op); | 437 | } else { |
268 | } | 438 | lz4sd->extDictSize = lz4sd->prefixSize; |
269 | cpy = op + length - (STEPSIZE-4); | 439 | lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize; |
270 | if (cpy > oend - COPYLENGTH) { | 440 | result = LZ4_decompress_generic(source, dest, 0, originalSize, |
271 | if (cpy > oend) | 441 | endOnOutputSize, full, 0, |
272 | goto _output_error; /* write outside of buf */ | 442 | usingExtDict, (BYTE *)dest, |
273 | #if LZ4_ARCH64 | 443 | lz4sd->externalDict, lz4sd->extDictSize); |
274 | if ((ref + COPYLENGTH) > oend) | 444 | if (result <= 0) |
275 | #else | 445 | return result; |
276 | if ((ref + COPYLENGTH) > oend || | 446 | lz4sd->prefixSize = originalSize; |
277 | (op + COPYLENGTH) > oend) | 447 | lz4sd->prefixEnd = (BYTE *)dest + originalSize; |
278 | #endif | ||
279 | goto _output_error; | ||
280 | LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH)); | ||
281 | while (op < cpy) | ||
282 | *op++ = *ref++; | ||
283 | op = cpy; | ||
284 | /* | ||
285 | * Check EOF (should never happen, since last 5 bytes | ||
286 | * are supposed to be literals) | ||
287 | */ | ||
288 | if (op == oend) | ||
289 | goto _output_error; | ||
290 | continue; | ||
291 | } | ||
292 | LZ4_SECURECOPY(ref, op, cpy); | ||
293 | op = cpy; /* correction */ | ||
294 | } | 448 | } |
295 | /* end of decoding */ | ||
296 | return (int) (((char *) op) - dest); | ||
297 | 449 | ||
298 | /* write overflow error detected */ | 450 | return result; |
299 | _output_error: | ||
300 | return -1; | ||
301 | } | 451 | } |
302 | 452 | ||
303 | int lz4_decompress(const unsigned char *src, size_t *src_len, | 453 | /* |
304 | unsigned char *dest, size_t actual_dest_len) | 454 | * Advanced decoding functions : |
455 | * *_usingDict() : | ||
456 | * These decoding functions work the same as "_continue" ones, | ||
457 | * but the dictionary must be explicitly provided within parameters | ||
458 | */ | ||
459 | static FORCE_INLINE int LZ4_decompress_usingDict_generic(const char *source, | ||
460 | char *dest, int compressedSize, int maxOutputSize, int safe, | ||
461 | const char *dictStart, int dictSize) | ||
305 | { | 462 | { |
306 | int ret = -1; | 463 | if (dictSize == 0) |
307 | int input_len = 0; | 464 | return LZ4_decompress_generic(source, dest, |
308 | 465 | compressedSize, maxOutputSize, safe, full, 0, | |
309 | input_len = lz4_uncompress(src, dest, actual_dest_len); | 466 | noDict, (BYTE *)dest, NULL, 0); |
310 | if (input_len < 0) | 467 | if (dictStart + dictSize == dest) { |
311 | goto exit_0; | 468 | if (dictSize >= (int)(64 * KB - 1)) |
312 | *src_len = input_len; | 469 | return LZ4_decompress_generic(source, dest, |
470 | compressedSize, maxOutputSize, safe, full, 0, | ||
471 | withPrefix64k, (BYTE *)dest - 64 * KB, NULL, 0); | ||
472 | return LZ4_decompress_generic(source, dest, compressedSize, | ||
473 | maxOutputSize, safe, full, 0, noDict, | ||
474 | (BYTE *)dest - dictSize, NULL, 0); | ||
475 | } | ||
476 | return LZ4_decompress_generic(source, dest, compressedSize, | ||
477 | maxOutputSize, safe, full, 0, usingExtDict, | ||
478 | (BYTE *)dest, (const BYTE *)dictStart, dictSize); | ||
479 | } | ||
313 | 480 | ||
314 | return 0; | 481 | int LZ4_decompress_safe_usingDict(const char *source, char *dest, |
315 | exit_0: | 482 | int compressedSize, int maxOutputSize, |
316 | return ret; | 483 | const char *dictStart, int dictSize) |
484 | { | ||
485 | return LZ4_decompress_usingDict_generic(source, dest, | ||
486 | compressedSize, maxOutputSize, 1, dictStart, dictSize); | ||
317 | } | 487 | } |
318 | #ifndef STATIC | ||
319 | EXPORT_SYMBOL(lz4_decompress); | ||
320 | #endif | ||
321 | 488 | ||
322 | int lz4_decompress_unknownoutputsize(const unsigned char *src, size_t src_len, | 489 | int LZ4_decompress_fast_usingDict(const char *source, char *dest, |
323 | unsigned char *dest, size_t *dest_len) | 490 | int originalSize, const char *dictStart, int dictSize) |
324 | { | 491 | { |
325 | int ret = -1; | 492 | return LZ4_decompress_usingDict_generic(source, dest, 0, |
326 | int out_len = 0; | 493 | originalSize, 0, dictStart, dictSize); |
327 | |||
328 | out_len = lz4_uncompress_unknownoutputsize(src, dest, src_len, | ||
329 | *dest_len); | ||
330 | if (out_len < 0) | ||
331 | goto exit_0; | ||
332 | *dest_len = out_len; | ||
333 | |||
334 | return 0; | ||
335 | exit_0: | ||
336 | return ret; | ||
337 | } | 494 | } |
495 | |||
338 | #ifndef STATIC | 496 | #ifndef STATIC |
339 | EXPORT_SYMBOL(lz4_decompress_unknownoutputsize); | 497 | EXPORT_SYMBOL(LZ4_decompress_safe); |
498 | EXPORT_SYMBOL(LZ4_decompress_safe_partial); | ||
499 | EXPORT_SYMBOL(LZ4_decompress_fast); | ||
500 | EXPORT_SYMBOL(LZ4_setStreamDecode); | ||
501 | EXPORT_SYMBOL(LZ4_decompress_safe_continue); | ||
502 | EXPORT_SYMBOL(LZ4_decompress_fast_continue); | ||
503 | EXPORT_SYMBOL(LZ4_decompress_safe_usingDict); | ||
504 | EXPORT_SYMBOL(LZ4_decompress_fast_usingDict); | ||
340 | 505 | ||
341 | MODULE_LICENSE("Dual BSD/GPL"); | 506 | MODULE_LICENSE("Dual BSD/GPL"); |
342 | MODULE_DESCRIPTION("LZ4 Decompressor"); | 507 | MODULE_DESCRIPTION("LZ4 decompressor"); |
343 | #endif | 508 | #endif |
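A decode-side counterpart to the *_continue() contract spelled out in the comments above, again as an editorial sketch rather than patch content; next_block and the single contiguous output window are hypothetical. Blocks must be decoded in order, with earlier output still resident where it was produced:

#include <linux/lz4.h>

/* Illustrative only: decode a sequence of streamed blocks in order. */
static int example_decode(LZ4_streamDecode_t *sd,
			  char *window, int window_len)
{
	char *pos = window;
	const char *blk;
	int blk_len;

	LZ4_setStreamDecode(sd, NULL, 0); /* start with no dictionary */
	while (next_block(&blk, &blk_len)) { /* hypothetical block source */
		int n = LZ4_decompress_safe_continue(sd, blk, pos, blk_len,
						     window + window_len - pos);
		if (n <= 0)
			return -1; /* corrupt block or window exhausted */
		pos += n;
	}
	return pos - window;
}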
diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h index c79d7ea8a38e..00a0b58a0871 100644 --- a/lib/lz4/lz4defs.h +++ b/lib/lz4/lz4defs.h | |||
@@ -1,157 +1,227 @@ | |||
1 | #ifndef __LZ4DEFS_H__ | ||
2 | #define __LZ4DEFS_H__ | ||
3 | |||
1 | /* | 4 | /* |
2 | * lz4defs.h -- architecture specific defines | 5 | * lz4defs.h -- common and architecture specific defines for the kernel usage |
3 | * | 6 | |
4 | * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com> | 7 | * LZ4 - Fast LZ compression algorithm |
8 | * Copyright (C) 2011-2016, Yann Collet. | ||
9 | * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) | ||
10 | * Redistribution and use in source and binary forms, with or without | ||
11 | * modification, are permitted provided that the following conditions are | ||
12 | * met: | ||
13 | * * Redistributions of source code must retain the above copyright | ||
14 | * notice, this list of conditions and the following disclaimer. | ||
15 | * * Redistributions in binary form must reproduce the above | ||
16 | * copyright notice, this list of conditions and the following disclaimer | ||
17 | * in the documentation and/or other materials provided with the | ||
18 | * distribution. | ||
19 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
20 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
21 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
22 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
23 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
24 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
25 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
26 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
27 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
28 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
29 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
30 | * You can contact the author at : | ||
31 | * - LZ4 homepage : http://www.lz4.org | ||
32 | * - LZ4 source repository : https://github.com/lz4/lz4 | ||
5 | * | 33 | * |
6 | * This program is free software; you can redistribute it and/or modify | 34 | * Changed for kernel usage by: |
7 | * it under the terms of the GNU General Public License version 2 as | 35 | * Sven Schmidt <4sschmid@informatik.uni-hamburg.de> |
8 | * published by the Free Software Foundation. | ||
9 | */ | 36 | */ |
10 | 37 | ||
11 | /* | 38 | #include <asm/unaligned.h> |
12 | * Detects 64 bits mode | 39 | #include <linux/string.h> /* memset, memcpy */ |
13 | */ | 40 | |
41 | #define FORCE_INLINE __always_inline | ||
42 | |||
43 | /*-************************************ | ||
44 | * Basic Types | ||
45 | **************************************/ | ||
46 | #include <linux/types.h> | ||
47 | |||
48 | typedef uint8_t BYTE; | ||
49 | typedef uint16_t U16; | ||
50 | typedef uint32_t U32; | ||
51 | typedef int32_t S32; | ||
52 | typedef uint64_t U64; | ||
53 | typedef uintptr_t uptrval; | ||
54 | |||
55 | /*-************************************ | ||
56 | * Architecture specifics | ||
57 | **************************************/ | ||
14 | #if defined(CONFIG_64BIT) | 58 | #if defined(CONFIG_64BIT) |
15 | #define LZ4_ARCH64 1 | 59 | #define LZ4_ARCH64 1 |
16 | #else | 60 | #else |
17 | #define LZ4_ARCH64 0 | 61 | #define LZ4_ARCH64 0 |
18 | #endif | 62 | #endif |
19 | 63 | ||
20 | /* | 64 | #if defined(__LITTLE_ENDIAN) |
21 | * Architecture-specific macros | 65 | #define LZ4_LITTLE_ENDIAN 1 |
22 | */ | 66 | #else |
23 | #define BYTE u8 | 67 | #define LZ4_LITTLE_ENDIAN 0 |
24 | typedef struct _U16_S { u16 v; } U16_S; | ||
25 | typedef struct _U32_S { u32 v; } U32_S; | ||
26 | typedef struct _U64_S { u64 v; } U64_S; | ||
27 | #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) | ||
28 | |||
29 | #define A16(x) (((U16_S *)(x))->v) | ||
30 | #define A32(x) (((U32_S *)(x))->v) | ||
31 | #define A64(x) (((U64_S *)(x))->v) | ||
32 | |||
33 | #define PUT4(s, d) (A32(d) = A32(s)) | ||
34 | #define PUT8(s, d) (A64(d) = A64(s)) | ||
35 | |||
36 | #define LZ4_READ_LITTLEENDIAN_16(d, s, p) \ | ||
37 | (d = s - A16(p)) | ||
38 | |||
39 | #define LZ4_WRITE_LITTLEENDIAN_16(p, v) \ | ||
40 | do { \ | ||
41 | A16(p) = v; \ | ||
42 | p += 2; \ | ||
43 | } while (0) | ||
44 | #else /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ | ||
45 | |||
46 | #define A64(x) get_unaligned((u64 *)&(((U16_S *)(x))->v)) | ||
47 | #define A32(x) get_unaligned((u32 *)&(((U16_S *)(x))->v)) | ||
48 | #define A16(x) get_unaligned((u16 *)&(((U16_S *)(x))->v)) | ||
49 | |||
50 | #define PUT4(s, d) \ | ||
51 | put_unaligned(get_unaligned((const u32 *) s), (u32 *) d) | ||
52 | #define PUT8(s, d) \ | ||
53 | put_unaligned(get_unaligned((const u64 *) s), (u64 *) d) | ||
54 | |||
55 | #define LZ4_READ_LITTLEENDIAN_16(d, s, p) \ | ||
56 | (d = s - get_unaligned_le16(p)) | ||
57 | |||
58 | #define LZ4_WRITE_LITTLEENDIAN_16(p, v) \ | ||
59 | do { \ | ||
60 | put_unaligned_le16(v, (u16 *)(p)); \ | ||
61 | p += 2; \ | ||
62 | } while (0) | ||
63 | #endif | 68 | #endif |
64 | 69 | ||
65 | #define COPYLENGTH 8 | 70 | /*-************************************ |
66 | #define ML_BITS 4 | 71 | * Constants |
67 | #define ML_MASK ((1U << ML_BITS) - 1) | 72 | **************************************/ |
73 | #define MINMATCH 4 | ||
74 | |||
75 | #define WILDCOPYLENGTH 8 | ||
76 | #define LASTLITERALS 5 | ||
77 | #define MFLIMIT (WILDCOPYLENGTH + MINMATCH) | ||
78 | |||
79 | /* Increase this value ==> compression runs slower on incompressible data */ | ||
80 | #define LZ4_SKIPTRIGGER 6 | ||
81 | |||
82 | #define HASH_UNIT sizeof(size_t) | ||
83 | |||
84 | #define KB (1 << 10) | ||
85 | #define MB (1 << 20) | ||
86 | #define GB (1U << 30) | ||
87 | |||
88 | #define MAXD_LOG 16 | ||
89 | #define MAX_DISTANCE ((1 << MAXD_LOG) - 1) | ||
90 | #define STEPSIZE sizeof(size_t) | ||
91 | |||
92 | #define ML_BITS 4 | ||
93 | #define ML_MASK ((1U << ML_BITS) - 1) | ||
68 | #define RUN_BITS (8 - ML_BITS) | 94 | #define RUN_BITS (8 - ML_BITS) |
69 | #define RUN_MASK ((1U << RUN_BITS) - 1) | 95 | #define RUN_MASK ((1U << RUN_BITS) - 1) |
70 | #define MEMORY_USAGE 14 | 96 | |
71 | #define MINMATCH 4 | 97 | /*-************************************ |
72 | #define SKIPSTRENGTH 6 | 98 | * Reading and writing into memory |
73 | #define LASTLITERALS 5 | 99 | **************************************/ |
74 | #define MFLIMIT (COPYLENGTH + MINMATCH) | 100 | static FORCE_INLINE U16 LZ4_read16(const void *ptr) |
75 | #define MINLENGTH (MFLIMIT + 1) | 101 | { |
76 | #define MAXD_LOG 16 | 102 | return get_unaligned((const U16 *)ptr); |
77 | #define MAXD (1 << MAXD_LOG) | 103 | } |
78 | #define MAXD_MASK (u32)(MAXD - 1) | 104 | |
79 | #define MAX_DISTANCE (MAXD - 1) | 105 | static FORCE_INLINE U32 LZ4_read32(const void *ptr) |
80 | #define HASH_LOG (MAXD_LOG - 1) | 106 | { |
81 | #define HASHTABLESIZE (1 << HASH_LOG) | 107 | return get_unaligned((const U32 *)ptr); |
82 | #define MAX_NB_ATTEMPTS 256 | 108 | } |
83 | #define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH) | 109 | |
84 | #define LZ4_64KLIMIT ((1<<16) + (MFLIMIT - 1)) | 110 | static FORCE_INLINE size_t LZ4_read_ARCH(const void *ptr) |
85 | #define HASHLOG64K ((MEMORY_USAGE - 2) + 1) | 111 | { |
86 | #define HASH64KTABLESIZE (1U << HASHLOG64K) | 112 | return get_unaligned((const size_t *)ptr); |
87 | #define LZ4_HASH_VALUE(p) (((A32(p)) * 2654435761U) >> \ | 113 | } |
88 | ((MINMATCH * 8) - (MEMORY_USAGE-2))) | 114 | |
89 | #define LZ4_HASH64K_VALUE(p) (((A32(p)) * 2654435761U) >> \ | 115 | static FORCE_INLINE void LZ4_write16(void *memPtr, U16 value) |
90 | ((MINMATCH * 8) - HASHLOG64K)) | 116 | { |
91 | #define HASH_VALUE(p) (((A32(p)) * 2654435761U) >> \ | 117 | put_unaligned(value, (U16 *)memPtr); |
92 | ((MINMATCH * 8) - HASH_LOG)) | 118 | } |
93 | 119 | ||
94 | #if LZ4_ARCH64/* 64-bit */ | 120 | static FORCE_INLINE void LZ4_write32(void *memPtr, U32 value) |
95 | #define STEPSIZE 8 | 121 | { |
96 | 122 | put_unaligned(value, (U32 *)memPtr); | |
97 | #define LZ4_COPYSTEP(s, d) \ | 123 | } |
98 | do { \ | 124 | |
99 | PUT8(s, d); \ | 125 | static FORCE_INLINE U16 LZ4_readLE16(const void *memPtr) |
100 | d += 8; \ | 126 | { |
101 | s += 8; \ | 127 | return get_unaligned_le16(memPtr); |
102 | } while (0) | 128 | } |
103 | 129 | ||
104 | #define LZ4_COPYPACKET(s, d) LZ4_COPYSTEP(s, d) | 130 | static FORCE_INLINE void LZ4_writeLE16(void *memPtr, U16 value) |
105 | 131 | { | |
106 | #define LZ4_SECURECOPY(s, d, e) \ | 132 | return put_unaligned_le16(value, memPtr); |
107 | do { \ | 133 | } |
108 | if (d < e) { \ | 134 | |
109 | LZ4_WILDCOPY(s, d, e); \ | 135 | static FORCE_INLINE void LZ4_copy8(void *dst, const void *src) |
110 | } \ | 136 | { |
111 | } while (0) | 137 | #if LZ4_ARCH64 |
112 | #define HTYPE u32 | 138 | U64 a = get_unaligned((const U64 *)src); |
113 | 139 | ||
114 | #ifdef __BIG_ENDIAN | 140 | put_unaligned(a, (U64 *)dst); |
115 | #define LZ4_NBCOMMONBYTES(val) (__builtin_clzll(val) >> 3) | 141 | #else |
142 | U32 a = get_unaligned((const U32 *)src); | ||
143 | U32 b = get_unaligned((const U32 *)src + 1); | ||
144 | |||
145 | put_unaligned(a, (U32 *)dst); | ||
146 | put_unaligned(b, (U32 *)dst + 1); | ||
147 | #endif | ||
148 | } | ||
149 | |||
150 | /* | ||
151 | * customized variant of memcpy, | ||
152 | * which can overwrite up to 7 bytes beyond dstEnd | ||
153 | */ | ||
154 | static FORCE_INLINE void LZ4_wildCopy(void *dstPtr, | ||
155 | const void *srcPtr, void *dstEnd) | ||
156 | { | ||
157 | BYTE *d = (BYTE *)dstPtr; | ||
158 | const BYTE *s = (const BYTE *)srcPtr; | ||
159 | BYTE *const e = (BYTE *)dstEnd; | ||
160 | |||
161 | do { | ||
162 | LZ4_copy8(d, s); | ||
163 | d += 8; | ||
164 | s += 8; | ||
165 | } while (d < e); | ||
166 | } | ||
167 | |||
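Because LZ4_wildCopy() rounds every copy up to whole 8-byte steps, the caller must own up to 7 spare bytes past dstEnd; that is precisely what the WILDCOPYLENGTH and LASTLITERALS margins above protect. A hypothetical demonstration, with buffer sizes invented for illustration:

/* Illustrative only: a 12-byte request is executed as two 8-byte steps. */
static void wildcopy_demo(void)
{
	static const BYTE src[16] = "0123456789abcdef";
	BYTE dst[12 + 7]; /* 7 spare bytes absorb the overshoot */

	LZ4_wildCopy(dst, src, dst + 12); /* actually writes 16 bytes */
}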
168 | static FORCE_INLINE unsigned int LZ4_NbCommonBytes(register size_t val) | ||
169 | { | ||
170 | #if LZ4_LITTLE_ENDIAN | ||
171 | return __ffs(val) >> 3; | ||
116 | #else | 172 | #else |
117 | #define LZ4_NBCOMMONBYTES(val) (__builtin_ctzll(val) >> 3) | 173 | return (BITS_PER_LONG - 1 - __fls(val)) >> 3; |
174 | #endif | ||
175 | } | ||
176 | |||
177 | static FORCE_INLINE unsigned int LZ4_count( | ||
178 | const BYTE *pIn, | ||
179 | const BYTE *pMatch, | ||
180 | const BYTE *pInLimit) | ||
181 | { | ||
182 | const BYTE *const pStart = pIn; | ||
183 | |||
184 | while (likely(pIn < pInLimit - (STEPSIZE - 1))) { | ||
185 | size_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn); | ||
186 | |||
187 | if (!diff) { | ||
188 | pIn += STEPSIZE; | ||
189 | pMatch += STEPSIZE; | ||
190 | continue; | ||
191 | } | ||
192 | |||
193 | pIn += LZ4_NbCommonBytes(diff); | ||
194 | |||
195 | return (unsigned int)(pIn - pStart); | ||
196 | } | ||
197 | |||
198 | #if LZ4_ARCH64 | ||
199 | if ((pIn < (pInLimit - 3)) | ||
200 | && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { | ||
201 | pIn += 4; | ||
202 | pMatch += 4; | ||
203 | } | ||
118 | #endif | 204 | #endif |
119 | 205 | ||
120 | #else /* 32-bit */ | 206 | if ((pIn < (pInLimit - 1)) |
121 | #define STEPSIZE 4 | 207 | && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { |
208 | pIn += 2; | ||
209 | pMatch += 2; | ||
210 | } | ||
122 | 211 | ||
123 | #define LZ4_COPYSTEP(s, d) \ | 212 | if ((pIn < pInLimit) && (*pMatch == *pIn)) |
124 | do { \ | 213 | pIn++; |
125 | PUT4(s, d); \ | ||
126 | d += 4; \ | ||
127 | s += 4; \ | ||
128 | } while (0) | ||
129 | 214 | ||
130 | #define LZ4_COPYPACKET(s, d) \ | 215 | return (unsigned int)(pIn - pStart); |
131 | do { \ | 216 | } |
132 | LZ4_COPYSTEP(s, d); \ | ||
133 | LZ4_COPYSTEP(s, d); \ | ||
134 | } while (0) | ||
135 | 217 | ||
136 | #define LZ4_SECURECOPY LZ4_WILDCOPY | 218 | typedef enum { noLimit = 0, limitedOutput = 1 } limitedOutput_directive; |
137 | #define HTYPE const u8* | 219 | typedef enum { byPtr, byU32, byU16 } tableType_t; |
138 | 220 | ||
139 | #ifdef __BIG_ENDIAN | 221 | typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive; |
140 | #define LZ4_NBCOMMONBYTES(val) (__builtin_clz(val) >> 3) | 222 | typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive; |
141 | #else | ||
142 | #define LZ4_NBCOMMONBYTES(val) (__builtin_ctz(val) >> 3) | ||
143 | #endif | ||
144 | 223 | ||
145 | #endif | 224 | typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive; |
225 | typedef enum { full = 0, partial = 1 } earlyEnd_directive; | ||
146 | 226 | ||
147 | #define LZ4_WILDCOPY(s, d, e) \ | 227 | #endif |
148 | do { \ | ||
149 | LZ4_COPYPACKET(s, d); \ | ||
150 | } while (d < e) | ||
151 | |||
152 | #define LZ4_BLINDCOPY(s, d, l) \ | ||
153 | do { \ | ||
154 | u8 *e = (d) + l; \ | ||
155 | LZ4_WILDCOPY(s, d, e); \ | ||
156 | d = e; \ | ||
157 | } while (0) | ||
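As a worked example of LZ4_count() defined above (editorial illustration, not header content): the helper only measures how long two byte runs stay equal, which is why the compressor invokes it at ip + MINMATCH and match + MINMATCH, the first four bytes having already been verified by the hash probe.

/* Illustrative only: the buffers below first differ at byte index 9. */
static unsigned int count_demo(void)
{
	static const BYTE in[]  = "abcdefgh12345";
	static const BYTE ref[] = "abcdefgh1xxxx";

	/* common run "abcdefgh1" -> returns 9; '2' != 'x' stops the scan */
	return LZ4_count(in, ref, in + sizeof(in) - 1);
}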
diff --git a/lib/lz4/lz4hc_compress.c b/lib/lz4/lz4hc_compress.c index f344f76b6559..176f03b83e56 100644 --- a/lib/lz4/lz4hc_compress.c +++ b/lib/lz4/lz4hc_compress.c | |||
@@ -1,19 +1,17 @@ | |||
1 | /* | 1 | /* |
2 | * LZ4 HC - High Compression Mode of LZ4 | 2 | * LZ4 HC - High Compression Mode of LZ4 |
3 | * Copyright (C) 2011-2012, Yann Collet. | 3 | * Copyright (C) 2011-2015, Yann Collet. |
4 | * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) | ||
5 | * | 4 | * |
5 | * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
6 | * Redistribution and use in source and binary forms, with or without | 6 | * Redistribution and use in source and binary forms, with or without |
7 | * modification, are permitted provided that the following conditions are | 7 | * modification, are permitted provided that the following conditions are |
8 | * met: | 8 | * met: |
9 | * | 9 | * * Redistributions of source code must retain the above copyright |
10 | * * Redistributions of source code must retain the above copyright | 10 | * notice, this list of conditions and the following disclaimer. |
11 | * notice, this list of conditions and the following disclaimer. | 11 | * * Redistributions in binary form must reproduce the above |
12 | * * Redistributions in binary form must reproduce the above | ||
13 | * copyright notice, this list of conditions and the following disclaimer | 12 | * copyright notice, this list of conditions and the following disclaimer |
14 | * in the documentation and/or other materials provided with the | 13 | * in the documentation and/or other materials provided with the |
15 | * distribution. | 14 | * distribution. |
16 | * | ||
17 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | 15 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
18 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | 16 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
19 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | 17 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
@@ -25,323 +23,361 @@ | |||
25 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 23 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
26 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 24 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
27 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 25 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
28 | * | ||
29 | * You can contact the author at : | 26 | * You can contact the author at : |
30 | * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html | 27 | * - LZ4 homepage : http://www.lz4.org |
31 | * - LZ4 source repository : http://code.google.com/p/lz4/ | 28 | * - LZ4 source repository : https://github.com/lz4/lz4 |
32 | * | 29 | * |
33 | * Changed for kernel use by: | 30 | * Changed for kernel usage by: |
34 | * Chanho Min <chanho.min@lge.com> | 31 | * Sven Schmidt <4sschmid@informatik.uni-hamburg.de> |
35 | */ | 32 | */ |
36 | 33 | ||
37 | #include <linux/module.h> | 34 | /*-************************************ |
38 | #include <linux/kernel.h> | 35 | * Dependencies |
36 | **************************************/ | ||
39 | #include <linux/lz4.h> | 37 | #include <linux/lz4.h> |
40 | #include <asm/unaligned.h> | ||
41 | #include "lz4defs.h" | 38 | #include "lz4defs.h" |
39 | #include <linux/module.h> | ||
40 | #include <linux/kernel.h> | ||
41 | #include <linux/string.h> /* memset */ | ||
42 | 42 | ||
43 | struct lz4hc_data { | 43 | /* ************************************* |
44 | const u8 *base; | 44 | * Local Constants and types |
45 | HTYPE hashtable[HASHTABLESIZE]; | 45 | ***************************************/ |
46 | u16 chaintable[MAXD]; | ||
47 | const u8 *nexttoupdate; | ||
48 | } __attribute__((__packed__)); | ||
49 | 46 | ||
50 | static inline int lz4hc_init(struct lz4hc_data *hc4, const u8 *base) | 47 | #define OPTIMAL_ML (int)((ML_MASK - 1) + MINMATCH) |
48 | |||
49 | #define HASH_FUNCTION(i) (((i) * 2654435761U) \ | ||
50 | >> ((MINMATCH*8) - LZ4HC_HASH_LOG)) | ||
51 | #define DELTANEXTU16(p) chainTable[(U16)(p)] /* faster */ | ||
52 | |||
53 | static U32 LZ4HC_hashPtr(const void *ptr) | ||
51 | { | 54 | { |
52 | memset((void *)hc4->hashtable, 0, sizeof(hc4->hashtable)); | 55 | return HASH_FUNCTION(LZ4_read32(ptr)); |
53 | memset(hc4->chaintable, 0xFF, sizeof(hc4->chaintable)); | 56 | } |
54 | 57 | ||
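HASH_FUNCTION() is classic Knuth multiplicative hashing: 2654435761 is 2^32 divided by the golden ratio, so the multiply spreads the first MINMATCH (4) input bytes across the high bits and the shift keeps LZ4HC_HASH_LOG of them. A standalone sketch of the same arithmetic (hash4 is an illustrative name; the hash-log width is left as a parameter since its value lives in the lz4 headers):

    /* Multiplicative hash over the first 4 input bytes; sketch only. */
    static unsigned int hash4(unsigned int first4, unsigned int hash_log)
    {
            return (first4 * 2654435761U) >> (32 - hash_log);
    }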
55 | #if LZ4_ARCH64 | 58 | /************************************** |
56 | hc4->nexttoupdate = base + 1; | 59 | * HC Compression |
57 | #else | 60 | **************************************/ |
58 | hc4->nexttoupdate = base; | 61 | static void LZ4HC_init(LZ4HC_CCtx_internal *hc4, const BYTE *start) |
59 | #endif | 62 | { |
60 | hc4->base = base; | 63 | memset((void *)hc4->hashTable, 0, sizeof(hc4->hashTable)); |
61 | return 1; | 64 | memset(hc4->chainTable, 0xFF, sizeof(hc4->chainTable)); |
65 | hc4->nextToUpdate = 64 * KB; | ||
66 | hc4->base = start - 64 * KB; | ||
67 | hc4->end = start; | ||
68 | hc4->dictBase = start - 64 * KB; | ||
69 | hc4->dictLimit = 64 * KB; | ||
70 | hc4->lowLimit = 64 * KB; | ||
62 | } | 71 | } |
63 | 72 | ||
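LZ4HC_init() biases base by 64 KB so the first real input byte gets index 64 * KB. Because hashTable is zero-filled, an empty slot decodes to index 0, which always fails the matchIndex >= lowLimit test below (lowLimit starts at 64 * KB), so no separate "empty bucket" check is needed. A small sketch of the index arithmetic, under that reading:

    /* With base = start - 64 * KB, a zeroed hash slot (index 0) sits a
     * full window behind every real position and is never followed. */
    static U32 pos_to_index(const BYTE *p, const BYTE *base)
    {
            return (U32)(p - base);  /* first input byte => 64 * KB */
    }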
64 | /* Update chains up to ip (excluded) */ | 73 | /* Update chains up to ip (excluded) */ |
65 | static inline void lz4hc_insert(struct lz4hc_data *hc4, const u8 *ip) | 74 | static FORCE_INLINE void LZ4HC_Insert(LZ4HC_CCtx_internal *hc4, |
75 | const BYTE *ip) | ||
66 | { | 76 | { |
67 | u16 *chaintable = hc4->chaintable; | 77 | U16 * const chainTable = hc4->chainTable; |
68 | HTYPE *hashtable = hc4->hashtable; | 78 | U32 * const hashTable = hc4->hashTable; |
69 | #if LZ4_ARCH64 | ||
70 | const BYTE * const base = hc4->base; | 79 | const BYTE * const base = hc4->base; |
71 | #else | 80 | U32 const target = (U32)(ip - base); |
72 | const int base = 0; | 81 | U32 idx = hc4->nextToUpdate; |
73 | #endif | 82 | |
83 | while (idx < target) { | ||
84 | U32 const h = LZ4HC_hashPtr(base + idx); | ||
85 | size_t delta = idx - hashTable[h]; | ||
74 | 86 | ||
75 | while (hc4->nexttoupdate < ip) { | ||
76 | const u8 *p = hc4->nexttoupdate; | ||
77 | size_t delta = p - (hashtable[HASH_VALUE(p)] + base); | ||
78 | if (delta > MAX_DISTANCE) | 87 | if (delta > MAX_DISTANCE) |
79 | delta = MAX_DISTANCE; | 88 | delta = MAX_DISTANCE; |
80 | chaintable[(size_t)(p) & MAXD_MASK] = (u16)delta; | ||
81 | hashtable[HASH_VALUE(p)] = (p) - base; | ||
82 | hc4->nexttoupdate++; | ||
83 | } | ||
84 | } | ||
85 | 89 | ||
86 | static inline size_t lz4hc_commonlength(const u8 *p1, const u8 *p2, | 90 | DELTANEXTU16(idx) = (U16)delta; |
87 | const u8 *const matchlimit) | ||
88 | { | ||
89 | const u8 *p1t = p1; | ||
90 | |||
91 | while (p1t < matchlimit - (STEPSIZE - 1)) { | ||
92 | #if LZ4_ARCH64 | ||
93 | u64 diff = A64(p2) ^ A64(p1t); | ||
94 | #else | ||
95 | u32 diff = A32(p2) ^ A32(p1t); | ||
96 | #endif | ||
97 | if (!diff) { | ||
98 | p1t += STEPSIZE; | ||
99 | p2 += STEPSIZE; | ||
100 | continue; | ||
101 | } | ||
102 | p1t += LZ4_NBCOMMONBYTES(diff); | ||
103 | return p1t - p1; | ||
104 | } | ||
105 | #if LZ4_ARCH64 | ||
106 | if ((p1t < (matchlimit-3)) && (A32(p2) == A32(p1t))) { | ||
107 | p1t += 4; | ||
108 | p2 += 4; | ||
109 | } | ||
110 | #endif | ||
111 | 91 | ||
112 | if ((p1t < (matchlimit - 1)) && (A16(p2) == A16(p1t))) { | 92 | hashTable[h] = idx; |
113 | p1t += 2; | 93 | idx++; |
114 | p2 += 2; | ||
115 | } | 94 | } |
116 | if ((p1t < matchlimit) && (*p2 == *p1t)) | 95 | |
117 | p1t++; | 96 | hc4->nextToUpdate = target; |
118 | return p1t - p1; | ||
119 | } | 97 | } |
120 | 98 | ||
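LZ4HC_Insert() maintains the two search structures together: hashTable[h] holds the newest position whose first four bytes hash to h, and chainTable (accessed through DELTANEXTU16) holds, for each position, the 16-bit backward distance to the previous position with the same hash, clamped to MAX_DISTANCE. Walking one chain then looks roughly like this (walk_chain is an illustrative name; traversal is bounded by the attempt budget exactly as in the finders below):

    /* Newest-to-oldest traversal of one hash chain; sketch only. */
    static unsigned int walk_chain(const U16 *chainTable, U32 matchIndex,
            U32 lowLimit, int nbAttempts)
    {
            unsigned int candidates = 0;

            while ((matchIndex >= lowLimit) && nbAttempts--) {
                    candidates++;           /* position worth testing */
                    matchIndex -= chainTable[(U16)matchIndex];
            }

            return candidates;
    }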
121 | static inline int lz4hc_insertandfindbestmatch(struct lz4hc_data *hc4, | 99 | static FORCE_INLINE int LZ4HC_InsertAndFindBestMatch( |
122 | const u8 *ip, const u8 *const matchlimit, const u8 **matchpos) | 100 | LZ4HC_CCtx_internal *hc4, /* Index table will be updated */ |
101 | const BYTE *ip, | ||
102 | const BYTE * const iLimit, | ||
103 | const BYTE **matchpos, | ||
104 | const int maxNbAttempts) | ||
123 | { | 105 | { |
124 | u16 *const chaintable = hc4->chaintable; | 106 | U16 * const chainTable = hc4->chainTable; |
125 | HTYPE *const hashtable = hc4->hashtable; | 107 | U32 * const HashTable = hc4->hashTable; |
126 | const u8 *ref; | ||
127 | #if LZ4_ARCH64 | ||
128 | const BYTE * const base = hc4->base; | 108 | const BYTE * const base = hc4->base; |
129 | #else | 109 | const BYTE * const dictBase = hc4->dictBase; |
130 | const int base = 0; | 110 | const U32 dictLimit = hc4->dictLimit; |
131 | #endif | 111 | const U32 lowLimit = (hc4->lowLimit + 64 * KB > (U32)(ip - base)) |
132 | int nbattempts = MAX_NB_ATTEMPTS; | 112 | ? hc4->lowLimit |
133 | size_t repl = 0, ml = 0; | 113 | : (U32)(ip - base) - (64 * KB - 1); |
134 | u16 delta; | 114 | U32 matchIndex; |
115 | int nbAttempts = maxNbAttempts; | ||
116 | size_t ml = 0; | ||
135 | 117 | ||
136 | /* HC4 match finder */ | 118 | /* HC4 match finder */ |
137 | lz4hc_insert(hc4, ip); | 119 | LZ4HC_Insert(hc4, ip); |
138 | ref = hashtable[HASH_VALUE(ip)] + base; | 120 | matchIndex = HashTable[LZ4HC_hashPtr(ip)]; |
139 | 121 | ||
140 | /* potential repetition */ | 122 | while ((matchIndex >= lowLimit) |
141 | if (ref >= ip-4) { | 123 | && (nbAttempts)) { |
142 | /* confirmed */ | 124 | nbAttempts--; |
143 | if (A32(ref) == A32(ip)) { | 125 | if (matchIndex >= dictLimit) { |
144 | delta = (u16)(ip-ref); | 126 | const BYTE * const match = base + matchIndex; |
145 | repl = ml = lz4hc_commonlength(ip + MINMATCH, | 127 | |
146 | ref + MINMATCH, matchlimit) + MINMATCH; | 128 | if (*(match + ml) == *(ip + ml) |
147 | *matchpos = ref; | 129 | && (LZ4_read32(match) == LZ4_read32(ip))) { |
148 | } | 130 | size_t const mlt = LZ4_count(ip + MINMATCH, |
149 | ref -= (size_t)chaintable[(size_t)(ref) & MAXD_MASK]; | 131 | match + MINMATCH, iLimit) + MINMATCH; |
150 | } | ||
151 | 132 | ||
152 | while ((ref >= ip - MAX_DISTANCE) && nbattempts) { | ||
153 | nbattempts--; | ||
154 | if (*(ref + ml) == *(ip + ml)) { | ||
155 | if (A32(ref) == A32(ip)) { | ||
156 | size_t mlt = | ||
157 | lz4hc_commonlength(ip + MINMATCH, | ||
158 | ref + MINMATCH, matchlimit) + MINMATCH; | ||
159 | if (mlt > ml) { | 133 | if (mlt > ml) { |
160 | ml = mlt; | 134 | ml = mlt; |
161 | *matchpos = ref; | 135 | *matchpos = match; |
136 | } | ||
137 | } | ||
138 | } else { | ||
139 | const BYTE * const match = dictBase + matchIndex; | ||
140 | |||
141 | if (LZ4_read32(match) == LZ4_read32(ip)) { | ||
142 | size_t mlt; | ||
143 | const BYTE *vLimit = ip | ||
144 | + (dictLimit - matchIndex); | ||
145 | |||
146 | if (vLimit > iLimit) | ||
147 | vLimit = iLimit; | ||
148 | mlt = LZ4_count(ip + MINMATCH, | ||
149 | match + MINMATCH, vLimit) + MINMATCH; | ||
150 | if ((ip + mlt == vLimit) | ||
151 | && (vLimit < iLimit)) | ||
152 | mlt += LZ4_count(ip + mlt, | ||
153 | base + dictLimit, | ||
154 | iLimit); | ||
155 | if (mlt > ml) { | ||
156 | /* virtual matchpos */ | ||
157 | ml = mlt; | ||
158 | *matchpos = base + matchIndex; | ||
162 | } | 159 | } |
163 | } | 160 | } |
164 | } | 161 | } |
165 | ref -= (size_t)chaintable[(size_t)(ref) & MAXD_MASK]; | 162 | matchIndex -= DELTANEXTU16(matchIndex); |
166 | } | ||
167 | |||
168 | /* Complete table */ | ||
169 | if (repl) { | ||
170 | const BYTE *ptr = ip; | ||
171 | const BYTE *end; | ||
172 | end = ip + repl - (MINMATCH-1); | ||
173 | /* Pre-Load */ | ||
174 | while (ptr < end - delta) { | ||
175 | chaintable[(size_t)(ptr) & MAXD_MASK] = delta; | ||
176 | ptr++; | ||
177 | } | ||
178 | do { | ||
179 | chaintable[(size_t)(ptr) & MAXD_MASK] = delta; | ||
180 | /* Head of chain */ | ||
181 | hashtable[HASH_VALUE(ptr)] = (ptr) - base; | ||
182 | ptr++; | ||
183 | } while (ptr < end); | ||
184 | hc4->nexttoupdate = end; | ||
185 | } | 163 | } |
186 | 164 | ||
187 | return (int)ml; | 165 | return (int)ml; |
188 | } | 166 | } |
189 | 167 | ||
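When the best match starts in the external-dictionary segment (matchIndex < dictLimit), the bytes are compared against dictBase, but the position reported is the "virtual matchpos" base + matchIndex. That pointer need not be dereferenceable; it exists only so the encoder can derive the backward offset, which is all the format stores (match_offset is an illustrative name):

    /* Only the distance reaches the output stream; sketch. */
    static U16 match_offset(const BYTE *ip, const BYTE *virtualMatch)
    {
            return (U16)(ip - virtualMatch); /* written by LZ4_writeLE16() */
    }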
190 | static inline int lz4hc_insertandgetwidermatch(struct lz4hc_data *hc4, | 168 | static FORCE_INLINE int LZ4HC_InsertAndGetWiderMatch( |
191 | const u8 *ip, const u8 *startlimit, const u8 *matchlimit, int longest, | 169 | LZ4HC_CCtx_internal *hc4, |
192 | const u8 **matchpos, const u8 **startpos) | 170 | const BYTE * const ip, |
171 | const BYTE * const iLowLimit, | ||
172 | const BYTE * const iHighLimit, | ||
173 | int longest, | ||
174 | const BYTE **matchpos, | ||
175 | const BYTE **startpos, | ||
176 | const int maxNbAttempts) | ||
193 | { | 177 | { |
194 | u16 *const chaintable = hc4->chaintable; | 178 | U16 * const chainTable = hc4->chainTable; |
195 | HTYPE *const hashtable = hc4->hashtable; | 179 | U32 * const HashTable = hc4->hashTable; |
196 | #if LZ4_ARCH64 | ||
197 | const BYTE * const base = hc4->base; | 180 | const BYTE * const base = hc4->base; |
198 | #else | 181 | const U32 dictLimit = hc4->dictLimit; |
199 | const int base = 0; | 182 | const BYTE * const lowPrefixPtr = base + dictLimit; |
200 | #endif | 183 | const U32 lowLimit = (hc4->lowLimit + 64 * KB > (U32)(ip - base)) |
201 | const u8 *ref; | 184 | ? hc4->lowLimit |
202 | int nbattempts = MAX_NB_ATTEMPTS; | 185 | : (U32)(ip - base) - (64 * KB - 1); |
203 | int delta = (int)(ip - startlimit); | 186 | const BYTE * const dictBase = hc4->dictBase; |
187 | U32 matchIndex; | ||
188 | int nbAttempts = maxNbAttempts; | ||
189 | int delta = (int)(ip - iLowLimit); | ||
204 | 190 | ||
205 | /* First Match */ | 191 | /* First Match */ |
206 | lz4hc_insert(hc4, ip); | 192 | LZ4HC_Insert(hc4, ip); |
207 | ref = hashtable[HASH_VALUE(ip)] + base; | 193 | matchIndex = HashTable[LZ4HC_hashPtr(ip)]; |
208 | 194 | ||
209 | while ((ref >= ip - MAX_DISTANCE) && (ref >= hc4->base) | 195 | while ((matchIndex >= lowLimit) |
210 | && (nbattempts)) { | 196 | && (nbAttempts)) { |
211 | nbattempts--; | 197 | nbAttempts--; |
212 | if (*(startlimit + longest) == *(ref - delta + longest)) { | 198 | if (matchIndex >= dictLimit) { |
213 | if (A32(ref) == A32(ip)) { | 199 | const BYTE *matchPtr = base + matchIndex; |
214 | const u8 *reft = ref + MINMATCH; | 200 | |
215 | const u8 *ipt = ip + MINMATCH; | 201 | if (*(iLowLimit + longest) |
216 | const u8 *startt = ip; | 202 | == *(matchPtr - delta + longest)) { |
217 | 203 | if (LZ4_read32(matchPtr) == LZ4_read32(ip)) { | |
218 | while (ipt < matchlimit-(STEPSIZE - 1)) { | 204 | int mlt = MINMATCH + LZ4_count( |
219 | #if LZ4_ARCH64 | 205 | ip + MINMATCH, |
220 | u64 diff = A64(reft) ^ A64(ipt); | 206 | matchPtr + MINMATCH, |
221 | #else | 207 | iHighLimit); |
222 | u32 diff = A32(reft) ^ A32(ipt); | 208 | int back = 0; |
223 | #endif | 209 | |
224 | 210 | while ((ip + back > iLowLimit) | |
225 | if (!diff) { | 211 | && (matchPtr + back > lowPrefixPtr) |
226 | ipt += STEPSIZE; | 212 | && (ip[back - 1] == matchPtr[back - 1])) |
227 | reft += STEPSIZE; | 213 | back--; |
228 | continue; | 214 | |
215 | mlt -= back; | ||
216 | |||
217 | if (mlt > longest) { | ||
218 | longest = (int)mlt; | ||
219 | *matchpos = matchPtr + back; | ||
220 | *startpos = ip + back; | ||
229 | } | 221 | } |
230 | ipt += LZ4_NBCOMMONBYTES(diff); | ||
231 | goto _endcount; | ||
232 | } | ||
233 | #if LZ4_ARCH64 | ||
234 | if ((ipt < (matchlimit - 3)) | ||
235 | && (A32(reft) == A32(ipt))) { | ||
236 | ipt += 4; | ||
237 | reft += 4; | ||
238 | } | ||
239 | ipt += 2; | ||
240 | #endif | ||
241 | if ((ipt < (matchlimit - 1)) | ||
242 | && (A16(reft) == A16(ipt))) { | ||
243 | reft += 2; | ||
244 | } | 222 | } |
245 | if ((ipt < matchlimit) && (*reft == *ipt)) | 223 | } |
246 | ipt++; | 224 | } else { |
247 | _endcount: | 225 | const BYTE * const matchPtr = dictBase + matchIndex; |
248 | reft = ref; | 226 | |
249 | 227 | if (LZ4_read32(matchPtr) == LZ4_read32(ip)) { | |
250 | while ((startt > startlimit) | 228 | size_t mlt; |
251 | && (reft > hc4->base) | 229 | int back = 0; |
252 | && (startt[-1] == reft[-1])) { | 230 | const BYTE *vLimit = ip + (dictLimit - matchIndex); |
253 | startt--; | 231 | |
254 | reft--; | 232 | if (vLimit > iHighLimit) |
255 | } | 233 | vLimit = iHighLimit; |
256 | 234 | ||
257 | if ((ipt - startt) > longest) { | 235 | mlt = LZ4_count(ip + MINMATCH, |
258 | longest = (int)(ipt - startt); | 236 | matchPtr + MINMATCH, vLimit) + MINMATCH; |
259 | *matchpos = reft; | 237 | |
260 | *startpos = startt; | 238 | if ((ip + mlt == vLimit) && (vLimit < iHighLimit)) |
239 | mlt += LZ4_count(ip + mlt, base + dictLimit, | ||
240 | iHighLimit); | ||
241 | while ((ip + back > iLowLimit) | ||
242 | && (matchIndex + back > lowLimit) | ||
243 | && (ip[back - 1] == matchPtr[back - 1])) | ||
244 | back--; | ||
245 | |||
246 | mlt -= back; | ||
247 | |||
248 | if ((int)mlt > longest) { | ||
249 | longest = (int)mlt; | ||
250 | *matchpos = base + matchIndex + back; | ||
251 | *startpos = ip + back; | ||
261 | } | 252 | } |
262 | } | 253 | } |
263 | } | 254 | } |
264 | ref -= (size_t)chaintable[(size_t)(ref) & MAXD_MASK]; | 255 | |
256 | matchIndex -= DELTANEXTU16(matchIndex); | ||
265 | } | 257 | } |
258 | |||
266 | return longest; | 259 | return longest; |
267 | } | 260 | } |
268 | 261 | ||
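The wider-match variant differs from LZ4HC_InsertAndFindBestMatch() in that it may also grow a match backwards: literals just before ip that equal the bytes just before the match are folded in, which is why the function reports both *startpos and *matchpos. The backward step in isolation (a sketch of the back loops above, with an illustrative name):

    /* Returns back <= 0; the match start moves to ip + back. */
    static int extend_backwards(const BYTE *ip, const BYTE *match,
            const BYTE *iLowLimit, const BYTE *mLowLimit)
    {
            int back = 0;

            while ((ip + back > iLowLimit)
                            && (match + back > mLowLimit)
                            && (ip[back - 1] == match[back - 1]))
                    back--;

            return back;
    }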
269 | static inline int lz4_encodesequence(const u8 **ip, u8 **op, const u8 **anchor, | 262 | static FORCE_INLINE int LZ4HC_encodeSequence( |
270 | int ml, const u8 *ref) | 263 | const BYTE **ip, |
264 | BYTE **op, | ||
265 | const BYTE **anchor, | ||
266 | int matchLength, | ||
267 | const BYTE * const match, | ||
268 | limitedOutput_directive limitedOutputBuffer, | ||
269 | BYTE *oend) | ||
271 | { | 270 | { |
272 | int length, len; | 271 | int length; |
273 | u8 *token; | 272 | BYTE *token; |
274 | 273 | ||
275 | /* Encode Literal length */ | 274 | /* Encode Literal length */ |
276 | length = (int)(*ip - *anchor); | 275 | length = (int)(*ip - *anchor); |
277 | token = (*op)++; | 276 | token = (*op)++; |
277 | |||
278 | if ((limitedOutputBuffer) | ||
279 | && ((*op + (length>>8) | ||
280 | + length + (2 + 1 + LASTLITERALS)) > oend)) { | ||
281 | /* Check output limit */ | ||
282 | return 1; | ||
283 | } | ||
278 | if (length >= (int)RUN_MASK) { | 284 | if (length >= (int)RUN_MASK) { |
279 | *token = (RUN_MASK << ML_BITS); | 285 | int len; |
286 | |||
287 | *token = (RUN_MASK<<ML_BITS); | ||
280 | len = length - RUN_MASK; | 288 | len = length - RUN_MASK; |
281 | for (; len > 254 ; len -= 255) | 289 | for (; len > 254 ; len -= 255) |
282 | *(*op)++ = 255; | 290 | *(*op)++ = 255; |
283 | *(*op)++ = (u8)len; | 291 | *(*op)++ = (BYTE)len; |
284 | } else | 292 | } else |
285 | *token = (length << ML_BITS); | 293 | *token = (BYTE)(length<<ML_BITS); |
286 | 294 | ||
287 | /* Copy Literals */ | 295 | /* Copy Literals */ |
288 | LZ4_BLINDCOPY(*anchor, *op, length); | 296 | LZ4_wildCopy(*op, *anchor, (*op) + length); |
297 | *op += length; | ||
289 | 298 | ||
290 | /* Encode Offset */ | 299 | /* Encode Offset */ |
291 | LZ4_WRITE_LITTLEENDIAN_16(*op, (u16)(*ip - ref)); | 300 | LZ4_writeLE16(*op, (U16)(*ip - match)); |
301 | *op += 2; | ||
292 | 302 | ||
293 | /* Encode MatchLength */ | 303 | /* Encode MatchLength */ |
294 | len = (int)(ml - MINMATCH); | 304 | length = (int)(matchLength - MINMATCH); |
295 | if (len >= (int)ML_MASK) { | 305 | |
306 | if ((limitedOutputBuffer) | ||
307 | && (*op + (length>>8) | ||
308 | + (1 + LASTLITERALS) > oend)) { | ||
309 | /* Check output limit */ | ||
310 | return 1; | ||
311 | } | ||
312 | |||
313 | if (length >= (int)ML_MASK) { | ||
296 | *token += ML_MASK; | 314 | *token += ML_MASK; |
297 | len -= ML_MASK; | 315 | length -= ML_MASK; |
298 | for (; len > 509 ; len -= 510) { | 316 | |
317 | for (; length > 509 ; length -= 510) { | ||
299 | *(*op)++ = 255; | 318 | *(*op)++ = 255; |
300 | *(*op)++ = 255; | 319 | *(*op)++ = 255; |
301 | } | 320 | } |
302 | if (len > 254) { | 321 | |
303 | len -= 255; | 322 | if (length > 254) { |
323 | length -= 255; | ||
304 | *(*op)++ = 255; | 324 | *(*op)++ = 255; |
305 | } | 325 | } |
306 | *(*op)++ = (u8)len; | 326 | |
327 | *(*op)++ = (BYTE)length; | ||
307 | } else | 328 | } else |
308 | *token += len; | 329 | *token += (BYTE)(length); |
309 | 330 | ||
310 | /* Prepare next loop */ | 331 | /* Prepare next loop */ |
311 | *ip += ml; | 332 | *ip += matchLength; |
312 | *anchor = *ip; | 333 | *anchor = *ip; |
313 | 334 | ||
314 | return 0; | 335 | return 0; |
315 | } | 336 | } |
316 | 337 | ||
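LZ4HC_encodeSequence() emits the standard LZ4 sequence: a token byte whose high nibble is the literal length and whose low nibble is the match length minus MINMATCH, each saturating at 15 and spilling into extra bytes of 255 plus a remainder; then the literals, a 2-byte little-endian offset, and any match-length extension bytes. For example, 20 literals followed by a 23-byte match at distance 300 encode as token 0xFF, literal-extension byte 5 (20 - 15), the 20 literals, offset bytes 0x2C 0x01, and match-extension byte 4 (23 - 4 - 15). The length spilling, factored out as a sketch (the match path above unrolls the 255-loop in pairs, but the result is identical):

    /* Emit a length field that exceeded its 4-bit token nibble. */
    static BYTE *put_extended_length(BYTE *op, int length, int mask)
    {
            for (length -= mask; length > 254; length -= 255)
                    *op++ = 255;
            *op++ = (BYTE)length;

            return op;
    }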
317 | static int lz4_compresshcctx(struct lz4hc_data *ctx, | 338 | static int LZ4HC_compress_generic( |
318 | const char *source, | 339 | LZ4HC_CCtx_internal *const ctx, |
319 | char *dest, | 340 | const char * const source, |
320 | int isize) | 341 | char * const dest, |
342 | int const inputSize, | ||
343 | int const maxOutputSize, | ||
344 | int compressionLevel, | ||
345 | limitedOutput_directive limit | ||
346 | ) | ||
321 | { | 347 | { |
322 | const u8 *ip = (const u8 *)source; | 348 | const BYTE *ip = (const BYTE *) source; |
323 | const u8 *anchor = ip; | 349 | const BYTE *anchor = ip; |
324 | const u8 *const iend = ip + isize; | 350 | const BYTE * const iend = ip + inputSize; |
325 | const u8 *const mflimit = iend - MFLIMIT; | 351 | const BYTE * const mflimit = iend - MFLIMIT; |
326 | const u8 *const matchlimit = (iend - LASTLITERALS); | 352 | const BYTE * const matchlimit = (iend - LASTLITERALS); |
327 | 353 | ||
328 | u8 *op = (u8 *)dest; | 354 | BYTE *op = (BYTE *) dest; |
355 | BYTE * const oend = op + maxOutputSize; | ||
329 | 356 | ||
357 | unsigned int maxNbAttempts; | ||
330 | int ml, ml2, ml3, ml0; | 358 | int ml, ml2, ml3, ml0; |
331 | const u8 *ref = NULL; | 359 | const BYTE *ref = NULL; |
332 | const u8 *start2 = NULL; | 360 | const BYTE *start2 = NULL; |
333 | const u8 *ref2 = NULL; | 361 | const BYTE *ref2 = NULL; |
334 | const u8 *start3 = NULL; | 362 | const BYTE *start3 = NULL; |
335 | const u8 *ref3 = NULL; | 363 | const BYTE *ref3 = NULL; |
336 | const u8 *start0; | 364 | const BYTE *start0; |
337 | const u8 *ref0; | 365 | const BYTE *ref0; |
338 | int lastrun; | 366 | |
367 | /* init */ | ||
368 | if (compressionLevel > LZ4HC_MAX_CLEVEL) | ||
369 | compressionLevel = LZ4HC_MAX_CLEVEL; | ||
370 | if (compressionLevel < 1) | ||
371 | compressionLevel = LZ4HC_DEFAULT_CLEVEL; | ||
372 | maxNbAttempts = 1 << (compressionLevel - 1); | ||
373 | ctx->end += inputSize; | ||
339 | 374 | ||
340 | ip++; | 375 | ip++; |
341 | 376 | ||
342 | /* Main Loop */ | 377 | /* Main Loop */ |
343 | while (ip < mflimit) { | 378 | while (ip < mflimit) { |
344 | ml = lz4hc_insertandfindbestmatch(ctx, ip, matchlimit, (&ref)); | 379 | ml = LZ4HC_InsertAndFindBestMatch(ctx, ip, |
380 | matchlimit, (&ref), maxNbAttempts); | ||
345 | if (!ml) { | 381 | if (!ml) { |
346 | ip++; | 382 | ip++; |
347 | continue; | 383 | continue; |
@@ -351,51 +387,59 @@ static int lz4_compresshcctx(struct lz4hc_data *ctx, | |||
351 | start0 = ip; | 387 | start0 = ip; |
352 | ref0 = ref; | 388 | ref0 = ref; |
353 | ml0 = ml; | 389 | ml0 = ml; |
354 | _search2: | 390 | |
355 | if (ip+ml < mflimit) | 391 | _Search2: |
356 | ml2 = lz4hc_insertandgetwidermatch(ctx, ip + ml - 2, | 392 | if (ip + ml < mflimit) |
357 | ip + 1, matchlimit, ml, &ref2, &start2); | 393 | ml2 = LZ4HC_InsertAndGetWiderMatch(ctx, |
394 | ip + ml - 2, ip + 0, | ||
395 | matchlimit, ml, &ref2, | ||
396 | &start2, maxNbAttempts); | ||
358 | else | 397 | else |
359 | ml2 = ml; | 398 | ml2 = ml; |
360 | /* No better match */ | 399 | |
361 | if (ml2 == ml) { | 400 | if (ml2 == ml) { |
362 | lz4_encodesequence(&ip, &op, &anchor, ml, ref); | 401 | /* No better match */ |
402 | if (LZ4HC_encodeSequence(&ip, &op, | ||
403 | &anchor, ml, ref, limit, oend)) | ||
404 | return 0; | ||
363 | continue; | 405 | continue; |
364 | } | 406 | } |
365 | 407 | ||
366 | if (start0 < ip) { | 408 | if (start0 < ip) { |
367 | /* empirical */ | ||
368 | if (start2 < ip + ml0) { | 409 | if (start2 < ip + ml0) { |
410 | /* empirical */ | ||
369 | ip = start0; | 411 | ip = start0; |
370 | ref = ref0; | 412 | ref = ref0; |
371 | ml = ml0; | 413 | ml = ml0; |
372 | } | 414 | } |
373 | } | 415 | } |
374 | /* | 416 | |
375 | * Here, start0==ip | 417 | /* Here, start0 == ip */ |
376 | * First Match too small : removed | ||
377 | */ | ||
378 | if ((start2 - ip) < 3) { | 418 | if ((start2 - ip) < 3) { |
419 | /* First Match too small : removed */ | ||
379 | ml = ml2; | 420 | ml = ml2; |
380 | ip = start2; | 421 | ip = start2; |
381 | ref = ref2; | 422 | ref = ref2; |
382 | goto _search2; | 423 | goto _Search2; |
383 | } | 424 | } |
384 | 425 | ||
385 | _search3: | 426 | _Search3: |
386 | /* | 427 | /* |
387 | * Currently we have : | 428 | * Currently we have : |
388 | * ml2 > ml1, and | 429 | * ml2 > ml1, and |
389 | * ip1+3 <= ip2 (usually < ip1+ml1) | 430 | * ip1 + 3 <= ip2 (usually < ip1 + ml1) |
390 | */ | 431 | */ |
391 | if ((start2 - ip) < OPTIMAL_ML) { | 432 | if ((start2 - ip) < OPTIMAL_ML) { |
392 | int correction; | 433 | int correction; |
393 | int new_ml = ml; | 434 | int new_ml = ml; |
435 | |||
394 | if (new_ml > OPTIMAL_ML) | 436 | if (new_ml > OPTIMAL_ML) |
395 | new_ml = OPTIMAL_ML; | 437 | new_ml = OPTIMAL_ML; |
396 | if (ip + new_ml > start2 + ml2 - MINMATCH) | 438 | if (ip + new_ml > start2 + ml2 - MINMATCH) |
397 | new_ml = (int)(start2 - ip) + ml2 - MINMATCH; | 439 | new_ml = (int)(start2 - ip) + ml2 - MINMATCH; |
440 | |||
398 | correction = new_ml - (int)(start2 - ip); | 441 | correction = new_ml - (int)(start2 - ip); |
442 | |||
399 | if (correction > 0) { | 443 | if (correction > 0) { |
400 | start2 += correction; | 444 | start2 += correction; |
401 | ref2 += correction; | 445 | ref2 += correction; |
@@ -403,39 +447,44 @@ _search3: | |||
403 | } | 447 | } |
404 | } | 448 | } |
405 | /* | 449 | /* |
406 | * Now, we have start2 = ip+new_ml, | 450 | * Now, we have start2 = ip + new_ml, |
407 | * with new_ml=min(ml, OPTIMAL_ML=18) | 451 | * with new_ml = min(ml, OPTIMAL_ML = 18) |
408 | */ | 452 | */ |
453 | |||
409 | if (start2 + ml2 < mflimit) | 454 | if (start2 + ml2 < mflimit) |
410 | ml3 = lz4hc_insertandgetwidermatch(ctx, | 455 | ml3 = LZ4HC_InsertAndGetWiderMatch(ctx, |
411 | start2 + ml2 - 3, start2, matchlimit, | 456 | start2 + ml2 - 3, start2, |
412 | ml2, &ref3, &start3); | 457 | matchlimit, ml2, &ref3, &start3, |
458 | maxNbAttempts); | ||
413 | else | 459 | else |
414 | ml3 = ml2; | 460 | ml3 = ml2; |
415 | 461 | ||
416 | /* No better match : 2 sequences to encode */ | ||
417 | if (ml3 == ml2) { | 462 | if (ml3 == ml2) { |
463 | /* No better match : 2 sequences to encode */ | ||
418 | /* ip & ref are known; Now for ml */ | 464 | /* ip & ref are known; Now for ml */ |
419 | if (start2 < ip+ml) | 465 | if (start2 < ip + ml) |
420 | ml = (int)(start2 - ip); | 466 | ml = (int)(start2 - ip); |
421 | |||
422 | /* Now, encode 2 sequences */ | 467 | /* Now, encode 2 sequences */ |
423 | lz4_encodesequence(&ip, &op, &anchor, ml, ref); | 468 | if (LZ4HC_encodeSequence(&ip, &op, &anchor, |
469 | ml, ref, limit, oend)) | ||
470 | return 0; | ||
424 | ip = start2; | 471 | ip = start2; |
425 | lz4_encodesequence(&ip, &op, &anchor, ml2, ref2); | 472 | if (LZ4HC_encodeSequence(&ip, &op, &anchor, |
473 | ml2, ref2, limit, oend)) | ||
474 | return 0; | ||
426 | continue; | 475 | continue; |
427 | } | 476 | } |
428 | 477 | ||
429 | /* Not enough space for match 2 : remove it */ | ||
430 | if (start3 < ip + ml + 3) { | 478 | if (start3 < ip + ml + 3) { |
431 | /* | 479 | /* Not enough space for match 2 : remove it */ |
432 | * can write Seq1 immediately ==> Seq2 is removed, | ||
433 | * so Seq3 becomes Seq1 | ||
434 | */ | ||
435 | if (start3 >= (ip + ml)) { | 480 | if (start3 >= (ip + ml)) { |
481 | /* can write Seq1 immediately | ||
482 | * ==> Seq2 is removed, | ||
483 | * so Seq3 becomes Seq1 | ||
484 | */ | ||
436 | if (start2 < ip + ml) { | 485 | if (start2 < ip + ml) { |
437 | int correction = | 486 | int correction = (int)(ip + ml - start2); |
438 | (int)(ip + ml - start2); | 487 | |
439 | start2 += correction; | 488 | start2 += correction; |
440 | ref2 += correction; | 489 | ref2 += correction; |
441 | ml2 -= correction; | 490 | ml2 -= correction; |
@@ -446,35 +495,38 @@ _search3: | |||
446 | } | 495 | } |
447 | } | 496 | } |
448 | 497 | ||
449 | lz4_encodesequence(&ip, &op, &anchor, ml, ref); | 498 | if (LZ4HC_encodeSequence(&ip, &op, &anchor, |
450 | ip = start3; | 499 | ml, ref, limit, oend)) |
500 | return 0; | ||
501 | ip = start3; | ||
451 | ref = ref3; | 502 | ref = ref3; |
452 | ml = ml3; | 503 | ml = ml3; |
453 | 504 | ||
454 | start0 = start2; | 505 | start0 = start2; |
455 | ref0 = ref2; | 506 | ref0 = ref2; |
456 | ml0 = ml2; | 507 | ml0 = ml2; |
457 | goto _search2; | 508 | goto _Search2; |
458 | } | 509 | } |
459 | 510 | ||
460 | start2 = start3; | 511 | start2 = start3; |
461 | ref2 = ref3; | 512 | ref2 = ref3; |
462 | ml2 = ml3; | 513 | ml2 = ml3; |
463 | goto _search3; | 514 | goto _Search3; |
464 | } | 515 | } |
465 | 516 | ||
466 | /* | 517 | /* |
467 | * OK, now we have 3 ascending matches; let's write at least | 518 | * OK, now we have 3 ascending matches; |
468 | * the first one ip & ref are known; Now for ml | 519 | * let's write at least the first one |
469 | */ | 520 | * ip & ref are known; Now for ml |
521 | */ | ||
470 | if (start2 < ip + ml) { | 522 | if (start2 < ip + ml) { |
471 | if ((start2 - ip) < (int)ML_MASK) { | 523 | if ((start2 - ip) < (int)ML_MASK) { |
472 | int correction; | 524 | int correction; |
525 | |||
473 | if (ml > OPTIMAL_ML) | 526 | if (ml > OPTIMAL_ML) |
474 | ml = OPTIMAL_ML; | 527 | ml = OPTIMAL_ML; |
475 | if (ip + ml > start2 + ml2 - MINMATCH) | 528 | if (ip + ml > start2 + ml2 - MINMATCH) |
476 | ml = (int)(start2 - ip) + ml2 | 529 | ml = (int)(start2 - ip) + ml2 - MINMATCH; |
477 | - MINMATCH; | ||
478 | correction = ml - (int)(start2 - ip); | 530 | correction = ml - (int)(start2 - ip); |
479 | if (correction > 0) { | 531 | if (correction > 0) { |
480 | start2 += correction; | 532 | start2 += correction; |
@@ -484,7 +536,9 @@ _search3: | |||
484 | } else | 536 | } else |
485 | ml = (int)(start2 - ip); | 537 | ml = (int)(start2 - ip); |
486 | } | 538 | } |
487 | lz4_encodesequence(&ip, &op, &anchor, ml, ref); | 539 | if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml, |
540 | ref, limit, oend)) | ||
541 | return 0; | ||
488 | 542 | ||
489 | ip = start2; | 543 | ip = start2; |
490 | ref = ref2; | 544 | ref = ref2; |
@@ -494,46 +548,222 @@ _search3: | |||
494 | ref2 = ref3; | 548 | ref2 = ref3; |
495 | ml2 = ml3; | 549 | ml2 = ml3; |
496 | 550 | ||
497 | goto _search3; | 551 | goto _Search3; |
498 | } | 552 | } |
499 | 553 | ||
500 | /* Encode Last Literals */ | 554 | /* Encode Last Literals */ |
501 | lastrun = (int)(iend - anchor); | 555 | { |
502 | if (lastrun >= (int)RUN_MASK) { | 556 | int lastRun = (int)(iend - anchor); |
503 | *op++ = (RUN_MASK << ML_BITS); | 557 | |
504 | lastrun -= RUN_MASK; | 558 | if ((limit) |
505 | for (; lastrun > 254 ; lastrun -= 255) | 559 | && (((char *)op - dest) + lastRun + 1 |
506 | *op++ = 255; | 560 | + ((lastRun + 255 - RUN_MASK)/255) |
507 | *op++ = (u8) lastrun; | 561 | > (U32)maxOutputSize)) { |
508 | } else | 562 | /* Check output limit */ |
509 | *op++ = (lastrun << ML_BITS); | 563 | return 0; |
510 | memcpy(op, anchor, iend - anchor); | 564 | } |
511 | op += iend - anchor; | 565 | if (lastRun >= (int)RUN_MASK) { |
566 | *op++ = (RUN_MASK<<ML_BITS); | ||
567 | lastRun -= RUN_MASK; | ||
568 | for (; lastRun > 254 ; lastRun -= 255) | ||
569 | *op++ = 255; | ||
570 | *op++ = (BYTE) lastRun; | ||
571 | } else | ||
572 | *op++ = (BYTE)(lastRun<<ML_BITS); | ||
573 | memcpy(op, anchor, iend - anchor); | ||
574 | op += iend - anchor; | ||
575 | } | ||
576 | |||
512 | /* End */ | 577 | /* End */ |
513 | return (int) (((char *)op) - dest); | 578 | return (int) (((char *)op) - dest); |
514 | } | 579 | } |
515 | 580 | ||
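The compression level now directly controls search effort: after clamping to at most LZ4HC_MAX_CLEVEL (values below 1 fall back to LZ4HC_DEFAULT_CLEVEL), maxNbAttempts = 1 << (compressionLevel - 1), so each level doubles the number of chain candidates examined per position. Assuming the default level of 9, that is 256 attempts:

    /* Search effort per position as a function of level; sketch. */
    static unsigned int attempts_for_level(int compressionLevel)
    {
            return 1U << (compressionLevel - 1);    /* level 9 -> 256 */
    }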
516 | int lz4hc_compress(const unsigned char *src, size_t src_len, | 581 | static int LZ4_compress_HC_extStateHC( |
517 | unsigned char *dst, size_t *dst_len, void *wrkmem) | 582 | void *state, |
583 | const char *src, | ||
584 | char *dst, | ||
585 | int srcSize, | ||
586 | int maxDstSize, | ||
587 | int compressionLevel) | ||
518 | { | 588 | { |
519 | int ret = -1; | 589 | LZ4HC_CCtx_internal *ctx = &((LZ4_streamHC_t *)state)->internal_donotuse; |
520 | int out_len = 0; | ||
521 | 590 | ||
522 | struct lz4hc_data *hc4 = (struct lz4hc_data *)wrkmem; | 591 | if (((size_t)(state)&(sizeof(void *) - 1)) != 0) { |
523 | lz4hc_init(hc4, (const u8 *)src); | 592 | /* Error : state is not aligned |
524 | out_len = lz4_compresshcctx((struct lz4hc_data *)hc4, (const u8 *)src, | 593 | * for pointers (32 or 64 bits) |
525 | (char *)dst, (int)src_len); | 594 | */ |
595 | return 0; | ||
596 | } | ||
526 | 597 | ||
527 | if (out_len < 0) | 598 | LZ4HC_init(ctx, (const BYTE *)src); |
528 | goto exit; | ||
529 | 599 | ||
530 | *dst_len = out_len; | 600 | if (maxDstSize < LZ4_compressBound(srcSize)) |
531 | return 0; | 601 | return LZ4HC_compress_generic(ctx, src, dst, |
602 | srcSize, maxDstSize, compressionLevel, limitedOutput); | ||
603 | else | ||
604 | return LZ4HC_compress_generic(ctx, src, dst, | ||
605 | srcSize, maxDstSize, compressionLevel, noLimit); | ||
606 | } | ||
607 | |||
608 | int LZ4_compress_HC(const char *src, char *dst, int srcSize, | ||
609 | int maxDstSize, int compressionLevel, void *wrkmem) | ||
610 | { | ||
611 | return LZ4_compress_HC_extStateHC(wrkmem, src, dst, | ||
612 | srcSize, maxDstSize, compressionLevel); | ||
613 | } | ||
614 | EXPORT_SYMBOL(LZ4_compress_HC); | ||
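LZ4_compress_HC() is the new one-shot entry point: the caller supplies the working state (LZ4HC_MEM_COMPRESS bytes, per the updated <linux/lz4.h>) and gets back the compressed size, or 0 when the output does not fit or the state is misaligned. A hedged usage sketch (function and buffer names are illustrative, and LZ4HC_DEFAULT_CLEVEL is assumed to come from the updated header):

    #include <linux/lz4.h>
    #include <linux/vmalloc.h>

    static int hc_compress_once(const char *src, int src_len,
            char *dst, int dst_cap)
    {
            void *wrkmem = vmalloc(LZ4HC_MEM_COMPRESS);
            int out_len = 0;

            if (!wrkmem)
                    return 0;

            out_len = LZ4_compress_HC(src, dst, src_len, dst_cap,
                            LZ4HC_DEFAULT_CLEVEL, wrkmem);  /* 0 on failure */

            vfree(wrkmem);
            return out_len;
    }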
615 | |||
616 | /************************************** | ||
617 | * Streaming Functions | ||
618 | **************************************/ | ||
619 | void LZ4_resetStreamHC(LZ4_streamHC_t *LZ4_streamHCPtr, int compressionLevel) | ||
620 | { | ||
621 | LZ4_streamHCPtr->internal_donotuse.base = NULL; | ||
622 | LZ4_streamHCPtr->internal_donotuse.compressionLevel = (unsigned int)compressionLevel; | ||
623 | } | ||
624 | |||
625 | int LZ4_loadDictHC(LZ4_streamHC_t *LZ4_streamHCPtr, | ||
626 | const char *dictionary, | ||
627 | int dictSize) | ||
628 | { | ||
629 | LZ4HC_CCtx_internal *ctxPtr = &LZ4_streamHCPtr->internal_donotuse; | ||
630 | |||
631 | if (dictSize > 64 * KB) { | ||
632 | dictionary += dictSize - 64 * KB; | ||
633 | dictSize = 64 * KB; | ||
634 | } | ||
635 | LZ4HC_init(ctxPtr, (const BYTE *)dictionary); | ||
636 | if (dictSize >= 4) | ||
637 | LZ4HC_Insert(ctxPtr, (const BYTE *)dictionary + (dictSize - 3)); | ||
638 | ctxPtr->end = (const BYTE *)dictionary + dictSize; | ||
639 | return dictSize; | ||
640 | } | ||
641 | EXPORT_SYMBOL(LZ4_loadDictHC); | ||
532 | 642 | ||
533 | exit: | 643 | /* compression */ |
534 | return ret; | 644 | |
645 | static void LZ4HC_setExternalDict( | ||
646 | LZ4HC_CCtx_internal *ctxPtr, | ||
647 | const BYTE *newBlock) | ||
648 | { | ||
649 | if (ctxPtr->end >= ctxPtr->base + 4) { | ||
650 | /* Referencing remaining dictionary content */ | ||
651 | LZ4HC_Insert(ctxPtr, ctxPtr->end - 3); | ||
652 | } | ||
653 | |||
654 | /* | ||
655 | * Only one memory segment for extDict, | ||
656 | * so any previous extDict is lost at this stage | ||
657 | */ | ||
658 | ctxPtr->lowLimit = ctxPtr->dictLimit; | ||
659 | ctxPtr->dictLimit = (U32)(ctxPtr->end - ctxPtr->base); | ||
660 | ctxPtr->dictBase = ctxPtr->base; | ||
661 | ctxPtr->base = newBlock - ctxPtr->dictLimit; | ||
662 | ctxPtr->end = newBlock; | ||
663 | /* match referencing will resume from there */ | ||
664 | ctxPtr->nextToUpdate = ctxPtr->dictLimit; | ||
665 | } | ||
666 | EXPORT_SYMBOL(LZ4HC_setExternalDict); | ||
667 | |||
668 | static int LZ4_compressHC_continue_generic( | ||
669 | LZ4_streamHC_t *LZ4_streamHCPtr, | ||
670 | const char *source, | ||
671 | char *dest, | ||
672 | int inputSize, | ||
673 | int maxOutputSize, | ||
674 | limitedOutput_directive limit) | ||
675 | { | ||
676 | LZ4HC_CCtx_internal *ctxPtr = &LZ4_streamHCPtr->internal_donotuse; | ||
677 | |||
678 | /* auto - init if forgotten */ | ||
679 | if (ctxPtr->base == NULL) | ||
680 | LZ4HC_init(ctxPtr, (const BYTE *) source); | ||
681 | |||
682 | /* Check overflow */ | ||
683 | if ((size_t)(ctxPtr->end - ctxPtr->base) > 2 * GB) { | ||
684 | size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->base) | ||
685 | - ctxPtr->dictLimit; | ||
686 | if (dictSize > 64 * KB) | ||
687 | dictSize = 64 * KB; | ||
688 | LZ4_loadDictHC(LZ4_streamHCPtr, | ||
689 | (const char *)(ctxPtr->end) - dictSize, (int)dictSize); | ||
690 | } | ||
691 | |||
692 | /* Check if blocks follow each other */ | ||
693 | if ((const BYTE *)source != ctxPtr->end) | ||
694 | LZ4HC_setExternalDict(ctxPtr, (const BYTE *)source); | ||
695 | |||
696 | /* Check overlapping input/dictionary space */ | ||
697 | { | ||
698 | const BYTE *sourceEnd = (const BYTE *) source + inputSize; | ||
699 | const BYTE * const dictBegin = ctxPtr->dictBase + ctxPtr->lowLimit; | ||
700 | const BYTE * const dictEnd = ctxPtr->dictBase + ctxPtr->dictLimit; | ||
701 | |||
702 | if ((sourceEnd > dictBegin) | ||
703 | && ((const BYTE *)source < dictEnd)) { | ||
704 | if (sourceEnd > dictEnd) | ||
705 | sourceEnd = dictEnd; | ||
706 | ctxPtr->lowLimit = (U32)(sourceEnd - ctxPtr->dictBase); | ||
707 | |||
708 | if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4) | ||
709 | ctxPtr->lowLimit = ctxPtr->dictLimit; | ||
710 | } | ||
711 | } | ||
712 | |||
713 | return LZ4HC_compress_generic(ctxPtr, source, dest, | ||
714 | inputSize, maxOutputSize, ctxPtr->compressionLevel, limit); | ||
715 | } | ||
716 | |||
717 | int LZ4_compress_HC_continue( | ||
718 | LZ4_streamHC_t *LZ4_streamHCPtr, | ||
719 | const char *source, | ||
720 | char *dest, | ||
721 | int inputSize, | ||
722 | int maxOutputSize) | ||
723 | { | ||
724 | if (maxOutputSize < LZ4_compressBound(inputSize)) | ||
725 | return LZ4_compressHC_continue_generic(LZ4_streamHCPtr, | ||
726 | source, dest, inputSize, maxOutputSize, limitedOutput); | ||
727 | else | ||
728 | return LZ4_compressHC_continue_generic(LZ4_streamHCPtr, | ||
729 | source, dest, inputSize, maxOutputSize, noLimit); | ||
730 | } | ||
731 | EXPORT_SYMBOL(LZ4_compress_HC_continue); | ||
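The streaming API chains blocks through up to 64 KB of history: reset once per stream, optionally prime it with LZ4_loadDictHC() (which keeps only the last 64 KB of the dictionary), then compress consecutive blocks with LZ4_compress_HC_continue(). A hedged sketch of the calling sequence (names are illustrative):

    static void hc_stream_example(LZ4_streamHC_t *stream,
            const char *blk1, int len1, const char *blk2, int len2,
            char *out, int out_cap)
    {
            int n;

            LZ4_resetStreamHC(stream, LZ4HC_DEFAULT_CLEVEL);

            /* First block: no history yet. */
            n = LZ4_compress_HC_continue(stream, blk1, out, len1, out_cap);

            /* Second block: may reference the last 64 KB of blk1, so the
             * decompressor must be given the same history. */
            if (n > 0)
                    LZ4_compress_HC_continue(stream, blk2, out + n,
                                    len2, out_cap - n);
    }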
732 | |||
733 | /* dictionary saving */ | ||
734 | |||
735 | int LZ4_saveDictHC( | ||
736 | LZ4_streamHC_t *LZ4_streamHCPtr, | ||
737 | char *safeBuffer, | ||
738 | int dictSize) | ||
739 | { | ||
740 | LZ4HC_CCtx_internal *const streamPtr = &LZ4_streamHCPtr->internal_donotuse; | ||
741 | int const prefixSize = (int)(streamPtr->end | ||
742 | - (streamPtr->base + streamPtr->dictLimit)); | ||
743 | |||
744 | if (dictSize > 64 * KB) | ||
745 | dictSize = 64 * KB; | ||
746 | if (dictSize < 4) | ||
747 | dictSize = 0; | ||
748 | if (dictSize > prefixSize) | ||
749 | dictSize = prefixSize; | ||
750 | |||
751 | memmove(safeBuffer, streamPtr->end - dictSize, dictSize); | ||
752 | |||
753 | { | ||
754 | U32 const endIndex = (U32)(streamPtr->end - streamPtr->base); | ||
755 | |||
756 | streamPtr->end = (const BYTE *)safeBuffer + dictSize; | ||
757 | streamPtr->base = streamPtr->end - endIndex; | ||
758 | streamPtr->dictLimit = endIndex - dictSize; | ||
759 | streamPtr->lowLimit = endIndex - dictSize; | ||
760 | |||
761 | if (streamPtr->nextToUpdate < streamPtr->dictLimit) | ||
762 | streamPtr->nextToUpdate = streamPtr->dictLimit; | ||
763 | } | ||
764 | return dictSize; | ||
535 | } | 765 | } |
536 | EXPORT_SYMBOL(lz4hc_compress); | 766 | EXPORT_SYMBOL(LZ4_saveDictHC); |
537 | 767 | ||
538 | MODULE_LICENSE("Dual BSD/GPL"); | 768 | MODULE_LICENSE("Dual BSD/GPL"); |
539 | MODULE_DESCRIPTION("LZ4HC compressor"); | 769 | MODULE_DESCRIPTION("LZ4 HC compressor"); |
diff --git a/lib/rbtree.c b/lib/rbtree.c index 1f8b112a7c35..4ba2828a67c0 100644 --- a/lib/rbtree.c +++ b/lib/rbtree.c | |||
@@ -427,7 +427,9 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {} | |||
427 | static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {} | 427 | static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {} |
428 | 428 | ||
429 | static const struct rb_augment_callbacks dummy_callbacks = { | 429 | static const struct rb_augment_callbacks dummy_callbacks = { |
430 | dummy_propagate, dummy_copy, dummy_rotate | 430 | .propagate = dummy_propagate, |
431 | .copy = dummy_copy, | ||
432 | .rotate = dummy_rotate | ||
431 | }; | 433 | }; |
432 | 434 | ||
433 | void rb_insert_color(struct rb_node *node, struct rb_root *root) | 435 | void rb_insert_color(struct rb_node *node, struct rb_root *root) |
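The rbtree.c hunk is part of a tree-wide move to designated initializers: positional initialization of dummy_callbacks would silently bind the wrong callbacks if the fields of struct rb_augment_callbacks were ever reordered, while named fields stay correct and self-documenting. The same pattern in miniature, on a hypothetical struct:

    static void my_start(void) { }
    static void my_stop(void) { }

    struct ops {
            void (*start)(void);
            void (*stop)(void);
    };

    /* Survives field reordering; { my_start, my_stop } would not. */
    static const struct ops my_ops = {
            .start = my_start,
            .stop  = my_stop,
    };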
diff --git a/lib/sort.c b/lib/sort.c index fc20df42aa6f..975c6ef6fec7 100644 --- a/lib/sort.c +++ b/lib/sort.c | |||
@@ -4,6 +4,8 @@ | |||
4 | * Jan 23 2005 Matt Mackall <mpm@selenic.com> | 4 | * Jan 23 2005 Matt Mackall <mpm@selenic.com> |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
8 | |||
7 | #include <linux/types.h> | 9 | #include <linux/types.h> |
8 | #include <linux/export.h> | 10 | #include <linux/export.h> |
9 | #include <linux/sort.h> | 11 | #include <linux/sort.h> |
@@ -101,42 +103,3 @@ void sort(void *base, size_t num, size_t size, | |||
101 | } | 103 | } |
102 | 104 | ||
103 | EXPORT_SYMBOL(sort); | 105 | EXPORT_SYMBOL(sort); |
104 | |||
105 | #if 0 | ||
106 | #include <linux/slab.h> | ||
107 | /* a simple boot-time regression test */ | ||
108 | |||
109 | int cmpint(const void *a, const void *b) | ||
110 | { | ||
111 | return *(int *)a - *(int *)b; | ||
112 | } | ||
113 | |||
114 | static int sort_test(void) | ||
115 | { | ||
116 | int *a, i, r = 1; | ||
117 | |||
118 | a = kmalloc(1000 * sizeof(int), GFP_KERNEL); | ||
119 | BUG_ON(!a); | ||
120 | |||
121 | printk("testing sort()\n"); | ||
122 | |||
123 | for (i = 0; i < 1000; i++) { | ||
124 | r = (r * 725861) % 6599; | ||
125 | a[i] = r; | ||
126 | } | ||
127 | |||
128 | sort(a, 1000, sizeof(int), cmpint, NULL); | ||
129 | |||
130 | for (i = 0; i < 999; i++) | ||
131 | if (a[i] > a[i+1]) { | ||
132 | printk("sort() failed!\n"); | ||
133 | break; | ||
134 | } | ||
135 | |||
136 | kfree(a); | ||
137 | |||
138 | return 0; | ||
139 | } | ||
140 | |||
141 | module_init(sort_test); | ||
142 | #endif | ||
diff --git a/lib/test_kasan.c b/lib/test_kasan.c index fbdf87920093..0b1d3140fbb8 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c | |||
@@ -11,6 +11,7 @@ | |||
11 | 11 | ||
12 | #define pr_fmt(fmt) "kasan test: %s " fmt, __func__ | 12 | #define pr_fmt(fmt) "kasan test: %s " fmt, __func__ |
13 | 13 | ||
14 | #include <linux/delay.h> | ||
14 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
15 | #include <linux/mman.h> | 16 | #include <linux/mman.h> |
16 | #include <linux/mm.h> | 17 | #include <linux/mm.h> |
@@ -331,6 +332,38 @@ static noinline void __init kmem_cache_oob(void) | |||
331 | kmem_cache_destroy(cache); | 332 | kmem_cache_destroy(cache); |
332 | } | 333 | } |
333 | 334 | ||
335 | static noinline void __init memcg_accounted_kmem_cache(void) | ||
336 | { | ||
337 | int i; | ||
338 | char *p; | ||
339 | size_t size = 200; | ||
340 | struct kmem_cache *cache; | ||
341 | |||
342 | cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL); | ||
343 | if (!cache) { | ||
344 | pr_err("Cache allocation failed\n"); | ||
345 | return; | ||
346 | } | ||
347 | |||
348 | pr_info("allocate memcg accounted object\n"); | ||
349 | /* | ||
350 | * Several allocations with a delay to allow for lazy per memcg kmem | ||
351 | * cache creation. | ||
352 | */ | ||
353 | for (i = 0; i < 5; i++) { | ||
354 | p = kmem_cache_alloc(cache, GFP_KERNEL); | ||
355 | if (!p) { | ||
356 | pr_err("Allocation failed\n"); | ||
357 | goto free_cache; | ||
358 | } | ||
359 | kmem_cache_free(cache, p); | ||
360 | msleep(100); | ||
361 | } | ||
362 | |||
363 | free_cache: | ||
364 | kmem_cache_destroy(cache); | ||
365 | } | ||
366 | |||
334 | static char global_array[10]; | 367 | static char global_array[10]; |
335 | 368 | ||
336 | static noinline void __init kasan_global_oob(void) | 369 | static noinline void __init kasan_global_oob(void) |
@@ -460,6 +493,7 @@ static int __init kmalloc_tests_init(void) | |||
460 | kmalloc_uaf_memset(); | 493 | kmalloc_uaf_memset(); |
461 | kmalloc_uaf2(); | 494 | kmalloc_uaf2(); |
462 | kmem_cache_oob(); | 495 | kmem_cache_oob(); |
496 | memcg_accounted_kmem_cache(); | ||
463 | kasan_stack_oob(); | 497 | kasan_stack_oob(); |
464 | kasan_global_oob(); | 498 | kasan_global_oob(); |
465 | ksize_unpoisons_memory(); | 499 | ksize_unpoisons_memory(); |
diff --git a/lib/test_sort.c b/lib/test_sort.c new file mode 100644 index 000000000000..4db3911db50a --- /dev/null +++ b/lib/test_sort.c | |||
@@ -0,0 +1,44 @@ | |||
1 | #include <linux/sort.h> | ||
2 | #include <linux/slab.h> | ||
3 | #include <linux/init.h> | ||
4 | |||
5 | /* | ||
6 | * A simple boot-time regression test | ||
7 | * License: GPL | ||
8 | */ | ||
9 | |||
10 | #define TEST_LEN 1000 | ||
11 | |||
12 | static int __init cmpint(const void *a, const void *b) | ||
13 | { | ||
14 | return *(int *)a - *(int *)b; | ||
15 | } | ||
16 | |||
17 | static int __init test_sort_init(void) | ||
18 | { | ||
19 | int *a, i, r = 1, err = -ENOMEM; | ||
20 | |||
21 | a = kmalloc_array(TEST_LEN, sizeof(*a), GFP_KERNEL); | ||
22 | if (!a) | ||
23 | return err; | ||
24 | |||
25 | for (i = 0; i < TEST_LEN; i++) { | ||
26 | r = (r * 725861) % 6599; | ||
27 | a[i] = r; | ||
28 | } | ||
29 | |||
30 | sort(a, TEST_LEN, sizeof(*a), cmpint, NULL); | ||
31 | |||
32 | err = -EINVAL; | ||
33 | for (i = 0; i < TEST_LEN-1; i++) | ||
34 | if (a[i] > a[i+1]) { | ||
35 | pr_err("test has failed\n"); | ||
36 | goto exit; | ||
37 | } | ||
38 | err = 0; | ||
39 | pr_info("test passed\n"); | ||
40 | exit: | ||
41 | kfree(a); | ||
42 | return err; | ||
43 | } | ||
44 | subsys_initcall(test_sort_init); | ||
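The test exercises the exported sort() interface, where a NULL swap callback selects the built-in swap (word-wide when size and alignment allow) and the comparison callback follows the usual qsort() convention. A hedged sketch of sorting a caller-defined record through the same API (struct and function names are illustrative):

    #include <linux/sort.h>

    struct rec {
            int key;
            void *payload;
    };

    static int cmp_rec(const void *a, const void *b)
    {
            const struct rec *ra = a, *rb = b;

            return ra->key - rb->key;   /* safe while keys cannot overflow */
    }

    /* sort(recs, n, sizeof(*recs), cmp_rec, NULL); */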
diff --git a/mm/Makefile b/mm/Makefile index 433eaf9a876e..aa0aa17cb413 100644 --- a/mm/Makefile +++ b/mm/Makefile | |||
@@ -23,8 +23,10 @@ KCOV_INSTRUMENT_vmstat.o := n | |||
23 | 23 | ||
24 | mmu-y := nommu.o | 24 | mmu-y := nommu.o |
25 | mmu-$(CONFIG_MMU) := gup.o highmem.o memory.o mincore.o \ | 25 | mmu-$(CONFIG_MMU) := gup.o highmem.o memory.o mincore.o \ |
26 | mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \ | 26 | mlock.o mmap.o mprotect.o mremap.o msync.o \ |
27 | vmalloc.o pagewalk.o pgtable-generic.o | 27 | page_vma_mapped.o pagewalk.o pgtable-generic.o \ |
28 | rmap.o vmalloc.o | ||
29 | |||
28 | 30 | ||
29 | ifdef CONFIG_CROSS_MEMORY_ATTACH | 31 | ifdef CONFIG_CROSS_MEMORY_ATTACH |
30 | mmu-$(CONFIG_MMU) += process_vm_access.o | 32 | mmu-$(CONFIG_MMU) += process_vm_access.o |
diff --git a/mm/cma.c b/mm/cma.c --- a/mm/cma.c +++ b/mm/cma.c | |||
@@ -348,6 +348,32 @@ err: | |||
348 | return ret; | 348 | return ret; |
349 | } | 349 | } |
350 | 350 | ||
351 | #ifdef CONFIG_CMA_DEBUG | ||
352 | static void cma_debug_show_areas(struct cma *cma) | ||
353 | { | ||
354 | unsigned long next_zero_bit, next_set_bit; | ||
355 | unsigned long start = 0; | ||
356 | unsigned int nr_zero, nr_total = 0; | ||
357 | |||
358 | mutex_lock(&cma->lock); | ||
359 | pr_info("number of available pages: "); | ||
360 | for (;;) { | ||
361 | next_zero_bit = find_next_zero_bit(cma->bitmap, cma->count, start); | ||
362 | if (next_zero_bit >= cma->count) | ||
363 | break; | ||
364 | next_set_bit = find_next_bit(cma->bitmap, cma->count, next_zero_bit); | ||
365 | nr_zero = next_set_bit - next_zero_bit; | ||
366 | pr_cont("%s%u@%lu", nr_total ? "+" : "", nr_zero, next_zero_bit); | ||
367 | nr_total += nr_zero; | ||
368 | start = next_zero_bit + nr_zero; | ||
369 | } | ||
370 | pr_cont("=> %u free of %lu total pages\n", nr_total, cma->count); | ||
371 | mutex_unlock(&cma->lock); | ||
372 | } | ||
373 | #else | ||
374 | static inline void cma_debug_show_areas(struct cma *cma) { } | ||
375 | #endif | ||
376 | |||
351 | /** | 377 | /** |
352 | * cma_alloc() - allocate pages from contiguous area | 378 | * cma_alloc() - allocate pages from contiguous area |
353 | * @cma: Contiguous memory region for which the allocation is performed. | 379 | * @cma: Contiguous memory region for which the allocation is performed. |
@@ -357,14 +383,15 @@ err: | |||
357 | * This function allocates part of contiguous memory on specific | 383 | * This function allocates part of contiguous memory on specific |
358 | * contiguous memory area. | 384 | * contiguous memory area. |
359 | */ | 385 | */ |
360 | struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align) | 386 | struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, |
387 | gfp_t gfp_mask) | ||
361 | { | 388 | { |
362 | unsigned long mask, offset; | 389 | unsigned long mask, offset; |
363 | unsigned long pfn = -1; | 390 | unsigned long pfn = -1; |
364 | unsigned long start = 0; | 391 | unsigned long start = 0; |
365 | unsigned long bitmap_maxno, bitmap_no, bitmap_count; | 392 | unsigned long bitmap_maxno, bitmap_no, bitmap_count; |
366 | struct page *page = NULL; | 393 | struct page *page = NULL; |
367 | int ret; | 394 | int ret = -ENOMEM; |
368 | 395 | ||
369 | if (!cma || !cma->count) | 396 | if (!cma || !cma->count) |
370 | return NULL; | 397 | return NULL; |
@@ -402,7 +429,8 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align) | |||
402 | 429 | ||
403 | pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit); | 430 | pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit); |
404 | mutex_lock(&cma_mutex); | 431 | mutex_lock(&cma_mutex); |
405 | ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA); | 432 | ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, |
433 | gfp_mask); | ||
406 | mutex_unlock(&cma_mutex); | 434 | mutex_unlock(&cma_mutex); |
407 | if (ret == 0) { | 435 | if (ret == 0) { |
408 | page = pfn_to_page(pfn); | 436 | page = pfn_to_page(pfn); |
@@ -421,6 +449,12 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align) | |||
421 | 449 | ||
422 | trace_cma_alloc(pfn, page, count, align); | 450 | trace_cma_alloc(pfn, page, count, align); |
423 | 451 | ||
452 | if (ret) { | ||
453 | pr_info("%s: alloc failed, req-size: %zu pages, ret: %d\n", | ||
454 | __func__, count, ret); | ||
455 | cma_debug_show_areas(cma); | ||
456 | } | ||
457 | |||
424 | pr_debug("%s(): returned %p\n", __func__, page); | 458 | pr_debug("%s(): returned %p\n", __func__, page); |
425 | return page; | 459 | return page; |
426 | } | 460 | } |
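cma_alloc() now takes the gfp mask explicitly instead of assuming GFP_KERNEL, and a failed request dumps the free-range map via cma_debug_show_areas() when CONFIG_CMA_DEBUG is set. Existing callers convert as in the mm/cma_debug.c hunk below; a hedged caller-side sketch (wrapper name is illustrative):

    /* GFP_KERNEL preserves the old blocking behaviour; callers that
     * must not trigger direct reclaim can pass a lighter mask. */
    static struct page *grab_cma(struct cma *cma, size_t nr_pages,
            unsigned int align)
    {
            return cma_alloc(cma, nr_pages, align, GFP_KERNEL);
    }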
diff --git a/mm/cma_debug.c b/mm/cma_debug.c index f8e4b60db167..ffc0c3d0ae64 100644 --- a/mm/cma_debug.c +++ b/mm/cma_debug.c | |||
@@ -138,7 +138,7 @@ static int cma_alloc_mem(struct cma *cma, int count) | |||
138 | if (!mem) | 138 | if (!mem) |
139 | return -ENOMEM; | 139 | return -ENOMEM; |
140 | 140 | ||
141 | p = cma_alloc(cma, count, 0); | 141 | p = cma_alloc(cma, count, 0, GFP_KERNEL); |
142 | if (!p) { | 142 | if (!p) { |
143 | kfree(mem); | 143 | kfree(mem); |
144 | return -ENOMEM; | 144 | return -ENOMEM; |
diff --git a/mm/compaction.c b/mm/compaction.c index 0aa2757399ee..0fdfde016ee2 100644 --- a/mm/compaction.c +++ b/mm/compaction.c | |||
@@ -802,7 +802,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, | |||
802 | locked = false; | 802 | locked = false; |
803 | } | 803 | } |
804 | 804 | ||
805 | if (isolate_movable_page(page, isolate_mode)) | 805 | if (!isolate_movable_page(page, isolate_mode)) |
806 | goto isolate_success; | 806 | goto isolate_success; |
807 | } | 807 | } |
808 | 808 | ||
diff --git a/mm/dmapool.c b/mm/dmapool.c index abcbfe86c25a..cef82b8a9291 100644 --- a/mm/dmapool.c +++ b/mm/dmapool.c | |||
@@ -434,11 +434,11 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma) | |||
434 | spin_unlock_irqrestore(&pool->lock, flags); | 434 | spin_unlock_irqrestore(&pool->lock, flags); |
435 | if (pool->dev) | 435 | if (pool->dev) |
436 | dev_err(pool->dev, | 436 | dev_err(pool->dev, |
437 | "dma_pool_free %s, %p (bad vaddr)/%Lx\n", | 437 | "dma_pool_free %s, %p (bad vaddr)/%pad\n", |
438 | pool->name, vaddr, (unsigned long long)dma); | 438 | pool->name, vaddr, &dma); |
439 | else | 439 | else |
440 | pr_err("dma_pool_free %s, %p (bad vaddr)/%Lx\n", | 440 | pr_err("dma_pool_free %s, %p (bad vaddr)/%pad\n", |
441 | pool->name, vaddr, (unsigned long long)dma); | 441 | pool->name, vaddr, &dma); |
442 | return; | 442 | return; |
443 | } | 443 | } |
444 | { | 444 | { |
@@ -450,11 +450,11 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma) | |||
450 | } | 450 | } |
451 | spin_unlock_irqrestore(&pool->lock, flags); | 451 | spin_unlock_irqrestore(&pool->lock, flags); |
452 | if (pool->dev) | 452 | if (pool->dev) |
453 | dev_err(pool->dev, "dma_pool_free %s, dma %Lx already free\n", | 453 | dev_err(pool->dev, "dma_pool_free %s, dma %pad already free\n", |
454 | pool->name, (unsigned long long)dma); | 454 | pool->name, &dma); |
455 | else | 455 | else |
456 | pr_err("dma_pool_free %s, dma %Lx already free\n", | 456 | pr_err("dma_pool_free %s, dma %pad already free\n", |
457 | pool->name, (unsigned long long)dma); | 457 | pool->name, &dma); |
458 | return; | 458 | return; |
459 | } | 459 | } |
460 | } | 460 | } |
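The dmapool messages switch to the %pad printk extension, which prints a dma_addr_t at its configured width (32 or 64 bit depending on CONFIG_ARCH_DMA_ADDR_T_64BIT) and, like the other %p extensions, takes a pointer to the value rather than a widening cast. A minimal sketch (function name is illustrative):

    static void report_mapping(dma_addr_t dma_handle)
    {
            /* Note the address-of: %pad dereferences its argument. */
            pr_info("mapped at %pad\n", &dma_handle);
    }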
diff --git a/mm/filemap.c b/mm/filemap.c index 416d563468a3..1944c631e3e6 100644 --- a/mm/filemap.c +++ b/mm/filemap.c | |||
@@ -1008,9 +1008,12 @@ void page_endio(struct page *page, bool is_write, int err) | |||
1008 | unlock_page(page); | 1008 | unlock_page(page); |
1009 | } else { | 1009 | } else { |
1010 | if (err) { | 1010 | if (err) { |
1011 | struct address_space *mapping; | ||
1012 | |||
1011 | SetPageError(page); | 1013 | SetPageError(page); |
1012 | if (page->mapping) | 1014 | mapping = page_mapping(page); |
1013 | mapping_set_error(page->mapping, err); | 1015 | if (mapping) |
1016 | mapping_set_error(mapping, err); | ||
1014 | } | 1017 | } |
1015 | end_page_writeback(page); | 1018 | end_page_writeback(page); |
1016 | } | 1019 | } |
@@ -2169,7 +2172,6 @@ static void do_async_mmap_readahead(struct vm_area_struct *vma, | |||
2169 | 2172 | ||
2170 | /** | 2173 | /** |
2171 | * filemap_fault - read in file data for page fault handling | 2174 | * filemap_fault - read in file data for page fault handling |
2172 | * @vma: vma in which the fault was taken | ||
2173 | * @vmf: struct vm_fault containing details of the fault | 2175 | * @vmf: struct vm_fault containing details of the fault |
2174 | * | 2176 | * |
2175 | * filemap_fault() is invoked via the vma operations vector for a | 2177 | * filemap_fault() is invoked via the vma operations vector for a |
@@ -2191,10 +2193,10 @@ static void do_async_mmap_readahead(struct vm_area_struct *vma, | |||
2191 | * | 2193 | * |
2192 | * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set. | 2194 | * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set. |
2193 | */ | 2195 | */ |
2194 | int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 2196 | int filemap_fault(struct vm_fault *vmf) |
2195 | { | 2197 | { |
2196 | int error; | 2198 | int error; |
2197 | struct file *file = vma->vm_file; | 2199 | struct file *file = vmf->vma->vm_file; |
2198 | struct address_space *mapping = file->f_mapping; | 2200 | struct address_space *mapping = file->f_mapping; |
2199 | struct file_ra_state *ra = &file->f_ra; | 2201 | struct file_ra_state *ra = &file->f_ra; |
2200 | struct inode *inode = mapping->host; | 2202 | struct inode *inode = mapping->host; |
@@ -2216,12 +2218,12 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
2216 | * We found the page, so try async readahead before | 2218 | * We found the page, so try async readahead before |
2217 | * waiting for the lock. | 2219 | * waiting for the lock. |
2218 | */ | 2220 | */ |
2219 | do_async_mmap_readahead(vma, ra, file, page, offset); | 2221 | do_async_mmap_readahead(vmf->vma, ra, file, page, offset); |
2220 | } else if (!page) { | 2222 | } else if (!page) { |
2221 | /* No page in the page cache at all */ | 2223 | /* No page in the page cache at all */ |
2222 | do_sync_mmap_readahead(vma, ra, file, offset); | 2224 | do_sync_mmap_readahead(vmf->vma, ra, file, offset); |
2223 | count_vm_event(PGMAJFAULT); | 2225 | count_vm_event(PGMAJFAULT); |
2224 | mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); | 2226 | mem_cgroup_count_vm_event(vmf->vma->vm_mm, PGMAJFAULT); |
2225 | ret = VM_FAULT_MAJOR; | 2227 | ret = VM_FAULT_MAJOR; |
2226 | retry_find: | 2228 | retry_find: |
2227 | page = find_get_page(mapping, offset); | 2229 | page = find_get_page(mapping, offset); |
@@ -2229,7 +2231,7 @@ retry_find: | |||
2229 | goto no_cached_page; | 2231 | goto no_cached_page; |
2230 | } | 2232 | } |
2231 | 2233 | ||
2232 | if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) { | 2234 | if (!lock_page_or_retry(page, vmf->vma->vm_mm, vmf->flags)) { |
2233 | put_page(page); | 2235 | put_page(page); |
2234 | return ret | VM_FAULT_RETRY; | 2236 | return ret | VM_FAULT_RETRY; |
2235 | } | 2237 | } |
@@ -2396,14 +2398,14 @@ next: | |||
2396 | } | 2398 | } |
2397 | EXPORT_SYMBOL(filemap_map_pages); | 2399 | EXPORT_SYMBOL(filemap_map_pages); |
2398 | 2400 | ||
2399 | int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | 2401 | int filemap_page_mkwrite(struct vm_fault *vmf) |
2400 | { | 2402 | { |
2401 | struct page *page = vmf->page; | 2403 | struct page *page = vmf->page; |
2402 | struct inode *inode = file_inode(vma->vm_file); | 2404 | struct inode *inode = file_inode(vmf->vma->vm_file); |
2403 | int ret = VM_FAULT_LOCKED; | 2405 | int ret = VM_FAULT_LOCKED; |
2404 | 2406 | ||
2405 | sb_start_pagefault(inode->i_sb); | 2407 | sb_start_pagefault(inode->i_sb); |
2406 | file_update_time(vma->vm_file); | 2408 | file_update_time(vmf->vma->vm_file); |
2407 | lock_page(page); | 2409 | lock_page(page); |
2408 | if (page->mapping != inode->i_mapping) { | 2410 | if (page->mapping != inode->i_mapping) { |
2409 | unlock_page(page); | 2411 | unlock_page(page); |
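filemap_fault() and filemap_page_mkwrite() drop the vm_area_struct argument because struct vm_fault now carries vmf->vma, part of a series-wide change to the ->fault prototype. Out-of-tree handlers would be converted the same way; a hedged sketch of the new shape (names are illustrative):

    static int my_fault(struct vm_fault *vmf)
    {
            /* The VMA now travels inside vmf (vmf->vma->vm_file), and
             * the faulting offset is vmf->pgoff. Real handlers look up
             * the page, store it in vmf->page and return VM_FAULT_LOCKED. */
            if (!vmf->vma->vm_file)
                    return VM_FAULT_SIGBUS;

            return VM_FAULT_SIGBUS; /* sketch: nothing found */
    }

    static const struct vm_operations_struct my_vm_ops = {
            .fault = my_fault,
    };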
diff --git a/mm/gup.c b/mm/gup.c --- a/mm/gup.c +++ b/mm/gup.c | |||
@@ -253,6 +253,13 @@ struct page *follow_page_mask(struct vm_area_struct *vma, | |||
253 | return page; | 253 | return page; |
254 | return no_page_table(vma, flags); | 254 | return no_page_table(vma, flags); |
255 | } | 255 | } |
256 | if (pud_devmap(*pud)) { | ||
257 | ptl = pud_lock(mm, pud); | ||
258 | page = follow_devmap_pud(vma, address, pud, flags); | ||
259 | spin_unlock(ptl); | ||
260 | if (page) | ||
261 | return page; | ||
262 | } | ||
256 | if (unlikely(pud_bad(*pud))) | 263 | if (unlikely(pud_bad(*pud))) |
257 | return no_page_table(vma, flags); | 264 | return no_page_table(vma, flags); |
258 | 265 | ||
@@ -265,8 +272,6 @@ struct page *follow_page_mask(struct vm_area_struct *vma, | |||
265 | return page; | 272 | return page; |
266 | return no_page_table(vma, flags); | 273 | return no_page_table(vma, flags); |
267 | } | 274 | } |
268 | if ((flags & FOLL_NUMA) && pmd_protnone(*pmd)) | ||
269 | return no_page_table(vma, flags); | ||
270 | if (pmd_devmap(*pmd)) { | 275 | if (pmd_devmap(*pmd)) { |
271 | ptl = pmd_lock(mm, pmd); | 276 | ptl = pmd_lock(mm, pmd); |
272 | page = follow_devmap_pmd(vma, address, pmd, flags); | 277 | page = follow_devmap_pmd(vma, address, pmd, flags); |
@@ -277,6 +282,9 @@ struct page *follow_page_mask(struct vm_area_struct *vma, | |||
277 | if (likely(!pmd_trans_huge(*pmd))) | 282 | if (likely(!pmd_trans_huge(*pmd))) |
278 | return follow_page_pte(vma, address, pmd, flags); | 283 | return follow_page_pte(vma, address, pmd, flags); |
279 | 284 | ||
285 | if ((flags & FOLL_NUMA) && pmd_protnone(*pmd)) | ||
286 | return no_page_table(vma, flags); | ||
287 | |||
280 | ptl = pmd_lock(mm, pmd); | 288 | ptl = pmd_lock(mm, pmd); |
281 | if (unlikely(!pmd_trans_huge(*pmd))) { | 289 | if (unlikely(!pmd_trans_huge(*pmd))) { |
282 | spin_unlock(ptl); | 290 | spin_unlock(ptl); |
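The gup.c hunks do two things: add a pud_devmap() leg that mirrors the existing pmd one, and defer the FOLL_NUMA/pmd_protnone() test until the entry is known to be a huge pmd, so a pte-level protnone encoding cannot be misread at the pmd level (rationale inferred from the reordering; the diff itself does not state it). Reassembled, the pmd tail of follow_page_mask() now reads roughly:

    if (pmd_devmap(*pmd)) {
            ptl = pmd_lock(mm, pmd);
            page = follow_devmap_pmd(vma, address, pmd, flags);
            spin_unlock(ptl);
            if (page)
                    return page;
    }
    if (likely(!pmd_trans_huge(*pmd)))
            return follow_page_pte(vma, address, pmd, flags);

    /* Only consult the NUMA-hinting (protnone) bit on a genuine huge pmd. */
    if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
            return no_page_table(vma, flags);

    ptl = pmd_lock(mm, pmd);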
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index f9ecc2aeadfc..71e3dede95b4 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
@@ -757,6 +757,60 @@ int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, | |||
757 | } | 757 | } |
758 | EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd); | 758 | EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd); |
759 | 759 | ||
760 | #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD | ||
761 | static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma) | ||
762 | { | ||
763 | if (likely(vma->vm_flags & VM_WRITE)) | ||
764 | pud = pud_mkwrite(pud); | ||
765 | return pud; | ||
766 | } | ||
767 | |||
768 | static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, | ||
769 | pud_t *pud, pfn_t pfn, pgprot_t prot, bool write) | ||
770 | { | ||
771 | struct mm_struct *mm = vma->vm_mm; | ||
772 | pud_t entry; | ||
773 | spinlock_t *ptl; | ||
774 | |||
775 | ptl = pud_lock(mm, pud); | ||
776 | entry = pud_mkhuge(pfn_t_pud(pfn, prot)); | ||
777 | if (pfn_t_devmap(pfn)) | ||
778 | entry = pud_mkdevmap(entry); | ||
779 | if (write) { | ||
780 | entry = pud_mkyoung(pud_mkdirty(entry)); | ||
781 | entry = maybe_pud_mkwrite(entry, vma); | ||
782 | } | ||
783 | set_pud_at(mm, addr, pud, entry); | ||
784 | update_mmu_cache_pud(vma, addr, pud); | ||
785 | spin_unlock(ptl); | ||
786 | } | ||
787 | |||
788 | int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, | ||
789 | pud_t *pud, pfn_t pfn, bool write) | ||
790 | { | ||
791 | pgprot_t pgprot = vma->vm_page_prot; | ||
792 | /* | ||
793 | * If we had pud_special, we could avoid all these restrictions, | ||
794 | * but we need to be consistent with PTEs and architectures that | ||
795 | * can't support a 'special' bit. | ||
796 | */ | ||
797 | BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); | ||
798 | BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == | ||
799 | (VM_PFNMAP|VM_MIXEDMAP)); | ||
800 | BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); | ||
801 | BUG_ON(!pfn_t_devmap(pfn)); | ||
802 | |||
803 | if (addr < vma->vm_start || addr >= vma->vm_end) | ||
804 | return VM_FAULT_SIGBUS; | ||
805 | |||
806 | track_pfn_insert(vma, &pgprot, pfn); | ||
807 | |||
808 | insert_pfn_pud(vma, addr, pud, pfn, pgprot, write); | ||
809 | return VM_FAULT_NOPAGE; | ||
810 | } | ||
811 | EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud); | ||
812 | #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ | ||
813 | |||
760 | static void touch_pmd(struct vm_area_struct *vma, unsigned long addr, | 814 | static void touch_pmd(struct vm_area_struct *vma, unsigned long addr, |
761 | pmd_t *pmd) | 815 | pmd_t *pmd) |
762 | { | 816 | { |
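vmf_insert_pfn_pud() is exported for callers (DAX is the target) that can satisfy a fault with a PUD-sized device mapping. A hypothetical handler sketch follows; the handler name and example_phys_for() are illustrative stand-ins, not taken from this diff:

    /* Hypothetical PUD-sized fault handler for a devmap-backed mapping. */
    static int example_huge_fault(struct vm_fault *vmf, enum page_entry_size pe_size)
    {
            unsigned long addr = vmf->address & HPAGE_PUD_MASK;
            pfn_t pfn;

            if (pe_size != PE_SIZE_PUD)
                    return VM_FAULT_FALLBACK;

            /* example_phys_for() stands in for the driver's block lookup. */
            pfn = phys_to_pfn_t(example_phys_for(vmf), PFN_DEV | PFN_MAP);
            return vmf_insert_pfn_pud(vmf->vma, addr, vmf->pud, pfn,
                                      vmf->flags & FAULT_FLAG_WRITE);
    }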
@@ -887,6 +941,123 @@ out: | |||
887 | return ret; | 941 | return ret; |
888 | } | 942 | } |
889 | 943 | ||
944 | #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD | ||
945 | static void touch_pud(struct vm_area_struct *vma, unsigned long addr, | ||
946 | pud_t *pud) | ||
947 | { | ||
948 | pud_t _pud; | ||
949 | |||
950 | /* | ||
951 | * We should set the dirty bit only for FOLL_WRITE but for now | ||
952 | * the dirty bit in the pud is meaningless. If the dirty | ||
953 | * bit ever becomes meaningful and we only set it with | ||
954 | * FOLL_WRITE, an atomic set_bit will be required on the pud to | ||
955 | * set the young bit, instead of the current set_pud_at. | ||
956 | */ | ||
957 | _pud = pud_mkyoung(pud_mkdirty(*pud)); | ||
958 | if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK, | ||
959 | pud, _pud, 1)) | ||
960 | update_mmu_cache_pud(vma, addr, pud); | ||
961 | } | ||
962 | |||
963 | struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, | ||
964 | pud_t *pud, int flags) | ||
965 | { | ||
966 | unsigned long pfn = pud_pfn(*pud); | ||
967 | struct mm_struct *mm = vma->vm_mm; | ||
968 | struct dev_pagemap *pgmap; | ||
969 | struct page *page; | ||
970 | |||
971 | assert_spin_locked(pud_lockptr(mm, pud)); | ||
972 | |||
973 | if (flags & FOLL_WRITE && !pud_write(*pud)) | ||
974 | return NULL; | ||
975 | |||
976 | if (pud_present(*pud) && pud_devmap(*pud)) | ||
977 | /* pass */; | ||
978 | else | ||
979 | return NULL; | ||
980 | |||
981 | if (flags & FOLL_TOUCH) | ||
982 | touch_pud(vma, addr, pud); | ||
983 | |||
984 | /* | ||
985 | * device mapped pages can only be returned if the | ||
986 | * caller will manage the page reference count. | ||
987 | */ | ||
988 | if (!(flags & FOLL_GET)) | ||
989 | return ERR_PTR(-EEXIST); | ||
990 | |||
991 | pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT; | ||
992 | pgmap = get_dev_pagemap(pfn, NULL); | ||
993 | if (!pgmap) | ||
994 | return ERR_PTR(-EFAULT); | ||
995 | page = pfn_to_page(pfn); | ||
996 | get_page(page); | ||
997 | put_dev_pagemap(pgmap); | ||
998 | |||
999 | return page; | ||
1000 | } | ||
1001 | |||
1002 | int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, | ||
1003 | pud_t *dst_pud, pud_t *src_pud, unsigned long addr, | ||
1004 | struct vm_area_struct *vma) | ||
1005 | { | ||
1006 | spinlock_t *dst_ptl, *src_ptl; | ||
1007 | pud_t pud; | ||
1008 | int ret; | ||
1009 | |||
1010 | dst_ptl = pud_lock(dst_mm, dst_pud); | ||
1011 | src_ptl = pud_lockptr(src_mm, src_pud); | ||
1012 | spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); | ||
1013 | |||
1014 | ret = -EAGAIN; | ||
1015 | pud = *src_pud; | ||
1016 | if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud))) | ||
1017 | goto out_unlock; | ||
1018 | |||
1019 | /* | ||
1020 | * When page table lock is held, the huge zero pud should not be | ||
1021 | * under splitting since we don't split the page itself, only pud to | ||
1022 | * a page table. | ||
1023 | */ | ||
1024 | if (is_huge_zero_pud(pud)) { | ||
1025 | /* No huge zero pud yet */ | ||
1026 | } | ||
1027 | |||
1028 | pudp_set_wrprotect(src_mm, addr, src_pud); | ||
1029 | pud = pud_mkold(pud_wrprotect(pud)); | ||
1030 | set_pud_at(dst_mm, addr, dst_pud, pud); | ||
1031 | |||
1032 | ret = 0; | ||
1033 | out_unlock: | ||
1034 | spin_unlock(src_ptl); | ||
1035 | spin_unlock(dst_ptl); | ||
1036 | return ret; | ||
1037 | } | ||
1038 | |||
1039 | void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud) | ||
1040 | { | ||
1041 | pud_t entry; | ||
1042 | unsigned long haddr; | ||
1043 | bool write = vmf->flags & FAULT_FLAG_WRITE; | ||
1044 | |||
1045 | vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud); | ||
1046 | if (unlikely(!pud_same(*vmf->pud, orig_pud))) | ||
1047 | goto unlock; | ||
1048 | |||
1049 | entry = pud_mkyoung(orig_pud); | ||
1050 | if (write) | ||
1051 | entry = pud_mkdirty(entry); | ||
1052 | haddr = vmf->address & HPAGE_PUD_MASK; | ||
1053 | if (pudp_set_access_flags(vmf->vma, haddr, vmf->pud, entry, write)) | ||
1054 | update_mmu_cache_pud(vmf->vma, vmf->address, vmf->pud); | ||
1055 | |||
1056 | unlock: | ||
1057 | spin_unlock(vmf->ptl); | ||
1058 | } | ||
1059 | #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ | ||
1060 | |||
890 | void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd) | 1061 | void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd) |
891 | { | 1062 | { |
892 | pmd_t entry; | 1063 | pmd_t entry; |
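copy_huge_pud() follows the fork semantics of its pmd counterpart: the source entry is write-protected and the copy is installed old and read-only, so the first write in either process takes a fault (for DAX the mapping is then re-established rather than copied). The critical sequence, condensed from the hunk above:

    pudp_set_wrprotect(src_mm, addr, src_pud);   /* parent loses write access */
    pud = pud_mkold(pud_wrprotect(pud));         /* child copy: old, read-only */
    set_pud_at(dst_mm, addr, dst_pud, pud);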
@@ -1255,7 +1426,7 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd) | |||
1255 | } | 1426 | } |
1256 | 1427 | ||
1257 | /* See similar comment in do_numa_page for explanation */ | 1428 | /* See similar comment in do_numa_page for explanation */ |
1258 | if (!pmd_write(pmd)) | 1429 | if (!pmd_savedwrite(pmd)) |
1259 | flags |= TNF_NO_GROUP; | 1430 | flags |= TNF_NO_GROUP; |
1260 | 1431 | ||
1261 | /* | 1432 | /* |
@@ -1318,7 +1489,7 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd) | |||
1318 | goto out; | 1489 | goto out; |
1319 | clear_pmdnuma: | 1490 | clear_pmdnuma: |
1320 | BUG_ON(!PageLocked(page)); | 1491 | BUG_ON(!PageLocked(page)); |
1321 | was_writable = pmd_write(pmd); | 1492 | was_writable = pmd_savedwrite(pmd); |
1322 | pmd = pmd_modify(pmd, vma->vm_page_prot); | 1493 | pmd = pmd_modify(pmd, vma->vm_page_prot); |
1323 | pmd = pmd_mkyoung(pmd); | 1494 | pmd = pmd_mkyoung(pmd); |
1324 | if (was_writable) | 1495 | if (was_writable) |
@@ -1335,7 +1506,7 @@ out: | |||
1335 | 1506 | ||
1336 | if (page_nid != -1) | 1507 | if (page_nid != -1) |
1337 | task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, | 1508 | task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, |
1338 | vmf->flags); | 1509 | flags); |
1339 | 1510 | ||
1340 | return 0; | 1511 | return 0; |
1341 | } | 1512 | } |
@@ -1573,7 +1744,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, | |||
1573 | entry = pmdp_huge_get_and_clear_notify(mm, addr, pmd); | 1744 | entry = pmdp_huge_get_and_clear_notify(mm, addr, pmd); |
1574 | entry = pmd_modify(entry, newprot); | 1745 | entry = pmd_modify(entry, newprot); |
1575 | if (preserve_write) | 1746 | if (preserve_write) |
1576 | entry = pmd_mkwrite(entry); | 1747 | entry = pmd_mk_savedwrite(entry); |
1577 | ret = HPAGE_PMD_NR; | 1748 | ret = HPAGE_PMD_NR; |
1578 | set_pmd_at(mm, addr, pmd, entry); | 1749 | set_pmd_at(mm, addr, pmd, entry); |
1579 | BUG_ON(vma_is_anonymous(vma) && !preserve_write && | 1750 | BUG_ON(vma_is_anonymous(vma) && !preserve_write && |
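The pmd_mkwrite() to pmd_mk_savedwrite() switch is part of the series that lets an architecture (ppc64 is the driver) stash the write bit while an entry is made protnone for NUMA hinting. On architectures without such a bit, the saved-write helpers are expected to collapse to the plain write-bit helpers; a sketch of that fallback, with the guard name and location assumed rather than quoted from this diff:

    /* Assumed generic fallback: no separate saved-write bit, so the   */
    /* "saved" accessors simply alias the ordinary write-bit helpers.  */
    #ifndef pte_savedwrite                        /* hypothetical guard */
    #define pte_savedwrite(pte)     pte_write(pte)
    #define pte_mk_savedwrite(pte)  pte_mkwrite(pte)
    #define pmd_savedwrite(pmd)     pmd_write(pmd)
    #define pmd_mk_savedwrite(pmd)  pmd_mkwrite(pmd)
    #endif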
@@ -1601,6 +1772,84 @@ spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) | |||
1601 | return NULL; | 1772 | return NULL; |
1602 | } | 1773 | } |
1603 | 1774 | ||
1775 | /* | ||
1776 | * Returns page table lock pointer if a given pud maps a thp, NULL otherwise. | ||
1777 | * | ||
1778 | * Note that if it returns a lock pointer, this routine returns without | ||
1779 | * unlocking the page table lock. So callers must unlock it. | ||
1780 | */ | ||
1781 | spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma) | ||
1782 | { | ||
1783 | spinlock_t *ptl; | ||
1784 | |||
1785 | ptl = pud_lock(vma->vm_mm, pud); | ||
1786 | if (likely(pud_trans_huge(*pud) || pud_devmap(*pud))) | ||
1787 | return ptl; | ||
1788 | spin_unlock(ptl); | ||
1789 | return NULL; | ||
1790 | } | ||
1791 | |||
1792 | #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD | ||
1793 | int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, | ||
1794 | pud_t *pud, unsigned long addr) | ||
1795 | { | ||
1796 | pud_t orig_pud; | ||
1797 | spinlock_t *ptl; | ||
1798 | |||
1799 | ptl = __pud_trans_huge_lock(pud, vma); | ||
1800 | if (!ptl) | ||
1801 | return 0; | ||
1802 | /* | ||
1803 | * For architectures like ppc64 we look at deposited pgtable | ||
1804 | * when calling pudp_huge_get_and_clear. So do the | ||
1805 | * pgtable_trans_huge_withdraw after finishing pudp related | ||
1806 | * operations. | ||
1807 | */ | ||
1808 | orig_pud = pudp_huge_get_and_clear_full(tlb->mm, addr, pud, | ||
1809 | tlb->fullmm); | ||
1810 | tlb_remove_pud_tlb_entry(tlb, pud, addr); | ||
1811 | if (vma_is_dax(vma)) { | ||
1812 | spin_unlock(ptl); | ||
1813 | /* No zero page support yet */ | ||
1814 | } else { | ||
1815 | /* No support for anonymous PUD pages yet */ | ||
1816 | BUG(); | ||
1817 | } | ||
1818 | return 1; | ||
1819 | } | ||
1820 | |||
1821 | static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud, | ||
1822 | unsigned long haddr) | ||
1823 | { | ||
1824 | VM_BUG_ON(haddr & ~HPAGE_PUD_MASK); | ||
1825 | VM_BUG_ON_VMA(vma->vm_start > haddr, vma); | ||
1826 | VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma); | ||
1827 | VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud)); | ||
1828 | |||
1829 | count_vm_event(THP_SPLIT_PMD); | ||
1830 | |||
1831 | pudp_huge_clear_flush_notify(vma, haddr, pud); | ||
1832 | } | ||
1833 | |||
1834 | void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, | ||
1835 | unsigned long address) | ||
1836 | { | ||
1837 | spinlock_t *ptl; | ||
1838 | struct mm_struct *mm = vma->vm_mm; | ||
1839 | unsigned long haddr = address & HPAGE_PUD_MASK; | ||
1840 | |||
1841 | mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PUD_SIZE); | ||
1842 | ptl = pud_lock(mm, pud); | ||
1843 | if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud))) | ||
1844 | goto out; | ||
1845 | __split_huge_pud_locked(vma, pud, haddr); | ||
1846 | |||
1847 | out: | ||
1848 | spin_unlock(ptl); | ||
1849 | mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PUD_SIZE); | ||
1850 | } | ||
1851 | #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ | ||
1852 | |||
1604 | static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, | 1853 | static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, |
1605 | unsigned long haddr, pmd_t *pmd) | 1854 | unsigned long haddr, pmd_t *pmd) |
1606 | { | 1855 | { |
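Two details worth noting in the PUD zap/split code above: splitting a PUD entry simply clears it (there is no deposited page table to reuse, unlike the pmd case), and __split_huge_pud_locked() still bumps the THP_SPLIT_PMD counter because no PUD-specific vmstat event exists at this point in the series. Callers are expected to go through a checked wrapper in the usual PMD pattern; sketched per that convention (the real macro belongs in include/linux/huge_mm.h):

    #define split_huge_pud(__vma, __pud, __address)                       \
            do {                                                          \
                    pud_t *____pud = (__pud);                             \
                    if (pud_trans_huge(*____pud) || pud_devmap(*____pud)) \
                            __split_huge_pud(__vma, ____pud, __address);  \
            } while (0)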
@@ -1857,32 +2106,27 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma, | |||
1857 | static void freeze_page(struct page *page) | 2106 | static void freeze_page(struct page *page) |
1858 | { | 2107 | { |
1859 | enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS | | 2108 | enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS | |
1860 | TTU_RMAP_LOCKED; | 2109 | TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD; |
1861 | int i, ret; | 2110 | int ret; |
1862 | 2111 | ||
1863 | VM_BUG_ON_PAGE(!PageHead(page), page); | 2112 | VM_BUG_ON_PAGE(!PageHead(page), page); |
1864 | 2113 | ||
1865 | if (PageAnon(page)) | 2114 | if (PageAnon(page)) |
1866 | ttu_flags |= TTU_MIGRATION; | 2115 | ttu_flags |= TTU_MIGRATION; |
1867 | 2116 | ||
1868 | /* We only need TTU_SPLIT_HUGE_PMD once */ | 2117 | ret = try_to_unmap(page, ttu_flags); |
1869 | ret = try_to_unmap(page, ttu_flags | TTU_SPLIT_HUGE_PMD); | 2118 | VM_BUG_ON_PAGE(ret, page); |
1870 | for (i = 1; !ret && i < HPAGE_PMD_NR; i++) { | ||
1871 | /* Cut short if the page is unmapped */ | ||
1872 | if (page_count(page) == 1) | ||
1873 | return; | ||
1874 | |||
1875 | ret = try_to_unmap(page + i, ttu_flags); | ||
1876 | } | ||
1877 | VM_BUG_ON_PAGE(ret, page + i - 1); | ||
1878 | } | 2119 | } |
1879 | 2120 | ||
1880 | static void unfreeze_page(struct page *page) | 2121 | static void unfreeze_page(struct page *page) |
1881 | { | 2122 | { |
1882 | int i; | 2123 | int i; |
1883 | 2124 | if (PageTransHuge(page)) { | |
1884 | for (i = 0; i < HPAGE_PMD_NR; i++) | 2125 | remove_migration_ptes(page, page, true); |
1885 | remove_migration_ptes(page + i, page + i, true); | 2126 | } else { |
2127 | for (i = 0; i < HPAGE_PMD_NR; i++) | ||
2128 | remove_migration_ptes(page + i, page + i, true); | ||
2129 | } | ||
1886 | } | 2130 | } |
1887 | 2131 | ||
1888 | static void __split_huge_page_tail(struct page *head, int tail, | 2132 | static void __split_huge_page_tail(struct page *head, int tail, |
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 30e7709a5121..2e0e8159ce8e 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -1052,7 +1052,8 @@ static int __alloc_gigantic_page(unsigned long start_pfn, | |||
1052 | unsigned long nr_pages) | 1052 | unsigned long nr_pages) |
1053 | { | 1053 | { |
1054 | unsigned long end_pfn = start_pfn + nr_pages; | 1054 | unsigned long end_pfn = start_pfn + nr_pages; |
1055 | return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE); | 1055 | return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE, |
1056 | GFP_KERNEL); | ||
1056 | } | 1057 | } |
1057 | 1058 | ||
1058 | static bool pfn_range_valid_gigantic(struct zone *z, | 1059 | static bool pfn_range_valid_gigantic(struct zone *z, |
@@ -3142,7 +3143,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma) | |||
3142 | * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get | 3143 | * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get |
3143 | * this far. | 3144 | * this far. |
3144 | */ | 3145 | */ |
3145 | static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 3146 | static int hugetlb_vm_op_fault(struct vm_fault *vmf) |
3146 | { | 3147 | { |
3147 | BUG(); | 3148 | BUG(); |
3148 | return 0; | 3149 | return 0; |
diff --git a/mm/internal.h b/mm/internal.h index 8ab72f4374e0..ccfc2a2969f4 100644 --- a/mm/internal.h +++ b/mm/internal.h | |||
@@ -335,12 +335,15 @@ __vma_address(struct page *page, struct vm_area_struct *vma) | |||
335 | static inline unsigned long | 335 | static inline unsigned long |
336 | vma_address(struct page *page, struct vm_area_struct *vma) | 336 | vma_address(struct page *page, struct vm_area_struct *vma) |
337 | { | 337 | { |
338 | unsigned long address = __vma_address(page, vma); | 338 | unsigned long start, end; |
339 | |||
340 | start = __vma_address(page, vma); | ||
341 | end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1); | ||
339 | 342 | ||
340 | /* page should be within @vma mapping range */ | 343 | /* page should be within @vma mapping range */ |
341 | VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); | 344 | VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma); |
342 | 345 | ||
343 | return address; | 346 | return max(start, vma->vm_start); |
344 | } | 347 | } |
345 | 348 | ||
346 | #else /* !CONFIG_MMU */ | 349 | #else /* !CONFIG_MMU */ |
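The reworked vma_address() matters for partially mapped THPs: __vma_address() of a compound head can land below vma->vm_start when only tail pages fall inside the VMA, and the old VM_BUG_ON would fire even though the page genuinely intersects the mapping. A worked example with a 2MiB THP (512 subpages of 4KiB; toy numbers, illustration only):

    /* VMA covers [0x200000, 0x400000); the THP head would map at      */
    /* 0x180000, before vm_start, because only the tail overlaps.      */
    unsigned long start = 0x180000;                    /* __vma_address() */
    unsigned long end   = start + 4096UL * (512 - 1);  /* == 0x37f000    */
    /* end >= vm_start and start < vm_end, so the page does intersect  */
    /* the VMA; max(start, vm_start) == 0x200000 is returned.          */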
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index b2a0cff2bb35..25f0e6521f36 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c | |||
@@ -435,7 +435,7 @@ void kasan_cache_shrink(struct kmem_cache *cache) | |||
435 | quarantine_remove_cache(cache); | 435 | quarantine_remove_cache(cache); |
436 | } | 436 | } |
437 | 437 | ||
438 | void kasan_cache_destroy(struct kmem_cache *cache) | 438 | void kasan_cache_shutdown(struct kmem_cache *cache) |
439 | { | 439 | { |
440 | quarantine_remove_cache(cache); | 440 | quarantine_remove_cache(cache); |
441 | } | 441 | } |
diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c index dae929c02bbb..6f1ed1630873 100644 --- a/mm/kasan/quarantine.c +++ b/mm/kasan/quarantine.c | |||
@@ -274,6 +274,7 @@ static void per_cpu_remove_cache(void *arg) | |||
274 | qlist_free_all(&to_free, cache); | 274 | qlist_free_all(&to_free, cache); |
275 | } | 275 | } |
276 | 276 | ||
277 | /* Free all quarantined objects belonging to cache. */ | ||
277 | void quarantine_remove_cache(struct kmem_cache *cache) | 278 | void quarantine_remove_cache(struct kmem_cache *cache) |
278 | { | 279 | { |
279 | unsigned long flags, i; | 280 | unsigned long flags, i; |
diff --git a/mm/ksm.c b/mm/ksm.c --- a/mm/ksm.c +++ b/mm/ksm.c | |||
@@ -223,6 +223,12 @@ static unsigned int ksm_thread_pages_to_scan = 100; | |||
223 | /* Milliseconds ksmd should sleep between batches */ | 223 | /* Milliseconds ksmd should sleep between batches */ |
224 | static unsigned int ksm_thread_sleep_millisecs = 20; | 224 | static unsigned int ksm_thread_sleep_millisecs = 20; |
225 | 225 | ||
226 | /* Checksum of an empty (zeroed) page */ | ||
227 | static unsigned int zero_checksum __read_mostly; | ||
228 | |||
229 | /* Whether to merge empty (zeroed) pages with actual zero pages */ | ||
230 | static bool ksm_use_zero_pages __read_mostly; | ||
231 | |||
226 | #ifdef CONFIG_NUMA | 232 | #ifdef CONFIG_NUMA |
227 | /* Zeroed when merging across nodes is not allowed */ | 233 | /* Zeroed when merging across nodes is not allowed */ |
228 | static unsigned int ksm_merge_across_nodes = 1; | 234 | static unsigned int ksm_merge_across_nodes = 1; |
@@ -850,33 +856,36 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, | |||
850 | pte_t *orig_pte) | 856 | pte_t *orig_pte) |
851 | { | 857 | { |
852 | struct mm_struct *mm = vma->vm_mm; | 858 | struct mm_struct *mm = vma->vm_mm; |
853 | unsigned long addr; | 859 | struct page_vma_mapped_walk pvmw = { |
854 | pte_t *ptep; | 860 | .page = page, |
855 | spinlock_t *ptl; | 861 | .vma = vma, |
862 | }; | ||
856 | int swapped; | 863 | int swapped; |
857 | int err = -EFAULT; | 864 | int err = -EFAULT; |
858 | unsigned long mmun_start; /* For mmu_notifiers */ | 865 | unsigned long mmun_start; /* For mmu_notifiers */ |
859 | unsigned long mmun_end; /* For mmu_notifiers */ | 866 | unsigned long mmun_end; /* For mmu_notifiers */ |
860 | 867 | ||
861 | addr = page_address_in_vma(page, vma); | 868 | pvmw.address = page_address_in_vma(page, vma); |
862 | if (addr == -EFAULT) | 869 | if (pvmw.address == -EFAULT) |
863 | goto out; | 870 | goto out; |
864 | 871 | ||
865 | BUG_ON(PageTransCompound(page)); | 872 | BUG_ON(PageTransCompound(page)); |
866 | 873 | ||
867 | mmun_start = addr; | 874 | mmun_start = pvmw.address; |
868 | mmun_end = addr + PAGE_SIZE; | 875 | mmun_end = pvmw.address + PAGE_SIZE; |
869 | mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); | 876 | mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); |
870 | 877 | ||
871 | ptep = page_check_address(page, mm, addr, &ptl, 0); | 878 | if (!page_vma_mapped_walk(&pvmw)) |
872 | if (!ptep) | ||
873 | goto out_mn; | 879 | goto out_mn; |
880 | if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?")) | ||
881 | goto out_unlock; | ||
874 | 882 | ||
875 | if (pte_write(*ptep) || pte_dirty(*ptep)) { | 883 | if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) || |
884 | (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte))) { | ||
876 | pte_t entry; | 885 | pte_t entry; |
877 | 886 | ||
878 | swapped = PageSwapCache(page); | 887 | swapped = PageSwapCache(page); |
879 | flush_cache_page(vma, addr, page_to_pfn(page)); | 888 | flush_cache_page(vma, pvmw.address, page_to_pfn(page)); |
880 | /* | 889 | /* |
881 | * Ok this is tricky, when get_user_pages_fast() runs it doesn't | 890 | * Ok this is tricky, when get_user_pages_fast() runs it doesn't |
882 | * take any lock, therefore the check that we are going to make | 891 | * take any lock, therefore the check that we are going to make |
@@ -886,25 +895,29 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, | |||
886 | * this assures us that no O_DIRECT can happen after the check | 895 | * this assures us that no O_DIRECT can happen after the check |
887 | * or in the middle of the check. | 896 | * or in the middle of the check. |
888 | */ | 897 | */ |
889 | entry = ptep_clear_flush_notify(vma, addr, ptep); | 898 | entry = ptep_clear_flush_notify(vma, pvmw.address, pvmw.pte); |
890 | /* | 899 | /* |
891 | * Check that no O_DIRECT or similar I/O is in progress on the | 900 | * Check that no O_DIRECT or similar I/O is in progress on the |
892 | * page | 901 | * page |
893 | */ | 902 | */ |
894 | if (page_mapcount(page) + 1 + swapped != page_count(page)) { | 903 | if (page_mapcount(page) + 1 + swapped != page_count(page)) { |
895 | set_pte_at(mm, addr, ptep, entry); | 904 | set_pte_at(mm, pvmw.address, pvmw.pte, entry); |
896 | goto out_unlock; | 905 | goto out_unlock; |
897 | } | 906 | } |
898 | if (pte_dirty(entry)) | 907 | if (pte_dirty(entry)) |
899 | set_page_dirty(page); | 908 | set_page_dirty(page); |
900 | entry = pte_mkclean(pte_wrprotect(entry)); | 909 | |
901 | set_pte_at_notify(mm, addr, ptep, entry); | 910 | if (pte_protnone(entry)) |
911 | entry = pte_mkclean(pte_clear_savedwrite(entry)); | ||
912 | else | ||
913 | entry = pte_mkclean(pte_wrprotect(entry)); | ||
914 | set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry); | ||
902 | } | 915 | } |
903 | *orig_pte = *ptep; | 916 | *orig_pte = *pvmw.pte; |
904 | err = 0; | 917 | err = 0; |
905 | 918 | ||
906 | out_unlock: | 919 | out_unlock: |
907 | pte_unmap_unlock(ptep, ptl); | 920 | page_vma_mapped_walk_done(&pvmw); |
908 | out_mn: | 921 | out_mn: |
909 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); | 922 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); |
910 | out: | 923 | out: |
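write_protect_page() is one of several call sites this merge converts from page_check_address() to the new page_vma_mapped_walk() iterator. The general calling pattern the new API expects (do_something() is a placeholder, not a real function):

    struct page_vma_mapped_walk pvmw = {
            .page = page,
            .vma = vma,
            .address = address,
    };

    while (page_vma_mapped_walk(&pvmw)) {
            /* Here pvmw.pte (or pvmw.pmd for a pmd-mapped THP) points */
            /* at a mapped entry and pvmw.ptl is held.                 */
            do_something(&pvmw);          /* placeholder for real work */
    }
    /* When breaking out of the loop early, callers must release the  */
    /* walk state themselves with page_vma_mapped_walk_done(&pvmw).   */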
@@ -926,6 +939,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page, | |||
926 | struct mm_struct *mm = vma->vm_mm; | 939 | struct mm_struct *mm = vma->vm_mm; |
927 | pmd_t *pmd; | 940 | pmd_t *pmd; |
928 | pte_t *ptep; | 941 | pte_t *ptep; |
942 | pte_t newpte; | ||
929 | spinlock_t *ptl; | 943 | spinlock_t *ptl; |
930 | unsigned long addr; | 944 | unsigned long addr; |
931 | int err = -EFAULT; | 945 | int err = -EFAULT; |
@@ -950,12 +964,22 @@ static int replace_page(struct vm_area_struct *vma, struct page *page, | |||
950 | goto out_mn; | 964 | goto out_mn; |
951 | } | 965 | } |
952 | 966 | ||
953 | get_page(kpage); | 967 | /* |
954 | page_add_anon_rmap(kpage, vma, addr, false); | 968 | * No need to check ksm_use_zero_pages here: we can only have a |
969 | * zero_page here if ksm_use_zero_pages was enabled alreaady. | ||
970 | */ | ||
971 | if (!is_zero_pfn(page_to_pfn(kpage))) { | ||
972 | get_page(kpage); | ||
973 | page_add_anon_rmap(kpage, vma, addr, false); | ||
974 | newpte = mk_pte(kpage, vma->vm_page_prot); | ||
975 | } else { | ||
976 | newpte = pte_mkspecial(pfn_pte(page_to_pfn(kpage), | ||
977 | vma->vm_page_prot)); | ||
978 | } | ||
955 | 979 | ||
956 | flush_cache_page(vma, addr, pte_pfn(*ptep)); | 980 | flush_cache_page(vma, addr, pte_pfn(*ptep)); |
957 | ptep_clear_flush_notify(vma, addr, ptep); | 981 | ptep_clear_flush_notify(vma, addr, ptep); |
958 | set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot)); | 982 | set_pte_at_notify(mm, addr, ptep, newpte); |
959 | 983 | ||
960 | page_remove_rmap(page, false); | 984 | page_remove_rmap(page, false); |
961 | if (!page_mapped(page)) | 985 | if (!page_mapped(page)) |
@@ -1467,6 +1491,23 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) | |||
1467 | return; | 1491 | return; |
1468 | } | 1492 | } |
1469 | 1493 | ||
1494 | /* | ||
1495 | * Same checksum as an empty page. We attempt to merge it with the | ||
1496 | * appropriate zero page if the user enabled this via sysfs. | ||
1497 | */ | ||
1498 | if (ksm_use_zero_pages && (checksum == zero_checksum)) { | ||
1499 | struct vm_area_struct *vma; | ||
1500 | |||
1501 | vma = find_mergeable_vma(rmap_item->mm, rmap_item->address); | ||
1502 | err = try_to_merge_one_page(vma, page, | ||
1503 | ZERO_PAGE(rmap_item->address)); | ||
1504 | /* | ||
1505 | * In case of failure, the page was not really empty, so we | ||
1506 | * need to continue. Otherwise we're done. | ||
1507 | */ | ||
1508 | if (!err) | ||
1509 | return; | ||
1510 | } | ||
1470 | tree_rmap_item = | 1511 | tree_rmap_item = |
1471 | unstable_tree_search_insert(rmap_item, page, &tree_page); | 1512 | unstable_tree_search_insert(rmap_item, page, &tree_page); |
1472 | if (tree_rmap_item) { | 1513 | if (tree_rmap_item) { |
@@ -2233,6 +2274,28 @@ static ssize_t merge_across_nodes_store(struct kobject *kobj, | |||
2233 | KSM_ATTR(merge_across_nodes); | 2274 | KSM_ATTR(merge_across_nodes); |
2234 | #endif | 2275 | #endif |
2235 | 2276 | ||
2277 | static ssize_t use_zero_pages_show(struct kobject *kobj, | ||
2278 | struct kobj_attribute *attr, char *buf) | ||
2279 | { | ||
2280 | return sprintf(buf, "%u\n", ksm_use_zero_pages); | ||
2281 | } | ||
2282 | static ssize_t use_zero_pages_store(struct kobject *kobj, | ||
2283 | struct kobj_attribute *attr, | ||
2284 | const char *buf, size_t count) | ||
2285 | { | ||
2286 | int err; | ||
2287 | bool value; | ||
2288 | |||
2289 | err = kstrtobool(buf, &value); | ||
2290 | if (err) | ||
2291 | return -EINVAL; | ||
2292 | |||
2293 | ksm_use_zero_pages = value; | ||
2294 | |||
2295 | return count; | ||
2296 | } | ||
2297 | KSM_ATTR(use_zero_pages); | ||
2298 | |||
2236 | static ssize_t pages_shared_show(struct kobject *kobj, | 2299 | static ssize_t pages_shared_show(struct kobject *kobj, |
2237 | struct kobj_attribute *attr, char *buf) | 2300 | struct kobj_attribute *attr, char *buf) |
2238 | { | 2301 | { |
@@ -2290,6 +2353,7 @@ static struct attribute *ksm_attrs[] = { | |||
2290 | #ifdef CONFIG_NUMA | 2353 | #ifdef CONFIG_NUMA |
2291 | &merge_across_nodes_attr.attr, | 2354 | &merge_across_nodes_attr.attr, |
2292 | #endif | 2355 | #endif |
2356 | &use_zero_pages_attr.attr, | ||
2293 | NULL, | 2357 | NULL, |
2294 | }; | 2358 | }; |
2295 | 2359 | ||
@@ -2304,6 +2368,11 @@ static int __init ksm_init(void) | |||
2304 | struct task_struct *ksm_thread; | 2368 | struct task_struct *ksm_thread; |
2305 | int err; | 2369 | int err; |
2306 | 2370 | ||
2371 | /* The correct value depends on page size and endianness */ | ||
2372 | zero_checksum = calc_checksum(ZERO_PAGE(0)); | ||
2373 | /* Default to false for backwards compatibility */ | ||
2374 | ksm_use_zero_pages = false; | ||
2375 | |||
2307 | err = ksm_slab_init(); | 2376 | err = ksm_slab_init(); |
2308 | if (err) | 2377 | if (err) |
2309 | goto out; | 2378 | goto out; |
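The new knob is exposed as /sys/kernel/mm/ksm/use_zero_pages via the KSM_ATTR() above and is parsed with kstrtobool(), so "1"/"0", "y"/"n" and similar spellings work. A minimal user-space sketch for enabling it (requires root and CONFIG_KSM; assumes the standard sysfs mount point):

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/kernel/mm/ksm/use_zero_pages", "w");

            if (!f) {
                    perror("use_zero_pages");
                    return 1;
            }
            fputs("1\n", f);   /* parsed by kstrtobool() in use_zero_pages_store() */
            fclose(f);
            return 0;
    }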
diff --git a/mm/madvise.c b/mm/madvise.c index b530a4986035..dc5927c812d3 100644 --- a/mm/madvise.c +++ b/mm/madvise.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/backing-dev.h> | 21 | #include <linux/backing-dev.h> |
22 | #include <linux/swap.h> | 22 | #include <linux/swap.h> |
23 | #include <linux/swapops.h> | 23 | #include <linux/swapops.h> |
24 | #include <linux/shmem_fs.h> | ||
24 | #include <linux/mmu_notifier.h> | 25 | #include <linux/mmu_notifier.h> |
25 | 26 | ||
26 | #include <asm/tlb.h> | 27 | #include <asm/tlb.h> |
@@ -92,14 +93,28 @@ static long madvise_behavior(struct vm_area_struct *vma, | |||
92 | case MADV_MERGEABLE: | 93 | case MADV_MERGEABLE: |
93 | case MADV_UNMERGEABLE: | 94 | case MADV_UNMERGEABLE: |
94 | error = ksm_madvise(vma, start, end, behavior, &new_flags); | 95 | error = ksm_madvise(vma, start, end, behavior, &new_flags); |
95 | if (error) | 96 | if (error) { |
97 | /* | ||
98 | * madvise() returns EAGAIN if kernel resources, such as | ||
99 | * slab, are temporarily unavailable. | ||
100 | */ | ||
101 | if (error == -ENOMEM) | ||
102 | error = -EAGAIN; | ||
96 | goto out; | 103 | goto out; |
104 | } | ||
97 | break; | 105 | break; |
98 | case MADV_HUGEPAGE: | 106 | case MADV_HUGEPAGE: |
99 | case MADV_NOHUGEPAGE: | 107 | case MADV_NOHUGEPAGE: |
100 | error = hugepage_madvise(vma, &new_flags, behavior); | 108 | error = hugepage_madvise(vma, &new_flags, behavior); |
101 | if (error) | 109 | if (error) { |
110 | /* | ||
111 | * madvise() returns EAGAIN if kernel resources, such as | ||
112 | * slab, are temporarily unavailable. | ||
113 | */ | ||
114 | if (error == -ENOMEM) | ||
115 | error = -EAGAIN; | ||
102 | goto out; | 116 | goto out; |
117 | } | ||
103 | break; | 118 | break; |
104 | } | 119 | } |
105 | 120 | ||
@@ -120,15 +135,37 @@ static long madvise_behavior(struct vm_area_struct *vma, | |||
120 | *prev = vma; | 135 | *prev = vma; |
121 | 136 | ||
122 | if (start != vma->vm_start) { | 137 | if (start != vma->vm_start) { |
123 | error = split_vma(mm, vma, start, 1); | 138 | if (unlikely(mm->map_count >= sysctl_max_map_count)) { |
124 | if (error) | 139 | error = -ENOMEM; |
125 | goto out; | 140 | goto out; |
141 | } | ||
142 | error = __split_vma(mm, vma, start, 1); | ||
143 | if (error) { | ||
144 | /* | ||
145 | * madvise() returns EAGAIN if kernel resources, such as | ||
146 | * slab, are temporarily unavailable. | ||
147 | */ | ||
148 | if (error == -ENOMEM) | ||
149 | error = -EAGAIN; | ||
150 | goto out; | ||
151 | } | ||
126 | } | 152 | } |
127 | 153 | ||
128 | if (end != vma->vm_end) { | 154 | if (end != vma->vm_end) { |
129 | error = split_vma(mm, vma, end, 0); | 155 | if (unlikely(mm->map_count >= sysctl_max_map_count)) { |
130 | if (error) | 156 | error = -ENOMEM; |
157 | goto out; | ||
158 | } | ||
159 | error = __split_vma(mm, vma, end, 0); | ||
160 | if (error) { | ||
161 | /* | ||
162 | * madvise() returns EAGAIN if kernel resources, such as | ||
163 | * slab, are temporarily unavailable. | ||
164 | */ | ||
165 | if (error == -ENOMEM) | ||
166 | error = -EAGAIN; | ||
131 | goto out; | 167 | goto out; |
168 | } | ||
132 | } | 169 | } |
133 | 170 | ||
134 | success: | 171 | success: |
@@ -136,10 +173,7 @@ success: | |||
136 | * vm_flags is protected by the mmap_sem held in write mode. | 173 | * vm_flags is protected by the mmap_sem held in write mode. |
137 | */ | 174 | */ |
138 | vma->vm_flags = new_flags; | 175 | vma->vm_flags = new_flags; |
139 | |||
140 | out: | 176 | out: |
141 | if (error == -ENOMEM) | ||
142 | error = -EAGAIN; | ||
143 | return error; | 177 | return error; |
144 | } | 178 | } |
145 | 179 | ||
@@ -479,7 +513,7 @@ static long madvise_dontneed(struct vm_area_struct *vma, | |||
479 | if (!can_madv_dontneed_vma(vma)) | 513 | if (!can_madv_dontneed_vma(vma)) |
480 | return -EINVAL; | 514 | return -EINVAL; |
481 | 515 | ||
482 | madvise_userfault_dontneed(vma, prev, start, end); | 516 | userfaultfd_remove(vma, prev, start, end); |
483 | zap_page_range(vma, start, end - start); | 517 | zap_page_range(vma, start, end - start); |
484 | return 0; | 518 | return 0; |
485 | } | 519 | } |
@@ -520,6 +554,7 @@ static long madvise_remove(struct vm_area_struct *vma, | |||
520 | * mmap_sem. | 554 | * mmap_sem. |
521 | */ | 555 | */ |
522 | get_file(f); | 556 | get_file(f); |
557 | userfaultfd_remove(vma, prev, start, end); | ||
523 | up_read(¤t->mm->mmap_sem); | 558 | up_read(¤t->mm->mmap_sem); |
524 | error = vfs_fallocate(f, | 559 | error = vfs_fallocate(f, |
525 | FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, | 560 | FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, |
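The recurring comment in these hunks states the contract change: madvise() now reports transient kernel resource shortage (e.g. slab) as EAGAIN instead of ENOMEM, so user space can distinguish "retry later" from a hard address-space error. A user-space retry sketch built on that contract (illustrative, not from the diff):

    #include <errno.h>
    #include <sys/mman.h>

    /* Retry madvise() while the kernel signals a transient shortage. */
    static int madvise_retry(void *addr, size_t len, int advice)
    {
            int ret;

            do {
                    ret = madvise(addr, len, advice);
            } while (ret == -1 && errno == EAGAIN);
            return ret;
    }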
diff --git a/mm/memblock.c b/mm/memblock.c index c004f52be419..b64b47803e52 100644 --- a/mm/memblock.c +++ b/mm/memblock.c | |||
@@ -35,15 +35,18 @@ struct memblock memblock __initdata_memblock = { | |||
35 | .memory.regions = memblock_memory_init_regions, | 35 | .memory.regions = memblock_memory_init_regions, |
36 | .memory.cnt = 1, /* empty dummy entry */ | 36 | .memory.cnt = 1, /* empty dummy entry */ |
37 | .memory.max = INIT_MEMBLOCK_REGIONS, | 37 | .memory.max = INIT_MEMBLOCK_REGIONS, |
38 | .memory.name = "memory", | ||
38 | 39 | ||
39 | .reserved.regions = memblock_reserved_init_regions, | 40 | .reserved.regions = memblock_reserved_init_regions, |
40 | .reserved.cnt = 1, /* empty dummy entry */ | 41 | .reserved.cnt = 1, /* empty dummy entry */ |
41 | .reserved.max = INIT_MEMBLOCK_REGIONS, | 42 | .reserved.max = INIT_MEMBLOCK_REGIONS, |
43 | .reserved.name = "reserved", | ||
42 | 44 | ||
43 | #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP | 45 | #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP |
44 | .physmem.regions = memblock_physmem_init_regions, | 46 | .physmem.regions = memblock_physmem_init_regions, |
45 | .physmem.cnt = 1, /* empty dummy entry */ | 47 | .physmem.cnt = 1, /* empty dummy entry */ |
46 | .physmem.max = INIT_PHYSMEM_REGIONS, | 48 | .physmem.max = INIT_PHYSMEM_REGIONS, |
49 | .physmem.name = "physmem", | ||
47 | #endif | 50 | #endif |
48 | 51 | ||
49 | .bottom_up = false, | 52 | .bottom_up = false, |
@@ -64,18 +67,6 @@ ulong __init_memblock choose_memblock_flags(void) | |||
64 | return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE; | 67 | return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE; |
65 | } | 68 | } |
66 | 69 | ||
67 | /* inline so we don't get a warning when pr_debug is compiled out */ | ||
68 | static __init_memblock const char * | ||
69 | memblock_type_name(struct memblock_type *type) | ||
70 | { | ||
71 | if (type == &memblock.memory) | ||
72 | return "memory"; | ||
73 | else if (type == &memblock.reserved) | ||
74 | return "reserved"; | ||
75 | else | ||
76 | return "unknown"; | ||
77 | } | ||
78 | |||
79 | /* adjust *@size so that (@base + *@size) doesn't overflow, return new size */ | 70 | /* adjust *@size so that (@base + *@size) doesn't overflow, return new size */ |
80 | static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size) | 71 | static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size) |
81 | { | 72 | { |
@@ -402,12 +393,12 @@ static int __init_memblock memblock_double_array(struct memblock_type *type, | |||
402 | } | 393 | } |
403 | if (!addr) { | 394 | if (!addr) { |
404 | pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n", | 395 | pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n", |
405 | memblock_type_name(type), type->max, type->max * 2); | 396 | type->name, type->max, type->max * 2); |
406 | return -1; | 397 | return -1; |
407 | } | 398 | } |
408 | 399 | ||
409 | memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]", | 400 | memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]", |
410 | memblock_type_name(type), type->max * 2, (u64)addr, | 401 | type->name, type->max * 2, (u64)addr, |
411 | (u64)addr + new_size - 1); | 402 | (u64)addr + new_size - 1); |
412 | 403 | ||
413 | /* | 404 | /* |
@@ -1693,14 +1684,14 @@ phys_addr_t __init_memblock memblock_get_current_limit(void) | |||
1693 | return memblock.current_limit; | 1684 | return memblock.current_limit; |
1694 | } | 1685 | } |
1695 | 1686 | ||
1696 | static void __init_memblock memblock_dump(struct memblock_type *type, char *name) | 1687 | static void __init_memblock memblock_dump(struct memblock_type *type) |
1697 | { | 1688 | { |
1698 | phys_addr_t base, end, size; | 1689 | phys_addr_t base, end, size; |
1699 | unsigned long flags; | 1690 | unsigned long flags; |
1700 | int idx; | 1691 | int idx; |
1701 | struct memblock_region *rgn; | 1692 | struct memblock_region *rgn; |
1702 | 1693 | ||
1703 | pr_info(" %s.cnt = 0x%lx\n", name, type->cnt); | 1694 | pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt); |
1704 | 1695 | ||
1705 | for_each_memblock_type(type, rgn) { | 1696 | for_each_memblock_type(type, rgn) { |
1706 | char nid_buf[32] = ""; | 1697 | char nid_buf[32] = ""; |
@@ -1715,7 +1706,7 @@ static void __init_memblock memblock_dump(struct memblock_type *type, char *name | |||
1715 | memblock_get_region_node(rgn)); | 1706 | memblock_get_region_node(rgn)); |
1716 | #endif | 1707 | #endif |
1717 | pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#lx\n", | 1708 | pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#lx\n", |
1718 | name, idx, &base, &end, &size, nid_buf, flags); | 1709 | type->name, idx, &base, &end, &size, nid_buf, flags); |
1719 | } | 1710 | } |
1720 | } | 1711 | } |
1721 | 1712 | ||
@@ -1726,8 +1717,11 @@ void __init_memblock __memblock_dump_all(void) | |||
1726 | &memblock.memory.total_size, | 1717 | &memblock.memory.total_size, |
1727 | &memblock.reserved.total_size); | 1718 | &memblock.reserved.total_size); |
1728 | 1719 | ||
1729 | memblock_dump(&memblock.memory, "memory"); | 1720 | memblock_dump(&memblock.memory); |
1730 | memblock_dump(&memblock.reserved, "reserved"); | 1721 | memblock_dump(&memblock.reserved); |
1722 | #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP | ||
1723 | memblock_dump(&memblock.physmem); | ||
1724 | #endif | ||
1731 | } | 1725 | } |
1732 | 1726 | ||
1733 | void __init memblock_allow_resize(void) | 1727 | void __init memblock_allow_resize(void) |
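The memblock change replaces the memblock_type_name() lookup helper with a name string embedded in each type, which also lets physmem be dumped like the other types. The shape of the struct after this change (sketch; the real definition lives in include/linux/memblock.h):

    struct memblock_type {
            unsigned long cnt;       /* number of regions */
            unsigned long max;       /* size of the allocated array */
            phys_addr_t total_size;  /* size of all regions */
            struct memblock_region *regions;
            char *name;              /* NEW: replaces memblock_type_name() */
    };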
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 1fd6affcdde7..45867e439d31 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/memcontrol.h> | 35 | #include <linux/memcontrol.h> |
36 | #include <linux/cgroup.h> | 36 | #include <linux/cgroup.h> |
37 | #include <linux/mm.h> | 37 | #include <linux/mm.h> |
38 | #include <linux/shmem_fs.h> | ||
38 | #include <linux/hugetlb.h> | 39 | #include <linux/hugetlb.h> |
39 | #include <linux/pagemap.h> | 40 | #include <linux/pagemap.h> |
40 | #include <linux/smp.h> | 41 | #include <linux/smp.h> |
diff --git a/mm/memory-failure.c b/mm/memory-failure.c index f283c7e0a2a3..3d0f2fd4bf73 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c | |||
@@ -1527,7 +1527,8 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags) | |||
1527 | { | 1527 | { |
1528 | int ret = __get_any_page(page, pfn, flags); | 1528 | int ret = __get_any_page(page, pfn, flags); |
1529 | 1529 | ||
1530 | if (ret == 1 && !PageHuge(page) && !PageLRU(page)) { | 1530 | if (ret == 1 && !PageHuge(page) && |
1531 | !PageLRU(page) && !__PageMovable(page)) { | ||
1531 | /* | 1532 | /* |
1532 | * Try to free it. | 1533 | * Try to free it. |
1533 | */ | 1534 | */ |
@@ -1649,7 +1650,10 @@ static int __soft_offline_page(struct page *page, int flags) | |||
1649 | * Try to migrate to a new page instead. migrate.c | 1650 | * Try to migrate to a new page instead. migrate.c |
1650 | * handles a large number of cases for us. | 1651 | * handles a large number of cases for us. |
1651 | */ | 1652 | */ |
1652 | ret = isolate_lru_page(page); | 1653 | if (PageLRU(page)) |
1654 | ret = isolate_lru_page(page); | ||
1655 | else | ||
1656 | ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE); | ||
1653 | /* | 1657 | /* |
1654 | * Drop the page reference which came from get_any_page(); a | 1658 | * Drop the page reference which came from get_any_page(); a |
1655 | * successful isolate_lru_page() already took another one. | 1659 | * successful isolate_lru_page() already took another one. |
@@ -1657,18 +1661,20 @@ static int __soft_offline_page(struct page *page, int flags) | |||
1657 | put_hwpoison_page(page); | 1661 | put_hwpoison_page(page); |
1658 | if (!ret) { | 1662 | if (!ret) { |
1659 | LIST_HEAD(pagelist); | 1663 | LIST_HEAD(pagelist); |
1660 | inc_node_page_state(page, NR_ISOLATED_ANON + | 1664 | /* |
1661 | page_is_file_cache(page)); | 1665 | * After the lru page is isolated, PageLRU is cleared, |
1666 | * so test !__PageMovable instead: an LRU page's mapping | ||
1667 | * cannot have PAGE_MAPPING_MOVABLE set. | ||
1668 | */ | ||
1669 | if (!__PageMovable(page)) | ||
1670 | inc_node_page_state(page, NR_ISOLATED_ANON + | ||
1671 | page_is_file_cache(page)); | ||
1662 | list_add(&page->lru, &pagelist); | 1672 | list_add(&page->lru, &pagelist); |
1663 | ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL, | 1673 | ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL, |
1664 | MIGRATE_SYNC, MR_MEMORY_FAILURE); | 1674 | MIGRATE_SYNC, MR_MEMORY_FAILURE); |
1665 | if (ret) { | 1675 | if (ret) { |
1666 | if (!list_empty(&pagelist)) { | 1676 | if (!list_empty(&pagelist)) |
1667 | list_del(&page->lru); | 1677 | putback_movable_pages(&pagelist); |
1668 | dec_node_page_state(page, NR_ISOLATED_ANON + | ||
1669 | page_is_file_cache(page)); | ||
1670 | putback_lru_page(page); | ||
1671 | } | ||
1672 | 1678 | ||
1673 | pr_info("soft offline: %#lx: migration failed %d, type %lx\n", | 1679 | pr_info("soft offline: %#lx: migration failed %d, type %lx\n", |
1674 | pfn, ret, page->flags); | 1680 | pfn, ret, page->flags); |
diff --git a/mm/memory.c b/mm/memory.c index 7663068a33c6..14fc0b40f0bb 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -30,7 +30,7 @@ | |||
30 | 30 | ||
31 | /* | 31 | /* |
32 | * 05.04.94 - Multi-page memory management added for v1.1. | 32 | * 05.04.94 - Multi-page memory management added for v1.1. |
33 | * Idea by Alex Bligh (alex@cconcepts.co.uk) | 33 | * Idea by Alex Bligh (alex@cconcepts.co.uk) |
34 | * | 34 | * |
35 | * 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG | 35 | * 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG |
36 | * (Gerhard.Wichert@pdb.siemens.de) | 36 | * (Gerhard.Wichert@pdb.siemens.de) |
@@ -82,9 +82,9 @@ | |||
82 | #ifndef CONFIG_NEED_MULTIPLE_NODES | 82 | #ifndef CONFIG_NEED_MULTIPLE_NODES |
83 | /* use the per-pgdat data instead for discontigmem - mbligh */ | 83 | /* use the per-pgdat data instead for discontigmem - mbligh */ |
84 | unsigned long max_mapnr; | 84 | unsigned long max_mapnr; |
85 | struct page *mem_map; | ||
86 | |||
87 | EXPORT_SYMBOL(max_mapnr); | 85 | EXPORT_SYMBOL(max_mapnr); |
86 | |||
87 | struct page *mem_map; | ||
88 | EXPORT_SYMBOL(mem_map); | 88 | EXPORT_SYMBOL(mem_map); |
89 | #endif | 89 | #endif |
90 | 90 | ||
@@ -95,8 +95,7 @@ EXPORT_SYMBOL(mem_map); | |||
95 | * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL | 95 | * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL |
96 | * and ZONE_HIGHMEM. | 96 | * and ZONE_HIGHMEM. |
97 | */ | 97 | */ |
98 | void * high_memory; | 98 | void *high_memory; |
99 | |||
100 | EXPORT_SYMBOL(high_memory); | 99 | EXPORT_SYMBOL(high_memory); |
101 | 100 | ||
102 | /* | 101 | /* |
@@ -120,10 +119,10 @@ static int __init disable_randmaps(char *s) | |||
120 | __setup("norandmaps", disable_randmaps); | 119 | __setup("norandmaps", disable_randmaps); |
121 | 120 | ||
122 | unsigned long zero_pfn __read_mostly; | 121 | unsigned long zero_pfn __read_mostly; |
123 | unsigned long highest_memmap_pfn __read_mostly; | ||
124 | |||
125 | EXPORT_SYMBOL(zero_pfn); | 122 | EXPORT_SYMBOL(zero_pfn); |
126 | 123 | ||
124 | unsigned long highest_memmap_pfn __read_mostly; | ||
125 | |||
127 | /* | 126 | /* |
128 | * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init() | 127 | * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init() |
129 | */ | 128 | */ |
@@ -556,7 +555,7 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma, | |||
556 | 555 | ||
557 | if (is_vm_hugetlb_page(vma)) { | 556 | if (is_vm_hugetlb_page(vma)) { |
558 | hugetlb_free_pgd_range(tlb, addr, vma->vm_end, | 557 | hugetlb_free_pgd_range(tlb, addr, vma->vm_end, |
559 | floor, next? next->vm_start: ceiling); | 558 | floor, next ? next->vm_start : ceiling); |
560 | } else { | 559 | } else { |
561 | /* | 560 | /* |
562 | * Optimization: gather nearby vmas into one call down | 561 | * Optimization: gather nearby vmas into one call down |
@@ -569,7 +568,7 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma, | |||
569 | unlink_file_vma(vma); | 568 | unlink_file_vma(vma); |
570 | } | 569 | } |
571 | free_pgd_range(tlb, addr, vma->vm_end, | 570 | free_pgd_range(tlb, addr, vma->vm_end, |
572 | floor, next? next->vm_start: ceiling); | 571 | floor, next ? next->vm_start : ceiling); |
573 | } | 572 | } |
574 | vma = next; | 573 | vma = next; |
575 | } | 574 | } |
@@ -1001,7 +1000,7 @@ static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src | |||
1001 | next = pmd_addr_end(addr, end); | 1000 | next = pmd_addr_end(addr, end); |
1002 | if (pmd_trans_huge(*src_pmd) || pmd_devmap(*src_pmd)) { | 1001 | if (pmd_trans_huge(*src_pmd) || pmd_devmap(*src_pmd)) { |
1003 | int err; | 1002 | int err; |
1004 | VM_BUG_ON(next-addr != HPAGE_PMD_SIZE); | 1003 | VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, vma); |
1005 | err = copy_huge_pmd(dst_mm, src_mm, | 1004 | err = copy_huge_pmd(dst_mm, src_mm, |
1006 | dst_pmd, src_pmd, addr, vma); | 1005 | dst_pmd, src_pmd, addr, vma); |
1007 | if (err == -ENOMEM) | 1006 | if (err == -ENOMEM) |
@@ -1032,6 +1031,18 @@ static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src | |||
1032 | src_pud = pud_offset(src_pgd, addr); | 1031 | src_pud = pud_offset(src_pgd, addr); |
1033 | do { | 1032 | do { |
1034 | next = pud_addr_end(addr, end); | 1033 | next = pud_addr_end(addr, end); |
1034 | if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) { | ||
1035 | int err; | ||
1036 | |||
1037 | VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, vma); | ||
1038 | err = copy_huge_pud(dst_mm, src_mm, | ||
1039 | dst_pud, src_pud, addr, vma); | ||
1040 | if (err == -ENOMEM) | ||
1041 | return -ENOMEM; | ||
1042 | if (!err) | ||
1043 | continue; | ||
1044 | /* fall through */ | ||
1045 | } | ||
1035 | if (pud_none_or_clear_bad(src_pud)) | 1046 | if (pud_none_or_clear_bad(src_pud)) |
1036 | continue; | 1047 | continue; |
1037 | if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud, | 1048 | if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud, |
@@ -1129,9 +1140,8 @@ again: | |||
1129 | arch_enter_lazy_mmu_mode(); | 1140 | arch_enter_lazy_mmu_mode(); |
1130 | do { | 1141 | do { |
1131 | pte_t ptent = *pte; | 1142 | pte_t ptent = *pte; |
1132 | if (pte_none(ptent)) { | 1143 | if (pte_none(ptent)) |
1133 | continue; | 1144 | continue; |
1134 | } | ||
1135 | 1145 | ||
1136 | if (pte_present(ptent)) { | 1146 | if (pte_present(ptent)) { |
1137 | struct page *page; | 1147 | struct page *page; |
@@ -1263,9 +1273,19 @@ static inline unsigned long zap_pud_range(struct mmu_gather *tlb, | |||
1263 | pud = pud_offset(pgd, addr); | 1273 | pud = pud_offset(pgd, addr); |
1264 | do { | 1274 | do { |
1265 | next = pud_addr_end(addr, end); | 1275 | next = pud_addr_end(addr, end); |
1276 | if (pud_trans_huge(*pud) || pud_devmap(*pud)) { | ||
1277 | if (next - addr != HPAGE_PUD_SIZE) { | ||
1278 | VM_BUG_ON_VMA(!rwsem_is_locked(&tlb->mm->mmap_sem), vma); | ||
1279 | split_huge_pud(vma, pud, addr); | ||
1280 | } else if (zap_huge_pud(tlb, vma, pud, addr)) | ||
1281 | goto next; | ||
1282 | /* fall through */ | ||
1283 | } | ||
1266 | if (pud_none_or_clear_bad(pud)) | 1284 | if (pud_none_or_clear_bad(pud)) |
1267 | continue; | 1285 | continue; |
1268 | next = zap_pmd_range(tlb, vma, pud, addr, next, details); | 1286 | next = zap_pmd_range(tlb, vma, pud, addr, next, details); |
1287 | next: | ||
1288 | cond_resched(); | ||
1269 | } while (pud++, addr = next, addr != end); | 1289 | } while (pud++, addr = next, addr != end); |
1270 | 1290 | ||
1271 | return addr; | 1291 | return addr; |
@@ -1441,10 +1461,10 @@ EXPORT_SYMBOL_GPL(zap_vma_ptes); | |||
1441 | pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, | 1461 | pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, |
1442 | spinlock_t **ptl) | 1462 | spinlock_t **ptl) |
1443 | { | 1463 | { |
1444 | pgd_t * pgd = pgd_offset(mm, addr); | 1464 | pgd_t *pgd = pgd_offset(mm, addr); |
1445 | pud_t * pud = pud_alloc(mm, pgd, addr); | 1465 | pud_t *pud = pud_alloc(mm, pgd, addr); |
1446 | if (pud) { | 1466 | if (pud) { |
1447 | pmd_t * pmd = pmd_alloc(mm, pud, addr); | 1467 | pmd_t *pmd = pmd_alloc(mm, pud, addr); |
1448 | if (pmd) { | 1468 | if (pmd) { |
1449 | VM_BUG_ON(pmd_trans_huge(*pmd)); | 1469 | VM_BUG_ON(pmd_trans_huge(*pmd)); |
1450 | return pte_alloc_map_lock(mm, pmd, addr, ptl); | 1470 | return pte_alloc_map_lock(mm, pmd, addr, ptl); |
@@ -2035,7 +2055,7 @@ static int do_page_mkwrite(struct vm_fault *vmf) | |||
2035 | 2055 | ||
2036 | vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; | 2056 | vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; |
2037 | 2057 | ||
2038 | ret = vmf->vma->vm_ops->page_mkwrite(vmf->vma, vmf); | 2058 | ret = vmf->vma->vm_ops->page_mkwrite(vmf); |
2039 | /* Restore original flags so that caller is not surprised */ | 2059 | /* Restore original flags so that caller is not surprised */ |
2040 | vmf->flags = old_flags; | 2060 | vmf->flags = old_flags; |
2041 | if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) | 2061 | if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) |
@@ -2307,7 +2327,7 @@ static int wp_pfn_shared(struct vm_fault *vmf) | |||
2307 | 2327 | ||
2308 | pte_unmap_unlock(vmf->pte, vmf->ptl); | 2328 | pte_unmap_unlock(vmf->pte, vmf->ptl); |
2309 | vmf->flags |= FAULT_FLAG_MKWRITE; | 2329 | vmf->flags |= FAULT_FLAG_MKWRITE; |
2310 | ret = vma->vm_ops->pfn_mkwrite(vma, vmf); | 2330 | ret = vma->vm_ops->pfn_mkwrite(vmf); |
2311 | if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)) | 2331 | if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)) |
2312 | return ret; | 2332 | return ret; |
2313 | return finish_mkwrite_fault(vmf); | 2333 | return finish_mkwrite_fault(vmf); |
@@ -2503,7 +2523,7 @@ void unmap_mapping_range(struct address_space *mapping, | |||
2503 | hlen = ULONG_MAX - hba + 1; | 2523 | hlen = ULONG_MAX - hba + 1; |
2504 | } | 2524 | } |
2505 | 2525 | ||
2506 | details.check_mapping = even_cows? NULL: mapping; | 2526 | details.check_mapping = even_cows ? NULL : mapping; |
2507 | details.first_index = hba; | 2527 | details.first_index = hba; |
2508 | details.last_index = hba + hlen - 1; | 2528 | details.last_index = hba + hlen - 1; |
2509 | if (details.last_index < details.first_index) | 2529 | if (details.last_index < details.first_index) |
@@ -2861,7 +2881,7 @@ static int __do_fault(struct vm_fault *vmf) | |||
2861 | struct vm_area_struct *vma = vmf->vma; | 2881 | struct vm_area_struct *vma = vmf->vma; |
2862 | int ret; | 2882 | int ret; |
2863 | 2883 | ||
2864 | ret = vma->vm_ops->fault(vma, vmf); | 2884 | ret = vma->vm_ops->fault(vmf); |
2865 | if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY | | 2885 | if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY | |
2866 | VM_FAULT_DONE_COW))) | 2886 | VM_FAULT_DONE_COW))) |
2867 | return ret; | 2887 | return ret; |
@@ -2898,7 +2918,7 @@ static int pte_alloc_one_map(struct vm_fault *vmf) | |||
2898 | atomic_long_inc(&vma->vm_mm->nr_ptes); | 2918 | atomic_long_inc(&vma->vm_mm->nr_ptes); |
2899 | pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); | 2919 | pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); |
2900 | spin_unlock(vmf->ptl); | 2920 | spin_unlock(vmf->ptl); |
2901 | vmf->prealloc_pte = 0; | 2921 | vmf->prealloc_pte = NULL; |
2902 | } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))) { | 2922 | } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))) { |
2903 | return VM_FAULT_OOM; | 2923 | return VM_FAULT_OOM; |
2904 | } | 2924 | } |
@@ -2946,7 +2966,7 @@ static void deposit_prealloc_pte(struct vm_fault *vmf) | |||
2946 | * count that as nr_ptes. | 2966 | * count that as nr_ptes. |
2947 | */ | 2967 | */ |
2948 | atomic_long_inc(&vma->vm_mm->nr_ptes); | 2968 | atomic_long_inc(&vma->vm_mm->nr_ptes); |
2949 | vmf->prealloc_pte = 0; | 2969 | vmf->prealloc_pte = NULL; |
2950 | } | 2970 | } |
2951 | 2971 | ||
2952 | static int do_set_pmd(struct vm_fault *vmf, struct page *page) | 2972 | static int do_set_pmd(struct vm_fault *vmf, struct page *page) |
@@ -3352,7 +3372,7 @@ static int do_fault(struct vm_fault *vmf) | |||
3352 | /* preallocated pagetable is unused: free it */ | 3372 | /* preallocated pagetable is unused: free it */ |
3353 | if (vmf->prealloc_pte) { | 3373 | if (vmf->prealloc_pte) { |
3354 | pte_free(vma->vm_mm, vmf->prealloc_pte); | 3374 | pte_free(vma->vm_mm, vmf->prealloc_pte); |
3355 | vmf->prealloc_pte = 0; | 3375 | vmf->prealloc_pte = NULL; |
3356 | } | 3376 | } |
3357 | return ret; | 3377 | return ret; |
3358 | } | 3378 | } |
@@ -3380,32 +3400,32 @@ static int do_numa_page(struct vm_fault *vmf) | |||
3380 | int last_cpupid; | 3400 | int last_cpupid; |
3381 | int target_nid; | 3401 | int target_nid; |
3382 | bool migrated = false; | 3402 | bool migrated = false; |
3383 | pte_t pte = vmf->orig_pte; | 3403 | pte_t pte; |
3384 | bool was_writable = pte_write(pte); | 3404 | bool was_writable = pte_savedwrite(vmf->orig_pte); |
3385 | int flags = 0; | 3405 | int flags = 0; |
3386 | 3406 | ||
3387 | /* | 3407 | /* |
3388 | * The "pte" at this point cannot be used safely without | 3408 | * The "pte" at this point cannot be used safely without |
3389 | * validation through pte_unmap_same(). It's of NUMA type but | 3409 | * validation through pte_unmap_same(). It's of NUMA type but |
3390 | * the pfn may be screwed if the read is non atomic. | 3410 | * the pfn may be screwed if the read is non atomic. |
3391 | * | 3411 | */ |
3392 | * We can safely just do a "set_pte_at()", because the old | ||
3393 | * page table entry is not accessible, so there would be no | ||
3394 | * concurrent hardware modifications to the PTE. | ||
3395 | */ | ||
3396 | vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd); | 3412 | vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd); |
3397 | spin_lock(vmf->ptl); | 3413 | spin_lock(vmf->ptl); |
3398 | if (unlikely(!pte_same(*vmf->pte, pte))) { | 3414 | if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) { |
3399 | pte_unmap_unlock(vmf->pte, vmf->ptl); | 3415 | pte_unmap_unlock(vmf->pte, vmf->ptl); |
3400 | goto out; | 3416 | goto out; |
3401 | } | 3417 | } |
3402 | 3418 | ||
3403 | /* Make it present again */ | 3419 | /* |
3420 | * Make it present again. Depending on how the arch implements | ||
3421 | * non-accessible ptes, some can allow access by kernel mode. | ||
3422 | */ | ||
3423 | pte = ptep_modify_prot_start(vma->vm_mm, vmf->address, vmf->pte); | ||
3404 | pte = pte_modify(pte, vma->vm_page_prot); | 3424 | pte = pte_modify(pte, vma->vm_page_prot); |
3405 | pte = pte_mkyoung(pte); | 3425 | pte = pte_mkyoung(pte); |
3406 | if (was_writable) | 3426 | if (was_writable) |
3407 | pte = pte_mkwrite(pte); | 3427 | pte = pte_mkwrite(pte); |
3408 | set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); | 3428 | ptep_modify_prot_commit(vma->vm_mm, vmf->address, vmf->pte, pte); |
3409 | update_mmu_cache(vma, vmf->address, vmf->pte); | 3429 | update_mmu_cache(vma, vmf->address, vmf->pte); |
3410 | 3430 | ||
3411 | page = vm_normal_page(vma, vmf->address, pte); | 3431 | page = vm_normal_page(vma, vmf->address, pte); |
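The hunk above stops assuming a NUMA (PROT_NONE) pte is completely inaccessible: some architectures still allow kernel-mode access, so the update must go through the ptep_modify_prot_start()/ptep_modify_prot_commit() transaction rather than a bare set_pte_at(), and the write permission is read back with pte_savedwrite() instead of pte_write(). A minimal sketch of the resulting protocol, with the pte lock and surrounding checks elided:

```c
/*
 * Sketch of the start/commit protocol adopted above (locking elided).
 * ptep_modify_prot_start() begins the update so concurrent hardware
 * accessed/dirty updates are not lost; the commit publishes the new
 * pte in one step.
 */
pte = ptep_modify_prot_start(mm, addr, ptep);   /* snapshot and guard */
pte = pte_modify(pte, vma->vm_page_prot);       /* drop PROT_NONE */
pte = pte_mkyoung(pte);                         /* it was just touched */
if (was_writable)
        pte = pte_mkwrite(pte);                 /* restore saved write */
ptep_modify_prot_commit(mm, addr, ptep, pte);   /* publish */
```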
@@ -3466,8 +3486,8 @@ static int create_huge_pmd(struct vm_fault *vmf) | |||
3466 | { | 3486 | { |
3467 | if (vma_is_anonymous(vmf->vma)) | 3487 | if (vma_is_anonymous(vmf->vma)) |
3468 | return do_huge_pmd_anonymous_page(vmf); | 3488 | return do_huge_pmd_anonymous_page(vmf); |
3469 | if (vmf->vma->vm_ops->pmd_fault) | 3489 | if (vmf->vma->vm_ops->huge_fault) |
3470 | return vmf->vma->vm_ops->pmd_fault(vmf); | 3490 | return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD); |
3471 | return VM_FAULT_FALLBACK; | 3491 | return VM_FAULT_FALLBACK; |
3472 | } | 3492 | } |
3473 | 3493 | ||
@@ -3475,8 +3495,8 @@ static int wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd) | |||
3475 | { | 3495 | { |
3476 | if (vma_is_anonymous(vmf->vma)) | 3496 | if (vma_is_anonymous(vmf->vma)) |
3477 | return do_huge_pmd_wp_page(vmf, orig_pmd); | 3497 | return do_huge_pmd_wp_page(vmf, orig_pmd); |
3478 | if (vmf->vma->vm_ops->pmd_fault) | 3498 | if (vmf->vma->vm_ops->huge_fault) |
3479 | return vmf->vma->vm_ops->pmd_fault(vmf); | 3499 | return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD); |
3480 | 3500 | ||
3481 | /* COW handled on pte level: split pmd */ | 3501 | /* COW handled on pte level: split pmd */ |
3482 | VM_BUG_ON_VMA(vmf->vma->vm_flags & VM_SHARED, vmf->vma); | 3502 | VM_BUG_ON_VMA(vmf->vma->vm_flags & VM_SHARED, vmf->vma); |
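Both hunks retire the PMD-specific ->pmd_fault callback in favour of the generic ->huge_fault, which receives the requested page-entry size explicitly; the same callback then also serves the new PUD paths added just below. A hypothetical driver-side handler, assuming the enum page_entry_size values this series introduces (the mydev_* helper names are invented for illustration):

```c
/*
 * Hypothetical implementation of the unified callback: one entry
 * point dispatches on the requested entry size instead of separate
 * pmd/pud hooks. mydev_insert_pmd/pud are invented helpers.
 */
static int mydev_huge_fault(struct vm_fault *vmf, enum page_entry_size pe_size)
{
        switch (pe_size) {
        case PE_SIZE_PMD:
                return mydev_insert_pmd(vmf);
        case PE_SIZE_PUD:
                return mydev_insert_pud(vmf);
        default:
                return VM_FAULT_FALLBACK;       /* let the core fall back */
        }
}
```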
@@ -3490,6 +3510,30 @@ static inline bool vma_is_accessible(struct vm_area_struct *vma) | |||
3490 | return vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE); | 3510 | return vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE); |
3491 | } | 3511 | } |
3492 | 3512 | ||
3513 | static int create_huge_pud(struct vm_fault *vmf) | ||
3514 | { | ||
3515 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
3516 | /* No support for anonymous transparent PUD pages yet */ | ||
3517 | if (vma_is_anonymous(vmf->vma)) | ||
3518 | return VM_FAULT_FALLBACK; | ||
3519 | if (vmf->vma->vm_ops->huge_fault) | ||
3520 | return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD); | ||
3521 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ | ||
3522 | return VM_FAULT_FALLBACK; | ||
3523 | } | ||
3524 | |||
3525 | static int wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud) | ||
3526 | { | ||
3527 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
3528 | /* No support for anonymous transparent PUD pages yet */ | ||
3529 | if (vma_is_anonymous(vmf->vma)) | ||
3530 | return VM_FAULT_FALLBACK; | ||
3531 | if (vmf->vma->vm_ops->huge_fault) | ||
3532 | return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD); | ||
3533 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ | ||
3534 | return VM_FAULT_FALLBACK; | ||
3535 | } | ||
3536 | |||
3493 | /* | 3537 | /* |
3494 | * These routines also need to handle stuff like marking pages dirty | 3538 | * These routines also need to handle stuff like marking pages dirty |
3495 | * and/or accessed for architectures that don't do it in hardware (most | 3539 | * and/or accessed for architectures that don't do it in hardware (most |
@@ -3605,22 +3649,46 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address, | |||
3605 | }; | 3649 | }; |
3606 | struct mm_struct *mm = vma->vm_mm; | 3650 | struct mm_struct *mm = vma->vm_mm; |
3607 | pgd_t *pgd; | 3651 | pgd_t *pgd; |
3608 | pud_t *pud; | 3652 | int ret; |
3609 | 3653 | ||
3610 | pgd = pgd_offset(mm, address); | 3654 | pgd = pgd_offset(mm, address); |
3611 | pud = pud_alloc(mm, pgd, address); | 3655 | |
3612 | if (!pud) | 3656 | vmf.pud = pud_alloc(mm, pgd, address); |
3657 | if (!vmf.pud) | ||
3613 | return VM_FAULT_OOM; | 3658 | return VM_FAULT_OOM; |
3614 | vmf.pmd = pmd_alloc(mm, pud, address); | 3659 | if (pud_none(*vmf.pud) && transparent_hugepage_enabled(vma)) { |
3660 | ret = create_huge_pud(&vmf); | ||
3661 | if (!(ret & VM_FAULT_FALLBACK)) | ||
3662 | return ret; | ||
3663 | } else { | ||
3664 | pud_t orig_pud = *vmf.pud; | ||
3665 | |||
3666 | barrier(); | ||
3667 | if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) { | ||
3668 | unsigned int dirty = flags & FAULT_FLAG_WRITE; | ||
3669 | |||
3670 | /* NUMA case for anonymous PUDs would go here */ | ||
3671 | |||
3672 | if (dirty && !pud_write(orig_pud)) { | ||
3673 | ret = wp_huge_pud(&vmf, orig_pud); | ||
3674 | if (!(ret & VM_FAULT_FALLBACK)) | ||
3675 | return ret; | ||
3676 | } else { | ||
3677 | huge_pud_set_accessed(&vmf, orig_pud); | ||
3678 | return 0; | ||
3679 | } | ||
3680 | } | ||
3681 | } | ||
3682 | |||
3683 | vmf.pmd = pmd_alloc(mm, vmf.pud, address); | ||
3615 | if (!vmf.pmd) | 3684 | if (!vmf.pmd) |
3616 | return VM_FAULT_OOM; | 3685 | return VM_FAULT_OOM; |
3617 | if (pmd_none(*vmf.pmd) && transparent_hugepage_enabled(vma)) { | 3686 | if (pmd_none(*vmf.pmd) && transparent_hugepage_enabled(vma)) { |
3618 | int ret = create_huge_pmd(&vmf); | 3687 | ret = create_huge_pmd(&vmf); |
3619 | if (!(ret & VM_FAULT_FALLBACK)) | 3688 | if (!(ret & VM_FAULT_FALLBACK)) |
3620 | return ret; | 3689 | return ret; |
3621 | } else { | 3690 | } else { |
3622 | pmd_t orig_pmd = *vmf.pmd; | 3691 | pmd_t orig_pmd = *vmf.pmd; |
3623 | int ret; | ||
3624 | 3692 | ||
3625 | barrier(); | 3693 | barrier(); |
3626 | if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) { | 3694 | if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) { |
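With the PUD branch added, __handle_mm_fault() now probes each level for a huge mapping before descending, dropping down a level only when it reports VM_FAULT_FALLBACK. The descent, condensed (error handling and the trans-huge/devmap checks trimmed; a sketch, not the exact kernel code):

```c
/*
 * Condensed sketch of the descent above: try a PUD-sized mapping,
 * then PMD-sized, then ordinary PTEs.
 */
vmf.pud = pud_alloc(mm, pgd, address);
ret = create_huge_pud(&vmf);
if (!(ret & VM_FAULT_FALLBACK))
        return ret;

vmf.pmd = pmd_alloc(mm, vmf.pud, address);
ret = create_huge_pmd(&vmf);
if (!(ret & VM_FAULT_FALLBACK))
        return ret;

return handle_pte_fault(&vmf);
```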
@@ -3680,14 +3748,14 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address, | |||
3680 | 3748 | ||
3681 | if (flags & FAULT_FLAG_USER) { | 3749 | if (flags & FAULT_FLAG_USER) { |
3682 | mem_cgroup_oom_disable(); | 3750 | mem_cgroup_oom_disable(); |
3683 | /* | 3751 | /* |
3684 | * The task may have entered a memcg OOM situation but | 3752 | * The task may have entered a memcg OOM situation but |
3685 | * if the allocation error was handled gracefully (no | 3753 | * if the allocation error was handled gracefully (no |
3686 | * VM_FAULT_OOM), there is no need to kill anything. | 3754 | * VM_FAULT_OOM), there is no need to kill anything. |
3687 | * Just clean up the OOM state peacefully. | 3755 | * Just clean up the OOM state peacefully. |
3688 | */ | 3756 | */ |
3689 | if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM)) | 3757 | if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM)) |
3690 | mem_cgroup_oom_synchronize(false); | 3758 | mem_cgroup_oom_synchronize(false); |
3691 | } | 3759 | } |
3692 | 3760 | ||
3693 | /* | 3761 | /* |
@@ -3737,13 +3805,14 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) | |||
3737 | */ | 3805 | */ |
3738 | int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) | 3806 | int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) |
3739 | { | 3807 | { |
3808 | spinlock_t *ptl; | ||
3740 | pmd_t *new = pmd_alloc_one(mm, address); | 3809 | pmd_t *new = pmd_alloc_one(mm, address); |
3741 | if (!new) | 3810 | if (!new) |
3742 | return -ENOMEM; | 3811 | return -ENOMEM; |
3743 | 3812 | ||
3744 | smp_wmb(); /* See comment in __pte_alloc */ | 3813 | smp_wmb(); /* See comment in __pte_alloc */ |
3745 | 3814 | ||
3746 | spin_lock(&mm->page_table_lock); | 3815 | ptl = pud_lock(mm, pud); |
3747 | #ifndef __ARCH_HAS_4LEVEL_HACK | 3816 | #ifndef __ARCH_HAS_4LEVEL_HACK |
3748 | if (!pud_present(*pud)) { | 3817 | if (!pud_present(*pud)) { |
3749 | mm_inc_nr_pmds(mm); | 3818 | mm_inc_nr_pmds(mm); |
@@ -3757,7 +3826,7 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) | |||
3757 | } else /* Another has populated it */ | 3826 | } else /* Another has populated it */ |
3758 | pmd_free(mm, new); | 3827 | pmd_free(mm, new); |
3759 | #endif /* __ARCH_HAS_4LEVEL_HACK */ | 3828 | #endif /* __ARCH_HAS_4LEVEL_HACK */ |
3760 | spin_unlock(&mm->page_table_lock); | 3829 | spin_unlock(ptl); |
3761 | return 0; | 3830 | return 0; |
3762 | } | 3831 | } |
3763 | #endif /* __PAGETABLE_PMD_FOLDED */ | 3832 | #endif /* __PAGETABLE_PMD_FOLDED */ |
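__pmd_alloc() switches from the mm-wide page_table_lock to the pud_lock() accessor. In this series that still resolves to &mm->page_table_lock, but routing through the accessor leaves room for split PUD-level locks later (an assumption about intent, by analogy with the existing split pmd locks). The populate-or-discard pattern under the lock:

```c
/*
 * The populate-or-discard idiom used above: allocate outside the
 * lock, then either install the new pmd page or free it if another
 * thread raced us and populated the pud first.
 */
spinlock_t *ptl = pud_lock(mm, pud);
if (!pud_present(*pud)) {
        mm_inc_nr_pmds(mm);
        pud_populate(mm, pud, new);     /* install the fresh pmd page */
} else {
        pmd_free(mm, new);              /* lost the race: discard ours */
}
spin_unlock(ptl);
```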
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index d67787d10ff0..1d3ed58f92ab 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
@@ -126,6 +126,8 @@ void put_online_mems(void) | |||
126 | 126 | ||
127 | void mem_hotplug_begin(void) | 127 | void mem_hotplug_begin(void) |
128 | { | 128 | { |
129 | assert_held_device_hotplug(); | ||
130 | |||
129 | mem_hotplug.active_writer = current; | 131 | mem_hotplug.active_writer = current; |
130 | 132 | ||
131 | memhp_lock_acquire(); | 133 | memhp_lock_acquire(); |
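mem_hotplug_begin() now asserts that the caller already holds the device hotplug lock. A plausible shape for the helper, assuming it is a lockdep-only check on the global device_hotplug_lock (the helper itself lives outside this section):

```c
/*
 * Assumed implementation of the new assertion: verify the caller
 * entered through lock_device_hotplug().
 */
void assert_held_device_hotplug(void)
{
        lockdep_assert_held(&device_hotplug_lock);
}
```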
@@ -862,7 +864,6 @@ int __remove_pages(struct zone *zone, unsigned long phys_start_pfn, | |||
862 | 864 | ||
863 | return ret; | 865 | return ret; |
864 | } | 866 | } |
865 | EXPORT_SYMBOL_GPL(__remove_pages); | ||
866 | #endif /* CONFIG_MEMORY_HOTREMOVE */ | 867 | #endif /* CONFIG_MEMORY_HOTREMOVE */ |
867 | 868 | ||
868 | int set_online_page_callback(online_page_callback_t callback) | 869 | int set_online_page_callback(online_page_callback_t callback) |
@@ -1336,7 +1337,7 @@ int zone_for_memory(int nid, u64 start, u64 size, int zone_default, | |||
1336 | 1337 | ||
1337 | static int online_memory_block(struct memory_block *mem, void *arg) | 1338 | static int online_memory_block(struct memory_block *mem, void *arg) |
1338 | { | 1339 | { |
1339 | return memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE); | 1340 | return device_online(&mem->dev); |
1340 | } | 1341 | } |
1341 | 1342 | ||
1342 | /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */ | 1343 | /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */ |
@@ -1508,7 +1509,7 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn, | |||
1508 | while ((i < MAX_ORDER_NR_PAGES) && | 1509 | while ((i < MAX_ORDER_NR_PAGES) && |
1509 | !pfn_valid_within(pfn + i)) | 1510 | !pfn_valid_within(pfn + i)) |
1510 | i++; | 1511 | i++; |
1511 | if (i == MAX_ORDER_NR_PAGES) | 1512 | if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn) |
1512 | continue; | 1513 | continue; |
1513 | page = pfn_to_page(pfn + i); | 1514 | page = pfn_to_page(pfn + i); |
1514 | if (zone && page_zone(page) != zone) | 1515 | if (zone && page_zone(page) != zone) |
@@ -1522,7 +1523,7 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn, | |||
1522 | 1523 | ||
1523 | if (zone) { | 1524 | if (zone) { |
1524 | *valid_start = start; | 1525 | *valid_start = start; |
1525 | *valid_end = end; | 1526 | *valid_end = min(end, end_pfn); |
1526 | return 1; | 1527 | return 1; |
1527 | } else { | 1528 | } else { |
1528 | return 0; | 1529 | return 0; |
@@ -1530,10 +1531,10 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn, | |||
1530 | } | 1531 | } |
1531 | 1532 | ||
1532 | /* | 1533 | /* |
1533 | * Scan pfn range [start,end) to find movable/migratable pages (LRU pages | 1534 | * Scan pfn range [start,end) to find movable/migratable pages (LRU pages, |
1534 | * and hugepages). We scan pfn because it's much easier than scanning over | 1535 | * non-lru movable pages and hugepages). We scan pfn because it's much |
1535 | * linked list. This function returns the pfn of the first found movable | 1536 | * easier than scanning over a linked list. This function returns the pfn |
1536 | * page if it's found, otherwise 0. | 1537 | * of the first found movable page if it's found, otherwise 0. |
1537 | */ | 1538 | */ |
1538 | static unsigned long scan_movable_pages(unsigned long start, unsigned long end) | 1539 | static unsigned long scan_movable_pages(unsigned long start, unsigned long end) |
1539 | { | 1540 | { |
@@ -1544,6 +1545,8 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end) | |||
1544 | page = pfn_to_page(pfn); | 1545 | page = pfn_to_page(pfn); |
1545 | if (PageLRU(page)) | 1546 | if (PageLRU(page)) |
1546 | return pfn; | 1547 | return pfn; |
1548 | if (__PageMovable(page)) | ||
1549 | return pfn; | ||
1547 | if (PageHuge(page)) { | 1550 | if (PageHuge(page)) { |
1548 | if (page_huge_active(page)) | 1551 | if (page_huge_active(page)) |
1549 | return pfn; | 1552 | return pfn; |
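scan_movable_pages() now also reports non-lru movable pages, i.e. driver-owned pages (balloon, zsmalloc and the like) that advertise migration support via __PageMovable(). The resulting classification, as a sketch:

```c
/*
 * The scan's classification after this change (sketch): anything on
 * the LRU, any non-lru movable page, and any active hugepage counts
 * as migratable.
 */
if (PageLRU(page) || __PageMovable(page))
        return pfn;                             /* migratable */
if (PageHuge(page) && page_huge_active(page))
        return pfn;                             /* migratable hugepage */
```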
@@ -1620,21 +1623,25 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) | |||
1620 | if (!get_page_unless_zero(page)) | 1623 | if (!get_page_unless_zero(page)) |
1621 | continue; | 1624 | continue; |
1622 | /* | 1625 | /* |
1623 | * We can skip free pages. And we can only deal with pages on | 1626 | * We can skip free pages. And we can deal with pages on the |
1624 | * LRU. | 1627 | * LRU as well as non-lru movable pages. |
1625 | */ | 1628 | */ |
1626 | ret = isolate_lru_page(page); | 1629 | if (PageLRU(page)) |
1630 | ret = isolate_lru_page(page); | ||
1631 | else | ||
1632 | ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE); | ||
1627 | if (!ret) { /* Success */ | 1633 | if (!ret) { /* Success */ |
1628 | put_page(page); | 1634 | put_page(page); |
1629 | list_add_tail(&page->lru, &source); | 1635 | list_add_tail(&page->lru, &source); |
1630 | move_pages--; | 1636 | move_pages--; |
1631 | inc_node_page_state(page, NR_ISOLATED_ANON + | 1637 | if (!__PageMovable(page)) |
1632 | page_is_file_cache(page)); | 1638 | inc_node_page_state(page, NR_ISOLATED_ANON + |
1639 | page_is_file_cache(page)); | ||
1633 | 1640 | ||
1634 | } else { | 1641 | } else { |
1635 | #ifdef CONFIG_DEBUG_VM | 1642 | #ifdef CONFIG_DEBUG_VM |
1636 | pr_alert("removing pfn %lx from LRU failed\n", pfn); | 1643 | pr_alert("failed to isolate pfn %lx\n", pfn); |
1637 | dump_page(page, "failed to remove from LRU"); | 1644 | dump_page(page, "isolation failed"); |
1638 | #endif | 1645 | #endif |
1639 | put_page(page); | 1646 | put_page(page); |
1640 | /* Because we don't have big zone->lock, we should | 1647 |
diff --git a/mm/migrate.c b/mm/migrate.c index 87f4d0f81819..2c63ac06791b 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
@@ -74,7 +74,7 @@ int migrate_prep_local(void) | |||
74 | return 0; | 74 | return 0; |
75 | } | 75 | } |
76 | 76 | ||
77 | bool isolate_movable_page(struct page *page, isolate_mode_t mode) | 77 | int isolate_movable_page(struct page *page, isolate_mode_t mode) |
78 | { | 78 | { |
79 | struct address_space *mapping; | 79 | struct address_space *mapping; |
80 | 80 | ||
@@ -125,14 +125,14 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode) | |||
125 | __SetPageIsolated(page); | 125 | __SetPageIsolated(page); |
126 | unlock_page(page); | 126 | unlock_page(page); |
127 | 127 | ||
128 | return true; | 128 | return 0; |
129 | 129 | ||
130 | out_no_isolated: | 130 | out_no_isolated: |
131 | unlock_page(page); | 131 | unlock_page(page); |
132 | out_putpage: | 132 | out_putpage: |
133 | put_page(page); | 133 | put_page(page); |
134 | out: | 134 | out: |
135 | return false; | 135 | return -EBUSY; |
136 | } | 136 | } |
137 | 137 | ||
138 | /* It should be called on page which is PG_movable */ | 138 | /* It should be called on page which is PG_movable */ |
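isolate_movable_page() switches from bool to the kernel's usual 0/-errno convention, so callers such as do_migrate_range() above can test it exactly like isolate_lru_page(), which already returns 0 or -EBUSY. Caller-side, the two primitives now compose as:

```c
/* Either isolation primitive now feeds the same error check. */
int ret;

if (PageLRU(page))
        ret = isolate_lru_page(page);
else
        ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
if (ret)                /* -EBUSY: page is pinned or already isolated */
        goto failed;
list_add_tail(&page->lru, &pagelist);
```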
@@ -193,82 +193,62 @@ void putback_movable_pages(struct list_head *l) | |||
193 | /* | 193 | /* |
194 | * Restore a potential migration pte to a working pte entry | 194 | * Restore a potential migration pte to a working pte entry |
195 | */ | 195 | */ |
196 | static int remove_migration_pte(struct page *new, struct vm_area_struct *vma, | 196 | static int remove_migration_pte(struct page *page, struct vm_area_struct *vma, |
197 | unsigned long addr, void *old) | 197 | unsigned long addr, void *old) |
198 | { | 198 | { |
199 | struct mm_struct *mm = vma->vm_mm; | 199 | struct page_vma_mapped_walk pvmw = { |
200 | .page = old, | ||
201 | .vma = vma, | ||
202 | .address = addr, | ||
203 | .flags = PVMW_SYNC | PVMW_MIGRATION, | ||
204 | }; | ||
205 | struct page *new; | ||
206 | pte_t pte; | ||
200 | swp_entry_t entry; | 207 | swp_entry_t entry; |
201 | pmd_t *pmd; | ||
202 | pte_t *ptep, pte; | ||
203 | spinlock_t *ptl; | ||
204 | 208 | ||
205 | if (unlikely(PageHuge(new))) { | 209 | VM_BUG_ON_PAGE(PageTail(page), page); |
206 | ptep = huge_pte_offset(mm, addr); | 210 | while (page_vma_mapped_walk(&pvmw)) { |
207 | if (!ptep) | 211 | new = page - pvmw.page->index + |
208 | goto out; | 212 | linear_page_index(vma, pvmw.address); |
209 | ptl = huge_pte_lockptr(hstate_vma(vma), mm, ptep); | ||
210 | } else { | ||
211 | pmd = mm_find_pmd(mm, addr); | ||
212 | if (!pmd) | ||
213 | goto out; | ||
214 | 213 | ||
215 | ptep = pte_offset_map(pmd, addr); | 214 | get_page(new); |
215 | pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot))); | ||
216 | if (pte_swp_soft_dirty(*pvmw.pte)) | ||
217 | pte = pte_mksoft_dirty(pte); | ||
216 | 218 | ||
217 | /* | 219 | /* |
218 | * Peek to check is_swap_pte() before taking ptlock? No, we | 220 | * Recheck VMA as permissions can change since migration started |
219 | * can race mremap's move_ptes(), which skips anon_vma lock. | ||
220 | */ | 221 | */ |
221 | 222 | entry = pte_to_swp_entry(*pvmw.pte); | |
222 | ptl = pte_lockptr(mm, pmd); | 223 | if (is_write_migration_entry(entry)) |
223 | } | 224 | pte = maybe_mkwrite(pte, vma); |
224 | |||
225 | spin_lock(ptl); | ||
226 | pte = *ptep; | ||
227 | if (!is_swap_pte(pte)) | ||
228 | goto unlock; | ||
229 | |||
230 | entry = pte_to_swp_entry(pte); | ||
231 | |||
232 | if (!is_migration_entry(entry) || | ||
233 | migration_entry_to_page(entry) != old) | ||
234 | goto unlock; | ||
235 | |||
236 | get_page(new); | ||
237 | pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot))); | ||
238 | if (pte_swp_soft_dirty(*ptep)) | ||
239 | pte = pte_mksoft_dirty(pte); | ||
240 | |||
241 | /* Recheck VMA as permissions can change since migration started */ | ||
242 | if (is_write_migration_entry(entry)) | ||
243 | pte = maybe_mkwrite(pte, vma); | ||
244 | 225 | ||
245 | #ifdef CONFIG_HUGETLB_PAGE | 226 | #ifdef CONFIG_HUGETLB_PAGE |
246 | if (PageHuge(new)) { | 227 | if (PageHuge(new)) { |
247 | pte = pte_mkhuge(pte); | 228 | pte = pte_mkhuge(pte); |
248 | pte = arch_make_huge_pte(pte, vma, new, 0); | 229 | pte = arch_make_huge_pte(pte, vma, new, 0); |
249 | } | 230 | } |
250 | #endif | 231 | #endif |
251 | flush_dcache_page(new); | 232 | flush_dcache_page(new); |
252 | set_pte_at(mm, addr, ptep, pte); | 233 | set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); |
253 | 234 | ||
254 | if (PageHuge(new)) { | 235 | if (PageHuge(new)) { |
255 | if (PageAnon(new)) | 236 | if (PageAnon(new)) |
256 | hugepage_add_anon_rmap(new, vma, addr); | 237 | hugepage_add_anon_rmap(new, vma, pvmw.address); |
238 | else | ||
239 | page_dup_rmap(new, true); | ||
240 | } else if (PageAnon(new)) | ||
241 | page_add_anon_rmap(new, vma, pvmw.address, false); | ||
257 | else | 242 | else |
258 | page_dup_rmap(new, true); | 243 | page_add_file_rmap(new, false); |
259 | } else if (PageAnon(new)) | ||
260 | page_add_anon_rmap(new, vma, addr, false); | ||
261 | else | ||
262 | page_add_file_rmap(new, false); | ||
263 | 244 | ||
264 | if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new)) | 245 | if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new)) |
265 | mlock_vma_page(new); | 246 | mlock_vma_page(new); |
247 | |||
248 | /* No need to invalidate - it was non-present before */ | ||
249 | update_mmu_cache(vma, pvmw.address, pvmw.pte); | ||
250 | } | ||
266 | 251 | ||
267 | /* No need to invalidate - it was non-present before */ | ||
268 | update_mmu_cache(vma, addr, ptep); | ||
269 | unlock: | ||
270 | pte_unmap_unlock(ptep, ptl); | ||
271 | out: | ||
272 | return SWAP_AGAIN; | 252 | return SWAP_AGAIN; |
273 | } | 253 | } |
274 | 254 | ||
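remove_migration_pte() is rewritten on top of page_vma_mapped_walk(), which hides the page-table descent, the hugepage special cases and the pte locking behind an iterator that yields one locked mapping of the old page per iteration. The skeleton of the idiom (restore_one_entry() is an invented placeholder for the loop body above):

```c
/*
 * Skeleton of the page_vma_mapped_walk() idiom: the walker finds each
 * place @old is migration-mapped in @vma and returns with pvmw.pte
 * pointing at the locked entry.
 */
struct page_vma_mapped_walk pvmw = {
        .page = old,
        .vma = vma,
        .address = addr,
        .flags = PVMW_SYNC | PVMW_MIGRATION,
};

while (page_vma_mapped_walk(&pvmw)) {
        /* pvmw.address / pvmw.pte describe one mapping of @old */
        restore_one_entry(&pvmw);       /* invented placeholder */
}
```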
diff --git a/mm/mincore.c b/mm/mincore.c index ddb872da3f5b..c5687c45c326 100644 --- a/mm/mincore.c +++ b/mm/mincore.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/syscalls.h> | 14 | #include <linux/syscalls.h> |
15 | #include <linux/swap.h> | 15 | #include <linux/swap.h> |
16 | #include <linux/swapops.h> | 16 | #include <linux/swapops.h> |
17 | #include <linux/shmem_fs.h> | ||
17 | #include <linux/hugetlb.h> | 18 | #include <linux/hugetlb.h> |
18 | 19 | ||
19 | #include <linux/uaccess.h> | 20 | #include <linux/uaccess.h> |
diff --git a/mm/mmap.c b/mm/mmap.c --- a/mm/mmap.c +++ b/mm/mmap.c | |||
@@ -176,7 +176,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) | |||
176 | return next; | 176 | return next; |
177 | } | 177 | } |
178 | 178 | ||
179 | static int do_brk(unsigned long addr, unsigned long len); | 179 | static int do_brk(unsigned long addr, unsigned long len, struct list_head *uf); |
180 | 180 | ||
181 | SYSCALL_DEFINE1(brk, unsigned long, brk) | 181 | SYSCALL_DEFINE1(brk, unsigned long, brk) |
182 | { | 182 | { |
@@ -185,6 +185,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) | |||
185 | struct mm_struct *mm = current->mm; | 185 | struct mm_struct *mm = current->mm; |
186 | unsigned long min_brk; | 186 | unsigned long min_brk; |
187 | bool populate; | 187 | bool populate; |
188 | LIST_HEAD(uf); | ||
188 | 189 | ||
189 | if (down_write_killable(&mm->mmap_sem)) | 190 | if (down_write_killable(&mm->mmap_sem)) |
190 | return -EINTR; | 191 | return -EINTR; |
@@ -222,7 +223,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) | |||
222 | 223 | ||
223 | /* Always allow shrinking brk. */ | 224 | /* Always allow shrinking brk. */ |
224 | if (brk <= mm->brk) { | 225 | if (brk <= mm->brk) { |
225 | if (!do_munmap(mm, newbrk, oldbrk-newbrk)) | 226 | if (!do_munmap(mm, newbrk, oldbrk-newbrk, &uf)) |
226 | goto set_brk; | 227 | goto set_brk; |
227 | goto out; | 228 | goto out; |
228 | } | 229 | } |
@@ -232,13 +233,14 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) | |||
232 | goto out; | 233 | goto out; |
233 | 234 | ||
234 | /* Ok, looks good - let it rip. */ | 235 | /* Ok, looks good - let it rip. */ |
235 | if (do_brk(oldbrk, newbrk-oldbrk) < 0) | 236 | if (do_brk(oldbrk, newbrk-oldbrk, &uf) < 0) |
236 | goto out; | 237 | goto out; |
237 | 238 | ||
238 | set_brk: | 239 | set_brk: |
239 | mm->brk = brk; | 240 | mm->brk = brk; |
240 | populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0; | 241 | populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0; |
241 | up_write(&mm->mmap_sem); | 242 | up_write(&mm->mmap_sem); |
243 | userfaultfd_unmap_complete(mm, &uf); | ||
242 | if (populate) | 244 | if (populate) |
243 | mm_populate(oldbrk, newbrk - oldbrk); | 245 | mm_populate(oldbrk, newbrk - oldbrk); |
244 | return brk; | 246 | return brk; |
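do_munmap() gains a list_head parameter on which userfaultfd_unmap_prep() queues a context for every unmapped range watched by a userfaultfd; the caller reports those ranges with userfaultfd_unmap_complete() only after dropping mmap_sem, so the monitoring process can respond without deadlocking against the unmapper. Every call site in this diff follows the same shape:

```c
/*
 * The unmap-notification shape threaded through brk/munmap/mremap:
 * collect contexts under mmap_sem, notify after releasing it.
 */
LIST_HEAD(uf);

if (down_write_killable(&mm->mmap_sem))
        return -EINTR;
ret = do_munmap(mm, start, len, &uf);   /* may queue entries on @uf */
up_write(&mm->mmap_sem);
userfaultfd_unmap_complete(mm, &uf);    /* notify monitors, free @uf */
return ret;
```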
@@ -1304,7 +1306,8 @@ static inline int mlock_future_check(struct mm_struct *mm, | |||
1304 | unsigned long do_mmap(struct file *file, unsigned long addr, | 1306 | unsigned long do_mmap(struct file *file, unsigned long addr, |
1305 | unsigned long len, unsigned long prot, | 1307 | unsigned long len, unsigned long prot, |
1306 | unsigned long flags, vm_flags_t vm_flags, | 1308 | unsigned long flags, vm_flags_t vm_flags, |
1307 | unsigned long pgoff, unsigned long *populate) | 1309 | unsigned long pgoff, unsigned long *populate, |
1310 | struct list_head *uf) | ||
1308 | { | 1311 | { |
1309 | struct mm_struct *mm = current->mm; | 1312 | struct mm_struct *mm = current->mm; |
1310 | int pkey = 0; | 1313 | int pkey = 0; |
@@ -1447,7 +1450,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr, | |||
1447 | vm_flags |= VM_NORESERVE; | 1450 | vm_flags |= VM_NORESERVE; |
1448 | } | 1451 | } |
1449 | 1452 | ||
1450 | addr = mmap_region(file, addr, len, vm_flags, pgoff); | 1453 | addr = mmap_region(file, addr, len, vm_flags, pgoff, uf); |
1451 | if (!IS_ERR_VALUE(addr) && | 1454 | if (!IS_ERR_VALUE(addr) && |
1452 | ((vm_flags & VM_LOCKED) || | 1455 | ((vm_flags & VM_LOCKED) || |
1453 | (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE)) | 1456 | (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE)) |
@@ -1583,7 +1586,8 @@ static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags) | |||
1583 | } | 1586 | } |
1584 | 1587 | ||
1585 | unsigned long mmap_region(struct file *file, unsigned long addr, | 1588 | unsigned long mmap_region(struct file *file, unsigned long addr, |
1586 | unsigned long len, vm_flags_t vm_flags, unsigned long pgoff) | 1589 | unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, |
1590 | struct list_head *uf) | ||
1587 | { | 1591 | { |
1588 | struct mm_struct *mm = current->mm; | 1592 | struct mm_struct *mm = current->mm; |
1589 | struct vm_area_struct *vma, *prev; | 1593 | struct vm_area_struct *vma, *prev; |
@@ -1609,7 +1613,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr, | |||
1609 | /* Clear old maps */ | 1613 | /* Clear old maps */ |
1610 | while (find_vma_links(mm, addr, addr + len, &prev, &rb_link, | 1614 | while (find_vma_links(mm, addr, addr + len, &prev, &rb_link, |
1611 | &rb_parent)) { | 1615 | &rb_parent)) { |
1612 | if (do_munmap(mm, addr, len)) | 1616 | if (do_munmap(mm, addr, len, uf)) |
1613 | return -ENOMEM; | 1617 | return -ENOMEM; |
1614 | } | 1618 | } |
1615 | 1619 | ||
@@ -2495,11 +2499,11 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2495 | } | 2499 | } |
2496 | 2500 | ||
2497 | /* | 2501 | /* |
2498 | * __split_vma() bypasses sysctl_max_map_count checking. We use this on the | 2502 | * __split_vma() bypasses sysctl_max_map_count checking. We use this where it |
2499 | * munmap path where it doesn't make sense to fail. | 2503 | * has already been checked or doesn't make sense to fail. |
2500 | */ | 2504 | */ |
2501 | static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, | 2505 | int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, |
2502 | unsigned long addr, int new_below) | 2506 | unsigned long addr, int new_below) |
2503 | { | 2507 | { |
2504 | struct vm_area_struct *new; | 2508 | struct vm_area_struct *new; |
2505 | int err; | 2509 | int err; |
@@ -2579,7 +2583,8 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2579 | * work. This now handles partial unmappings. | 2583 | * work. This now handles partial unmappings. |
2580 | * Jeremy Fitzhardinge <jeremy@goop.org> | 2584 | * Jeremy Fitzhardinge <jeremy@goop.org> |
2581 | */ | 2585 | */ |
2582 | int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) | 2586 | int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, |
2587 | struct list_head *uf) | ||
2583 | { | 2588 | { |
2584 | unsigned long end; | 2589 | unsigned long end; |
2585 | struct vm_area_struct *vma, *prev, *last; | 2590 | struct vm_area_struct *vma, *prev, *last; |
@@ -2603,6 +2608,13 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) | |||
2603 | if (vma->vm_start >= end) | 2608 | if (vma->vm_start >= end) |
2604 | return 0; | 2609 | return 0; |
2605 | 2610 | ||
2611 | if (uf) { | ||
2612 | int error = userfaultfd_unmap_prep(vma, start, end, uf); | ||
2613 | |||
2614 | if (error) | ||
2615 | return error; | ||
2616 | } | ||
2617 | |||
2606 | /* | 2618 | /* |
2607 | * If we need to split any vma, do it now to save pain later. | 2619 | * If we need to split any vma, do it now to save pain later. |
2608 | * | 2620 | * |
@@ -2668,27 +2680,22 @@ int vm_munmap(unsigned long start, size_t len) | |||
2668 | { | 2680 | { |
2669 | int ret; | 2681 | int ret; |
2670 | struct mm_struct *mm = current->mm; | 2682 | struct mm_struct *mm = current->mm; |
2683 | LIST_HEAD(uf); | ||
2671 | 2684 | ||
2672 | if (down_write_killable(&mm->mmap_sem)) | 2685 | if (down_write_killable(&mm->mmap_sem)) |
2673 | return -EINTR; | 2686 | return -EINTR; |
2674 | 2687 | ||
2675 | ret = do_munmap(mm, start, len); | 2688 | ret = do_munmap(mm, start, len, &uf); |
2676 | up_write(&mm->mmap_sem); | 2689 | up_write(&mm->mmap_sem); |
2690 | userfaultfd_unmap_complete(mm, &uf); | ||
2677 | return ret; | 2691 | return ret; |
2678 | } | 2692 | } |
2679 | EXPORT_SYMBOL(vm_munmap); | 2693 | EXPORT_SYMBOL(vm_munmap); |
2680 | 2694 | ||
2681 | SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) | 2695 | SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) |
2682 | { | 2696 | { |
2683 | int ret; | ||
2684 | struct mm_struct *mm = current->mm; | ||
2685 | |||
2686 | profile_munmap(addr); | 2697 | profile_munmap(addr); |
2687 | if (down_write_killable(&mm->mmap_sem)) | 2698 | return vm_munmap(addr, len); |
2688 | return -EINTR; | ||
2689 | ret = do_munmap(mm, addr, len); | ||
2690 | up_write(&mm->mmap_sem); | ||
2691 | return ret; | ||
2692 | } | 2699 | } |
2693 | 2700 | ||
2694 | 2701 | ||
@@ -2780,7 +2787,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, | |||
2780 | 2787 | ||
2781 | file = get_file(vma->vm_file); | 2788 | file = get_file(vma->vm_file); |
2782 | ret = do_mmap_pgoff(vma->vm_file, start, size, | 2789 | ret = do_mmap_pgoff(vma->vm_file, start, size, |
2783 | prot, flags, pgoff, &populate); | 2790 | prot, flags, pgoff, &populate, NULL); |
2784 | fput(file); | 2791 | fput(file); |
2785 | out: | 2792 | out: |
2786 | up_write(&mm->mmap_sem); | 2793 | up_write(&mm->mmap_sem); |
@@ -2806,7 +2813,7 @@ static inline void verify_mm_writelocked(struct mm_struct *mm) | |||
2806 | * anonymous maps. eventually we may be able to do some | 2813 | * anonymous maps. eventually we may be able to do some |
2807 | * brk-specific accounting here. | 2814 | * brk-specific accounting here. |
2808 | */ | 2815 | */ |
2809 | static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags) | 2816 | static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags, struct list_head *uf) |
2810 | { | 2817 | { |
2811 | struct mm_struct *mm = current->mm; | 2818 | struct mm_struct *mm = current->mm; |
2812 | struct vm_area_struct *vma, *prev; | 2819 | struct vm_area_struct *vma, *prev; |
@@ -2845,7 +2852,7 @@ static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long | |||
2845 | */ | 2852 | */ |
2846 | while (find_vma_links(mm, addr, addr + len, &prev, &rb_link, | 2853 | while (find_vma_links(mm, addr, addr + len, &prev, &rb_link, |
2847 | &rb_parent)) { | 2854 | &rb_parent)) { |
2848 | if (do_munmap(mm, addr, len)) | 2855 | if (do_munmap(mm, addr, len, uf)) |
2849 | return -ENOMEM; | 2856 | return -ENOMEM; |
2850 | } | 2857 | } |
2851 | 2858 | ||
@@ -2892,9 +2899,9 @@ out: | |||
2892 | return 0; | 2899 | return 0; |
2893 | } | 2900 | } |
2894 | 2901 | ||
2895 | static int do_brk(unsigned long addr, unsigned long len) | 2902 | static int do_brk(unsigned long addr, unsigned long len, struct list_head *uf) |
2896 | { | 2903 | { |
2897 | return do_brk_flags(addr, len, 0); | 2904 | return do_brk_flags(addr, len, 0, uf); |
2898 | } | 2905 | } |
2899 | 2906 | ||
2900 | int vm_brk_flags(unsigned long addr, unsigned long len, unsigned long flags) | 2907 | int vm_brk_flags(unsigned long addr, unsigned long len, unsigned long flags) |
@@ -2902,13 +2909,15 @@ int vm_brk_flags(unsigned long addr, unsigned long len, unsigned long flags) | |||
2902 | struct mm_struct *mm = current->mm; | 2909 | struct mm_struct *mm = current->mm; |
2903 | int ret; | 2910 | int ret; |
2904 | bool populate; | 2911 | bool populate; |
2912 | LIST_HEAD(uf); | ||
2905 | 2913 | ||
2906 | if (down_write_killable(&mm->mmap_sem)) | 2914 | if (down_write_killable(&mm->mmap_sem)) |
2907 | return -EINTR; | 2915 | return -EINTR; |
2908 | 2916 | ||
2909 | ret = do_brk_flags(addr, len, flags); | 2917 | ret = do_brk_flags(addr, len, flags, &uf); |
2910 | populate = ((mm->def_flags & VM_LOCKED) != 0); | 2918 | populate = ((mm->def_flags & VM_LOCKED) != 0); |
2911 | up_write(&mm->mmap_sem); | 2919 | up_write(&mm->mmap_sem); |
2920 | userfaultfd_unmap_complete(mm, &uf); | ||
2912 | if (populate && !ret) | 2921 | if (populate && !ret) |
2913 | mm_populate(addr, len); | 2922 | mm_populate(addr, len); |
2914 | return ret; | 2923 | return ret; |
@@ -3125,8 +3134,7 @@ void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages) | |||
3125 | mm->data_vm += npages; | 3134 | mm->data_vm += npages; |
3126 | } | 3135 | } |
3127 | 3136 | ||
3128 | static int special_mapping_fault(struct vm_area_struct *vma, | 3137 | static int special_mapping_fault(struct vm_fault *vmf); |
3129 | struct vm_fault *vmf); | ||
3130 | 3138 | ||
3131 | /* | 3139 | /* |
3132 | * Having a close hook prevents vma merging regardless of flags. | 3140 | * Having a close hook prevents vma merging regardless of flags. |
@@ -3161,9 +3169,9 @@ static const struct vm_operations_struct legacy_special_mapping_vmops = { | |||
3161 | .fault = special_mapping_fault, | 3169 | .fault = special_mapping_fault, |
3162 | }; | 3170 | }; |
3163 | 3171 | ||
3164 | static int special_mapping_fault(struct vm_area_struct *vma, | 3172 | static int special_mapping_fault(struct vm_fault *vmf) |
3165 | struct vm_fault *vmf) | ||
3166 | { | 3173 | { |
3174 | struct vm_area_struct *vma = vmf->vma; | ||
3167 | pgoff_t pgoff; | 3175 | pgoff_t pgoff; |
3168 | struct page **pages; | 3176 | struct page **pages; |
3169 | 3177 | ||
@@ -3173,7 +3181,7 @@ static int special_mapping_fault(struct vm_area_struct *vma, | |||
3173 | struct vm_special_mapping *sm = vma->vm_private_data; | 3181 | struct vm_special_mapping *sm = vma->vm_private_data; |
3174 | 3182 | ||
3175 | if (sm->fault) | 3183 | if (sm->fault) |
3176 | return sm->fault(sm, vma, vmf); | 3184 | return sm->fault(sm, vmf->vma, vmf); |
3177 | 3185 | ||
3178 | pages = sm->pages; | 3186 | pages = sm->pages; |
3179 | } | 3187 | } |
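special_mapping_fault() is converted to the new fault signature in which the VMA travels inside struct vm_fault instead of arriving as a separate argument (the same conversion hits filemap_fault() in mm/nommu.c below). Porting any .fault handler follows one pattern; my_fault is an invented name:

```c
/*
 * Old shape (sketch): the VMA arrived as its own argument:
 *      static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 * New shape: recover the VMA from the fault descriptor.
 */
static int my_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;  /* filled in by the core */

        /* ... resolve the fault at vmf->address ... */
        return VM_FAULT_NOPAGE;
}
```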
@@ -3447,7 +3455,7 @@ void mm_drop_all_locks(struct mm_struct *mm) | |||
3447 | } | 3455 | } |
3448 | 3456 | ||
3449 | /* | 3457 | /* |
3450 | * initialise the VMA slab | 3458 | * initialise the percpu counter for VM |
3451 | */ | 3459 | */ |
3452 | void __init mmap_init(void) | 3460 | void __init mmap_init(void) |
3453 | { | 3461 | { |
diff --git a/mm/mprotect.c b/mm/mprotect.c index a45b4dc6a7f5..848e946b08e5 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c | |||
@@ -99,7 +99,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, | |||
99 | ptent = ptep_modify_prot_start(mm, addr, pte); | 99 | ptent = ptep_modify_prot_start(mm, addr, pte); |
100 | ptent = pte_modify(ptent, newprot); | 100 | ptent = pte_modify(ptent, newprot); |
101 | if (preserve_write) | 101 | if (preserve_write) |
102 | ptent = pte_mkwrite(ptent); | 102 | ptent = pte_mk_savedwrite(ptent); |
103 | 103 | ||
104 | /* Avoid taking write faults for known dirty pages */ | 104 | /* Avoid taking write faults for known dirty pages */ |
105 | if (dirty_accountable && pte_dirty(ptent) && | 105 | if (dirty_accountable && pte_dirty(ptent) && |
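pte_mk_savedwrite() pairs with the pte_savedwrite() read in do_numa_page() earlier in this diff: when NUMA balancing turns a pte PROT_NONE, the write permission is stashed in the pte rather than re-derived later. Architectures without a spare bit fall back to the plain write helpers; the generic fallbacks are assumed to reduce to:

```c
/*
 * Assumed generic fallbacks (an arch such as powerpc supplies real
 * implementations that stash the write bit in a protnone pte):
 */
#ifndef pte_mk_savedwrite
#define pte_mk_savedwrite       pte_mkwrite
#endif
#ifndef pte_savedwrite
#define pte_savedwrite          pte_write
#endif
```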
diff --git a/mm/mremap.c b/mm/mremap.c index 8779928d6a70..8233b0105c82 100644 --- a/mm/mremap.c +++ b/mm/mremap.c | |||
@@ -252,7 +252,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma, | |||
252 | static unsigned long move_vma(struct vm_area_struct *vma, | 252 | static unsigned long move_vma(struct vm_area_struct *vma, |
253 | unsigned long old_addr, unsigned long old_len, | 253 | unsigned long old_addr, unsigned long old_len, |
254 | unsigned long new_len, unsigned long new_addr, | 254 | unsigned long new_len, unsigned long new_addr, |
255 | bool *locked, struct vm_userfaultfd_ctx *uf) | 255 | bool *locked, struct vm_userfaultfd_ctx *uf, |
256 | struct list_head *uf_unmap) | ||
256 | { | 257 | { |
257 | struct mm_struct *mm = vma->vm_mm; | 258 | struct mm_struct *mm = vma->vm_mm; |
258 | struct vm_area_struct *new_vma; | 259 | struct vm_area_struct *new_vma; |
@@ -341,7 +342,7 @@ static unsigned long move_vma(struct vm_area_struct *vma, | |||
341 | if (unlikely(vma->vm_flags & VM_PFNMAP)) | 342 | if (unlikely(vma->vm_flags & VM_PFNMAP)) |
342 | untrack_pfn_moved(vma); | 343 | untrack_pfn_moved(vma); |
343 | 344 | ||
344 | if (do_munmap(mm, old_addr, old_len) < 0) { | 345 | if (do_munmap(mm, old_addr, old_len, uf_unmap) < 0) { |
345 | /* OOM: unable to split vma, just get accounts right */ | 346 | /* OOM: unable to split vma, just get accounts right */ |
346 | vm_unacct_memory(excess >> PAGE_SHIFT); | 347 | vm_unacct_memory(excess >> PAGE_SHIFT); |
347 | excess = 0; | 348 | excess = 0; |
@@ -417,7 +418,8 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr, | |||
417 | 418 | ||
418 | static unsigned long mremap_to(unsigned long addr, unsigned long old_len, | 419 | static unsigned long mremap_to(unsigned long addr, unsigned long old_len, |
419 | unsigned long new_addr, unsigned long new_len, bool *locked, | 420 | unsigned long new_addr, unsigned long new_len, bool *locked, |
420 | struct vm_userfaultfd_ctx *uf) | 421 | struct vm_userfaultfd_ctx *uf, |
422 | struct list_head *uf_unmap) | ||
421 | { | 423 | { |
422 | struct mm_struct *mm = current->mm; | 424 | struct mm_struct *mm = current->mm; |
423 | struct vm_area_struct *vma; | 425 | struct vm_area_struct *vma; |
@@ -435,12 +437,12 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len, | |||
435 | if (addr + old_len > new_addr && new_addr + new_len > addr) | 437 | if (addr + old_len > new_addr && new_addr + new_len > addr) |
436 | goto out; | 438 | goto out; |
437 | 439 | ||
438 | ret = do_munmap(mm, new_addr, new_len); | 440 | ret = do_munmap(mm, new_addr, new_len, NULL); |
439 | if (ret) | 441 | if (ret) |
440 | goto out; | 442 | goto out; |
441 | 443 | ||
442 | if (old_len >= new_len) { | 444 | if (old_len >= new_len) { |
443 | ret = do_munmap(mm, addr+new_len, old_len - new_len); | 445 | ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap); |
444 | if (ret && old_len != new_len) | 446 | if (ret && old_len != new_len) |
445 | goto out; | 447 | goto out; |
446 | old_len = new_len; | 448 | old_len = new_len; |
@@ -462,7 +464,8 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len, | |||
462 | if (offset_in_page(ret)) | 464 | if (offset_in_page(ret)) |
463 | goto out1; | 465 | goto out1; |
464 | 466 | ||
465 | ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, uf); | 467 | ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, uf, |
468 | uf_unmap); | ||
466 | if (!(offset_in_page(ret))) | 469 | if (!(offset_in_page(ret))) |
467 | goto out; | 470 | goto out; |
468 | out1: | 471 | out1: |
@@ -502,6 +505,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, | |||
502 | unsigned long charged = 0; | 505 | unsigned long charged = 0; |
503 | bool locked = false; | 506 | bool locked = false; |
504 | struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX; | 507 | struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX; |
508 | LIST_HEAD(uf_unmap); | ||
505 | 509 | ||
506 | if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE)) | 510 | if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE)) |
507 | return ret; | 511 | return ret; |
@@ -528,7 +532,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, | |||
528 | 532 | ||
529 | if (flags & MREMAP_FIXED) { | 533 | if (flags & MREMAP_FIXED) { |
530 | ret = mremap_to(addr, old_len, new_addr, new_len, | 534 | ret = mremap_to(addr, old_len, new_addr, new_len, |
531 | &locked, &uf); | 535 | &locked, &uf, &uf_unmap); |
532 | goto out; | 536 | goto out; |
533 | } | 537 | } |
534 | 538 | ||
@@ -538,7 +542,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, | |||
538 | * do_munmap does all the needed commit accounting | 542 | * do_munmap does all the needed commit accounting |
539 | */ | 543 | */ |
540 | if (old_len >= new_len) { | 544 | if (old_len >= new_len) { |
541 | ret = do_munmap(mm, addr+new_len, old_len - new_len); | 545 | ret = do_munmap(mm, addr+new_len, old_len - new_len, &uf_unmap); |
542 | if (ret && old_len != new_len) | 546 | if (ret && old_len != new_len) |
543 | goto out; | 547 | goto out; |
544 | ret = addr; | 548 | ret = addr; |
@@ -598,7 +602,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, | |||
598 | } | 602 | } |
599 | 603 | ||
600 | ret = move_vma(vma, addr, old_len, new_len, new_addr, | 604 | ret = move_vma(vma, addr, old_len, new_len, new_addr, |
601 | &locked, &uf); | 605 | &locked, &uf, &uf_unmap); |
602 | } | 606 | } |
603 | out: | 607 | out: |
604 | if (offset_in_page(ret)) { | 608 | if (offset_in_page(ret)) { |
@@ -609,5 +613,6 @@ out: | |||
609 | if (locked && new_len > old_len) | 613 | if (locked && new_len > old_len) |
610 | mm_populate(new_addr + old_len, new_len - old_len); | 614 | mm_populate(new_addr + old_len, new_len - old_len); |
611 | mremap_userfaultfd_complete(&uf, addr, new_addr, old_len); | 615 | mremap_userfaultfd_complete(&uf, addr, new_addr, old_len); |
616 | userfaultfd_unmap_complete(mm, &uf_unmap); | ||
612 | return ret; | 617 | return ret; |
613 | } | 618 | } |
diff --git a/mm/nommu.c b/mm/nommu.c index bc964c26be8c..fe9f4fa4a7a7 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
@@ -517,7 +517,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) | |||
517 | } | 517 | } |
518 | 518 | ||
519 | /* | 519 | /* |
520 | * initialise the VMA and region record slabs | 520 | * initialise the percpu counter for VM and region record slabs |
521 | */ | 521 | */ |
522 | void __init mmap_init(void) | 522 | void __init mmap_init(void) |
523 | { | 523 | { |
@@ -1205,7 +1205,8 @@ unsigned long do_mmap(struct file *file, | |||
1205 | unsigned long flags, | 1205 | unsigned long flags, |
1206 | vm_flags_t vm_flags, | 1206 | vm_flags_t vm_flags, |
1207 | unsigned long pgoff, | 1207 | unsigned long pgoff, |
1208 | unsigned long *populate) | 1208 | unsigned long *populate, |
1209 | struct list_head *uf) | ||
1209 | { | 1210 | { |
1210 | struct vm_area_struct *vma; | 1211 | struct vm_area_struct *vma; |
1211 | struct vm_region *region; | 1212 | struct vm_region *region; |
@@ -1577,7 +1578,7 @@ static int shrink_vma(struct mm_struct *mm, | |||
1577 | * - under NOMMU conditions the chunk to be unmapped must be backed by a single | 1578 | * - under NOMMU conditions the chunk to be unmapped must be backed by a single |
1578 | * VMA, though it need not cover the whole VMA | 1579 | * VMA, though it need not cover the whole VMA |
1579 | */ | 1580 | */ |
1580 | int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) | 1581 | int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf) |
1581 | { | 1582 | { |
1582 | struct vm_area_struct *vma; | 1583 | struct vm_area_struct *vma; |
1583 | unsigned long end; | 1584 | unsigned long end; |
@@ -1643,7 +1644,7 @@ int vm_munmap(unsigned long addr, size_t len) | |||
1643 | int ret; | 1644 | int ret; |
1644 | 1645 | ||
1645 | down_write(&mm->mmap_sem); | 1646 | down_write(&mm->mmap_sem); |
1646 | ret = do_munmap(mm, addr, len); | 1647 | ret = do_munmap(mm, addr, len, NULL); |
1647 | up_write(&mm->mmap_sem); | 1648 | up_write(&mm->mmap_sem); |
1648 | return ret; | 1649 | return ret; |
1649 | } | 1650 | } |
@@ -1794,7 +1795,7 @@ void unmap_mapping_range(struct address_space *mapping, | |||
1794 | } | 1795 | } |
1795 | EXPORT_SYMBOL(unmap_mapping_range); | 1796 | EXPORT_SYMBOL(unmap_mapping_range); |
1796 | 1797 | ||
1797 | int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 1798 | int filemap_fault(struct vm_fault *vmf) |
1798 | { | 1799 | { |
1799 | BUG(); | 1800 | BUG(); |
1800 | return 0; | 1801 | return 0; |
diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 8256788ac119..578321f1c070 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c | |||
@@ -403,12 +403,14 @@ static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask) | |||
403 | 403 | ||
404 | static void dump_header(struct oom_control *oc, struct task_struct *p) | 404 | static void dump_header(struct oom_control *oc, struct task_struct *p) |
405 | { | 405 | { |
406 | nodemask_t *nm = (oc->nodemask) ? oc->nodemask : &cpuset_current_mems_allowed; | 406 | pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), nodemask=", |
407 | 407 | current->comm, oc->gfp_mask, &oc->gfp_mask); | |
408 | pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), nodemask=%*pbl, order=%d, oom_score_adj=%hd\n", | 408 | if (oc->nodemask) |
409 | current->comm, oc->gfp_mask, &oc->gfp_mask, | 409 | pr_cont("%*pbl", nodemask_pr_args(oc->nodemask)); |
410 | nodemask_pr_args(nm), oc->order, | 410 | else |
411 | current->signal->oom_score_adj); | 411 | pr_cont("(null)"); |
412 | pr_cont(", order=%d, oom_score_adj=%hd\n", | ||
413 | oc->order, current->signal->oom_score_adj); | ||
412 | if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order) | 414 | if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order) |
413 | pr_warn("COMPACTION is disabled!!!\n"); | 415 | pr_warn("COMPACTION is disabled!!!\n"); |
414 | 416 | ||
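Splitting the report across pr_warn()/pr_cont() lets a NULL nodemask print literally instead of being silently substituted with cpuset_current_mems_allowed, which misrepresented what the failed allocation actually requested. A hypothetical resulting line, with every value invented for illustration:

```c
/*
 * Hypothetical output assembled from the three calls above when the
 * nodemask is NULL (all values below are invented):
 *
 *   bash invoked oom-killer: gfp_mask=0x24200ca(GFP_HIGHUSER_MOVABLE),
 *   nodemask=(null), order=0, oom_score_adj=0
 */
```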
@@ -417,7 +419,7 @@ static void dump_header(struct oom_control *oc, struct task_struct *p) | |||
417 | if (oc->memcg) | 419 | if (oc->memcg) |
418 | mem_cgroup_print_oom_info(oc->memcg, p); | 420 | mem_cgroup_print_oom_info(oc->memcg, p); |
419 | else | 421 | else |
420 | show_mem(SHOW_MEM_FILTER_NODES, nm); | 422 | show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask); |
421 | if (sysctl_oom_dump_tasks) | 423 | if (sysctl_oom_dump_tasks) |
422 | dump_tasks(oc->memcg, oc->nodemask); | 424 | dump_tasks(oc->memcg, oc->nodemask); |
423 | } | 425 | } |
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 216449825859..ae6e601f0a58 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
@@ -580,7 +580,7 @@ static void wb_domain_writeout_inc(struct wb_domain *dom, | |||
580 | __fprop_inc_percpu_max(&dom->completions, completions, | 580 | __fprop_inc_percpu_max(&dom->completions, completions, |
581 | max_prop_frac); | 581 | max_prop_frac); |
582 | /* First event after period switching was turned off? */ | 582 | /* First event after period switching was turned off? */ |
583 | if (!unlikely(dom->period_time)) { | 583 | if (unlikely(!dom->period_time)) { |
584 | /* | 584 | /* |
585 | * We can race with other __bdi_writeout_inc calls here but | 585 | * We can race with other __bdi_writeout_inc calls here but |
586 | * it does not cause any harm since the resulting time when | 586 | * it does not cause any harm since the resulting time when |
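The one-character-looking fix above matters for the branch hint: unlikely() returns its argument's truth value while annotating it as probably false, so negating outside the macro inverts the prediction. Assuming the usual kernel definition:

```c
#define unlikely(x)     __builtin_expect(!!(x), 0)      /* usual definition */

/*
 * Buggy: the hint says dom->period_time is probably zero, and the
 * outer negation then marks the period-off branch as the LIKELY one,
 * inverting the intent.
 */
if (!unlikely(dom->period_time)) { /* ... */ }

/* Fixed: the rare condition itself carries the hint. */
if (unlikely(!dom->period_time)) { /* ... */ }
```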
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index c21b33668133..9f9623d690d6 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -59,7 +59,6 @@ | |||
59 | #include <linux/prefetch.h> | 59 | #include <linux/prefetch.h> |
60 | #include <linux/mm_inline.h> | 60 | #include <linux/mm_inline.h> |
61 | #include <linux/migrate.h> | 61 | #include <linux/migrate.h> |
62 | #include <linux/page_ext.h> | ||
63 | #include <linux/hugetlb.h> | 62 | #include <linux/hugetlb.h> |
64 | #include <linux/sched/rt.h> | 63 | #include <linux/sched/rt.h> |
65 | #include <linux/page_owner.h> | 64 | #include <linux/page_owner.h> |
@@ -92,6 +91,10 @@ EXPORT_PER_CPU_SYMBOL(_numa_mem_); | |||
92 | int _node_numa_mem_[MAX_NUMNODES]; | 91 | int _node_numa_mem_[MAX_NUMNODES]; |
93 | #endif | 92 | #endif |
94 | 93 | ||
94 | /* work_structs for global per-cpu drains */ | ||
95 | DEFINE_MUTEX(pcpu_drain_mutex); | ||
96 | DEFINE_PER_CPU(struct work_struct, pcpu_drain); | ||
97 | |||
95 | #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY | 98 | #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY |
96 | volatile unsigned long latent_entropy __latent_entropy; | 99 | volatile unsigned long latent_entropy __latent_entropy; |
97 | EXPORT_SYMBOL(latent_entropy); | 100 | EXPORT_SYMBOL(latent_entropy); |
@@ -1085,10 +1088,10 @@ static void free_pcppages_bulk(struct zone *zone, int count, | |||
1085 | { | 1088 | { |
1086 | int migratetype = 0; | 1089 | int migratetype = 0; |
1087 | int batch_free = 0; | 1090 | int batch_free = 0; |
1088 | unsigned long nr_scanned; | 1091 | unsigned long nr_scanned, flags; |
1089 | bool isolated_pageblocks; | 1092 | bool isolated_pageblocks; |
1090 | 1093 | ||
1091 | spin_lock(&zone->lock); | 1094 | spin_lock_irqsave(&zone->lock, flags); |
1092 | isolated_pageblocks = has_isolate_pageblock(zone); | 1095 | isolated_pageblocks = has_isolate_pageblock(zone); |
1093 | nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED); | 1096 | nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED); |
1094 | if (nr_scanned) | 1097 | if (nr_scanned) |
@@ -1137,7 +1140,7 @@ static void free_pcppages_bulk(struct zone *zone, int count, | |||
1137 | trace_mm_page_pcpu_drain(page, 0, mt); | 1140 | trace_mm_page_pcpu_drain(page, 0, mt); |
1138 | } while (--count && --batch_free && !list_empty(list)); | 1141 | } while (--count && --batch_free && !list_empty(list)); |
1139 | } | 1142 | } |
1140 | spin_unlock(&zone->lock); | 1143 | spin_unlock_irqrestore(&zone->lock, flags); |
1141 | } | 1144 | } |
1142 | 1145 | ||
1143 | static void free_one_page(struct zone *zone, | 1146 | static void free_one_page(struct zone *zone, |
@@ -1145,8 +1148,9 @@ static void free_one_page(struct zone *zone, | |||
1145 | unsigned int order, | 1148 | unsigned int order, |
1146 | int migratetype) | 1149 | int migratetype) |
1147 | { | 1150 | { |
1148 | unsigned long nr_scanned; | 1151 | unsigned long nr_scanned, flags; |
1149 | spin_lock(&zone->lock); | 1152 | spin_lock_irqsave(&zone->lock, flags); |
1153 | __count_vm_events(PGFREE, 1 << order); | ||
1150 | nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED); | 1154 | nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED); |
1151 | if (nr_scanned) | 1155 | if (nr_scanned) |
1152 | __mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned); | 1156 | __mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned); |
@@ -1156,7 +1160,7 @@ static void free_one_page(struct zone *zone, | |||
1156 | migratetype = get_pfnblock_migratetype(page, pfn); | 1160 | migratetype = get_pfnblock_migratetype(page, pfn); |
1157 | } | 1161 | } |
1158 | __free_one_page(page, pfn, zone, order, migratetype); | 1162 | __free_one_page(page, pfn, zone, order, migratetype); |
1159 | spin_unlock(&zone->lock); | 1163 | spin_unlock_irqrestore(&zone->lock, flags); |
1160 | } | 1164 | } |
1161 | 1165 | ||
1162 | static void __meminit __init_single_page(struct page *page, unsigned long pfn, | 1166 | static void __meminit __init_single_page(struct page *page, unsigned long pfn, |
@@ -1234,7 +1238,6 @@ void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end) | |||
1234 | 1238 | ||
1235 | static void __free_pages_ok(struct page *page, unsigned int order) | 1239 | static void __free_pages_ok(struct page *page, unsigned int order) |
1236 | { | 1240 | { |
1237 | unsigned long flags; | ||
1238 | int migratetype; | 1241 | int migratetype; |
1239 | unsigned long pfn = page_to_pfn(page); | 1242 | unsigned long pfn = page_to_pfn(page); |
1240 | 1243 | ||
@@ -1242,10 +1245,7 @@ static void __free_pages_ok(struct page *page, unsigned int order) | |||
1242 | return; | 1245 | return; |
1243 | 1246 | ||
1244 | migratetype = get_pfnblock_migratetype(page, pfn); | 1247 | migratetype = get_pfnblock_migratetype(page, pfn); |
1245 | local_irq_save(flags); | ||
1246 | __count_vm_events(PGFREE, 1 << order); | ||
1247 | free_one_page(page_zone(page), page, pfn, order, migratetype); | 1248 | free_one_page(page_zone(page), page, pfn, order, migratetype); |
1248 | local_irq_restore(flags); | ||
1249 | } | 1249 | } |
1250 | 1250 | ||
1251 | static void __init __free_pages_boot_core(struct page *page, unsigned int order) | 1251 | static void __init __free_pages_boot_core(struct page *page, unsigned int order) |
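The lock/IRQ discipline around the buddy lists inverts here: callers such as __free_pages_ok() stop disabling interrupts around free_one_page(), so zone->lock itself must now be taken with spin_lock_irqsave() (likewise in free_pcppages_bulk() and rmqueue_bulk() in this diff). The invariant, stated as a sketch:

```c
/*
 * New invariant (sketch): zone->lock may be taken from contexts that
 * have not disabled interrupts, so every acquisition saves and
 * restores the irq state itself.
 */
unsigned long flags;

spin_lock_irqsave(&zone->lock, flags);
/* ... buddy free-list manipulation ... */
spin_unlock_irqrestore(&zone->lock, flags);
```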
@@ -2217,8 +2217,9 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, | |||
2217 | int migratetype, bool cold) | 2217 | int migratetype, bool cold) |
2218 | { | 2218 | { |
2219 | int i, alloced = 0; | 2219 | int i, alloced = 0; |
2220 | unsigned long flags; | ||
2220 | 2221 | ||
2221 | spin_lock(&zone->lock); | 2222 | spin_lock_irqsave(&zone->lock, flags); |
2222 | for (i = 0; i < count; ++i) { | 2223 | for (i = 0; i < count; ++i) { |
2223 | struct page *page = __rmqueue(zone, order, migratetype); | 2224 | struct page *page = __rmqueue(zone, order, migratetype); |
2224 | if (unlikely(page == NULL)) | 2225 | if (unlikely(page == NULL)) |
@@ -2254,7 +2255,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, | |||
2254 | * pages added to the pcp list. | 2255 | * pages added to the pcp list. |
2255 | */ | 2256 | */ |
2256 | __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); | 2257 | __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); |
2257 | spin_unlock(&zone->lock); | 2258 | spin_unlock_irqrestore(&zone->lock, flags); |
2258 | return alloced; | 2259 | return alloced; |
2259 | } | 2260 | } |
2260 | 2261 | ||
@@ -2339,16 +2340,26 @@ void drain_local_pages(struct zone *zone) | |||
2339 | drain_pages(cpu); | 2340 | drain_pages(cpu); |
2340 | } | 2341 | } |
2341 | 2342 | ||
2343 | static void drain_local_pages_wq(struct work_struct *work) | ||
2344 | { | ||
2345 | /* | ||
2346 | * drain_all_pages doesn't use proper cpu hotplug protection so | ||
2347 | * we can race with cpu offline when the WQ can move this from | ||
2348 | * a cpu pinned worker to an unbound one. We can operate on a different | ||
2349 | * cpu which is allright but we also have to make sure to not move to | ||
2350 | * a different one. | ||
2351 | */ | ||
2352 | preempt_disable(); | ||
2353 | drain_local_pages(NULL); | ||
2354 | preempt_enable(); | ||
2355 | } | ||
2356 | |||
2342 | /* | 2357 | /* |
2343 | * Spill all the per-cpu pages from all CPUs back into the buddy allocator. | 2358 | * Spill all the per-cpu pages from all CPUs back into the buddy allocator. |
2344 | * | 2359 | * |
2345 | * When zone parameter is non-NULL, spill just the single zone's pages. | 2360 | * When zone parameter is non-NULL, spill just the single zone's pages. |
2346 | * | 2361 | * |
2347 | * Note that this code is protected against sending an IPI to an offline | 2362 | * Note that this can be extremely slow as the draining happens in a workqueue. |
2348 | * CPU but does not guarantee sending an IPI to newly hotplugged CPUs: | ||
2349 | * on_each_cpu_mask() blocks hotplug and won't talk to offlined CPUs but | ||
2350 | * nothing keeps CPUs from showing up after we populated the cpumask and | ||
2351 | * before the call to on_each_cpu_mask(). | ||
2352 | */ | 2363 | */ |
2353 | void drain_all_pages(struct zone *zone) | 2364 | void drain_all_pages(struct zone *zone) |
2354 | { | 2365 | { |
@@ -2360,6 +2371,21 @@ void drain_all_pages(struct zone *zone) | |||
2360 | */ | 2371 | */ |
2361 | static cpumask_t cpus_with_pcps; | 2372 | static cpumask_t cpus_with_pcps; |
2362 | 2373 | ||
2374 | /* Workqueues cannot recurse */ | ||
2375 | if (current->flags & PF_WQ_WORKER) | ||
2376 | return; | ||
2377 | |||
2378 | /* | ||
2379 | * Do not drain if one is already in progress unless it's specific to | ||
2380 | * a zone. Such callers are primarily CMA and memory hotplug and need | ||
2381 | * the drain to be complete when the call returns. | ||
2382 | */ | ||
2383 | if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) { | ||
2384 | if (!zone) | ||
2385 | return; | ||
2386 | mutex_lock(&pcpu_drain_mutex); | ||
2387 | } | ||
2388 | |||
2363 | /* | 2389 | /* |
2364 | * We don't care about racing with CPU hotplug event | 2390 | * We don't care about racing with CPU hotplug event |
2365 | * as offline notification will cause the notified | 2391 | * as offline notification will cause the notified |
@@ -2390,8 +2416,16 @@ void drain_all_pages(struct zone *zone) | |||
2390 | else | 2416 | else |
2391 | cpumask_clear_cpu(cpu, &cpus_with_pcps); | 2417 | cpumask_clear_cpu(cpu, &cpus_with_pcps); |
2392 | } | 2418 | } |
2393 | on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages, | 2419 | |
2394 | zone, 1); | 2420 | for_each_cpu(cpu, &cpus_with_pcps) { |
2421 | struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu); | ||
2422 | INIT_WORK(work, drain_local_pages_wq); | ||
2423 | schedule_work_on(cpu, work); | ||
2424 | } | ||
2425 | for_each_cpu(cpu, &cpus_with_pcps) | ||
2426 | flush_work(per_cpu_ptr(&pcpu_drain, cpu)); | ||
2427 | |||
2428 | mutex_unlock(&pcpu_drain_mutex); | ||
2395 | } | 2429 | } |
2396 | 2430 | ||
2397 | #ifdef CONFIG_HIBERNATION | 2431 | #ifdef CONFIG_HIBERNATION |
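drain_all_pages() stops broadcasting IPIs through on_each_cpu_mask() and instead queues one CPU-bound work item per CPU that holds pcp pages, then flushes them all, with pcpu_drain_mutex serializing concurrent global drains. The queue-then-flush core, condensed:

```c
/*
 * Condensed core of the workqueue-based drain above: schedule one
 * pinned work item per target CPU, then wait for each to finish.
 */
int cpu;

for_each_cpu(cpu, &cpus_with_pcps) {
        struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);

        INIT_WORK(work, drain_local_pages_wq);
        schedule_work_on(cpu, work);    /* runs on that CPU's kworker */
}
for_each_cpu(cpu, &cpus_with_pcps)
        flush_work(per_cpu_ptr(&pcpu_drain, cpu));      /* wait for all */
```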
@@ -2442,17 +2476,20 @@ void free_hot_cold_page(struct page *page, bool cold) | |||
2442 | { | 2476 | { |
2443 | struct zone *zone = page_zone(page); | 2477 | struct zone *zone = page_zone(page); |
2444 | struct per_cpu_pages *pcp; | 2478 | struct per_cpu_pages *pcp; |
2445 | unsigned long flags; | ||
2446 | unsigned long pfn = page_to_pfn(page); | 2479 | unsigned long pfn = page_to_pfn(page); |
2447 | int migratetype; | 2480 | int migratetype; |
2448 | 2481 | ||
2482 | if (in_interrupt()) { | ||
2483 | __free_pages_ok(page, 0); | ||
2484 | return; | ||
2485 | } | ||
2486 | |||
2449 | if (!free_pcp_prepare(page)) | 2487 | if (!free_pcp_prepare(page)) |
2450 | return; | 2488 | return; |
2451 | 2489 | ||
2452 | migratetype = get_pfnblock_migratetype(page, pfn); | 2490 | migratetype = get_pfnblock_migratetype(page, pfn); |
2453 | set_pcppage_migratetype(page, migratetype); | 2491 | set_pcppage_migratetype(page, migratetype); |
2454 | local_irq_save(flags); | 2492 | preempt_disable(); |
2455 | __count_vm_event(PGFREE); | ||
2456 | 2493 | ||
2457 | /* | 2494 | /* |
2458 | * We only track unmovable, reclaimable and movable on pcp lists. | 2495 | * We only track unmovable, reclaimable and movable on pcp lists. |
@@ -2469,6 +2506,7 @@ void free_hot_cold_page(struct page *page, bool cold) | |||
2469 | migratetype = MIGRATE_MOVABLE; | 2506 | migratetype = MIGRATE_MOVABLE; |
2470 | } | 2507 | } |
2471 | 2508 | ||
2509 | __count_vm_event(PGFREE); | ||
2472 | pcp = &this_cpu_ptr(zone->pageset)->pcp; | 2510 | pcp = &this_cpu_ptr(zone->pageset)->pcp; |
2473 | if (!cold) | 2511 | if (!cold) |
2474 | list_add(&page->lru, &pcp->lists[migratetype]); | 2512 | list_add(&page->lru, &pcp->lists[migratetype]); |
@@ -2482,7 +2520,7 @@ void free_hot_cold_page(struct page *page, bool cold) | |||
2482 | } | 2520 | } |
2483 | 2521 | ||
2484 | out: | 2522 | out: |
2485 | local_irq_restore(flags); | 2523 | preempt_enable(); |
2486 | } | 2524 | } |
2487 | 2525 | ||
2488 | /* | 2526 | /* |
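With draining moved out of IRQ context, the pcp free path only needs to stay on one CPU; hard-IRQ context, which can no longer use the preempt-only pcp lists safely, is diverted to the buddy freeing path. A minimal sketch of the new rule, with the list manipulation elided:

    void free_hot_cold_page(struct page *page, bool cold)
    {
            if (in_interrupt()) {
                    /* pcp lists are preempt-protected only; bypass them */
                    __free_pages_ok(page, 0);
                    return;
            }

            preempt_disable();      /* pins us to this CPU's zone->pageset */
            /* ... put page on this_cpu_ptr(zone->pageset)->pcp lists ... */
            preempt_enable();
    }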
@@ -2600,74 +2638,105 @@ static inline void zone_statistics(struct zone *preferred_zone, struct zone *z) | |||
2600 | #endif | 2638 | #endif |
2601 | } | 2639 | } |
2602 | 2640 | ||
2641 | /* Remove page from the per-cpu list, caller must protect the list */ | ||
2642 | static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype, | ||
2643 | bool cold, struct per_cpu_pages *pcp, | ||
2644 | struct list_head *list) | ||
2645 | { | ||
2646 | struct page *page; | ||
2647 | |||
2648 | VM_BUG_ON(in_interrupt()); | ||
2649 | |||
2650 | do { | ||
2651 | if (list_empty(list)) { | ||
2652 | pcp->count += rmqueue_bulk(zone, 0, | ||
2653 | pcp->batch, list, | ||
2654 | migratetype, cold); | ||
2655 | if (unlikely(list_empty(list))) | ||
2656 | return NULL; | ||
2657 | } | ||
2658 | |||
2659 | if (cold) | ||
2660 | page = list_last_entry(list, struct page, lru); | ||
2661 | else | ||
2662 | page = list_first_entry(list, struct page, lru); | ||
2663 | |||
2664 | list_del(&page->lru); | ||
2665 | pcp->count--; | ||
2666 | } while (check_new_pcp(page)); | ||
2667 | |||
2668 | return page; | ||
2669 | } | ||
2670 | |||
2671 | /* Lock and remove page from the per-cpu list */ | ||
2672 | static struct page *rmqueue_pcplist(struct zone *preferred_zone, | ||
2673 | struct zone *zone, unsigned int order, | ||
2674 | gfp_t gfp_flags, int migratetype) | ||
2675 | { | ||
2676 | struct per_cpu_pages *pcp; | ||
2677 | struct list_head *list; | ||
2678 | bool cold = ((gfp_flags & __GFP_COLD) != 0); | ||
2679 | struct page *page; | ||
2680 | |||
2681 | preempt_disable(); | ||
2682 | pcp = &this_cpu_ptr(zone->pageset)->pcp; | ||
2683 | list = &pcp->lists[migratetype]; | ||
2684 | page = __rmqueue_pcplist(zone, migratetype, cold, pcp, list); | ||
2685 | if (page) { | ||
2686 | __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); | ||
2687 | zone_statistics(preferred_zone, zone); | ||
2688 | } | ||
2689 | preempt_enable(); | ||
2690 | return page; | ||
2691 | } | ||
2692 | |||
2603 | /* | 2693 | /* |
2604 | * Allocate a page from the given zone. Use pcplists for order-0 allocations. | 2694 | * Allocate a page from the given zone. Use pcplists for order-0 allocations. |
2605 | */ | 2695 | */ |
2606 | static inline | 2696 | static inline |
2607 | struct page *buffered_rmqueue(struct zone *preferred_zone, | 2697 | struct page *rmqueue(struct zone *preferred_zone, |
2608 | struct zone *zone, unsigned int order, | 2698 | struct zone *zone, unsigned int order, |
2609 | gfp_t gfp_flags, unsigned int alloc_flags, | 2699 | gfp_t gfp_flags, unsigned int alloc_flags, |
2610 | int migratetype) | 2700 | int migratetype) |
2611 | { | 2701 | { |
2612 | unsigned long flags; | 2702 | unsigned long flags; |
2613 | struct page *page; | 2703 | struct page *page; |
2614 | bool cold = ((gfp_flags & __GFP_COLD) != 0); | ||
2615 | 2704 | ||
2616 | if (likely(order == 0)) { | 2705 | if (likely(order == 0) && !in_interrupt()) { |
2617 | struct per_cpu_pages *pcp; | 2706 | page = rmqueue_pcplist(preferred_zone, zone, order, |
2618 | struct list_head *list; | 2707 | gfp_flags, migratetype); |
2619 | 2708 | goto out; | |
2620 | local_irq_save(flags); | 2709 | } |
2621 | do { | ||
2622 | pcp = &this_cpu_ptr(zone->pageset)->pcp; | ||
2623 | list = &pcp->lists[migratetype]; | ||
2624 | if (list_empty(list)) { | ||
2625 | pcp->count += rmqueue_bulk(zone, 0, | ||
2626 | pcp->batch, list, | ||
2627 | migratetype, cold); | ||
2628 | if (unlikely(list_empty(list))) | ||
2629 | goto failed; | ||
2630 | } | ||
2631 | |||
2632 | if (cold) | ||
2633 | page = list_last_entry(list, struct page, lru); | ||
2634 | else | ||
2635 | page = list_first_entry(list, struct page, lru); | ||
2636 | |||
2637 | list_del(&page->lru); | ||
2638 | pcp->count--; | ||
2639 | 2710 | ||
2640 | } while (check_new_pcp(page)); | 2711 | /* |
2641 | } else { | 2712 | * We most definitely don't want callers attempting to |
2642 | /* | 2713 | * allocate greater than order-1 page units with __GFP_NOFAIL. |
2643 | * We most definitely don't want callers attempting to | 2714 | */ |
2644 | * allocate greater than order-1 page units with __GFP_NOFAIL. | 2715 | WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1)); |
2645 | */ | 2716 | spin_lock_irqsave(&zone->lock, flags); |
2646 | WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1)); | ||
2647 | spin_lock_irqsave(&zone->lock, flags); | ||
2648 | 2717 | ||
2649 | do { | 2718 | do { |
2650 | page = NULL; | 2719 | page = NULL; |
2651 | if (alloc_flags & ALLOC_HARDER) { | 2720 | if (alloc_flags & ALLOC_HARDER) { |
2652 | page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); | 2721 | page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); |
2653 | if (page) | 2722 | if (page) |
2654 | trace_mm_page_alloc_zone_locked(page, order, migratetype); | 2723 | trace_mm_page_alloc_zone_locked(page, order, migratetype); |
2655 | } | 2724 | } |
2656 | if (!page) | ||
2657 | page = __rmqueue(zone, order, migratetype); | ||
2658 | } while (page && check_new_pages(page, order)); | ||
2659 | spin_unlock(&zone->lock); | ||
2660 | if (!page) | 2725 | if (!page) |
2661 | goto failed; | 2726 | page = __rmqueue(zone, order, migratetype); |
2662 | __mod_zone_freepage_state(zone, -(1 << order), | 2727 | } while (page && check_new_pages(page, order)); |
2663 | get_pcppage_migratetype(page)); | 2728 | spin_unlock(&zone->lock); |
2664 | } | 2729 | if (!page) |
2730 | goto failed; | ||
2731 | __mod_zone_freepage_state(zone, -(1 << order), | ||
2732 | get_pcppage_migratetype(page)); | ||
2665 | 2733 | ||
2666 | __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); | 2734 | __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); |
2667 | zone_statistics(preferred_zone, zone); | 2735 | zone_statistics(preferred_zone, zone); |
2668 | local_irq_restore(flags); | 2736 | local_irq_restore(flags); |
2669 | 2737 | ||
2670 | VM_BUG_ON_PAGE(bad_range(zone, page), page); | 2738 | out: |
2739 | VM_BUG_ON_PAGE(page && bad_range(zone, page), page); | ||
2671 | return page; | 2740 | return page; |
2672 | 2741 | ||
2673 | failed: | 2742 | failed: |
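The allocation side mirrors the free side: rmqueue() dispatches order-0, non-IRQ requests to the new preempt-protected pcplist helpers and keeps the IRQ-disabled zone->lock path for everything else. Condensed from the hunk above, with the slow path elided to comments:

    static inline struct page *rmqueue(struct zone *preferred_zone,
                    struct zone *zone, unsigned int order, gfp_t gfp_flags,
                    unsigned int alloc_flags, int migratetype)
    {
            if (likely(order == 0) && !in_interrupt())
                    /* preempt_disable() only; __rmqueue_pcplist() asserts
                     * !in_interrupt() with VM_BUG_ON */
                    return rmqueue_pcplist(preferred_zone, zone, order,
                                           gfp_flags, migratetype);

            /* higher orders and IRQ context: zone->lock with IRQs off */
            /* ... spin_lock_irqsave(&zone->lock, flags); __rmqueue(); ... */
            return NULL;    /* placeholder for the elided slow path */
    }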
@@ -2875,7 +2944,7 @@ bool zone_watermark_ok_safe(struct zone *z, unsigned int order, | |||
2875 | #ifdef CONFIG_NUMA | 2944 | #ifdef CONFIG_NUMA |
2876 | static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) | 2945 | static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) |
2877 | { | 2946 | { |
2878 | return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) < | 2947 | return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= |
2879 | RECLAIM_DISTANCE; | 2948 | RECLAIM_DISTANCE; |
2880 | } | 2949 | } |
2881 | #else /* CONFIG_NUMA */ | 2950 | #else /* CONFIG_NUMA */ |
@@ -2972,7 +3041,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, | |||
2972 | } | 3041 | } |
2973 | 3042 | ||
2974 | try_this_zone: | 3043 | try_this_zone: |
2975 | page = buffered_rmqueue(ac->preferred_zoneref->zone, zone, order, | 3044 | page = rmqueue(ac->preferred_zoneref->zone, zone, order, |
2976 | gfp_mask, alloc_flags, ac->migratetype); | 3045 | gfp_mask, alloc_flags, ac->migratetype); |
2977 | if (page) { | 3046 | if (page) { |
2978 | prep_new_page(page, order, gfp_mask, alloc_flags); | 3047 | prep_new_page(page, order, gfp_mask, alloc_flags); |
@@ -3825,76 +3894,76 @@ got_pg: | |||
3825 | return page; | 3894 | return page; |
3826 | } | 3895 | } |
3827 | 3896 | ||
3828 | /* | 3897 | static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, |
3829 | * This is the 'heart' of the zoned buddy allocator. | 3898 | struct zonelist *zonelist, nodemask_t *nodemask, |
3830 | */ | 3899 | struct alloc_context *ac, gfp_t *alloc_mask, |
3831 | struct page * | 3900 | unsigned int *alloc_flags) |
3832 | __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, | ||
3833 | struct zonelist *zonelist, nodemask_t *nodemask) | ||
3834 | { | 3901 | { |
3835 | struct page *page; | 3902 | ac->high_zoneidx = gfp_zone(gfp_mask); |
3836 | unsigned int alloc_flags = ALLOC_WMARK_LOW; | 3903 | ac->zonelist = zonelist; |
3837 | gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */ | 3904 | ac->nodemask = nodemask; |
3838 | struct alloc_context ac = { | 3905 | ac->migratetype = gfpflags_to_migratetype(gfp_mask); |
3839 | .high_zoneidx = gfp_zone(gfp_mask), | ||
3840 | .zonelist = zonelist, | ||
3841 | .nodemask = nodemask, | ||
3842 | .migratetype = gfpflags_to_migratetype(gfp_mask), | ||
3843 | }; | ||
3844 | 3906 | ||
3845 | if (cpusets_enabled()) { | 3907 | if (cpusets_enabled()) { |
3846 | alloc_mask |= __GFP_HARDWALL; | 3908 | *alloc_mask |= __GFP_HARDWALL; |
3847 | alloc_flags |= ALLOC_CPUSET; | 3909 | if (!ac->nodemask) |
3848 | if (!ac.nodemask) | 3910 | ac->nodemask = &cpuset_current_mems_allowed; |
3849 | ac.nodemask = &cpuset_current_mems_allowed; | 3911 | else |
3912 | *alloc_flags |= ALLOC_CPUSET; | ||
3850 | } | 3913 | } |
3851 | 3914 | ||
3852 | gfp_mask &= gfp_allowed_mask; | ||
3853 | |||
3854 | lockdep_trace_alloc(gfp_mask); | 3915 | lockdep_trace_alloc(gfp_mask); |
3855 | 3916 | ||
3856 | might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM); | 3917 | might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM); |
3857 | 3918 | ||
3858 | if (should_fail_alloc_page(gfp_mask, order)) | 3919 | if (should_fail_alloc_page(gfp_mask, order)) |
3859 | return NULL; | 3920 | return false; |
3860 | 3921 | ||
3861 | /* | 3922 | if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE) |
3862 | * Check the zones suitable for the gfp_mask contain at least one | 3923 | *alloc_flags |= ALLOC_CMA; |
3863 | * valid zone. It's possible to have an empty zonelist as a result | ||
3864 | * of __GFP_THISNODE and a memoryless node | ||
3865 | */ | ||
3866 | if (unlikely(!zonelist->_zonerefs->zone)) | ||
3867 | return NULL; | ||
3868 | 3924 | ||
3869 | if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE) | 3925 | return true; |
3870 | alloc_flags |= ALLOC_CMA; | 3926 | } |
3871 | 3927 | ||
3928 | /* Determine whether to spread dirty pages and what the first usable zone is */ | ||
3929 | static inline void finalise_ac(gfp_t gfp_mask, | ||
3930 | unsigned int order, struct alloc_context *ac) | ||
3931 | { | ||
3872 | /* Dirty zone balancing only done in the fast path */ | 3932 | /* Dirty zone balancing only done in the fast path */ |
3873 | ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE); | 3933 | ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE); |
3874 | 3934 | ||
3875 | /* | 3935 | /* |
3876 | * The preferred zone is used for statistics but crucially it is | 3936 | * The preferred zone is used for statistics but crucially it is |
3877 | * also used as the starting point for the zonelist iterator. It | 3937 | * also used as the starting point for the zonelist iterator. It |
3878 | * may get reset for allocations that ignore memory policies. | 3938 | * may get reset for allocations that ignore memory policies. |
3879 | */ | 3939 | */ |
3880 | ac.preferred_zoneref = first_zones_zonelist(ac.zonelist, | 3940 | ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, |
3881 | ac.high_zoneidx, ac.nodemask); | 3941 | ac->high_zoneidx, ac->nodemask); |
3882 | if (!ac.preferred_zoneref->zone) { | 3942 | } |
3883 | page = NULL; | 3943 | |
3884 | /* | 3944 | /* |
3885 | * This might be due to race with cpuset_current_mems_allowed | 3945 | * This is the 'heart' of the zoned buddy allocator. |
3886 | * update, so make sure we retry with original nodemask in the | 3946 | */ |
3887 | * slow path. | 3947 | struct page * |
3888 | */ | 3948 | __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, |
3889 | goto no_zone; | 3949 | struct zonelist *zonelist, nodemask_t *nodemask) |
3890 | } | 3950 | { |
3951 | struct page *page; | ||
3952 | unsigned int alloc_flags = ALLOC_WMARK_LOW; | ||
3953 | gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */ | ||
3954 | struct alloc_context ac = { }; | ||
3955 | |||
3956 | gfp_mask &= gfp_allowed_mask; | ||
3957 | if (!prepare_alloc_pages(gfp_mask, order, zonelist, nodemask, &ac, &alloc_mask, &alloc_flags)) | ||
3958 | return NULL; | ||
3959 | |||
3960 | finalise_ac(gfp_mask, order, &ac); | ||
3891 | 3961 | ||
3892 | /* First allocation attempt */ | 3962 | /* First allocation attempt */ |
3893 | page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac); | 3963 | page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac); |
3894 | if (likely(page)) | 3964 | if (likely(page)) |
3895 | goto out; | 3965 | goto out; |
3896 | 3966 | ||
3897 | no_zone: | ||
3898 | /* | 3967 | /* |
3899 | * Runtime PM, block IO and its error handling path can deadlock | 3968 | * Runtime PM, block IO and its error handling path can deadlock |
3900 | * because I/O on the device might not complete. | 3969 | * because I/O on the device might not complete. |
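Reassembled, the refactored entry point separates argument preparation from iterator setup; note the old explicit empty-zonelist check is gone, and a NULL preferred_zoneref is now left for the slow path to retry with the original nodemask. A condensed view with the slow path and statistics elided:

    struct page *
    __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
                           struct zonelist *zonelist, nodemask_t *nodemask)
    {
            struct page *page;
            unsigned int alloc_flags = ALLOC_WMARK_LOW;
            gfp_t alloc_mask = gfp_mask;
            struct alloc_context ac = { };

            gfp_mask &= gfp_allowed_mask;
            /* cpuset setup, lockdep/might_sleep checks, fault injection */
            if (!prepare_alloc_pages(gfp_mask, order, zonelist, nodemask,
                                     &ac, &alloc_mask, &alloc_flags))
                    return NULL;

            /* dirty-spreading policy and first usable zoneref */
            finalise_ac(gfp_mask, order, &ac);

            page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
            if (likely(page))
                    return page;

            /* ... slow path retries with the original nodemask ... */
    }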
@@ -6420,8 +6489,6 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn) | |||
6420 | 6489 | ||
6421 | start_pfn = end_pfn; | 6490 | start_pfn = end_pfn; |
6422 | } | 6491 | } |
6423 | arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0; | ||
6424 | arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0; | ||
6425 | 6492 | ||
6426 | /* Find the PFNs that ZONE_MOVABLE begins at in each node */ | 6493 | /* Find the PFNs that ZONE_MOVABLE begins at in each node */ |
6427 | memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn)); | 6494 | memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn)); |
@@ -7157,8 +7224,9 @@ void *__init alloc_large_system_hash(const char *tablename, | |||
7157 | * If @count is not zero, it is okay to include fewer than @count unmovable pages | 7224 | * If @count is not zero, it is okay to include fewer than @count unmovable pages |
7158 | * | 7225 | * |
7159 | * PageLRU check without isolation or lru_lock could race so that | 7226 | * PageLRU check without isolation or lru_lock could race so that |
7160 | * MIGRATE_MOVABLE block might include unmovable pages. It means you can't | 7227 | * MIGRATE_MOVABLE block might include unmovable pages. And __PageMovable |
7161 | * expect this function should be exact. | 7228 | * check without lock_page may also miss some movable non-lru pages under |
7229 | * race conditions. So you can't expect this function to be exact. | ||
7162 | */ | 7230 | */ |
7163 | bool has_unmovable_pages(struct zone *zone, struct page *page, int count, | 7231 | bool has_unmovable_pages(struct zone *zone, struct page *page, int count, |
7164 | bool skip_hwpoisoned_pages) | 7232 | bool skip_hwpoisoned_pages) |
@@ -7214,6 +7282,9 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count, | |||
7214 | if (skip_hwpoisoned_pages && PageHWPoison(page)) | 7282 | if (skip_hwpoisoned_pages && PageHWPoison(page)) |
7215 | continue; | 7283 | continue; |
7216 | 7284 | ||
7285 | if (__PageMovable(page)) | ||
7286 | continue; | ||
7287 | |||
7217 | if (!PageLRU(page)) | 7288 | if (!PageLRU(page)) |
7218 | found++; | 7289 | found++; |
7219 | /* | 7290 | /* |
@@ -7325,6 +7396,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc, | |||
7325 | * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks | 7396 | * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks |
7326 | * in range must have the same migratetype and it must | 7397 | * in range must have the same migratetype and it must |
7327 | * be either of the two. | 7398 | * be either of the two. |
7399 | * @gfp_mask: GFP mask to use during compaction | ||
7328 | * | 7400 | * |
7329 | * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES | 7401 | * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES |
7330 | * aligned, however it's the caller's responsibility to guarantee that | 7402 | * aligned, however it's the caller's responsibility to guarantee that |
@@ -7338,7 +7410,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc, | |||
7338 | * need to be freed with free_contig_range(). | 7410 | * need to be freed with free_contig_range(). |
7339 | */ | 7411 | */ |
7340 | int alloc_contig_range(unsigned long start, unsigned long end, | 7412 | int alloc_contig_range(unsigned long start, unsigned long end, |
7341 | unsigned migratetype) | 7413 | unsigned migratetype, gfp_t gfp_mask) |
7342 | { | 7414 | { |
7343 | unsigned long outer_start, outer_end; | 7415 | unsigned long outer_start, outer_end; |
7344 | unsigned int order; | 7416 | unsigned int order; |
@@ -7350,7 +7422,7 @@ int alloc_contig_range(unsigned long start, unsigned long end, | |||
7350 | .zone = page_zone(pfn_to_page(start)), | 7422 | .zone = page_zone(pfn_to_page(start)), |
7351 | .mode = MIGRATE_SYNC, | 7423 | .mode = MIGRATE_SYNC, |
7352 | .ignore_skip_hint = true, | 7424 | .ignore_skip_hint = true, |
7353 | .gfp_mask = GFP_KERNEL, | 7425 | .gfp_mask = memalloc_noio_flags(gfp_mask), |
7354 | }; | 7426 | }; |
7355 | INIT_LIST_HEAD(&cc.migratepages); | 7427 | INIT_LIST_HEAD(&cc.migratepages); |
7356 | 7428 | ||
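Callers of alloc_contig_range() now pick the reclaim context themselves. A hedged example of an updated call site (the real call-site changes, e.g. in CMA, are in other files of this series; start_pfn and end_pfn are illustrative):

    /* GFP_KERNEL allows blocking reclaim; memalloc_noio_flags() will
     * strip __GFP_IO/__GFP_FS if the task is in a NOIO section. */
    ret = alloc_contig_range(start_pfn, end_pfn, MIGRATE_CMA, GFP_KERNEL);
    if (ret)
            return ret;
    /* ... use the range, then release it ... */
    free_contig_range(start_pfn, end_pfn - start_pfn);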
diff --git a/mm/page_idle.c b/mm/page_idle.c index ae11aa914e55..b0ee56c56b58 100644 --- a/mm/page_idle.c +++ b/mm/page_idle.c | |||
@@ -54,27 +54,27 @@ static int page_idle_clear_pte_refs_one(struct page *page, | |||
54 | struct vm_area_struct *vma, | 54 | struct vm_area_struct *vma, |
55 | unsigned long addr, void *arg) | 55 | unsigned long addr, void *arg) |
56 | { | 56 | { |
57 | struct mm_struct *mm = vma->vm_mm; | 57 | struct page_vma_mapped_walk pvmw = { |
58 | pmd_t *pmd; | 58 | .page = page, |
59 | pte_t *pte; | 59 | .vma = vma, |
60 | spinlock_t *ptl; | 60 | .address = addr, |
61 | }; | ||
61 | bool referenced = false; | 62 | bool referenced = false; |
62 | 63 | ||
63 | if (!page_check_address_transhuge(page, mm, addr, &pmd, &pte, &ptl)) | 64 | while (page_vma_mapped_walk(&pvmw)) { |
64 | return SWAP_AGAIN; | 65 | addr = pvmw.address; |
65 | 66 | if (pvmw.pte) { | |
66 | if (pte) { | 67 | referenced = ptep_clear_young_notify(vma, addr, |
67 | referenced = ptep_clear_young_notify(vma, addr, pte); | 68 | pvmw.pte); |
68 | pte_unmap(pte); | 69 | } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { |
69 | } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { | 70 | referenced = pmdp_clear_young_notify(vma, addr, |
70 | referenced = pmdp_clear_young_notify(vma, addr, pmd); | 71 | pvmw.pmd); |
71 | } else { | 72 | } else { |
72 | /* unexpected pmd-mapped page? */ | 73 | /* unexpected pmd-mapped page? */ |
73 | WARN_ON_ONCE(1); | 74 | WARN_ON_ONCE(1); |
75 | } | ||
74 | } | 76 | } |
75 | 77 | ||
76 | spin_unlock(ptl); | ||
77 | |||
78 | if (referenced) { | 78 | if (referenced) { |
79 | clear_page_idle(page); | 79 | clear_page_idle(page); |
80 | /* | 80 | /* |
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c new file mode 100644 index 000000000000..a23001a22c15 --- /dev/null +++ b/mm/page_vma_mapped.c | |||
@@ -0,0 +1,218 @@ | |||
1 | #include <linux/mm.h> | ||
2 | #include <linux/rmap.h> | ||
3 | #include <linux/hugetlb.h> | ||
4 | #include <linux/swap.h> | ||
5 | #include <linux/swapops.h> | ||
6 | |||
7 | #include "internal.h" | ||
8 | |||
9 | static inline bool check_pmd(struct page_vma_mapped_walk *pvmw) | ||
10 | { | ||
11 | pmd_t pmde; | ||
12 | /* | ||
13 | * Make sure we don't re-load pmd between present and !trans_huge check. | ||
14 | * We need a consistent view. | ||
15 | */ | ||
16 | pmde = READ_ONCE(*pvmw->pmd); | ||
17 | return pmd_present(pmde) && !pmd_trans_huge(pmde); | ||
18 | } | ||
19 | |||
20 | static inline bool not_found(struct page_vma_mapped_walk *pvmw) | ||
21 | { | ||
22 | page_vma_mapped_walk_done(pvmw); | ||
23 | return false; | ||
24 | } | ||
25 | |||
26 | static bool map_pte(struct page_vma_mapped_walk *pvmw) | ||
27 | { | ||
28 | pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address); | ||
29 | if (!(pvmw->flags & PVMW_SYNC)) { | ||
30 | if (pvmw->flags & PVMW_MIGRATION) { | ||
31 | if (!is_swap_pte(*pvmw->pte)) | ||
32 | return false; | ||
33 | } else { | ||
34 | if (!pte_present(*pvmw->pte)) | ||
35 | return false; | ||
36 | } | ||
37 | } | ||
38 | pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd); | ||
39 | spin_lock(pvmw->ptl); | ||
40 | return true; | ||
41 | } | ||
42 | |||
43 | static bool check_pte(struct page_vma_mapped_walk *pvmw) | ||
44 | { | ||
45 | if (pvmw->flags & PVMW_MIGRATION) { | ||
46 | #ifdef CONFIG_MIGRATION | ||
47 | swp_entry_t entry; | ||
48 | if (!is_swap_pte(*pvmw->pte)) | ||
49 | return false; | ||
50 | entry = pte_to_swp_entry(*pvmw->pte); | ||
51 | if (!is_migration_entry(entry)) | ||
52 | return false; | ||
53 | if (migration_entry_to_page(entry) - pvmw->page >= | ||
54 | hpage_nr_pages(pvmw->page)) { | ||
55 | return false; | ||
56 | } | ||
57 | if (migration_entry_to_page(entry) < pvmw->page) | ||
58 | return false; | ||
59 | #else | ||
60 | WARN_ON_ONCE(1); | ||
61 | #endif | ||
62 | } else { | ||
63 | if (!pte_present(*pvmw->pte)) | ||
64 | return false; | ||
65 | |||
66 | /* THP can be referenced by any subpage */ | ||
67 | if (pte_page(*pvmw->pte) - pvmw->page >= | ||
68 | hpage_nr_pages(pvmw->page)) { | ||
69 | return false; | ||
70 | } | ||
71 | if (pte_page(*pvmw->pte) < pvmw->page) | ||
72 | return false; | ||
73 | } | ||
74 | |||
75 | return true; | ||
76 | } | ||
77 | |||
78 | /** | ||
79 | * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at | ||
80 | * @pvmw->address | ||
81 | * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags | ||
82 | * must be set. pmd, pte and ptl must be NULL. | ||
83 | * | ||
84 | * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte point | ||
85 | * to relevant page table entries. @pvmw->ptl is locked. @pvmw->address is | ||
86 | * adjusted if needed (for PTE-mapped THPs). | ||
87 | * | ||
88 | * If @pvmw->pmd is set but @pvmw->pte is not, you have found PMD-mapped page | ||
89 | * (usually THP). For PTE-mapped THP, you should run page_vma_mapped_walk() in | ||
90 | * a loop to find all PTEs that map the THP. | ||
91 | * | ||
92 | * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry | ||
93 | * regardless of which page table level the page is mapped at. @pvmw->pmd is | ||
94 | * NULL. | ||
95 | * | ||
96 | * Returns false if there are no more page table entries for the page in | ||
97 | * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped. | ||
98 | * | ||
99 | * If you need to stop the walk before page_vma_mapped_walk() returned false, | ||
100 | * use page_vma_mapped_walk_done(). It will do the housekeeping. | ||
101 | */ | ||
102 | bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw) | ||
103 | { | ||
104 | struct mm_struct *mm = pvmw->vma->vm_mm; | ||
105 | struct page *page = pvmw->page; | ||
106 | pgd_t *pgd; | ||
107 | pud_t *pud; | ||
108 | |||
109 | /* The only possible pmd mapping has been handled on last iteration */ | ||
110 | if (pvmw->pmd && !pvmw->pte) | ||
111 | return not_found(pvmw); | ||
112 | |||
113 | /* Seeking to the next pte entry only makes sense for THP */ | ||
114 | if (pvmw->pte) { | ||
115 | if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page)) | ||
116 | return not_found(pvmw); | ||
117 | goto next_pte; | ||
118 | } | ||
119 | |||
120 | if (unlikely(PageHuge(pvmw->page))) { | ||
121 | /* when pud is not present, pte will be NULL */ | ||
122 | pvmw->pte = huge_pte_offset(mm, pvmw->address); | ||
123 | if (!pvmw->pte) | ||
124 | return false; | ||
125 | |||
126 | pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte); | ||
127 | spin_lock(pvmw->ptl); | ||
128 | if (!check_pte(pvmw)) | ||
129 | return not_found(pvmw); | ||
130 | return true; | ||
131 | } | ||
132 | restart: | ||
133 | pgd = pgd_offset(mm, pvmw->address); | ||
134 | if (!pgd_present(*pgd)) | ||
135 | return false; | ||
136 | pud = pud_offset(pgd, pvmw->address); | ||
137 | if (!pud_present(*pud)) | ||
138 | return false; | ||
139 | pvmw->pmd = pmd_offset(pud, pvmw->address); | ||
140 | if (pmd_trans_huge(*pvmw->pmd)) { | ||
141 | pvmw->ptl = pmd_lock(mm, pvmw->pmd); | ||
142 | if (!pmd_present(*pvmw->pmd)) | ||
143 | return not_found(pvmw); | ||
144 | if (likely(pmd_trans_huge(*pvmw->pmd))) { | ||
145 | if (pvmw->flags & PVMW_MIGRATION) | ||
146 | return not_found(pvmw); | ||
147 | if (pmd_page(*pvmw->pmd) != page) | ||
148 | return not_found(pvmw); | ||
149 | return true; | ||
150 | } else { | ||
151 | /* THP pmd was split under us: handle on pte level */ | ||
152 | spin_unlock(pvmw->ptl); | ||
153 | pvmw->ptl = NULL; | ||
154 | } | ||
155 | } else { | ||
156 | if (!check_pmd(pvmw)) | ||
157 | return false; | ||
158 | } | ||
159 | if (!map_pte(pvmw)) | ||
160 | goto next_pte; | ||
161 | while (1) { | ||
162 | if (check_pte(pvmw)) | ||
163 | return true; | ||
164 | next_pte: do { | ||
165 | pvmw->address += PAGE_SIZE; | ||
166 | if (pvmw->address >= | ||
167 | __vma_address(pvmw->page, pvmw->vma) + | ||
168 | hpage_nr_pages(pvmw->page) * PAGE_SIZE) | ||
169 | return not_found(pvmw); | ||
170 | /* Did we cross page table boundary? */ | ||
171 | if (pvmw->address % PMD_SIZE == 0) { | ||
172 | pte_unmap(pvmw->pte); | ||
173 | if (pvmw->ptl) { | ||
174 | spin_unlock(pvmw->ptl); | ||
175 | pvmw->ptl = NULL; | ||
176 | } | ||
177 | goto restart; | ||
178 | } else { | ||
179 | pvmw->pte++; | ||
180 | } | ||
181 | } while (pte_none(*pvmw->pte)); | ||
182 | |||
183 | if (!pvmw->ptl) { | ||
184 | pvmw->ptl = pte_lockptr(mm, pvmw->pmd); | ||
185 | spin_lock(pvmw->ptl); | ||
186 | } | ||
187 | } | ||
188 | } | ||
189 | |||
190 | /** | ||
191 | * page_mapped_in_vma - check whether a page is really mapped in a VMA | ||
192 | * @page: the page to test | ||
193 | * @vma: the VMA to test | ||
194 | * | ||
195 | * Returns 1 if the page is mapped into the page tables of the VMA, 0 | ||
196 | * if the page is not mapped into the page tables of this VMA. Only | ||
197 | * valid for normal file or anonymous VMAs. | ||
198 | */ | ||
199 | int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma) | ||
200 | { | ||
201 | struct page_vma_mapped_walk pvmw = { | ||
202 | .page = page, | ||
203 | .vma = vma, | ||
204 | .flags = PVMW_SYNC, | ||
205 | }; | ||
206 | unsigned long start, end; | ||
207 | |||
208 | start = __vma_address(page, vma); | ||
209 | end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1); | ||
210 | |||
211 | if (unlikely(end < vma->vm_start || start >= vma->vm_end)) | ||
212 | return 0; | ||
213 | pvmw.address = max(start, vma->vm_start); | ||
214 | if (!page_vma_mapped_walk(&pvmw)) | ||
215 | return 0; | ||
216 | page_vma_mapped_walk_done(&pvmw); | ||
217 | return 1; | ||
218 | } | ||
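page_vma_mapped_walk() is the new common engine behind page_idle above and the rmap walkers below. A minimal caller sketch following the pattern in this file (the stop condition done_early is illustrative, not from the patch):

    struct page_vma_mapped_walk pvmw = {
            .page = page,
            .vma = vma,
            .address = vma_address(page, vma),
    };

    while (page_vma_mapped_walk(&pvmw)) {
            /* pvmw.ptl is held here */
            if (pvmw.pte) {
                    /* PTE-mapped: one iteration per base page of a THP */
            } else {
                    /* pvmw.pmd only: a PMD-mapped THP */
            }
            if (done_early) {
                    /* drops the lock and unmaps the pte for us */
                    page_vma_mapped_walk_done(&pvmw);
                    break;
            }
    }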
diff --git a/mm/pagewalk.c b/mm/pagewalk.c index 207244489a68..03761577ae86 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c | |||
@@ -78,14 +78,32 @@ static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end, | |||
78 | 78 | ||
79 | pud = pud_offset(pgd, addr); | 79 | pud = pud_offset(pgd, addr); |
80 | do { | 80 | do { |
81 | again: | ||
81 | next = pud_addr_end(addr, end); | 82 | next = pud_addr_end(addr, end); |
82 | if (pud_none_or_clear_bad(pud)) { | 83 | if (pud_none(*pud) || !walk->vma) { |
83 | if (walk->pte_hole) | 84 | if (walk->pte_hole) |
84 | err = walk->pte_hole(addr, next, walk); | 85 | err = walk->pte_hole(addr, next, walk); |
85 | if (err) | 86 | if (err) |
86 | break; | 87 | break; |
87 | continue; | 88 | continue; |
88 | } | 89 | } |
90 | |||
91 | if (walk->pud_entry) { | ||
92 | spinlock_t *ptl = pud_trans_huge_lock(pud, walk->vma); | ||
93 | |||
94 | if (ptl) { | ||
95 | err = walk->pud_entry(pud, addr, next, walk); | ||
96 | spin_unlock(ptl); | ||
97 | if (err) | ||
98 | break; | ||
99 | continue; | ||
100 | } | ||
101 | } | ||
102 | |||
103 | split_huge_pud(walk->vma, pud, addr); | ||
104 | if (pud_none(*pud)) | ||
105 | goto again; | ||
106 | |||
89 | if (walk->pmd_entry || walk->pte_entry) | 107 | if (walk->pmd_entry || walk->pte_entry) |
90 | err = walk_pmd_range(pud, addr, next, walk); | 108 | err = walk_pmd_range(pud, addr, next, walk); |
91 | if (err) | 109 | if (err) |
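With walk_pud_range() now invoking ->pud_entry under pud_trans_huge_lock(), a walker can observe huge PUDs directly instead of having them split first. A hypothetical registration (my_pud_entry and the surrounding names are illustrative):

    static int my_pud_entry(pud_t *pud, unsigned long addr,
                            unsigned long next, struct mm_walk *walk)
    {
            /* called with the huge-PUD ptl held; non-zero aborts the
             * walk, 0 skips down-level traversal of this pud */
            return 0;
    }

    struct mm_walk walk = {
            .pud_entry = my_pud_entry,
            .mm = mm,
    };
    walk_page_range(start, end, &walk);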
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c index 71c5f9109f2a..4ed5908c65b0 100644 --- a/mm/pgtable-generic.c +++ b/mm/pgtable-generic.c | |||
@@ -123,6 +123,20 @@ pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address, | |||
123 | flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE); | 123 | flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE); |
124 | return pmd; | 124 | return pmd; |
125 | } | 125 | } |
126 | |||
127 | #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD | ||
128 | pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address, | ||
129 | pud_t *pudp) | ||
130 | { | ||
131 | pud_t pud; | ||
132 | |||
133 | VM_BUG_ON(address & ~HPAGE_PUD_MASK); | ||
134 | VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp)); | ||
135 | pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp); | ||
136 | flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE); | ||
137 | return pud; | ||
138 | } | ||
139 | #endif | ||
126 | #endif | 140 | #endif |
127 | 141 | ||
128 | #ifndef __HAVE_ARCH_PGTABLE_DEPOSIT | 142 | #ifndef __HAVE_ARCH_PGTABLE_DEPOSIT |
@@ -607,8 +607,7 @@ void try_to_unmap_flush_dirty(void) | |||
607 | try_to_unmap_flush(); | 607 | try_to_unmap_flush(); |
608 | } | 608 | } |
609 | 609 | ||
610 | static void set_tlb_ubc_flush_pending(struct mm_struct *mm, | 610 | static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable) |
611 | struct page *page, bool writable) | ||
612 | { | 611 | { |
613 | struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; | 612 | struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; |
614 | 613 | ||
@@ -643,8 +642,7 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) | |||
643 | return should_defer; | 642 | return should_defer; |
644 | } | 643 | } |
645 | #else | 644 | #else |
646 | static void set_tlb_ubc_flush_pending(struct mm_struct *mm, | 645 | static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable) |
647 | struct page *page, bool writable) | ||
648 | { | 646 | { |
649 | } | 647 | } |
650 | 648 | ||
@@ -710,170 +708,6 @@ out: | |||
710 | return pmd; | 708 | return pmd; |
711 | } | 709 | } |
712 | 710 | ||
713 | /* | ||
714 | * Check that @page is mapped at @address into @mm. | ||
715 | * | ||
716 | * If @sync is false, page_check_address may perform a racy check to avoid | ||
717 | * the page table lock when the pte is not present (helpful when reclaiming | ||
718 | * highly shared pages). | ||
719 | * | ||
720 | * On success returns with pte mapped and locked. | ||
721 | */ | ||
722 | pte_t *__page_check_address(struct page *page, struct mm_struct *mm, | ||
723 | unsigned long address, spinlock_t **ptlp, int sync) | ||
724 | { | ||
725 | pmd_t *pmd; | ||
726 | pte_t *pte; | ||
727 | spinlock_t *ptl; | ||
728 | |||
729 | if (unlikely(PageHuge(page))) { | ||
730 | /* when pud is not present, pte will be NULL */ | ||
731 | pte = huge_pte_offset(mm, address); | ||
732 | if (!pte) | ||
733 | return NULL; | ||
734 | |||
735 | ptl = huge_pte_lockptr(page_hstate(page), mm, pte); | ||
736 | goto check; | ||
737 | } | ||
738 | |||
739 | pmd = mm_find_pmd(mm, address); | ||
740 | if (!pmd) | ||
741 | return NULL; | ||
742 | |||
743 | pte = pte_offset_map(pmd, address); | ||
744 | /* Make a quick check before getting the lock */ | ||
745 | if (!sync && !pte_present(*pte)) { | ||
746 | pte_unmap(pte); | ||
747 | return NULL; | ||
748 | } | ||
749 | |||
750 | ptl = pte_lockptr(mm, pmd); | ||
751 | check: | ||
752 | spin_lock(ptl); | ||
753 | if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) { | ||
754 | *ptlp = ptl; | ||
755 | return pte; | ||
756 | } | ||
757 | pte_unmap_unlock(pte, ptl); | ||
758 | return NULL; | ||
759 | } | ||
760 | |||
761 | /** | ||
762 | * page_mapped_in_vma - check whether a page is really mapped in a VMA | ||
763 | * @page: the page to test | ||
764 | * @vma: the VMA to test | ||
765 | * | ||
766 | * Returns 1 if the page is mapped into the page tables of the VMA, 0 | ||
767 | * if the page is not mapped into the page tables of this VMA. Only | ||
768 | * valid for normal file or anonymous VMAs. | ||
769 | */ | ||
770 | int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma) | ||
771 | { | ||
772 | unsigned long address; | ||
773 | pte_t *pte; | ||
774 | spinlock_t *ptl; | ||
775 | |||
776 | address = __vma_address(page, vma); | ||
777 | if (unlikely(address < vma->vm_start || address >= vma->vm_end)) | ||
778 | return 0; | ||
779 | pte = page_check_address(page, vma->vm_mm, address, &ptl, 1); | ||
780 | if (!pte) /* the page is not in this mm */ | ||
781 | return 0; | ||
782 | pte_unmap_unlock(pte, ptl); | ||
783 | |||
784 | return 1; | ||
785 | } | ||
786 | |||
787 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
788 | /* | ||
789 | * Check that @page is mapped at @address into @mm. In contrast to | ||
790 | * page_check_address(), this function can handle transparent huge pages. | ||
791 | * | ||
792 | * On success returns true with pte mapped and locked. For PMD-mapped | ||
793 | * transparent huge pages *@ptep is set to NULL. | ||
794 | */ | ||
795 | bool page_check_address_transhuge(struct page *page, struct mm_struct *mm, | ||
796 | unsigned long address, pmd_t **pmdp, | ||
797 | pte_t **ptep, spinlock_t **ptlp) | ||
798 | { | ||
799 | pgd_t *pgd; | ||
800 | pud_t *pud; | ||
801 | pmd_t *pmd; | ||
802 | pte_t *pte; | ||
803 | spinlock_t *ptl; | ||
804 | |||
805 | if (unlikely(PageHuge(page))) { | ||
806 | /* when pud is not present, pte will be NULL */ | ||
807 | pte = huge_pte_offset(mm, address); | ||
808 | if (!pte) | ||
809 | return false; | ||
810 | |||
811 | ptl = huge_pte_lockptr(page_hstate(page), mm, pte); | ||
812 | pmd = NULL; | ||
813 | goto check_pte; | ||
814 | } | ||
815 | |||
816 | pgd = pgd_offset(mm, address); | ||
817 | if (!pgd_present(*pgd)) | ||
818 | return false; | ||
819 | pud = pud_offset(pgd, address); | ||
820 | if (!pud_present(*pud)) | ||
821 | return false; | ||
822 | pmd = pmd_offset(pud, address); | ||
823 | |||
824 | if (pmd_trans_huge(*pmd)) { | ||
825 | ptl = pmd_lock(mm, pmd); | ||
826 | if (!pmd_present(*pmd)) | ||
827 | goto unlock_pmd; | ||
828 | if (unlikely(!pmd_trans_huge(*pmd))) { | ||
829 | spin_unlock(ptl); | ||
830 | goto map_pte; | ||
831 | } | ||
832 | |||
833 | if (pmd_page(*pmd) != page) | ||
834 | goto unlock_pmd; | ||
835 | |||
836 | pte = NULL; | ||
837 | goto found; | ||
838 | unlock_pmd: | ||
839 | spin_unlock(ptl); | ||
840 | return false; | ||
841 | } else { | ||
842 | pmd_t pmde = *pmd; | ||
843 | |||
844 | barrier(); | ||
845 | if (!pmd_present(pmde) || pmd_trans_huge(pmde)) | ||
846 | return false; | ||
847 | } | ||
848 | map_pte: | ||
849 | pte = pte_offset_map(pmd, address); | ||
850 | if (!pte_present(*pte)) { | ||
851 | pte_unmap(pte); | ||
852 | return false; | ||
853 | } | ||
854 | |||
855 | ptl = pte_lockptr(mm, pmd); | ||
856 | check_pte: | ||
857 | spin_lock(ptl); | ||
858 | |||
859 | if (!pte_present(*pte)) { | ||
860 | pte_unmap_unlock(pte, ptl); | ||
861 | return false; | ||
862 | } | ||
863 | |||
864 | /* THP can be referenced by any subpage */ | ||
865 | if (pte_pfn(*pte) - page_to_pfn(page) >= hpage_nr_pages(page)) { | ||
866 | pte_unmap_unlock(pte, ptl); | ||
867 | return false; | ||
868 | } | ||
869 | found: | ||
870 | *ptep = pte; | ||
871 | *pmdp = pmd; | ||
872 | *ptlp = ptl; | ||
873 | return true; | ||
874 | } | ||
875 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ | ||
876 | |||
877 | struct page_referenced_arg { | 711 | struct page_referenced_arg { |
878 | int mapcount; | 712 | int mapcount; |
879 | int referenced; | 713 | int referenced; |
@@ -886,45 +720,48 @@ struct page_referenced_arg { | |||
886 | static int page_referenced_one(struct page *page, struct vm_area_struct *vma, | 720 | static int page_referenced_one(struct page *page, struct vm_area_struct *vma, |
887 | unsigned long address, void *arg) | 721 | unsigned long address, void *arg) |
888 | { | 722 | { |
889 | struct mm_struct *mm = vma->vm_mm; | ||
890 | struct page_referenced_arg *pra = arg; | 723 | struct page_referenced_arg *pra = arg; |
891 | pmd_t *pmd; | 724 | struct page_vma_mapped_walk pvmw = { |
892 | pte_t *pte; | 725 | .page = page, |
893 | spinlock_t *ptl; | 726 | .vma = vma, |
727 | .address = address, | ||
728 | }; | ||
894 | int referenced = 0; | 729 | int referenced = 0; |
895 | 730 | ||
896 | if (!page_check_address_transhuge(page, mm, address, &pmd, &pte, &ptl)) | 731 | while (page_vma_mapped_walk(&pvmw)) { |
897 | return SWAP_AGAIN; | 732 | address = pvmw.address; |
898 | 733 | ||
899 | if (vma->vm_flags & VM_LOCKED) { | 734 | if (vma->vm_flags & VM_LOCKED) { |
900 | if (pte) | 735 | page_vma_mapped_walk_done(&pvmw); |
901 | pte_unmap(pte); | 736 | pra->vm_flags |= VM_LOCKED; |
902 | spin_unlock(ptl); | 737 | return SWAP_FAIL; /* To break the loop */ |
903 | pra->vm_flags |= VM_LOCKED; | 738 | } |
904 | return SWAP_FAIL; /* To break the loop */ | ||
905 | } | ||
906 | 739 | ||
907 | if (pte) { | 740 | if (pvmw.pte) { |
908 | if (ptep_clear_flush_young_notify(vma, address, pte)) { | 741 | if (ptep_clear_flush_young_notify(vma, address, |
909 | /* | 742 | pvmw.pte)) { |
910 | * Don't treat a reference through a sequentially read | 743 | /* |
911 | * mapping as such. If the page has been used in | 744 | * Don't treat a reference through |
912 | * another mapping, we will catch it; if this other | 745 | * a sequentially read mapping as such. |
913 | * mapping is already gone, the unmap path will have | 746 | * If the page has been used in another mapping, |
914 | * set PG_referenced or activated the page. | 747 | * we will catch it; if this other mapping is |
915 | */ | 748 | * already gone, the unmap path will have set |
916 | if (likely(!(vma->vm_flags & VM_SEQ_READ))) | 749 | * PG_referenced or activated the page. |
750 | */ | ||
751 | if (likely(!(vma->vm_flags & VM_SEQ_READ))) | ||
752 | referenced++; | ||
753 | } | ||
754 | } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { | ||
755 | if (pmdp_clear_flush_young_notify(vma, address, | ||
756 | pvmw.pmd)) | ||
917 | referenced++; | 757 | referenced++; |
758 | } else { | ||
759 | /* unexpected pmd-mapped page? */ | ||
760 | WARN_ON_ONCE(1); | ||
918 | } | 761 | } |
919 | pte_unmap(pte); | 762 | |
920 | } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { | 763 | pra->mapcount--; |
921 | if (pmdp_clear_flush_young_notify(vma, address, pmd)) | ||
922 | referenced++; | ||
923 | } else { | ||
924 | /* unexpected pmd-mapped page? */ | ||
925 | WARN_ON_ONCE(1); | ||
926 | } | 764 | } |
927 | spin_unlock(ptl); | ||
928 | 765 | ||
929 | if (referenced) | 766 | if (referenced) |
930 | clear_page_idle(page); | 767 | clear_page_idle(page); |
@@ -936,7 +773,6 @@ static int page_referenced_one(struct page *page, struct vm_area_struct *vma, | |||
936 | pra->vm_flags |= vma->vm_flags; | 773 | pra->vm_flags |= vma->vm_flags; |
937 | } | 774 | } |
938 | 775 | ||
939 | pra->mapcount--; | ||
940 | if (!pra->mapcount) | 776 | if (!pra->mapcount) |
941 | return SWAP_SUCCESS; /* To break the loop */ | 777 | return SWAP_SUCCESS; /* To break the loop */ |
942 | 778 | ||
@@ -1015,34 +851,56 @@ int page_referenced(struct page *page, | |||
1015 | static int page_mkclean_one(struct page *page, struct vm_area_struct *vma, | 851 | static int page_mkclean_one(struct page *page, struct vm_area_struct *vma, |
1016 | unsigned long address, void *arg) | 852 | unsigned long address, void *arg) |
1017 | { | 853 | { |
1018 | struct mm_struct *mm = vma->vm_mm; | 854 | struct page_vma_mapped_walk pvmw = { |
1019 | pte_t *pte; | 855 | .page = page, |
1020 | spinlock_t *ptl; | 856 | .vma = vma, |
1021 | int ret = 0; | 857 | .address = address, |
858 | .flags = PVMW_SYNC, | ||
859 | }; | ||
1022 | int *cleaned = arg; | 860 | int *cleaned = arg; |
1023 | 861 | ||
1024 | pte = page_check_address(page, mm, address, &ptl, 1); | 862 | while (page_vma_mapped_walk(&pvmw)) { |
1025 | if (!pte) | 863 | int ret = 0; |
1026 | goto out; | 864 | address = pvmw.address; |
1027 | 865 | if (pvmw.pte) { | |
1028 | if (pte_dirty(*pte) || pte_write(*pte)) { | 866 | pte_t entry; |
1029 | pte_t entry; | 867 | pte_t *pte = pvmw.pte; |
868 | |||
869 | if (!pte_dirty(*pte) && !pte_write(*pte)) | ||
870 | continue; | ||
871 | |||
872 | flush_cache_page(vma, address, pte_pfn(*pte)); | ||
873 | entry = ptep_clear_flush(vma, address, pte); | ||
874 | entry = pte_wrprotect(entry); | ||
875 | entry = pte_mkclean(entry); | ||
876 | set_pte_at(vma->vm_mm, address, pte, entry); | ||
877 | ret = 1; | ||
878 | } else { | ||
879 | #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE | ||
880 | pmd_t *pmd = pvmw.pmd; | ||
881 | pmd_t entry; | ||
882 | |||
883 | if (!pmd_dirty(*pmd) && !pmd_write(*pmd)) | ||
884 | continue; | ||
885 | |||
886 | flush_cache_page(vma, address, page_to_pfn(page)); | ||
887 | entry = pmdp_huge_clear_flush(vma, address, pmd); | ||
888 | entry = pmd_wrprotect(entry); | ||
889 | entry = pmd_mkclean(entry); | ||
890 | set_pmd_at(vma->vm_mm, address, pmd, entry); | ||
891 | ret = 1; | ||
892 | #else | ||
893 | /* unexpected pmd-mapped page? */ | ||
894 | WARN_ON_ONCE(1); | ||
895 | #endif | ||
896 | } | ||
1030 | 897 | ||
1031 | flush_cache_page(vma, address, pte_pfn(*pte)); | 898 | if (ret) { |
1032 | entry = ptep_clear_flush(vma, address, pte); | 899 | mmu_notifier_invalidate_page(vma->vm_mm, address); |
1033 | entry = pte_wrprotect(entry); | 900 | (*cleaned)++; |
1034 | entry = pte_mkclean(entry); | 901 | } |
1035 | set_pte_at(mm, address, pte, entry); | ||
1036 | ret = 1; | ||
1037 | } | 902 | } |
1038 | 903 | ||
1039 | pte_unmap_unlock(pte, ptl); | ||
1040 | |||
1041 | if (ret) { | ||
1042 | mmu_notifier_invalidate_page(mm, address); | ||
1043 | (*cleaned)++; | ||
1044 | } | ||
1045 | out: | ||
1046 | return SWAP_AGAIN; | 904 | return SWAP_AGAIN; |
1047 | } | 905 | } |
1048 | 906 | ||
@@ -1435,155 +1293,163 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, | |||
1435 | unsigned long address, void *arg) | 1293 | unsigned long address, void *arg) |
1436 | { | 1294 | { |
1437 | struct mm_struct *mm = vma->vm_mm; | 1295 | struct mm_struct *mm = vma->vm_mm; |
1438 | pte_t *pte; | 1296 | struct page_vma_mapped_walk pvmw = { |
1297 | .page = page, | ||
1298 | .vma = vma, | ||
1299 | .address = address, | ||
1300 | }; | ||
1439 | pte_t pteval; | 1301 | pte_t pteval; |
1440 | spinlock_t *ptl; | 1302 | struct page *subpage; |
1441 | int ret = SWAP_AGAIN; | 1303 | int ret = SWAP_AGAIN; |
1442 | struct rmap_private *rp = arg; | 1304 | struct rmap_private *rp = arg; |
1443 | enum ttu_flags flags = rp->flags; | 1305 | enum ttu_flags flags = rp->flags; |
1444 | 1306 | ||
1445 | /* munlock has nothing to gain from examining un-locked vmas */ | 1307 | /* munlock has nothing to gain from examining un-locked vmas */ |
1446 | if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED)) | 1308 | if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED)) |
1447 | goto out; | 1309 | return SWAP_AGAIN; |
1448 | 1310 | ||
1449 | if (flags & TTU_SPLIT_HUGE_PMD) { | 1311 | if (flags & TTU_SPLIT_HUGE_PMD) { |
1450 | split_huge_pmd_address(vma, address, | 1312 | split_huge_pmd_address(vma, address, |
1451 | flags & TTU_MIGRATION, page); | 1313 | flags & TTU_MIGRATION, page); |
1452 | /* check if we have anything to do after split */ | ||
1453 | if (page_mapcount(page) == 0) | ||
1454 | goto out; | ||
1455 | } | 1314 | } |
1456 | 1315 | ||
1457 | pte = page_check_address(page, mm, address, &ptl, | 1316 | while (page_vma_mapped_walk(&pvmw)) { |
1458 | PageTransCompound(page)); | 1317 | subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte); |
1459 | if (!pte) | 1318 | address = pvmw.address; |
1460 | goto out; | ||
1461 | 1319 | ||
1462 | /* | 1320 | /* Unexpected PMD-mapped THP? */ |
1463 | * If the page is mlock()d, we cannot swap it out. | 1321 | VM_BUG_ON_PAGE(!pvmw.pte, page); |
1464 | * If it's recently referenced (perhaps page_referenced | ||
1465 | * skipped over this mm) then we should reactivate it. | ||
1466 | */ | ||
1467 | if (!(flags & TTU_IGNORE_MLOCK)) { | ||
1468 | if (vma->vm_flags & VM_LOCKED) { | ||
1469 | /* PTE-mapped THP are never mlocked */ | ||
1470 | if (!PageTransCompound(page)) { | ||
1471 | /* | ||
1472 | * Holding pte lock, we do *not* need | ||
1473 | * mmap_sem here | ||
1474 | */ | ||
1475 | mlock_vma_page(page); | ||
1476 | } | ||
1477 | ret = SWAP_MLOCK; | ||
1478 | goto out_unmap; | ||
1479 | } | ||
1480 | if (flags & TTU_MUNLOCK) | ||
1481 | goto out_unmap; | ||
1482 | } | ||
1483 | if (!(flags & TTU_IGNORE_ACCESS)) { | ||
1484 | if (ptep_clear_flush_young_notify(vma, address, pte)) { | ||
1485 | ret = SWAP_FAIL; | ||
1486 | goto out_unmap; | ||
1487 | } | ||
1488 | } | ||
1489 | 1322 | ||
1490 | /* Nuke the page table entry. */ | ||
1491 | flush_cache_page(vma, address, page_to_pfn(page)); | ||
1492 | if (should_defer_flush(mm, flags)) { | ||
1493 | /* | 1323 | /* |
1494 | * We clear the PTE but do not flush so potentially a remote | 1324 | * If the page is mlock()d, we cannot swap it out. |
1495 | * CPU could still be writing to the page. If the entry was | 1325 | * If it's recently referenced (perhaps page_referenced |
1496 | * previously clean then the architecture must guarantee that | 1326 | * skipped over this mm) then we should reactivate it. |
1497 | * a clear->dirty transition on a cached TLB entry is written | ||
1498 | * through and traps if the PTE is unmapped. | ||
1499 | */ | 1327 | */ |
1500 | pteval = ptep_get_and_clear(mm, address, pte); | 1328 | if (!(flags & TTU_IGNORE_MLOCK)) { |
1501 | 1329 | if (vma->vm_flags & VM_LOCKED) { | |
1502 | set_tlb_ubc_flush_pending(mm, page, pte_dirty(pteval)); | 1330 | /* PTE-mapped THP are never mlocked */ |
1503 | } else { | 1331 | if (!PageTransCompound(page)) { |
1504 | pteval = ptep_clear_flush(vma, address, pte); | 1332 | /* |
1505 | } | 1333 | * Holding pte lock, we do *not* need |
1334 | * mmap_sem here | ||
1335 | */ | ||
1336 | mlock_vma_page(page); | ||
1337 | } | ||
1338 | ret = SWAP_MLOCK; | ||
1339 | page_vma_mapped_walk_done(&pvmw); | ||
1340 | break; | ||
1341 | } | ||
1342 | if (flags & TTU_MUNLOCK) | ||
1343 | continue; | ||
1344 | } | ||
1506 | 1345 | ||
1507 | /* Move the dirty bit to the physical page now the pte is gone. */ | 1346 | if (!(flags & TTU_IGNORE_ACCESS)) { |
1508 | if (pte_dirty(pteval)) | 1347 | if (ptep_clear_flush_young_notify(vma, address, |
1509 | set_page_dirty(page); | 1348 | pvmw.pte)) { |
1349 | ret = SWAP_FAIL; | ||
1350 | page_vma_mapped_walk_done(&pvmw); | ||
1351 | break; | ||
1352 | } | ||
1353 | } | ||
1510 | 1354 | ||
1511 | /* Update high watermark before we lower rss */ | 1355 | /* Nuke the page table entry. */ |
1512 | update_hiwater_rss(mm); | 1356 | flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); |
1357 | if (should_defer_flush(mm, flags)) { | ||
1358 | /* | ||
1359 | * We clear the PTE but do not flush so potentially | ||
1360 | * a remote CPU could still be writing to the page. | ||
1361 | * If the entry was previously clean then the | ||
1362 | * architecture must guarantee that a clear->dirty | ||
1363 | * transition on a cached TLB entry is written through | ||
1364 | * and traps if the PTE is unmapped. | ||
1365 | */ | ||
1366 | pteval = ptep_get_and_clear(mm, address, pvmw.pte); | ||
1513 | 1367 | ||
1514 | if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) { | 1368 | set_tlb_ubc_flush_pending(mm, pte_dirty(pteval)); |
1515 | if (PageHuge(page)) { | ||
1516 | hugetlb_count_sub(1 << compound_order(page), mm); | ||
1517 | } else { | 1369 | } else { |
1518 | dec_mm_counter(mm, mm_counter(page)); | 1370 | pteval = ptep_clear_flush(vma, address, pvmw.pte); |
1519 | } | 1371 | } |
1520 | set_pte_at(mm, address, pte, | ||
1521 | swp_entry_to_pte(make_hwpoison_entry(page))); | ||
1522 | } else if (pte_unused(pteval)) { | ||
1523 | /* | ||
1524 | * The guest indicated that the page content is of no | ||
1525 | * interest anymore. Simply discard the pte, vmscan | ||
1526 | * will take care of the rest. | ||
1527 | */ | ||
1528 | dec_mm_counter(mm, mm_counter(page)); | ||
1529 | } else if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION)) { | ||
1530 | swp_entry_t entry; | ||
1531 | pte_t swp_pte; | ||
1532 | /* | ||
1533 | * Store the pfn of the page in a special migration | ||
1534 | * pte. do_swap_page() will wait until the migration | ||
1535 | * pte is removed and then restart fault handling. | ||
1536 | */ | ||
1537 | entry = make_migration_entry(page, pte_write(pteval)); | ||
1538 | swp_pte = swp_entry_to_pte(entry); | ||
1539 | if (pte_soft_dirty(pteval)) | ||
1540 | swp_pte = pte_swp_mksoft_dirty(swp_pte); | ||
1541 | set_pte_at(mm, address, pte, swp_pte); | ||
1542 | } else if (PageAnon(page)) { | ||
1543 | swp_entry_t entry = { .val = page_private(page) }; | ||
1544 | pte_t swp_pte; | ||
1545 | /* | ||
1546 | * Store the swap location in the pte. | ||
1547 | * See handle_pte_fault() ... | ||
1548 | */ | ||
1549 | VM_BUG_ON_PAGE(!PageSwapCache(page), page); | ||
1550 | 1372 | ||
1551 | if (!PageDirty(page) && (flags & TTU_LZFREE)) { | 1373 | /* Move the dirty bit to the page. Now the pte is gone. */ |
1552 | /* It's a freeable page by MADV_FREE */ | 1374 | if (pte_dirty(pteval)) |
1553 | dec_mm_counter(mm, MM_ANONPAGES); | 1375 | set_page_dirty(page); |
1554 | rp->lazyfreed++; | ||
1555 | goto discard; | ||
1556 | } | ||
1557 | 1376 | ||
1558 | if (swap_duplicate(entry) < 0) { | 1377 | /* Update high watermark before we lower rss */ |
1559 | set_pte_at(mm, address, pte, pteval); | 1378 | update_hiwater_rss(mm); |
1560 | ret = SWAP_FAIL; | ||
1561 | goto out_unmap; | ||
1562 | } | ||
1563 | if (list_empty(&mm->mmlist)) { | ||
1564 | spin_lock(&mmlist_lock); | ||
1565 | if (list_empty(&mm->mmlist)) | ||
1566 | list_add(&mm->mmlist, &init_mm.mmlist); | ||
1567 | spin_unlock(&mmlist_lock); | ||
1568 | } | ||
1569 | dec_mm_counter(mm, MM_ANONPAGES); | ||
1570 | inc_mm_counter(mm, MM_SWAPENTS); | ||
1571 | swp_pte = swp_entry_to_pte(entry); | ||
1572 | if (pte_soft_dirty(pteval)) | ||
1573 | swp_pte = pte_swp_mksoft_dirty(swp_pte); | ||
1574 | set_pte_at(mm, address, pte, swp_pte); | ||
1575 | } else | ||
1576 | dec_mm_counter(mm, mm_counter_file(page)); | ||
1577 | 1379 | ||
1578 | discard: | 1380 | if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) { |
1579 | page_remove_rmap(page, PageHuge(page)); | 1381 | if (PageHuge(page)) { |
1580 | put_page(page); | 1382 | int nr = 1 << compound_order(page); |
1383 | hugetlb_count_sub(nr, mm); | ||
1384 | } else { | ||
1385 | dec_mm_counter(mm, mm_counter(page)); | ||
1386 | } | ||
1387 | |||
1388 | pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); | ||
1389 | set_pte_at(mm, address, pvmw.pte, pteval); | ||
1390 | } else if (pte_unused(pteval)) { | ||
1391 | /* | ||
1392 | * The guest indicated that the page content is of no | ||
1393 | * interest anymore. Simply discard the pte, vmscan | ||
1394 | * will take care of the rest. | ||
1395 | */ | ||
1396 | dec_mm_counter(mm, mm_counter(page)); | ||
1397 | } else if (IS_ENABLED(CONFIG_MIGRATION) && | ||
1398 | (flags & TTU_MIGRATION)) { | ||
1399 | swp_entry_t entry; | ||
1400 | pte_t swp_pte; | ||
1401 | /* | ||
1402 | * Store the pfn of the page in a special migration | ||
1403 | * pte. do_swap_page() will wait until the migration | ||
1404 | * pte is removed and then restart fault handling. | ||
1405 | */ | ||
1406 | entry = make_migration_entry(subpage, | ||
1407 | pte_write(pteval)); | ||
1408 | swp_pte = swp_entry_to_pte(entry); | ||
1409 | if (pte_soft_dirty(pteval)) | ||
1410 | swp_pte = pte_swp_mksoft_dirty(swp_pte); | ||
1411 | set_pte_at(mm, address, pvmw.pte, swp_pte); | ||
1412 | } else if (PageAnon(page)) { | ||
1413 | swp_entry_t entry = { .val = page_private(subpage) }; | ||
1414 | pte_t swp_pte; | ||
1415 | /* | ||
1416 | * Store the swap location in the pte. | ||
1417 | * See handle_pte_fault() ... | ||
1418 | */ | ||
1419 | VM_BUG_ON_PAGE(!PageSwapCache(page), page); | ||
1420 | |||
1421 | if (!PageDirty(page) && (flags & TTU_LZFREE)) { | ||
1422 | /* It's a freeable page by MADV_FREE */ | ||
1423 | dec_mm_counter(mm, MM_ANONPAGES); | ||
1424 | rp->lazyfreed++; | ||
1425 | goto discard; | ||
1426 | } | ||
1581 | 1427 | ||
1582 | out_unmap: | 1428 | if (swap_duplicate(entry) < 0) { |
1583 | pte_unmap_unlock(pte, ptl); | 1429 | set_pte_at(mm, address, pvmw.pte, pteval); |
1584 | if (ret != SWAP_FAIL && ret != SWAP_MLOCK && !(flags & TTU_MUNLOCK)) | 1430 | ret = SWAP_FAIL; |
1431 | page_vma_mapped_walk_done(&pvmw); | ||
1432 | break; | ||
1433 | } | ||
1434 | if (list_empty(&mm->mmlist)) { | ||
1435 | spin_lock(&mmlist_lock); | ||
1436 | if (list_empty(&mm->mmlist)) | ||
1437 | list_add(&mm->mmlist, &init_mm.mmlist); | ||
1438 | spin_unlock(&mmlist_lock); | ||
1439 | } | ||
1440 | dec_mm_counter(mm, MM_ANONPAGES); | ||
1441 | inc_mm_counter(mm, MM_SWAPENTS); | ||
1442 | swp_pte = swp_entry_to_pte(entry); | ||
1443 | if (pte_soft_dirty(pteval)) | ||
1444 | swp_pte = pte_swp_mksoft_dirty(swp_pte); | ||
1445 | set_pte_at(mm, address, pvmw.pte, swp_pte); | ||
1446 | } else | ||
1447 | dec_mm_counter(mm, mm_counter_file(page)); | ||
1448 | discard: | ||
1449 | page_remove_rmap(subpage, PageHuge(page)); | ||
1450 | put_page(page); | ||
1585 | mmu_notifier_invalidate_page(mm, address); | 1451 | mmu_notifier_invalidate_page(mm, address); |
1586 | out: | 1452 | } |
1587 | return ret; | 1453 | return ret; |
1588 | } | 1454 | } |
1589 | 1455 | ||
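The pivotal new line in this rewrite is the subpage computation, which lets one rmap walk unmap each base page of a THP individually. A worked instance with illustrative pfn values:

    /* head page of the THP sits at pfn 0x1200; this pte maps pfn 0x1203 */
    subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
    /*      = page - 0x1200 + 0x1203
     *      = page + 3, i.e. the third tail page; migration entries,
     *        hwpoison entries and page_remove_rmap() all act on it */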
@@ -1608,7 +1474,7 @@ static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg) | |||
1608 | 1474 | ||
1609 | static int page_mapcount_is_zero(struct page *page) | 1475 | static int page_mapcount_is_zero(struct page *page) |
1610 | { | 1476 | { |
1611 | return !page_mapcount(page); | 1477 | return !total_mapcount(page); |
1612 | } | 1478 | } |
1613 | 1479 | ||
1614 | /** | 1480 | /** |
@@ -1755,7 +1621,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc, | |||
1755 | bool locked) | 1621 | bool locked) |
1756 | { | 1622 | { |
1757 | struct anon_vma *anon_vma; | 1623 | struct anon_vma *anon_vma; |
1758 | pgoff_t pgoff; | 1624 | pgoff_t pgoff_start, pgoff_end; |
1759 | struct anon_vma_chain *avc; | 1625 | struct anon_vma_chain *avc; |
1760 | int ret = SWAP_AGAIN; | 1626 | int ret = SWAP_AGAIN; |
1761 | 1627 | ||
@@ -1769,8 +1635,10 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc, | |||
1769 | if (!anon_vma) | 1635 | if (!anon_vma) |
1770 | return ret; | 1636 | return ret; |
1771 | 1637 | ||
1772 | pgoff = page_to_pgoff(page); | 1638 | pgoff_start = page_to_pgoff(page); |
1773 | anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) { | 1639 | pgoff_end = pgoff_start + hpage_nr_pages(page) - 1; |
1640 | anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, | ||
1641 | pgoff_start, pgoff_end) { | ||
1774 | struct vm_area_struct *vma = avc->vma; | 1642 | struct vm_area_struct *vma = avc->vma; |
1775 | unsigned long address = vma_address(page, vma); | 1643 | unsigned long address = vma_address(page, vma); |
1776 | 1644 | ||
@@ -1808,7 +1676,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc, | |||
1808 | bool locked) | 1676 | bool locked) |
1809 | { | 1677 | { |
1810 | struct address_space *mapping = page_mapping(page); | 1678 | struct address_space *mapping = page_mapping(page); |
1811 | pgoff_t pgoff; | 1679 | pgoff_t pgoff_start, pgoff_end; |
1812 | struct vm_area_struct *vma; | 1680 | struct vm_area_struct *vma; |
1813 | int ret = SWAP_AGAIN; | 1681 | int ret = SWAP_AGAIN; |
1814 | 1682 | ||
@@ -1823,10 +1691,12 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc, | |||
1823 | if (!mapping) | 1691 | if (!mapping) |
1824 | return ret; | 1692 | return ret; |
1825 | 1693 | ||
1826 | pgoff = page_to_pgoff(page); | 1694 | pgoff_start = page_to_pgoff(page); |
1695 | pgoff_end = pgoff_start + hpage_nr_pages(page) - 1; | ||
1827 | if (!locked) | 1696 | if (!locked) |
1828 | i_mmap_lock_read(mapping); | 1697 | i_mmap_lock_read(mapping); |
1829 | vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { | 1698 | vma_interval_tree_foreach(vma, &mapping->i_mmap, |
1699 | pgoff_start, pgoff_end) { | ||
1830 | unsigned long address = vma_address(page, vma); | 1700 | unsigned long address = vma_address(page, vma); |
1831 | 1701 | ||
1832 | cond_resched(); | 1702 | cond_resched(); |
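The two hunks above make the same change in the anon and file walks: the interval tree is queried over [pgoff_start, pgoff_end] instead of a single offset, so an rmap walk over a compound page now visits every VMA that maps any of its subpages. A standalone sketch of the arithmetic (the 512 figure assumes 2MB THPs on 4K base pages):

    #include <stdio.h>

    int main(void)
    {
        unsigned long pgoff_start = 4096; /* hypothetical file offset, in pages */
        unsigned long nr_pages = 512;     /* hpage_nr_pages(): 512 for a 2MB THP, 1 for a base page */
        unsigned long pgoff_end = pgoff_start + nr_pages - 1;

        /* The interval tree is now queried with [start, end], not [start, start]. */
        printf("rmap walk range: [%lu, %lu]\n", pgoff_start, pgoff_end);
        return 0;
    }

For a base page the range degenerates to the old single-offset lookup, so the non-THP behaviour is unchanged.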
diff --git a/mm/shmem.c b/mm/shmem.c index 9c6d22ff44e2..a26649a6633f 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -1908,8 +1908,9 @@ static int synchronous_wake_function(wait_queue_t *wait, unsigned mode, int sync | |||
1908 | return ret; | 1908 | return ret; |
1909 | } | 1909 | } |
1910 | 1910 | ||
1911 | static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 1911 | static int shmem_fault(struct vm_fault *vmf) |
1912 | { | 1912 | { |
1913 | struct vm_area_struct *vma = vmf->vma; | ||
1913 | struct inode *inode = file_inode(vma->vm_file); | 1914 | struct inode *inode = file_inode(vma->vm_file); |
1914 | gfp_t gfp = mapping_gfp_mask(inode->i_mapping); | 1915 | gfp_t gfp = mapping_gfp_mask(inode->i_mapping); |
1915 | enum sgp_type sgp; | 1916 | enum sgp_type sgp; |
@@ -2330,7 +2331,7 @@ shmem_write_begin(struct file *file, struct address_space *mapping, | |||
2330 | pgoff_t index = pos >> PAGE_SHIFT; | 2331 | pgoff_t index = pos >> PAGE_SHIFT; |
2331 | 2332 | ||
2332 | /* i_mutex is held by caller */ | 2333 | /* i_mutex is held by caller */ |
2333 | if (unlikely(info->seals)) { | 2334 | if (unlikely(info->seals & (F_SEAL_WRITE | F_SEAL_GROW))) { |
2334 | if (info->seals & F_SEAL_WRITE) | 2335 | if (info->seals & F_SEAL_WRITE) |
2335 | return -EPERM; | 2336 | return -EPERM; |
2336 | if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size) | 2337 | if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size) |
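After the change, shmem_write_begin() takes the slow path only when a seal it actually enforces is set. The user-visible behaviour of F_SEAL_WRITE is a write failing with EPERM; a minimal demonstration, assuming glibc 2.27+ for the memfd_create() wrapper:

    #define _GNU_SOURCE
    #include <sys/mman.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <string.h>
    #include <stdio.h>
    #include <errno.h>

    int main(void)
    {
        int fd = memfd_create("sealdemo", MFD_ALLOW_SEALING);

        if (fd < 0) {
            perror("memfd_create");
            return 1;
        }
        if (write(fd, "a", 1) != 1)
            perror("first write");
        if (fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE) < 0)
            perror("F_ADD_SEALS");
        if (write(fd, "b", 1) < 0) /* rejected by the seal check above */
            printf("write after F_SEAL_WRITE: %s\n", strerror(errno));
        close(fd);
        return 0;
    }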
diff --git a/mm/slab_common.c b/mm/slab_common.c index 23ff74e61838..09d0e849b07f 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c | |||
@@ -528,6 +528,9 @@ static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work) | |||
528 | 528 | ||
529 | static int shutdown_cache(struct kmem_cache *s) | 529 | static int shutdown_cache(struct kmem_cache *s) |
530 | { | 530 | { |
531 | /* free asan quarantined objects */ | ||
532 | kasan_cache_shutdown(s); | ||
533 | |||
531 | if (__kmem_cache_shutdown(s) != 0) | 534 | if (__kmem_cache_shutdown(s) != 0) |
532 | return -EBUSY; | 535 | return -EBUSY; |
533 | 536 | ||
@@ -816,7 +819,6 @@ void kmem_cache_destroy(struct kmem_cache *s) | |||
816 | get_online_cpus(); | 819 | get_online_cpus(); |
817 | get_online_mems(); | 820 | get_online_mems(); |
818 | 821 | ||
819 | kasan_cache_destroy(s); | ||
820 | mutex_lock(&slab_mutex); | 822 | mutex_lock(&slab_mutex); |
821 | 823 | ||
822 | s->refcount--; | 824 | s->refcount--; |
diff --git a/mm/swap.c b/mm/swap.c --- a/mm/swap.c +++ b/mm/swap.c | |||
@@ -209,9 +209,10 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec, | |||
209 | { | 209 | { |
210 | int *pgmoved = arg; | 210 | int *pgmoved = arg; |
211 | 211 | ||
212 | if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { | 212 | if (PageLRU(page) && !PageUnevictable(page)) { |
213 | enum lru_list lru = page_lru_base_type(page); | 213 | del_page_from_lru_list(page, lruvec, page_lru(page)); |
214 | list_move_tail(&page->lru, &lruvec->lists[lru]); | 214 | ClearPageActive(page); |
215 | add_page_to_lru_list_tail(page, lruvec, page_lru(page)); | ||
215 | (*pgmoved)++; | 216 | (*pgmoved)++; |
216 | } | 217 | } |
217 | } | 218 | } |
@@ -235,7 +236,7 @@ static void pagevec_move_tail(struct pagevec *pvec) | |||
235 | */ | 236 | */ |
236 | void rotate_reclaimable_page(struct page *page) | 237 | void rotate_reclaimable_page(struct page *page) |
237 | { | 238 | { |
238 | if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) && | 239 | if (!PageLocked(page) && !PageDirty(page) && |
239 | !PageUnevictable(page) && PageLRU(page)) { | 240 | !PageUnevictable(page) && PageLRU(page)) { |
240 | struct pagevec *pvec; | 241 | struct pagevec *pvec; |
241 | unsigned long flags; | 242 | unsigned long flags; |
diff --git a/mm/truncate.c b/mm/truncate.c index dd7b24e083c5..f2db67465495 100644 --- a/mm/truncate.c +++ b/mm/truncate.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/task_io_accounting_ops.h> | 20 | #include <linux/task_io_accounting_ops.h> |
21 | #include <linux/buffer_head.h> /* grr. try_to_release_page, | 21 | #include <linux/buffer_head.h> /* grr. try_to_release_page, |
22 | do_invalidatepage */ | 22 | do_invalidatepage */ |
23 | #include <linux/shmem_fs.h> | ||
23 | #include <linux/cleancache.h> | 24 | #include <linux/cleancache.h> |
24 | #include <linux/rmap.h> | 25 | #include <linux/rmap.h> |
25 | #include "internal.h" | 26 | #include "internal.h" |
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 1e5c2f94e8a3..9f0ad2a4f102 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c | |||
@@ -197,22 +197,25 @@ retry: | |||
197 | * retry, dst_vma will be set to NULL and we must lookup again. | 197 | * retry, dst_vma will be set to NULL and we must lookup again. |
198 | */ | 198 | */ |
199 | if (!dst_vma) { | 199 | if (!dst_vma) { |
200 | err = -EINVAL; | 200 | err = -ENOENT; |
201 | dst_vma = find_vma(dst_mm, dst_start); | 201 | dst_vma = find_vma(dst_mm, dst_start); |
202 | if (!dst_vma || !is_vm_hugetlb_page(dst_vma)) | 202 | if (!dst_vma || !is_vm_hugetlb_page(dst_vma)) |
203 | goto out_unlock; | 203 | goto out_unlock; |
204 | |||
205 | if (vma_hpagesize != vma_kernel_pagesize(dst_vma)) | ||
206 | goto out_unlock; | ||
207 | |||
208 | /* | 204 | /* |
209 | * Make sure the remaining dst range is both valid and | 205 | * Only allow __mcopy_atomic_hugetlb on userfaultfd |
210 | * fully within a single existing vma. | 206 | * registered ranges. |
211 | */ | 207 | */ |
208 | if (!dst_vma->vm_userfaultfd_ctx.ctx) | ||
209 | goto out_unlock; | ||
210 | |||
212 | if (dst_start < dst_vma->vm_start || | 211 | if (dst_start < dst_vma->vm_start || |
213 | dst_start + len > dst_vma->vm_end) | 212 | dst_start + len > dst_vma->vm_end) |
214 | goto out_unlock; | 213 | goto out_unlock; |
215 | 214 | ||
215 | err = -EINVAL; | ||
216 | if (vma_hpagesize != vma_kernel_pagesize(dst_vma)) | ||
217 | goto out_unlock; | ||
218 | |||
216 | vm_shared = dst_vma->vm_flags & VM_SHARED; | 219 | vm_shared = dst_vma->vm_flags & VM_SHARED; |
217 | } | 220 | } |
218 | 221 | ||
@@ -221,12 +224,6 @@ retry: | |||
221 | goto out_unlock; | 224 | goto out_unlock; |
222 | 225 | ||
223 | /* | 226 | /* |
224 | * Only allow __mcopy_atomic_hugetlb on userfaultfd registered ranges. | ||
225 | */ | ||
226 | if (!dst_vma->vm_userfaultfd_ctx.ctx) | ||
227 | goto out_unlock; | ||
228 | |||
229 | /* | ||
230 | * If not shared, ensure the dst_vma has an anon_vma. | 227 |
231 | */ | 228 | */ |
232 | err = -ENOMEM; | 229 | err = -ENOMEM; |
@@ -404,22 +401,35 @@ retry: | |||
404 | * Make sure the vma is not shared, that the dst range is | 401 | * Make sure the vma is not shared, that the dst range is |
405 | * both valid and fully within a single existing vma. | 402 | * both valid and fully within a single existing vma. |
406 | */ | 403 | */ |
407 | err = -EINVAL; | 404 | err = -ENOENT; |
408 | dst_vma = find_vma(dst_mm, dst_start); | 405 | dst_vma = find_vma(dst_mm, dst_start); |
409 | if (!dst_vma) | 406 | if (!dst_vma) |
410 | goto out_unlock; | 407 | goto out_unlock; |
411 | /* | 408 | /* |
412 | * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but | 409 | * Be strict and only allow __mcopy_atomic on userfaultfd |
413 | * it will overwrite vm_ops, so vma_is_anonymous must return false. | 410 | * registered ranges to prevent userland errors going |
411 | * unnoticed. As far as the VM consistency is concerned, it | ||
412 | * would be perfectly safe to remove this check, but there's | ||
413 | * no useful usage for __mcopy_atomic outside of userfaultfd | ||
414 | * registered ranges. This is after all why these are ioctls | ||
415 | * belonging to the userfaultfd and not syscalls. | ||
414 | */ | 416 | */ |
415 | if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) && | 417 | if (!dst_vma->vm_userfaultfd_ctx.ctx) |
416 | dst_vma->vm_flags & VM_SHARED)) | ||
417 | goto out_unlock; | 418 | goto out_unlock; |
418 | 419 | ||
419 | if (dst_start < dst_vma->vm_start || | 420 | if (dst_start < dst_vma->vm_start || |
420 | dst_start + len > dst_vma->vm_end) | 421 | dst_start + len > dst_vma->vm_end) |
421 | goto out_unlock; | 422 | goto out_unlock; |
422 | 423 | ||
424 | err = -EINVAL; | ||
425 | /* | ||
426 | * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but | ||
427 | * it will overwrite vm_ops, so vma_is_anonymous must return false. | ||
428 | */ | ||
429 | if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) && | ||
430 | dst_vma->vm_flags & VM_SHARED)) | ||
431 | goto out_unlock; | ||
432 | |||
423 | /* | 433 | /* |
424 | * If this is a HUGETLB vma, pass off to appropriate routine | 434 | * If this is a HUGETLB vma, pass off to appropriate routine |
425 | */ | 435 | */ |
@@ -427,18 +437,6 @@ retry: | |||
427 | return __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start, | 437 | return __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start, |
428 | src_start, len, zeropage); | 438 | src_start, len, zeropage); |
429 | 439 | ||
430 | /* | ||
431 | * Be strict and only allow __mcopy_atomic on userfaultfd | ||
432 | * registered ranges to prevent userland errors going | ||
433 | * unnoticed. As far as the VM consistency is concerned, it | ||
434 | * would be perfectly safe to remove this check, but there's | ||
435 | * no useful usage for __mcopy_atomic outside of userfaultfd | ||
436 | * registered ranges. This is after all why these are ioctls | ||
437 | * belonging to the userfaultfd and not syscalls. | ||
438 | */ | ||
439 | if (!dst_vma->vm_userfaultfd_ctx.ctx) | ||
440 | goto out_unlock; | ||
441 | |||
442 | if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma)) | 440 | if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma)) |
443 | goto out_unlock; | 441 | goto out_unlock; |
444 | 442 | ||
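The reordered checks give -ENOENT a distinct meaning: the destination range is no longer mapped or userfaultfd-registered the way the caller expected (the non-cooperative case), while -EINVAL keeps meaning a malformed request. A hedged sketch of a caller distinguishing the two; the retry policy is illustrative, not mandated by the ABI:

    #include <errno.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/userfaultfd.h>

    /* Returns 0 on success, 1 if the caller should re-resolve the range
     * and retry (the mapping changed under us), -1 on a genuinely bad
     * request. */
    static int uffd_copy_once(int uffd, unsigned long dst, unsigned long src,
                              unsigned long len)
    {
        struct uffdio_copy copy;

        memset(&copy, 0, sizeof(copy));
        copy.dst = dst;
        copy.src = src;
        copy.len = len;

        if (ioctl(uffd, UFFDIO_COPY, &copy) == 0)
            return 0;
        return errno == ENOENT ? 1 : -1;
    }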
diff --git a/mm/util.c b/mm/util.c --- a/mm/util.c +++ b/mm/util.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/mman.h> | 11 | #include <linux/mman.h> |
12 | #include <linux/hugetlb.h> | 12 | #include <linux/hugetlb.h> |
13 | #include <linux/vmalloc.h> | 13 | #include <linux/vmalloc.h> |
14 | #include <linux/userfaultfd_k.h> | ||
14 | 15 | ||
15 | #include <asm/sections.h> | 16 | #include <asm/sections.h> |
16 | #include <linux/uaccess.h> | 17 | #include <linux/uaccess.h> |
@@ -297,14 +298,16 @@ unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr, | |||
297 | unsigned long ret; | 298 | unsigned long ret; |
298 | struct mm_struct *mm = current->mm; | 299 | struct mm_struct *mm = current->mm; |
299 | unsigned long populate; | 300 | unsigned long populate; |
301 | LIST_HEAD(uf); | ||
300 | 302 | ||
301 | ret = security_mmap_file(file, prot, flag); | 303 | ret = security_mmap_file(file, prot, flag); |
302 | if (!ret) { | 304 | if (!ret) { |
303 | if (down_write_killable(&mm->mmap_sem)) | 305 | if (down_write_killable(&mm->mmap_sem)) |
304 | return -EINTR; | 306 | return -EINTR; |
305 | ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff, | 307 | ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff, |
306 | &populate); | 308 | &populate, &uf); |
307 | up_write(&mm->mmap_sem); | 309 | up_write(&mm->mmap_sem); |
310 | userfaultfd_unmap_complete(mm, &uf); | ||
308 | if (populate) | 311 | if (populate) |
309 | mm_populate(ret, populate); | 312 | mm_populate(ret, populate); |
310 | } | 313 | } |
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index d89034a393f2..be93949b4885 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
@@ -1642,6 +1642,11 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, | |||
1642 | for (i = 0; i < area->nr_pages; i++) { | 1642 | for (i = 0; i < area->nr_pages; i++) { |
1643 | struct page *page; | 1643 | struct page *page; |
1644 | 1644 | ||
1645 | if (fatal_signal_pending(current)) { | ||
1646 | area->nr_pages = i; | ||
1647 | goto fail; | ||
1648 | } | ||
1649 | |||
1645 | if (node == NUMA_NO_NODE) | 1650 | if (node == NUMA_NO_NODE) |
1646 | page = alloc_page(alloc_mask); | 1651 | page = alloc_page(alloc_mask); |
1647 | else | 1652 | else |
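The new check lets a large vmalloc abort promptly once the caller has a fatal signal pending, recording how many pages were actually allocated so the failure path frees exactly those. A userspace analog of the pattern, with a signal flag standing in for fatal_signal_pending():

    #include <signal.h>
    #include <stdlib.h>
    #include <stddef.h>

    static volatile sig_atomic_t fatal;

    static void on_sigint(int sig) { (void)sig; fatal = 1; }

    /* Allocate up to n blocks, stopping early if a "fatal" signal arrived.
     * Returns how many were allocated so the caller can free exactly
     * those, mirroring area->nr_pages = i; goto fail; above. */
    static size_t alloc_many(void **slots, size_t n, size_t size)
    {
        size_t i;

        for (i = 0; i < n; i++) {
            if (fatal)
                break;
            slots[i] = malloc(size);
            if (!slots[i])
                break;
        }
        return i;
    }

    int main(void)
    {
        void *slots[1024];
        size_t got;

        signal(SIGINT, on_sigint);
        got = alloc_many(slots, 1024, 4096);
        while (got--)
            free(slots[got]);
        return 0;
    }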
@@ -2654,7 +2659,7 @@ static int s_show(struct seq_file *m, void *p) | |||
2654 | seq_printf(m, " pages=%d", v->nr_pages); | 2659 | seq_printf(m, " pages=%d", v->nr_pages); |
2655 | 2660 | ||
2656 | if (v->phys_addr) | 2661 | if (v->phys_addr) |
2657 | seq_printf(m, " phys=%llx", (unsigned long long)v->phys_addr); | 2662 | seq_printf(m, " phys=%pa", &v->phys_addr); |
2658 | 2663 | ||
2659 | if (v->flags & VM_IOREMAP) | 2664 | if (v->flags & VM_IOREMAP) |
2660 | seq_puts(m, " ioremap"); | 2665 | seq_puts(m, " ioremap"); |
diff --git a/mm/vmpressure.c b/mm/vmpressure.c index 149fdf6c5c56..6063581f705c 100644 --- a/mm/vmpressure.c +++ b/mm/vmpressure.c | |||
@@ -112,9 +112,16 @@ static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned, | |||
112 | unsigned long reclaimed) | 112 | unsigned long reclaimed) |
113 | { | 113 | { |
114 | unsigned long scale = scanned + reclaimed; | 114 | unsigned long scale = scanned + reclaimed; |
115 | unsigned long pressure; | 115 | unsigned long pressure = 0; |
116 | 116 | ||
117 | /* | 117 | /* |
118 | * reclaimed can be greater than scanned in cases | ||
119 | * like THP, where the scanned is 1 and reclaimed | ||
120 | * could be 512 | ||
121 | */ | ||
122 | if (reclaimed >= scanned) | ||
123 | goto out; | ||
124 | /* | ||
118 | * We calculate the ratio (in percents) of how many pages were | 125 | * We calculate the ratio (in percents) of how many pages were |
119 | * scanned vs. reclaimed in a given time frame (window). Note that | 126 | * scanned vs. reclaimed in a given time frame (window). Note that |
120 | * time is in VM reclaimer's "ticks", i.e. number of pages | 127 | * time is in VM reclaimer's "ticks", i.e. number of pages |
@@ -124,6 +131,7 @@ static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned, | |||
124 | pressure = scale - (reclaimed * scale / scanned); | 131 | pressure = scale - (reclaimed * scale / scanned); |
125 | pressure = pressure * 100 / scale; | 132 | pressure = pressure * 100 / scale; |
126 | 133 | ||
134 | out: | ||
127 | pr_debug("%s: %3lu (s: %lu r: %lu)\n", __func__, pressure, | 135 | pr_debug("%s: %3lu (s: %lu r: %lu)\n", __func__, pressure, |
128 | scanned, reclaimed); | 136 | scanned, reclaimed); |
129 | 137 | ||
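The new guard is needed because reclaimed can legitimately exceed scanned: reclaiming one THP is counted as 1 page scanned but 512 base pages reclaimed, and the unsigned subtraction in the formula would wrap. The computation, reproduced standalone:

    #include <stdio.h>

    static unsigned long calc_pressure(unsigned long scanned, unsigned long reclaimed)
    {
        unsigned long scale = scanned + reclaimed;
        unsigned long pressure = 0;

        if (reclaimed >= scanned)   /* the new guard */
            return pressure;
        pressure = scale - (reclaimed * scale / scanned);
        return pressure * 100 / scale;
    }

    int main(void)
    {
        printf("%lu\n", calc_pressure(100, 20)); /* 80: most scanned pages were not reclaimed */
        printf("%lu\n", calc_pressure(1, 512));  /* 0 now; wrapped to a huge value before the guard */
        return 0;
    }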
diff --git a/mm/vmscan.c b/mm/vmscan.c index 7bb23ff229b6..70aa739c6b68 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -87,6 +87,7 @@ struct scan_control { | |||
87 | /* The highest zone to isolate pages for reclaim from */ | 87 | /* The highest zone to isolate pages for reclaim from */ |
88 | enum zone_type reclaim_idx; | 88 | enum zone_type reclaim_idx; |
89 | 89 | ||
90 | /* Writepage batching in laptop mode; RECLAIM_WRITE */ | ||
90 | unsigned int may_writepage:1; | 91 | unsigned int may_writepage:1; |
91 | 92 | ||
92 | /* Can mapped pages be reclaimed? */ | 93 | /* Can mapped pages be reclaimed? */ |
@@ -1055,6 +1056,15 @@ static unsigned long shrink_page_list(struct list_head *page_list, | |||
1055 | * throttling so we could easily OOM just because too many | 1056 | * throttling so we could easily OOM just because too many |
1056 | * pages are in writeback and there is nothing else to | 1057 | * pages are in writeback and there is nothing else to |
1057 | * reclaim. Wait for the writeback to complete. | 1058 | * reclaim. Wait for the writeback to complete. |
1059 | * | ||
1060 | * In cases 1) and 2) we activate the pages to get them out of | ||
1061 | * the way while we continue scanning for clean pages on the | ||
1062 | * inactive list and refilling from the active list. The | ||
1063 | * observation here is that waiting for disk writes is more | ||
1064 | * expensive than potentially causing reloads down the line. | ||
1065 | * Since they're marked for immediate reclaim, they won't put | ||
1066 | * memory pressure on the cache working set any longer than it | ||
1067 | * takes to write them to disk. | ||
1058 | */ | 1068 | */ |
1059 | if (PageWriteback(page)) { | 1069 | if (PageWriteback(page)) { |
1060 | /* Case 1 above */ | 1070 | /* Case 1 above */ |
@@ -1062,7 +1072,7 @@ static unsigned long shrink_page_list(struct list_head *page_list, | |||
1062 | PageReclaim(page) && | 1072 | PageReclaim(page) && |
1063 | test_bit(PGDAT_WRITEBACK, &pgdat->flags)) { | 1073 | test_bit(PGDAT_WRITEBACK, &pgdat->flags)) { |
1064 | nr_immediate++; | 1074 | nr_immediate++; |
1065 | goto keep_locked; | 1075 | goto activate_locked; |
1066 | 1076 | ||
1067 | /* Case 2 above */ | 1077 | /* Case 2 above */ |
1068 | } else if (sane_reclaim(sc) || | 1078 | } else if (sane_reclaim(sc) || |
@@ -1080,7 +1090,7 @@ static unsigned long shrink_page_list(struct list_head *page_list, | |||
1080 | */ | 1090 | */ |
1081 | SetPageReclaim(page); | 1091 | SetPageReclaim(page); |
1082 | nr_writeback++; | 1092 | nr_writeback++; |
1083 | goto keep_locked; | 1093 | goto activate_locked; |
1084 | 1094 | ||
1085 | /* Case 3 above */ | 1095 | /* Case 3 above */ |
1086 | } else { | 1096 | } else { |
@@ -1152,13 +1162,18 @@ static unsigned long shrink_page_list(struct list_head *page_list, | |||
1152 | 1162 | ||
1153 | if (PageDirty(page)) { | 1163 | if (PageDirty(page)) { |
1154 | /* | 1164 | /* |
1155 | * Only kswapd can writeback filesystem pages to | 1165 | * Only kswapd can writeback filesystem pages |
1156 | * avoid risk of stack overflow but only writeback | 1166 | * to avoid risk of stack overflow. But avoid |
1157 | * if many dirty pages have been encountered. | 1167 | * injecting inefficient single-page IO into |
1168 | * flusher writeback as much as possible: only | ||
1169 | * write pages when we've encountered many | ||
1170 | * dirty pages, and when we've already scanned | ||
1171 | * the rest of the LRU for clean pages and see | ||
1172 | * the same dirty pages again (PageReclaim). | ||
1158 | */ | 1173 | */ |
1159 | if (page_is_file_cache(page) && | 1174 | if (page_is_file_cache(page) && |
1160 | (!current_is_kswapd() || | 1175 | (!current_is_kswapd() || !PageReclaim(page) || |
1161 | !test_bit(PGDAT_DIRTY, &pgdat->flags))) { | 1176 | !test_bit(PGDAT_DIRTY, &pgdat->flags))) { |
1162 | /* | 1177 | /* |
1163 | * Immediately reclaim when written back. | 1178 | * Immediately reclaim when written back. |
1164 | * Similar in principle to deactivate_page() | 1179 |
@@ -1168,7 +1183,7 @@ static unsigned long shrink_page_list(struct list_head *page_list, | |||
1168 | inc_node_page_state(page, NR_VMSCAN_IMMEDIATE); | 1183 | inc_node_page_state(page, NR_VMSCAN_IMMEDIATE); |
1169 | SetPageReclaim(page); | 1184 | SetPageReclaim(page); |
1170 | 1185 | ||
1171 | goto keep_locked; | 1186 | goto activate_locked; |
1172 | } | 1187 | } |
1173 | 1188 | ||
1174 | if (references == PAGEREF_RECLAIM_CLEAN) | 1189 | if (references == PAGEREF_RECLAIM_CLEAN) |
@@ -1373,13 +1388,10 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode) | |||
1373 | * wants to isolate pages it will be able to operate on without | 1388 | * wants to isolate pages it will be able to operate on without |
1374 | * blocking - clean pages for the most part. | 1389 | * blocking - clean pages for the most part. |
1375 | * | 1390 | * |
1376 | * ISOLATE_CLEAN means that only clean pages should be isolated. This | ||
1377 | * is used by reclaim when it cannot write to backing storage | ||
1378 | * | ||
1379 | * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants pages | 1391 | * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants pages |
1380 | * that it is possible to migrate without blocking | 1392 | * that it is possible to migrate without blocking |
1381 | */ | 1393 | */ |
1382 | if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) { | 1394 | if (mode & ISOLATE_ASYNC_MIGRATE) { |
1383 | /* All the caller can do on PageWriteback is block */ | 1395 | /* All the caller can do on PageWriteback is block */ |
1384 | if (PageWriteback(page)) | 1396 | if (PageWriteback(page)) |
1385 | return ret; | 1397 | return ret; |
@@ -1387,10 +1399,6 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode) | |||
1387 | if (PageDirty(page)) { | 1399 | if (PageDirty(page)) { |
1388 | struct address_space *mapping; | 1400 | struct address_space *mapping; |
1389 | 1401 | ||
1390 | /* ISOLATE_CLEAN means only clean pages */ | ||
1391 | if (mode & ISOLATE_CLEAN) | ||
1392 | return ret; | ||
1393 | |||
1394 | /* | 1402 | /* |
1395 | * Only pages without mappings or that have a | 1403 | * Only pages without mappings or that have a |
1396 | * ->migratepage callback are possible to migrate | 1404 | * ->migratepage callback are possible to migrate |
@@ -1731,8 +1739,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, | |||
1731 | 1739 | ||
1732 | if (!sc->may_unmap) | 1740 | if (!sc->may_unmap) |
1733 | isolate_mode |= ISOLATE_UNMAPPED; | 1741 | isolate_mode |= ISOLATE_UNMAPPED; |
1734 | if (!sc->may_writepage) | ||
1735 | isolate_mode |= ISOLATE_CLEAN; | ||
1736 | 1742 | ||
1737 | spin_lock_irq(&pgdat->lru_lock); | 1743 | spin_lock_irq(&pgdat->lru_lock); |
1738 | 1744 | ||
@@ -1806,12 +1812,20 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, | |||
1806 | 1812 | ||
1807 | /* | 1813 | /* |
1808 | * If dirty pages are scanned that are not queued for IO, it | 1814 | * If dirty pages are scanned that are not queued for IO, it |
1809 | * implies that flushers are not keeping up. In this case, flag | 1815 | * implies that flushers are not doing their job. This can |
1810 | * the pgdat PGDAT_DIRTY and kswapd will start writing pages from | 1816 | * happen when memory pressure pushes dirty pages to the end of |
1811 | * reclaim context. | 1817 | * the LRU before the dirty limits are breached and the dirty |
1818 | * data has expired. It can also happen when the proportion of | ||
1819 | * dirty pages grows not through writes but through memory | ||
1820 | * pressure reclaiming all the clean cache. And in some cases, | ||
1821 | * the flushers simply cannot keep up with the allocation | ||
1822 | * rate. Nudge the flusher threads in case they are asleep, but | ||
1823 | * also allow kswapd to start writing pages during reclaim. | ||
1812 | */ | 1824 | */ |
1813 | if (stat.nr_unqueued_dirty == nr_taken) | 1825 | if (stat.nr_unqueued_dirty == nr_taken) { |
1826 | wakeup_flusher_threads(0, WB_REASON_VMSCAN); | ||
1814 | set_bit(PGDAT_DIRTY, &pgdat->flags); | 1827 | set_bit(PGDAT_DIRTY, &pgdat->flags); |
1828 | } | ||
1815 | 1829 | ||
1816 | /* | 1830 | /* |
1817 | * If kswapd scans pages marked for immediate | 1831 |
@@ -1929,8 +1943,6 @@ static void shrink_active_list(unsigned long nr_to_scan, | |||
1929 | 1943 | ||
1930 | if (!sc->may_unmap) | 1944 | if (!sc->may_unmap) |
1931 | isolate_mode |= ISOLATE_UNMAPPED; | 1945 | isolate_mode |= ISOLATE_UNMAPPED; |
1932 | if (!sc->may_writepage) | ||
1933 | isolate_mode |= ISOLATE_CLEAN; | ||
1934 | 1946 | ||
1935 | spin_lock_irq(&pgdat->lru_lock); | 1947 | spin_lock_irq(&pgdat->lru_lock); |
1936 | 1948 | ||
@@ -2759,8 +2771,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, | |||
2759 | struct scan_control *sc) | 2771 | struct scan_control *sc) |
2760 | { | 2772 | { |
2761 | int initial_priority = sc->priority; | 2773 | int initial_priority = sc->priority; |
2762 | unsigned long total_scanned = 0; | ||
2763 | unsigned long writeback_threshold; | ||
2764 | retry: | 2774 | retry: |
2765 | delayacct_freepages_start(); | 2775 | delayacct_freepages_start(); |
2766 | 2776 | ||
@@ -2773,7 +2783,6 @@ retry: | |||
2773 | sc->nr_scanned = 0; | 2783 | sc->nr_scanned = 0; |
2774 | shrink_zones(zonelist, sc); | 2784 | shrink_zones(zonelist, sc); |
2775 | 2785 | ||
2776 | total_scanned += sc->nr_scanned; | ||
2777 | if (sc->nr_reclaimed >= sc->nr_to_reclaim) | 2786 | if (sc->nr_reclaimed >= sc->nr_to_reclaim) |
2778 | break; | 2787 | break; |
2779 | 2788 | ||
@@ -2786,20 +2795,6 @@ retry: | |||
2786 | */ | 2795 | */ |
2787 | if (sc->priority < DEF_PRIORITY - 2) | 2796 | if (sc->priority < DEF_PRIORITY - 2) |
2788 | sc->may_writepage = 1; | 2797 | sc->may_writepage = 1; |
2789 | |||
2790 | /* | ||
2791 | * Try to write back as many pages as we just scanned. This | ||
2792 | * tends to cause slow streaming writers to write data to the | ||
2793 | * disk smoothly, at the dirtying rate, which is nice. But | ||
2794 | * that's undesirable in laptop mode, where we *want* lumpy | ||
2795 | * writeout. So in laptop mode, write out the whole world. | ||
2796 | */ | ||
2797 | writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2; | ||
2798 | if (total_scanned > writeback_threshold) { | ||
2799 | wakeup_flusher_threads(laptop_mode ? 0 : total_scanned, | ||
2800 | WB_REASON_TRY_TO_FREE_PAGES); | ||
2801 | sc->may_writepage = 1; | ||
2802 | } | ||
2803 | } while (--sc->priority >= 0); | 2798 | } while (--sc->priority >= 0); |
2804 | 2799 | ||
2805 | delayacct_freepages_end(); | 2800 | delayacct_freepages_end(); |
@@ -3101,6 +3096,7 @@ static bool zone_balanced(struct zone *zone, int order, int classzone_idx) | |||
3101 | */ | 3096 | */ |
3102 | clear_bit(PGDAT_CONGESTED, &zone->zone_pgdat->flags); | 3097 | clear_bit(PGDAT_CONGESTED, &zone->zone_pgdat->flags); |
3103 | clear_bit(PGDAT_DIRTY, &zone->zone_pgdat->flags); | 3098 | clear_bit(PGDAT_DIRTY, &zone->zone_pgdat->flags); |
3099 | clear_bit(PGDAT_WRITEBACK, &zone->zone_pgdat->flags); | ||
3104 | 3100 | ||
3105 | return true; | 3101 | return true; |
3106 | } | 3102 | } |
diff --git a/mm/workingset.c b/mm/workingset.c index a67f5796b995..79ed5364375d 100644 --- a/mm/workingset.c +++ b/mm/workingset.c | |||
@@ -6,6 +6,7 @@ | |||
6 | 6 | ||
7 | #include <linux/memcontrol.h> | 7 | #include <linux/memcontrol.h> |
8 | #include <linux/writeback.h> | 8 | #include <linux/writeback.h> |
9 | #include <linux/shmem_fs.h> | ||
9 | #include <linux/pagemap.h> | 10 | #include <linux/pagemap.h> |
10 | #include <linux/atomic.h> | 11 | #include <linux/atomic.h> |
11 | #include <linux/module.h> | 12 | #include <linux/module.h> |
diff --git a/mm/z3fold.c b/mm/z3fold.c index 207e5ddc87a2..8970a2fd3b1a 100644 --- a/mm/z3fold.c +++ b/mm/z3fold.c | |||
@@ -34,29 +34,62 @@ | |||
34 | /***************** | 34 | /***************** |
35 | * Structures | 35 | * Structures |
36 | *****************/ | 36 | *****************/ |
37 | struct z3fold_pool; | ||
38 | struct z3fold_ops { | ||
39 | int (*evict)(struct z3fold_pool *pool, unsigned long handle); | ||
40 | }; | ||
41 | |||
42 | enum buddy { | ||
43 | HEADLESS = 0, | ||
44 | FIRST, | ||
45 | MIDDLE, | ||
46 | LAST, | ||
47 | BUDDIES_MAX | ||
48 | }; | ||
49 | |||
50 | /* | ||
51 | * struct z3fold_header - z3fold page metadata occupying the first chunk of each | ||
52 | * z3fold page, except for HEADLESS pages | ||
53 | * @buddy: links the z3fold page into the relevant list in the pool | ||
54 | * @page_lock: per-page lock | ||
55 | * @refcount: reference count for the z3fold page | ||
56 | * @first_chunks: the size of the first buddy in chunks, 0 if free | ||
57 | * @middle_chunks: the size of the middle buddy in chunks, 0 if free | ||
58 | * @last_chunks: the size of the last buddy in chunks, 0 if free | ||
59 | * @first_num: the starting number (for the first handle) | ||
60 | */ | ||
61 | struct z3fold_header { | ||
62 | struct list_head buddy; | ||
63 | spinlock_t page_lock; | ||
64 | struct kref refcount; | ||
65 | unsigned short first_chunks; | ||
66 | unsigned short middle_chunks; | ||
67 | unsigned short last_chunks; | ||
68 | unsigned short start_middle; | ||
69 | unsigned short first_num:2; | ||
70 | }; | ||
71 | |||
37 | /* | 72 | /* |
38 | * NCHUNKS_ORDER determines the internal allocation granularity, effectively | 73 | * NCHUNKS_ORDER determines the internal allocation granularity, effectively |
39 | * adjusting internal fragmentation. It also determines the number of | 74 | * adjusting internal fragmentation. It also determines the number of |
40 | * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the | 75 | * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the |
41 | * allocation granularity will be in chunks of size PAGE_SIZE/64. As one chunk | 76 | * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks |
42 | * in allocated page is occupied by z3fold header, NCHUNKS will be calculated | 77 | * in the beginning of an allocated page are occupied by z3fold header, so |
43 | * to 63 which shows the max number of free chunks in z3fold page, also there | 78 | * NCHUNKS will be calculated to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y), |
44 | * will be 63 freelists per pool. | 79 | * which shows the max number of free chunks in z3fold page, also there will |
80 | * be 63, or 62, respectively, freelists per pool. | ||
45 | */ | 81 | */ |
46 | #define NCHUNKS_ORDER 6 | 82 | #define NCHUNKS_ORDER 6 |
47 | 83 | ||
48 | #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER) | 84 | #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER) |
49 | #define CHUNK_SIZE (1 << CHUNK_SHIFT) | 85 | #define CHUNK_SIZE (1 << CHUNK_SHIFT) |
50 | #define ZHDR_SIZE_ALIGNED CHUNK_SIZE | 86 | #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE) |
87 | #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT) | ||
88 | #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT) | ||
51 | #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT) | 89 | #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT) |
52 | 90 | ||
53 | #define BUDDY_MASK (0x3) | 91 | #define BUDDY_MASK (0x3) |
54 | 92 | ||
55 | struct z3fold_pool; | ||
56 | struct z3fold_ops { | ||
57 | int (*evict)(struct z3fold_pool *pool, unsigned long handle); | ||
58 | }; | ||
59 | |||
60 | /** | 93 | /** |
61 | * struct z3fold_pool - stores metadata for each z3fold pool | 94 | * struct z3fold_pool - stores metadata for each z3fold pool |
62 | * @lock: protects all pool fields and first|last_chunk fields of any | 95 | * @lock: protects all pool fields and first|last_chunk fields of any |
@@ -64,8 +97,6 @@ struct z3fold_ops { | |||
64 | * @unbuddied: array of lists tracking z3fold pages that contain 2- buddies; | 97 | * @unbuddied: array of lists tracking z3fold pages that contain 2- buddies; |
65 | * the lists each z3fold page is added to depends on the size of | 98 | * the lists each z3fold page is added to depends on the size of |
66 | * its free region. | 99 | * its free region. |
67 | * @buddied: list tracking the z3fold pages that contain 3 buddies; | ||
68 | * these z3fold pages are full | ||
69 | * @lru: list tracking the z3fold pages in LRU order by most recently | 100 | * @lru: list tracking the z3fold pages in LRU order by most recently |
70 | * added buddy. | 101 | * added buddy. |
71 | * @pages_nr: number of z3fold pages in the pool. | 102 | * @pages_nr: number of z3fold pages in the pool. |
@@ -78,49 +109,22 @@ struct z3fold_ops { | |||
78 | struct z3fold_pool { | 109 | struct z3fold_pool { |
79 | spinlock_t lock; | 110 | spinlock_t lock; |
80 | struct list_head unbuddied[NCHUNKS]; | 111 | struct list_head unbuddied[NCHUNKS]; |
81 | struct list_head buddied; | ||
82 | struct list_head lru; | 112 | struct list_head lru; |
83 | u64 pages_nr; | 113 | atomic64_t pages_nr; |
84 | const struct z3fold_ops *ops; | 114 | const struct z3fold_ops *ops; |
85 | struct zpool *zpool; | 115 | struct zpool *zpool; |
86 | const struct zpool_ops *zpool_ops; | 116 | const struct zpool_ops *zpool_ops; |
87 | }; | 117 | }; |
88 | 118 | ||
89 | enum buddy { | ||
90 | HEADLESS = 0, | ||
91 | FIRST, | ||
92 | MIDDLE, | ||
93 | LAST, | ||
94 | BUDDIES_MAX | ||
95 | }; | ||
96 | |||
97 | /* | ||
98 | * struct z3fold_header - z3fold page metadata occupying the first chunk of each | ||
99 | * z3fold page, except for HEADLESS pages | ||
100 | * @buddy: links the z3fold page into the relevant list in the pool | ||
101 | * @first_chunks: the size of the first buddy in chunks, 0 if free | ||
102 | * @middle_chunks: the size of the middle buddy in chunks, 0 if free | ||
103 | * @last_chunks: the size of the last buddy in chunks, 0 if free | ||
104 | * @first_num: the starting number (for the first handle) | ||
105 | */ | ||
106 | struct z3fold_header { | ||
107 | struct list_head buddy; | ||
108 | unsigned short first_chunks; | ||
109 | unsigned short middle_chunks; | ||
110 | unsigned short last_chunks; | ||
111 | unsigned short start_middle; | ||
112 | unsigned short first_num:2; | ||
113 | }; | ||
114 | |||
115 | /* | 119 | /* |
116 | * Internal z3fold page flags | 120 | * Internal z3fold page flags |
117 | */ | 121 | */ |
118 | enum z3fold_page_flags { | 122 | enum z3fold_page_flags { |
119 | UNDER_RECLAIM = 0, | 123 | PAGE_HEADLESS = 0, |
120 | PAGE_HEADLESS, | ||
121 | MIDDLE_CHUNK_MAPPED, | 124 | MIDDLE_CHUNK_MAPPED, |
122 | }; | 125 | }; |
123 | 126 | ||
127 | |||
124 | /***************** | 128 | /***************** |
125 | * Helpers | 129 | * Helpers |
126 | *****************/ | 130 | *****************/ |
@@ -140,10 +144,11 @@ static struct z3fold_header *init_z3fold_page(struct page *page) | |||
140 | struct z3fold_header *zhdr = page_address(page); | 144 | struct z3fold_header *zhdr = page_address(page); |
141 | 145 | ||
142 | INIT_LIST_HEAD(&page->lru); | 146 | INIT_LIST_HEAD(&page->lru); |
143 | clear_bit(UNDER_RECLAIM, &page->private); | ||
144 | clear_bit(PAGE_HEADLESS, &page->private); | 147 | clear_bit(PAGE_HEADLESS, &page->private); |
145 | clear_bit(MIDDLE_CHUNK_MAPPED, &page->private); | 148 | clear_bit(MIDDLE_CHUNK_MAPPED, &page->private); |
146 | 149 | ||
150 | spin_lock_init(&zhdr->page_lock); | ||
151 | kref_init(&zhdr->refcount); | ||
147 | zhdr->first_chunks = 0; | 152 | zhdr->first_chunks = 0; |
148 | zhdr->middle_chunks = 0; | 153 | zhdr->middle_chunks = 0; |
149 | zhdr->last_chunks = 0; | 154 | zhdr->last_chunks = 0; |
@@ -154,9 +159,36 @@ static struct z3fold_header *init_z3fold_page(struct page *page) | |||
154 | } | 159 | } |
155 | 160 | ||
156 | /* Resets the struct page fields and frees the page */ | 161 | /* Resets the struct page fields and frees the page */ |
157 | static void free_z3fold_page(struct z3fold_header *zhdr) | 162 | static void free_z3fold_page(struct page *page) |
163 | { | ||
164 | __free_page(page); | ||
165 | } | ||
166 | |||
167 | static void release_z3fold_page(struct kref *ref) | ||
168 | { | ||
169 | struct z3fold_header *zhdr; | ||
170 | struct page *page; | ||
171 | |||
172 | zhdr = container_of(ref, struct z3fold_header, refcount); | ||
173 | page = virt_to_page(zhdr); | ||
174 | |||
175 | if (!list_empty(&zhdr->buddy)) | ||
176 | list_del(&zhdr->buddy); | ||
177 | if (!list_empty(&page->lru)) | ||
178 | list_del(&page->lru); | ||
179 | free_z3fold_page(page); | ||
180 | } | ||
181 | |||
182 | /* Lock a z3fold page */ | ||
183 | static inline void z3fold_page_lock(struct z3fold_header *zhdr) | ||
184 | { | ||
185 | spin_lock(&zhdr->page_lock); | ||
186 | } | ||
187 | |||
188 | /* Unlock a z3fold page */ | ||
189 | static inline void z3fold_page_unlock(struct z3fold_header *zhdr) | ||
158 | { | 190 | { |
159 | __free_page(virt_to_page(zhdr)); | 191 | spin_unlock(&zhdr->page_lock); |
160 | } | 192 | } |
161 | 193 | ||
162 | /* | 194 | /* |
@@ -204,9 +236,10 @@ static int num_free_chunks(struct z3fold_header *zhdr) | |||
204 | */ | 236 | */ |
205 | if (zhdr->middle_chunks != 0) { | 237 | if (zhdr->middle_chunks != 0) { |
206 | int nfree_before = zhdr->first_chunks ? | 238 | int nfree_before = zhdr->first_chunks ? |
207 | 0 : zhdr->start_middle - 1; | 239 | 0 : zhdr->start_middle - ZHDR_CHUNKS; |
208 | int nfree_after = zhdr->last_chunks ? | 240 | int nfree_after = zhdr->last_chunks ? |
209 | 0 : NCHUNKS - zhdr->start_middle - zhdr->middle_chunks; | 241 | 0 : TOTAL_CHUNKS - |
242 | (zhdr->start_middle + zhdr->middle_chunks); | ||
210 | nfree = max(nfree_before, nfree_after); | 243 | nfree = max(nfree_before, nfree_after); |
211 | } else | 244 | } else |
212 | nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks; | 245 | nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks; |
@@ -236,9 +269,8 @@ static struct z3fold_pool *z3fold_create_pool(gfp_t gfp, | |||
236 | spin_lock_init(&pool->lock); | 269 | spin_lock_init(&pool->lock); |
237 | for_each_unbuddied_list(i, 0) | 270 | for_each_unbuddied_list(i, 0) |
238 | INIT_LIST_HEAD(&pool->unbuddied[i]); | 271 | INIT_LIST_HEAD(&pool->unbuddied[i]); |
239 | INIT_LIST_HEAD(&pool->buddied); | ||
240 | INIT_LIST_HEAD(&pool->lru); | 272 | INIT_LIST_HEAD(&pool->lru); |
241 | pool->pages_nr = 0; | 273 | atomic64_set(&pool->pages_nr, 0); |
242 | pool->ops = ops; | 274 | pool->ops = ops; |
243 | return pool; | 275 | return pool; |
244 | } | 276 | } |
@@ -254,25 +286,58 @@ static void z3fold_destroy_pool(struct z3fold_pool *pool) | |||
254 | kfree(pool); | 286 | kfree(pool); |
255 | } | 287 | } |
256 | 288 | ||
289 | static inline void *mchunk_memmove(struct z3fold_header *zhdr, | ||
290 | unsigned short dst_chunk) | ||
291 | { | ||
292 | void *beg = zhdr; | ||
293 | return memmove(beg + (dst_chunk << CHUNK_SHIFT), | ||
294 | beg + (zhdr->start_middle << CHUNK_SHIFT), | ||
295 | zhdr->middle_chunks << CHUNK_SHIFT); | ||
296 | } | ||
297 | |||
298 | #define BIG_CHUNK_GAP 3 | ||
257 | /* Has to be called with lock held */ | 299 | /* Has to be called with lock held */ |
258 | static int z3fold_compact_page(struct z3fold_header *zhdr) | 300 | static int z3fold_compact_page(struct z3fold_header *zhdr) |
259 | { | 301 | { |
260 | struct page *page = virt_to_page(zhdr); | 302 | struct page *page = virt_to_page(zhdr); |
261 | void *beg = zhdr; | ||
262 | 303 | ||
304 | if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private)) | ||
305 | return 0; /* can't move middle chunk, it's used */ | ||
306 | |||
307 | if (zhdr->middle_chunks == 0) | ||
308 | return 0; /* nothing to compact */ | ||
263 | 309 | ||
264 | if (!test_bit(MIDDLE_CHUNK_MAPPED, &page->private) && | 310 | if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) { |
265 | zhdr->middle_chunks != 0 && | 311 | /* move to the beginning */ |
266 | zhdr->first_chunks == 0 && zhdr->last_chunks == 0) { | 312 | mchunk_memmove(zhdr, ZHDR_CHUNKS); |
267 | memmove(beg + ZHDR_SIZE_ALIGNED, | ||
268 | beg + (zhdr->start_middle << CHUNK_SHIFT), | ||
269 | zhdr->middle_chunks << CHUNK_SHIFT); | ||
270 | zhdr->first_chunks = zhdr->middle_chunks; | 313 | zhdr->first_chunks = zhdr->middle_chunks; |
271 | zhdr->middle_chunks = 0; | 314 | zhdr->middle_chunks = 0; |
272 | zhdr->start_middle = 0; | 315 | zhdr->start_middle = 0; |
273 | zhdr->first_num++; | 316 | zhdr->first_num++; |
274 | return 1; | 317 | return 1; |
275 | } | 318 | } |
319 | |||
320 | /* | ||
321 | * moving data is expensive, so let's only do that if | ||
322 | * there's substantial gain (at least BIG_CHUNK_GAP chunks) | ||
323 | */ | ||
324 | if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 && | ||
325 | zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >= | ||
326 | BIG_CHUNK_GAP) { | ||
327 | mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS); | ||
328 | zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS; | ||
329 | return 1; | ||
330 | } else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 && | ||
331 | TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle | ||
332 | + zhdr->middle_chunks) >= | ||
333 | BIG_CHUNK_GAP) { | ||
334 | unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks - | ||
335 | zhdr->middle_chunks; | ||
336 | mchunk_memmove(zhdr, new_start); | ||
337 | zhdr->start_middle = new_start; | ||
338 | return 1; | ||
339 | } | ||
340 | |||
276 | return 0; | 341 | return 0; |
277 | } | 342 | } |
278 | 343 | ||
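BIG_CHUNK_GAP is a cost/benefit threshold for the two new partial-compaction cases: the memmove is paid for only when it recovers at least three contiguous chunks. A worked example with hypothetical layout values:

    #include <stdio.h>

    #define BIG_CHUNK_GAP 3
    #define ZHDR_CHUNKS 1   /* assumed: header fits in one chunk */

    int main(void)
    {
        unsigned short first_chunks = 4, start_middle = 10;
        unsigned short gap = start_middle - (first_chunks + ZHDR_CHUNKS);

        if (gap >= BIG_CHUNK_GAP)
            printf("compact: memmove middle down to chunk %u (gap of %u chunks)\n",
                   first_chunks + ZHDR_CHUNKS, gap);
        else
            printf("leave as is: a %u-chunk gap does not pay for the copy\n", gap);
        return 0;
    }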
@@ -313,50 +378,63 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp, | |||
313 | bud = HEADLESS; | 378 | bud = HEADLESS; |
314 | else { | 379 | else { |
315 | chunks = size_to_chunks(size); | 380 | chunks = size_to_chunks(size); |
316 | spin_lock(&pool->lock); | ||
317 | 381 | ||
318 | /* First, try to find an unbuddied z3fold page. */ | 382 | /* First, try to find an unbuddied z3fold page. */ |
319 | zhdr = NULL; | 383 | zhdr = NULL; |
320 | for_each_unbuddied_list(i, chunks) { | 384 | for_each_unbuddied_list(i, chunks) { |
321 | if (!list_empty(&pool->unbuddied[i])) { | 385 | spin_lock(&pool->lock); |
322 | zhdr = list_first_entry(&pool->unbuddied[i], | 386 | zhdr = list_first_entry_or_null(&pool->unbuddied[i], |
323 | struct z3fold_header, buddy); | 387 | struct z3fold_header, buddy); |
324 | page = virt_to_page(zhdr); | 388 | if (!zhdr) { |
325 | if (zhdr->first_chunks == 0) { | 389 | spin_unlock(&pool->lock); |
326 | if (zhdr->middle_chunks != 0 && | 390 | continue; |
327 | chunks >= zhdr->start_middle) | 391 | } |
328 | bud = LAST; | 392 | kref_get(&zhdr->refcount); |
329 | else | 393 | list_del_init(&zhdr->buddy); |
330 | bud = FIRST; | 394 | spin_unlock(&pool->lock); |
331 | } else if (zhdr->last_chunks == 0) | 395 | |
396 | page = virt_to_page(zhdr); | ||
397 | z3fold_page_lock(zhdr); | ||
398 | if (zhdr->first_chunks == 0) { | ||
399 | if (zhdr->middle_chunks != 0 && | ||
400 | chunks >= zhdr->start_middle) | ||
332 | bud = LAST; | 401 | bud = LAST; |
333 | else if (zhdr->middle_chunks == 0) | 402 | else |
334 | bud = MIDDLE; | 403 | bud = FIRST; |
335 | else { | 404 | } else if (zhdr->last_chunks == 0) |
336 | pr_err("No free chunks in unbuddied\n"); | 405 | bud = LAST; |
337 | WARN_ON(1); | 406 | else if (zhdr->middle_chunks == 0) |
338 | continue; | 407 | bud = MIDDLE; |
339 | } | 408 | else { |
340 | list_del(&zhdr->buddy); | 409 | z3fold_page_unlock(zhdr); |
341 | goto found; | 410 | spin_lock(&pool->lock); |
411 | if (kref_put(&zhdr->refcount, | ||
412 | release_z3fold_page)) | ||
413 | atomic64_dec(&pool->pages_nr); | ||
414 | spin_unlock(&pool->lock); | ||
415 | pr_err("No free chunks in unbuddied\n"); | ||
416 | WARN_ON(1); | ||
417 | continue; | ||
342 | } | 418 | } |
419 | goto found; | ||
343 | } | 420 | } |
344 | bud = FIRST; | 421 | bud = FIRST; |
345 | spin_unlock(&pool->lock); | ||
346 | } | 422 | } |
347 | 423 | ||
348 | /* Couldn't find unbuddied z3fold page, create new one */ | 424 | /* Couldn't find unbuddied z3fold page, create new one */ |
349 | page = alloc_page(gfp); | 425 | page = alloc_page(gfp); |
350 | if (!page) | 426 | if (!page) |
351 | return -ENOMEM; | 427 | return -ENOMEM; |
352 | spin_lock(&pool->lock); | 428 | |
353 | pool->pages_nr++; | 429 | atomic64_inc(&pool->pages_nr); |
354 | zhdr = init_z3fold_page(page); | 430 | zhdr = init_z3fold_page(page); |
355 | 431 | ||
356 | if (bud == HEADLESS) { | 432 | if (bud == HEADLESS) { |
357 | set_bit(PAGE_HEADLESS, &page->private); | 433 | set_bit(PAGE_HEADLESS, &page->private); |
434 | spin_lock(&pool->lock); | ||
358 | goto headless; | 435 | goto headless; |
359 | } | 436 | } |
437 | z3fold_page_lock(zhdr); | ||
360 | 438 | ||
361 | found: | 439 | found: |
362 | if (bud == FIRST) | 440 | if (bud == FIRST) |
@@ -365,17 +443,15 @@ found: | |||
365 | zhdr->last_chunks = chunks; | 443 | zhdr->last_chunks = chunks; |
366 | else { | 444 | else { |
367 | zhdr->middle_chunks = chunks; | 445 | zhdr->middle_chunks = chunks; |
368 | zhdr->start_middle = zhdr->first_chunks + 1; | 446 | zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS; |
369 | } | 447 | } |
370 | 448 | ||
449 | spin_lock(&pool->lock); | ||
371 | if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 || | 450 | if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 || |
372 | zhdr->middle_chunks == 0) { | 451 | zhdr->middle_chunks == 0) { |
373 | /* Add to unbuddied list */ | 452 | /* Add to unbuddied list */ |
374 | freechunks = num_free_chunks(zhdr); | 453 | freechunks = num_free_chunks(zhdr); |
375 | list_add(&zhdr->buddy, &pool->unbuddied[freechunks]); | 454 | list_add(&zhdr->buddy, &pool->unbuddied[freechunks]); |
376 | } else { | ||
377 | /* Add to buddied list */ | ||
378 | list_add(&zhdr->buddy, &pool->buddied); | ||
379 | } | 455 | } |
380 | 456 | ||
381 | headless: | 457 | headless: |
@@ -387,6 +463,8 @@ headless: | |||
387 | 463 | ||
388 | *handle = encode_handle(zhdr, bud); | 464 | *handle = encode_handle(zhdr, bud); |
389 | spin_unlock(&pool->lock); | 465 | spin_unlock(&pool->lock); |
466 | if (bud != HEADLESS) | ||
467 | z3fold_page_unlock(zhdr); | ||
390 | 468 | ||
391 | return 0; | 469 | return 0; |
392 | } | 470 | } |
@@ -408,7 +486,6 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle) | |||
408 | struct page *page; | 486 | struct page *page; |
409 | enum buddy bud; | 487 | enum buddy bud; |
410 | 488 | ||
411 | spin_lock(&pool->lock); | ||
412 | zhdr = handle_to_z3fold_header(handle); | 489 | zhdr = handle_to_z3fold_header(handle); |
413 | page = virt_to_page(zhdr); | 490 | page = virt_to_page(zhdr); |
414 | 491 | ||
@@ -416,6 +493,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle) | |||
416 | /* HEADLESS page stored */ | 493 | /* HEADLESS page stored */ |
417 | bud = HEADLESS; | 494 | bud = HEADLESS; |
418 | } else { | 495 | } else { |
496 | z3fold_page_lock(zhdr); | ||
419 | bud = handle_to_buddy(handle); | 497 | bud = handle_to_buddy(handle); |
420 | 498 | ||
421 | switch (bud) { | 499 | switch (bud) { |
@@ -432,38 +510,36 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle) | |||
432 | default: | 510 | default: |
433 | pr_err("%s: unknown bud %d\n", __func__, bud); | 511 | pr_err("%s: unknown bud %d\n", __func__, bud); |
434 | WARN_ON(1); | 512 | WARN_ON(1); |
435 | spin_unlock(&pool->lock); | 513 | z3fold_page_unlock(zhdr); |
436 | return; | 514 | return; |
437 | } | 515 | } |
438 | } | 516 | } |
439 | 517 | ||
440 | if (test_bit(UNDER_RECLAIM, &page->private)) { | 518 | if (bud == HEADLESS) { |
441 | /* z3fold page is under reclaim, reclaim will free */ | 519 | spin_lock(&pool->lock); |
442 | spin_unlock(&pool->lock); | ||
443 | return; | ||
444 | } | ||
445 | |||
446 | if (bud != HEADLESS) { | ||
447 | /* Remove from existing buddy list */ | ||
448 | list_del(&zhdr->buddy); | ||
449 | } | ||
450 | |||
451 | if (bud == HEADLESS || | ||
452 | (zhdr->first_chunks == 0 && zhdr->middle_chunks == 0 && | ||
453 | zhdr->last_chunks == 0)) { | ||
454 | /* z3fold page is empty, free */ | ||
455 | list_del(&page->lru); | 520 | list_del(&page->lru); |
456 | clear_bit(PAGE_HEADLESS, &page->private); | 521 | spin_unlock(&pool->lock); |
457 | free_z3fold_page(zhdr); | 522 | free_z3fold_page(page); |
458 | pool->pages_nr--; | 523 | atomic64_dec(&pool->pages_nr); |
459 | } else { | 524 | } else { |
460 | z3fold_compact_page(zhdr); | 525 | if (zhdr->first_chunks != 0 || zhdr->middle_chunks != 0 || |
461 | /* Add to the unbuddied list */ | 526 | zhdr->last_chunks != 0) { |
462 | freechunks = num_free_chunks(zhdr); | 527 | z3fold_compact_page(zhdr); |
463 | list_add(&zhdr->buddy, &pool->unbuddied[freechunks]); | 528 | /* Add to the unbuddied list */ |
529 | spin_lock(&pool->lock); | ||
530 | if (!list_empty(&zhdr->buddy)) | ||
531 | list_del(&zhdr->buddy); | ||
532 | freechunks = num_free_chunks(zhdr); | ||
533 | list_add(&zhdr->buddy, &pool->unbuddied[freechunks]); | ||
534 | spin_unlock(&pool->lock); | ||
535 | } | ||
536 | z3fold_page_unlock(zhdr); | ||
537 | spin_lock(&pool->lock); | ||
538 | if (kref_put(&zhdr->refcount, release_z3fold_page)) | ||
539 | atomic64_dec(&pool->pages_nr); | ||
540 | spin_unlock(&pool->lock); | ||
464 | } | 541 | } |
465 | 542 | ||
466 | spin_unlock(&pool->lock); | ||
467 | } | 543 | } |
468 | 544 | ||
469 | /** | 545 | /** |
@@ -510,20 +586,25 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries) | |||
510 | unsigned long first_handle = 0, middle_handle = 0, last_handle = 0; | 586 | unsigned long first_handle = 0, middle_handle = 0, last_handle = 0; |
511 | 587 | ||
512 | spin_lock(&pool->lock); | 588 | spin_lock(&pool->lock); |
513 | if (!pool->ops || !pool->ops->evict || list_empty(&pool->lru) || | 589 | if (!pool->ops || !pool->ops->evict || retries == 0) { |
514 | retries == 0) { | ||
515 | spin_unlock(&pool->lock); | 590 | spin_unlock(&pool->lock); |
516 | return -EINVAL; | 591 | return -EINVAL; |
517 | } | 592 | } |
518 | for (i = 0; i < retries; i++) { | 593 | for (i = 0; i < retries; i++) { |
594 | if (list_empty(&pool->lru)) { | ||
595 | spin_unlock(&pool->lock); | ||
596 | return -EINVAL; | ||
597 | } | ||
519 | page = list_last_entry(&pool->lru, struct page, lru); | 598 | page = list_last_entry(&pool->lru, struct page, lru); |
520 | list_del(&page->lru); | 599 | list_del_init(&page->lru); |
521 | 600 | ||
522 | /* Protect z3fold page against free */ | ||
523 | set_bit(UNDER_RECLAIM, &page->private); | ||
524 | zhdr = page_address(page); | 601 | zhdr = page_address(page); |
525 | if (!test_bit(PAGE_HEADLESS, &page->private)) { | 602 | if (!test_bit(PAGE_HEADLESS, &page->private)) { |
526 | list_del(&zhdr->buddy); | 603 | if (!list_empty(&zhdr->buddy)) |
604 | list_del_init(&zhdr->buddy); | ||
605 | kref_get(&zhdr->refcount); | ||
606 | spin_unlock(&pool->lock); | ||
607 | z3fold_page_lock(zhdr); | ||
527 | /* | 608 | /* |
528 | * We need encode the handles before unlocking, since | 609 | * We need encode the handles before unlocking, since |
529 | * we can race with free that will set | 610 | * we can race with free that will set |
@@ -538,13 +619,13 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries) | |||
538 | middle_handle = encode_handle(zhdr, MIDDLE); | 619 | middle_handle = encode_handle(zhdr, MIDDLE); |
539 | if (zhdr->last_chunks) | 620 | if (zhdr->last_chunks) |
540 | last_handle = encode_handle(zhdr, LAST); | 621 | last_handle = encode_handle(zhdr, LAST); |
622 | z3fold_page_unlock(zhdr); | ||
541 | } else { | 623 | } else { |
542 | first_handle = encode_handle(zhdr, HEADLESS); | 624 | first_handle = encode_handle(zhdr, HEADLESS); |
543 | last_handle = middle_handle = 0; | 625 | last_handle = middle_handle = 0; |
626 | spin_unlock(&pool->lock); | ||
544 | } | 627 | } |
545 | 628 | ||
546 | spin_unlock(&pool->lock); | ||
547 | |||
548 | /* Issue the eviction callback(s) */ | 629 | /* Issue the eviction callback(s) */ |
549 | if (middle_handle) { | 630 | if (middle_handle) { |
550 | ret = pool->ops->evict(pool, middle_handle); | 631 | ret = pool->ops->evict(pool, middle_handle); |
@@ -562,36 +643,40 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries) | |||
562 | goto next; | 643 | goto next; |
563 | } | 644 | } |
564 | next: | 645 | next: |
565 | spin_lock(&pool->lock); | 646 | if (test_bit(PAGE_HEADLESS, &page->private)) { |
566 | clear_bit(UNDER_RECLAIM, &page->private); | 647 | if (ret == 0) { |
567 | if ((test_bit(PAGE_HEADLESS, &page->private) && ret == 0) || | 648 | free_z3fold_page(page); |
568 | (zhdr->first_chunks == 0 && zhdr->last_chunks == 0 && | 649 | return 0; |
569 | zhdr->middle_chunks == 0)) { | ||
570 | /* | ||
571 | * All buddies are now free, free the z3fold page and | ||
572 | * return success. | ||
573 | */ | ||
574 | clear_bit(PAGE_HEADLESS, &page->private); | ||
575 | free_z3fold_page(zhdr); | ||
576 | pool->pages_nr--; | ||
577 | spin_unlock(&pool->lock); | ||
578 | return 0; | ||
579 | } else if (!test_bit(PAGE_HEADLESS, &page->private)) { | ||
580 | if (zhdr->first_chunks != 0 && | ||
581 | zhdr->last_chunks != 0 && | ||
582 | zhdr->middle_chunks != 0) { | ||
583 | /* Full, add to buddied list */ | ||
584 | list_add(&zhdr->buddy, &pool->buddied); | ||
585 | } else { | 650 | } else { |
651 | spin_lock(&pool->lock); | ||
652 | } | ||
653 | } else { | ||
654 | z3fold_page_lock(zhdr); | ||
655 | if ((zhdr->first_chunks || zhdr->last_chunks || | ||
656 | zhdr->middle_chunks) && | ||
657 | !(zhdr->first_chunks && zhdr->last_chunks && | ||
658 | zhdr->middle_chunks)) { | ||
586 | z3fold_compact_page(zhdr); | 659 | z3fold_compact_page(zhdr); |
587 | /* add to unbuddied list */ | 660 | /* add to unbuddied list */ |
661 | spin_lock(&pool->lock); | ||
588 | freechunks = num_free_chunks(zhdr); | 662 | freechunks = num_free_chunks(zhdr); |
589 | list_add(&zhdr->buddy, | 663 | list_add(&zhdr->buddy, |
590 | &pool->unbuddied[freechunks]); | 664 | &pool->unbuddied[freechunks]); |
665 | spin_unlock(&pool->lock); | ||
666 | } | ||
667 | z3fold_page_unlock(zhdr); | ||
668 | spin_lock(&pool->lock); | ||
669 | if (kref_put(&zhdr->refcount, release_z3fold_page)) { | ||
670 | atomic64_dec(&pool->pages_nr); | ||
671 | return 0; | ||
591 | } | 672 | } |
592 | } | 673 | } |
593 | 674 | ||
594 | /* add to beginning of LRU */ | 675 | /* |
676 | * Add to the beginning of LRU. | ||
677 | * Pool lock has to be kept here to ensure the page has | ||
678 | * not already been released | ||
679 | */ | ||
595 | list_add(&page->lru, &pool->lru); | 680 | list_add(&page->lru, &pool->lru); |
596 | } | 681 | } |
597 | spin_unlock(&pool->lock); | 682 | spin_unlock(&pool->lock); |
@@ -615,7 +700,6 @@ static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle) | |||
615 | void *addr; | 700 | void *addr; |
616 | enum buddy buddy; | 701 | enum buddy buddy; |
617 | 702 | ||
618 | spin_lock(&pool->lock); | ||
619 | zhdr = handle_to_z3fold_header(handle); | 703 | zhdr = handle_to_z3fold_header(handle); |
620 | addr = zhdr; | 704 | addr = zhdr; |
621 | page = virt_to_page(zhdr); | 705 | page = virt_to_page(zhdr); |
@@ -623,6 +707,7 @@ static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle) | |||
623 | if (test_bit(PAGE_HEADLESS, &page->private)) | 707 | if (test_bit(PAGE_HEADLESS, &page->private)) |
624 | goto out; | 708 | goto out; |
625 | 709 | ||
710 | z3fold_page_lock(zhdr); | ||
626 | buddy = handle_to_buddy(handle); | 711 | buddy = handle_to_buddy(handle); |
627 | switch (buddy) { | 712 | switch (buddy) { |
628 | case FIRST: | 713 | case FIRST: |
@@ -641,8 +726,9 @@ static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle) | |||
641 | addr = NULL; | 726 | addr = NULL; |
642 | break; | 727 | break; |
643 | } | 728 | } |
729 | |||
730 | z3fold_page_unlock(zhdr); | ||
644 | out: | 731 | out: |
645 | spin_unlock(&pool->lock); | ||
646 | return addr; | 732 | return addr; |
647 | } | 733 | } |
648 | 734 | ||
@@ -657,31 +743,28 @@ static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle) | |||
657 | struct page *page; | 743 | struct page *page; |
658 | enum buddy buddy; | 744 | enum buddy buddy; |
659 | 745 | ||
660 | spin_lock(&pool->lock); | ||
661 | zhdr = handle_to_z3fold_header(handle); | 746 | zhdr = handle_to_z3fold_header(handle); |
662 | page = virt_to_page(zhdr); | 747 | page = virt_to_page(zhdr); |
663 | 748 | ||
664 | if (test_bit(PAGE_HEADLESS, &page->private)) { | 749 | if (test_bit(PAGE_HEADLESS, &page->private)) |
665 | spin_unlock(&pool->lock); | ||
666 | return; | 750 | return; |
667 | } | ||
668 | 751 | ||
752 | z3fold_page_lock(zhdr); | ||
669 | buddy = handle_to_buddy(handle); | 753 | buddy = handle_to_buddy(handle); |
670 | if (buddy == MIDDLE) | 754 | if (buddy == MIDDLE) |
671 | clear_bit(MIDDLE_CHUNK_MAPPED, &page->private); | 755 | clear_bit(MIDDLE_CHUNK_MAPPED, &page->private); |
672 | spin_unlock(&pool->lock); | 756 | z3fold_page_unlock(zhdr); |
673 | } | 757 | } |
674 | 758 | ||
675 | /** | 759 | /** |
676 | * z3fold_get_pool_size() - gets the z3fold pool size in pages | 760 | * z3fold_get_pool_size() - gets the z3fold pool size in pages |
677 | * @pool: pool whose size is being queried | 761 | * @pool: pool whose size is being queried |
678 | * | 762 | * |
679 | * Returns: size in pages of the given pool. The pool lock need not be | 763 | * Returns: size in pages of the given pool. |
680 | * taken to access pages_nr. | ||
681 | */ | 764 | */ |
682 | static u64 z3fold_get_pool_size(struct z3fold_pool *pool) | 765 | static u64 z3fold_get_pool_size(struct z3fold_pool *pool) |
683 | { | 766 | { |
684 | return pool->pages_nr; | 767 | return atomic64_read(&pool->pages_nr); |
685 | } | 768 | } |
686 | 769 | ||
687 | /***************** | 770 | /***************** |
@@ -780,8 +863,8 @@ MODULE_ALIAS("zpool-z3fold"); | |||
780 | 863 | ||
781 | static int __init init_z3fold(void) | 864 | static int __init init_z3fold(void) |
782 | { | 865 | { |
783 | /* Make sure the z3fold header will fit in one chunk */ | 866 | /* Make sure the z3fold header is not larger than the page size */ |
784 | BUILD_BUG_ON(sizeof(struct z3fold_header) > ZHDR_SIZE_ALIGNED); | 867 | BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE); |
785 | zpool_register_driver(&z3fold_zpool_driver); | 868 | zpool_register_driver(&z3fold_zpool_driver); |
786 | 869 | ||
787 | return 0; | 870 | return 0; |
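The reworked BUILD_BUG_ON only requires the chunk-aligned header to fit in a page, rather than in a single chunk. Outside the kernel the same build-time guard can be written with C11 static_assert; the sizes below are made-up stand-ins:

#include <assert.h>

#define MY_PAGE_SIZE		4096	/* hypothetical */
#define MY_ZHDR_SIZE_ALIGNED	64	/* hypothetical aligned header size */

/* Fails the build, not the running system, if the header outgrows a page. */
static_assert(MY_ZHDR_SIZE_ALIGNED <= MY_PAGE_SIZE,
	      "z3fold header must fit in one page");
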
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index a1f24989ac23..b7b1fb6c8c21 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c | |||
@@ -24,7 +24,6 @@ | |||
24 | * | 24 | * |
25 | * Usage of struct page flags: | 25 | * Usage of struct page flags: |
26 | * PG_private: identifies the first component page | 26 | * PG_private: identifies the first component page |
27 | * PG_private2: identifies the last component page | ||
28 | * PG_owner_priv_1: identifies the huge component page | 27 | * PG_owner_priv_1: identifies the huge component page |
29 | * | 28 | * |
30 | */ | 29 | */ |
@@ -268,10 +267,6 @@ struct zs_pool { | |||
268 | #endif | 267 | #endif |
269 | }; | 268 | }; |
270 | 269 | ||
271 | /* | ||
272 | * A zspage's class index and fullness group | ||
273 | * are encoded in its (first)page->mapping | ||
274 | */ | ||
275 | #define FULLNESS_BITS 2 | 270 | #define FULLNESS_BITS 2 |
276 | #define CLASS_BITS 8 | 271 | #define CLASS_BITS 8 |
277 | #define ISOLATED_BITS 3 | 272 | #define ISOLATED_BITS 3 |
@@ -938,7 +933,6 @@ static void reset_page(struct page *page) | |||
938 | { | 933 | { |
939 | __ClearPageMovable(page); | 934 | __ClearPageMovable(page); |
940 | ClearPagePrivate(page); | 935 | ClearPagePrivate(page); |
941 | ClearPagePrivate2(page); | ||
942 | set_page_private(page, 0); | 936 | set_page_private(page, 0); |
943 | page_mapcount_reset(page); | 937 | page_mapcount_reset(page); |
944 | ClearPageHugeObject(page); | 938 | ClearPageHugeObject(page); |
@@ -1085,7 +1079,7 @@ static void create_page_chain(struct size_class *class, struct zspage *zspage, | |||
1085 | * 2. each sub-page point to zspage using page->private | 1079 | * 2. each sub-page point to zspage using page->private |
1086 | * | 1080 | * |
1087 | * we set PG_private to identify the first page (i.e. no other sub-page | 1081 | * we set PG_private to identify the first page (i.e. no other sub-page |
1088 | * has this flag set) and PG_private_2 to identify the last page. | 1082 | * has this flag set). |
1089 | */ | 1083 | */ |
1090 | for (i = 0; i < nr_pages; i++) { | 1084 | for (i = 0; i < nr_pages; i++) { |
1091 | page = pages[i]; | 1085 | page = pages[i]; |
@@ -1100,8 +1094,6 @@ static void create_page_chain(struct size_class *class, struct zspage *zspage, | |||
1100 | } else { | 1094 | } else { |
1101 | prev_page->freelist = page; | 1095 | prev_page->freelist = page; |
1102 | } | 1096 | } |
1103 | if (i == nr_pages - 1) | ||
1104 | SetPagePrivate2(page); | ||
1105 | prev_page = page; | 1097 | prev_page = page; |
1106 | } | 1098 | } |
1107 | } | 1099 | } |
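After this cleanup zsmalloc tags only the first component page (PG_private) and the huge page (PG_owner_priv_1); PG_private_2 no longer marks the tail. Since create_page_chain() already links the sub-pages through page->freelist, a chain tail can be recognized without a flag bit at all; a generic sketch of that idea (an illustration of the principle, not necessarily the exact mechanism zsmalloc uses):

#include <stddef.h>

struct comp_page {
	struct comp_page *next;	/* stands in for the page->freelist link */
};

/* The tail needs no flag: it is simply the page with no successor. */
static int is_last_page(const struct comp_page *p)
{
	return p->next == NULL;
}
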
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 982c52ca6473..918259a55f65 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl | |||
@@ -424,7 +424,7 @@ our $typeTypedefs = qr{(?x: | |||
424 | our $zero_initializer = qr{(?:(?:0[xX])?0+$Int_type?|NULL|false)\b}; | 424 | our $zero_initializer = qr{(?:(?:0[xX])?0+$Int_type?|NULL|false)\b}; |
425 | 425 | ||
426 | our $logFunctions = qr{(?x: | 426 | our $logFunctions = qr{(?x: |
427 | printk(?:_ratelimited|_once|)| | 427 | printk(?:_ratelimited|_once|_deferred_once|_deferred|)| |
428 | (?:[a-z0-9]+_){1,2}(?:printk|emerg|alert|crit|err|warning|warn|notice|info|debug|dbg|vdbg|devel|cont|WARN)(?:_ratelimited|_once|)| | 428 | (?:[a-z0-9]+_){1,2}(?:printk|emerg|alert|crit|err|warning|warn|notice|info|debug|dbg|vdbg|devel|cont|WARN)(?:_ratelimited|_once|)| |
429 | WARN(?:_RATELIMIT|_ONCE|)| | 429 | WARN(?:_RATELIMIT|_ONCE|)| |
430 | panic| | 430 | panic| |
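The $logFunctions update teaches checkpatch that printk_deferred() and printk_deferred_once() are logging calls, so they now get the usual logging-function handling (for example, the exemptions around long quoted strings) instead of being treated as ordinary function calls. A line it now recognizes:

	printk_deferred(KERN_WARNING "scheduler: cannot take console locks here, deferring output\n");
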
@@ -2134,7 +2134,7 @@ sub process { | |||
2134 | my $in_header_lines = $file ? 0 : 1; | 2134 | my $in_header_lines = $file ? 0 : 1; |
2135 | my $in_commit_log = 0; #Scanning lines before patch | 2135 | my $in_commit_log = 0; #Scanning lines before patch |
2136 | my $has_commit_log = 0; #Encountered lines before patch | 2136 | my $has_commit_log = 0; #Encountered lines before patch |
2137 | my $commit_log_possible_stack_dump = 0; | 2137 | my $commit_log_possible_stack_dump = 0; |
2138 | my $commit_log_long_line = 0; | 2138 | my $commit_log_long_line = 0; |
2139 | my $commit_log_has_diff = 0; | 2139 | my $commit_log_has_diff = 0; |
2140 | my $reported_maintainer_file = 0; | 2140 | my $reported_maintainer_file = 0; |
@@ -2154,6 +2154,7 @@ sub process { | |||
2154 | my $realline = 0; | 2154 | my $realline = 0; |
2155 | my $realcnt = 0; | 2155 | my $realcnt = 0; |
2156 | my $here = ''; | 2156 | my $here = ''; |
2157 | my $context_function; #undef'd unless there's a known function | ||
2157 | my $in_comment = 0; | 2158 | my $in_comment = 0; |
2158 | my $comment_edge = 0; | 2159 | my $comment_edge = 0; |
2159 | my $first_line = 0; | 2160 | my $first_line = 0; |
@@ -2192,7 +2193,8 @@ sub process { | |||
2192 | } | 2193 | } |
2193 | #next; | 2194 | #next; |
2194 | } | 2195 | } |
2195 | if ($rawline=~/^\@\@ -\d+(?:,\d+)? \+(\d+)(,(\d+))? \@\@/) { | 2196 | if ($rawline=~/^\@\@ -\d+(?:,\d+)? \+(\d+)(,(\d+))? \@\@(.*)/) { |
2197 | my $context = $4; | ||
2196 | $realline=$1-1; | 2198 | $realline=$1-1; |
2197 | if (defined $2) { | 2199 | if (defined $2) { |
2198 | $realcnt=$3+1; | 2200 | $realcnt=$3+1; |
@@ -2201,6 +2203,12 @@ sub process { | |||
2201 | } | 2203 | } |
2202 | $in_comment = 0; | 2204 | $in_comment = 0; |
2203 | 2205 | ||
2206 | if ($context =~ /\b(\w+)\s*\(/) { | ||
2207 | $context_function = $1; | ||
2208 | } else { | ||
2209 | undef $context_function; | ||
2210 | } | ||
2211 | |||
2204 | # Guestimate if this is a continuing comment. Run | 2212 | # Guestimate if this is a continuing comment. Run |
2205 | # the context looking for a comment "edge". If this | 2213 | # the context looking for a comment "edge". If this |
2206 | # edge is a close comment then we must be in a comment | 2214 | # edge is a close comment then we must be in a comment |
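$context_function is parsed out of the free-form text that diff appends after the second @@ of a hunk header. Given a header such as (function name made up):

	@@ -100,6 +100,8 @@ static int frob_widget(struct widget *w)

the \b(\w+)\s*\( match captures "frob_widget", which the EMBEDDED_FUNCTION_NAME check added later in this series compares against quoted strings; with -f/--file there is no hunk context, so the variable stays undef.
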
@@ -2695,6 +2703,7 @@ sub process { | |||
2695 | 2703 | ||
2696 | # Check for FSF mailing addresses. | 2704 | # Check for FSF mailing addresses. |
2697 | if ($rawline =~ /\bwrite to the Free/i || | 2705 | if ($rawline =~ /\bwrite to the Free/i || |
2706 | $rawline =~ /\b675\s+Mass\s+Ave/i || | ||
2698 | $rawline =~ /\b59\s+Temple\s+Pl/i || | 2707 | $rawline =~ /\b59\s+Temple\s+Pl/i || |
2699 | $rawline =~ /\b51\s+Franklin\s+St/i) { | 2708 | $rawline =~ /\b51\s+Franklin\s+St/i) { |
2700 | my $herevet = "$here\n" . cat_vet($rawline) . "\n"; | 2709 | my $herevet = "$here\n" . cat_vet($rawline) . "\n"; |
@@ -5095,6 +5104,12 @@ sub process { | |||
5095 | } | 5104 | } |
5096 | } | 5105 | } |
5097 | 5106 | ||
5107 | # check for single line unbalanced braces | ||
5108 | if ($sline =~ /^.\s*\}\s*else\s*$/ || | ||
5109 | $sline =~ /^.\s*else\s*\{\s*$/) { | ||
5110 | CHK("BRACES", "Unbalanced braces around else statement\n" . $herecurr); | ||
5111 | } | ||
5112 | |||
5098 | # check for unnecessary blank lines around braces | 5113 | # check for unnecessary blank lines around braces |
5099 | if (($line =~ /^.\s*}\s*$/ && $prevrawline =~ /^.\s*$/)) { | 5114 | if (($line =~ /^.\s*}\s*$/ && $prevrawline =~ /^.\s*$/)) { |
5100 | if (CHK("BRACES", | 5115 | if (CHK("BRACES", |
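The new BRACES check flags a line that is just "} else" or just "else {", i.e. half-cuddled braces. In C terms (undo() and commit() are placeholder helpers):

	/* flagged: "else {" stands alone, so the braces are unbalanced */
	if (err) {
		undo();
	}
	else {
		commit();
	}

	/* preferred kernel style: "} else {" on one line */
	if (err) {
		undo();
	} else {
		commit();
	}
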
@@ -5157,6 +5172,16 @@ sub process { | |||
5157 | "break quoted strings at a space character\n" . $hereprev); | 5172 | "break quoted strings at a space character\n" . $hereprev); |
5158 | } | 5173 | } |
5159 | 5174 | ||
5175 | #check for an embedded function name in a string when the function is known | ||
5176 | # as part of a diff. This does not work for -f --file checking as it | ||
5177 | #depends on patch context providing the function name | ||
5178 | if ($line =~ /^\+.*$String/ && | ||
5179 | defined($context_function) && | ||
5180 | get_quoted_string($line, $rawline) =~ /\b$context_function\b/) { | ||
5181 | WARN("EMBEDDED_FUNCTION_NAME", | ||
5182 | "Prefer using \"%s\", __func__ to embedded function names\n" . $herecurr); | ||
5183 | } | ||
5184 | |||
5160 | # check for spaces before a quoted newline | 5185 | # check for spaces before a quoted newline |
5161 | if ($rawline =~ /^.*\".*\s\\n/) { | 5186 | if ($rawline =~ /^.*\".*\s\\n/) { |
5162 | if (WARN("QUOTED_WHITESPACE_BEFORE_NEWLINE", | 5187 | if (WARN("QUOTED_WHITESPACE_BEFORE_NEWLINE", |
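The EMBEDDED_FUNCTION_NAME warning compares quoted strings on added lines against the function named in the hunk context and nudges authors toward __func__, which survives renames. With a hypothetical frob_widget():

static int frob_widget(struct widget *w)
{
	if (!w) {
		/* flagged: the function's own name is embedded in the string */
		pr_err("frob_widget: no widget\n");

		/* preferred, per the warning text: "%s", __func__ */
		pr_err("%s: no widget\n", __func__);
		return -EINVAL;
	}
	return 0;
}
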
@@ -5269,6 +5294,12 @@ sub process { | |||
5269 | } | 5294 | } |
5270 | } | 5295 | } |
5271 | 5296 | ||
5297 | # check for logging continuations | ||
5298 | if ($line =~ /\bprintk\s*\(\s*KERN_CONT\b|\bpr_cont\s*\(/) { | ||
5299 | WARN("LOGGING_CONTINUATION", | ||
5300 | "Avoid logging continuation uses where feasible\n" . $herecurr); | ||
5301 | } | ||
5302 | |||
5272 | # check for mask then right shift without a parentheses | 5303 | # check for mask then right shift without a parentheses |
5273 | if ($^V && $^V ge 5.10.0 && | 5304 | if ($^V && $^V ge 5.10.0 && |
5274 | $line =~ /$LvalOrFunc\s*\&\s*($LvalOrFunc)\s*>>/ && | 5305 | $line =~ /$LvalOrFunc\s*\&\s*($LvalOrFunc)\s*>>/ && |
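LOGGING_CONTINUATION warns on printk(KERN_CONT ...) and pr_cont() because messages from other contexts can be emitted between the pieces, splitting the line in the log. The shape of what it flags versus the single-call alternative:

	/* flagged: another CPU's message may land between the two calls */
	printk(KERN_INFO "widget0: status:");
	printk(KERN_CONT " ok\n");

	/* preferred where feasible: emit the whole line at once */
	printk(KERN_INFO "widget0: status: ok\n");
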
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index c354807381c1..c9e8a9898ce4 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c | |||
@@ -424,10 +424,9 @@ out: | |||
424 | return ret; | 424 | return ret; |
425 | } | 425 | } |
426 | 426 | ||
427 | static int sel_mmap_policy_fault(struct vm_area_struct *vma, | 427 | static int sel_mmap_policy_fault(struct vm_fault *vmf) |
428 | struct vm_fault *vmf) | ||
429 | { | 428 | { |
430 | struct policy_load_memory *plm = vma->vm_file->private_data; | 429 | struct policy_load_memory *plm = vmf->vma->vm_file->private_data; |
431 | unsigned long offset; | 430 | unsigned long offset; |
432 | struct page *page; | 431 | struct page *page; |
433 | 432 | ||
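This hunk, the sound/ hunks below, and the kvm_main.c hunk at the end are one mechanical conversion: ->fault handlers lose their vm_area_struct argument and reach the VMA through the new vmf->vma pointer instead. The converted shape, sketched with hypothetical names:

/* old: int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
 * new: int (*fault)(struct vm_fault *vmf);
 */
static int example_dev_fault(struct vm_fault *vmf)
{
	/* the VMA now travels inside struct vm_fault */
	struct example_dev *dev = vmf->vma->vm_private_data;

	vmf->page = dev->page;	/* hand back the backing page, as the
				 * usX2Y handlers below do */
	get_page(vmf->page);
	return 0;
}
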
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 9d33c1e85c79..aec9c92250fd 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c | |||
@@ -3245,10 +3245,9 @@ static unsigned int snd_pcm_capture_poll(struct file *file, poll_table * wait) | |||
3245 | /* | 3245 | /* |
3246 | * mmap status record | 3246 | * mmap status record |
3247 | */ | 3247 | */ |
3248 | static int snd_pcm_mmap_status_fault(struct vm_area_struct *area, | 3248 | static int snd_pcm_mmap_status_fault(struct vm_fault *vmf) |
3249 | struct vm_fault *vmf) | ||
3250 | { | 3249 | { |
3251 | struct snd_pcm_substream *substream = area->vm_private_data; | 3250 | struct snd_pcm_substream *substream = vmf->vma->vm_private_data; |
3252 | struct snd_pcm_runtime *runtime; | 3251 | struct snd_pcm_runtime *runtime; |
3253 | 3252 | ||
3254 | if (substream == NULL) | 3253 | if (substream == NULL) |
@@ -3282,10 +3281,9 @@ static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file | |||
3282 | /* | 3281 | /* |
3283 | * mmap control record | 3282 | * mmap control record |
3284 | */ | 3283 | */ |
3285 | static int snd_pcm_mmap_control_fault(struct vm_area_struct *area, | 3284 | static int snd_pcm_mmap_control_fault(struct vm_fault *vmf) |
3286 | struct vm_fault *vmf) | ||
3287 | { | 3285 | { |
3288 | struct snd_pcm_substream *substream = area->vm_private_data; | 3286 | struct snd_pcm_substream *substream = vmf->vma->vm_private_data; |
3289 | struct snd_pcm_runtime *runtime; | 3287 | struct snd_pcm_runtime *runtime; |
3290 | 3288 | ||
3291 | if (substream == NULL) | 3289 | if (substream == NULL) |
@@ -3341,10 +3339,9 @@ snd_pcm_default_page_ops(struct snd_pcm_substream *substream, unsigned long ofs) | |||
3341 | /* | 3339 | /* |
3342 | * fault callback for mmapping a RAM page | 3340 | * fault callback for mmapping a RAM page |
3343 | */ | 3341 | */ |
3344 | static int snd_pcm_mmap_data_fault(struct vm_area_struct *area, | 3342 | static int snd_pcm_mmap_data_fault(struct vm_fault *vmf) |
3345 | struct vm_fault *vmf) | ||
3346 | { | 3343 | { |
3347 | struct snd_pcm_substream *substream = area->vm_private_data; | 3344 | struct snd_pcm_substream *substream = vmf->vma->vm_private_data; |
3348 | struct snd_pcm_runtime *runtime; | 3345 | struct snd_pcm_runtime *runtime; |
3349 | unsigned long offset; | 3346 | unsigned long offset; |
3350 | struct page * page; | 3347 | struct page * page; |
diff --git a/sound/usb/usx2y/us122l.c b/sound/usb/usx2y/us122l.c index cf5dc33f4a6d..cf45bf1f7ee0 100644 --- a/sound/usb/usx2y/us122l.c +++ b/sound/usb/usx2y/us122l.c | |||
@@ -137,13 +137,12 @@ static void usb_stream_hwdep_vm_open(struct vm_area_struct *area) | |||
137 | snd_printdd(KERN_DEBUG "%i\n", atomic_read(&us122l->mmap_count)); | 137 | snd_printdd(KERN_DEBUG "%i\n", atomic_read(&us122l->mmap_count)); |
138 | } | 138 | } |
139 | 139 | ||
140 | static int usb_stream_hwdep_vm_fault(struct vm_area_struct *area, | 140 | static int usb_stream_hwdep_vm_fault(struct vm_fault *vmf) |
141 | struct vm_fault *vmf) | ||
142 | { | 141 | { |
143 | unsigned long offset; | 142 | unsigned long offset; |
144 | struct page *page; | 143 | struct page *page; |
145 | void *vaddr; | 144 | void *vaddr; |
146 | struct us122l *us122l = area->vm_private_data; | 145 | struct us122l *us122l = vmf->vma->vm_private_data; |
147 | struct usb_stream *s; | 146 | struct usb_stream *s; |
148 | 147 | ||
149 | mutex_lock(&us122l->mutex); | 148 | mutex_lock(&us122l->mutex); |
diff --git a/sound/usb/usx2y/usX2Yhwdep.c b/sound/usb/usx2y/usX2Yhwdep.c index 0b34dbc8f302..605e1047c01d 100644 --- a/sound/usb/usx2y/usX2Yhwdep.c +++ b/sound/usb/usx2y/usX2Yhwdep.c | |||
@@ -31,19 +31,18 @@ | |||
31 | #include "usbusx2y.h" | 31 | #include "usbusx2y.h" |
32 | #include "usX2Yhwdep.h" | 32 | #include "usX2Yhwdep.h" |
33 | 33 | ||
34 | static int snd_us428ctls_vm_fault(struct vm_area_struct *area, | 34 | static int snd_us428ctls_vm_fault(struct vm_fault *vmf) |
35 | struct vm_fault *vmf) | ||
36 | { | 35 | { |
37 | unsigned long offset; | 36 | unsigned long offset; |
38 | struct page * page; | 37 | struct page * page; |
39 | void *vaddr; | 38 | void *vaddr; |
40 | 39 | ||
41 | snd_printdd("ENTER, start %lXh, pgoff %ld\n", | 40 | snd_printdd("ENTER, start %lXh, pgoff %ld\n", |
42 | area->vm_start, | 41 | vmf->vma->vm_start, |
43 | vmf->pgoff); | 42 | vmf->pgoff); |
44 | 43 | ||
45 | offset = vmf->pgoff << PAGE_SHIFT; | 44 | offset = vmf->pgoff << PAGE_SHIFT; |
46 | vaddr = (char*)((struct usX2Ydev *)area->vm_private_data)->us428ctls_sharedmem + offset; | 45 | vaddr = (char *)((struct usX2Ydev *)vmf->vma->vm_private_data)->us428ctls_sharedmem + offset; |
47 | page = virt_to_page(vaddr); | 46 | page = virt_to_page(vaddr); |
48 | get_page(page); | 47 | get_page(page); |
49 | vmf->page = page; | 48 | vmf->page = page; |
diff --git a/sound/usb/usx2y/usx2yhwdeppcm.c b/sound/usb/usx2y/usx2yhwdeppcm.c index 90766a92e7fd..f95164b91152 100644 --- a/sound/usb/usx2y/usx2yhwdeppcm.c +++ b/sound/usb/usx2y/usx2yhwdeppcm.c | |||
@@ -652,14 +652,13 @@ static void snd_usX2Y_hwdep_pcm_vm_close(struct vm_area_struct *area) | |||
652 | } | 652 | } |
653 | 653 | ||
654 | 654 | ||
655 | static int snd_usX2Y_hwdep_pcm_vm_fault(struct vm_area_struct *area, | 655 | static int snd_usX2Y_hwdep_pcm_vm_fault(struct vm_fault *vmf) |
656 | struct vm_fault *vmf) | ||
657 | { | 656 | { |
658 | unsigned long offset; | 657 | unsigned long offset; |
659 | void *vaddr; | 658 | void *vaddr; |
660 | 659 | ||
661 | offset = vmf->pgoff << PAGE_SHIFT; | 660 | offset = vmf->pgoff << PAGE_SHIFT; |
662 | vaddr = (char*)((struct usX2Ydev *)area->vm_private_data)->hwdep_pcm_shm + offset; | 661 | vaddr = (char *)((struct usX2Ydev *)vmf->vma->vm_private_data)->hwdep_pcm_shm + offset; |
663 | vmf->page = virt_to_page(vaddr); | 662 | vmf->page = virt_to_page(vaddr); |
664 | get_page(vmf->page); | 663 | get_page(vmf->page); |
665 | return 0; | 664 | return 0; |
diff --git a/tools/lib/find_bit.c b/tools/lib/find_bit.c index 6d8b8f22cf55..42c15f906aac 100644 --- a/tools/lib/find_bit.c +++ b/tools/lib/find_bit.c | |||
@@ -34,7 +34,7 @@ static unsigned long _find_next_bit(const unsigned long *addr, | |||
34 | { | 34 | { |
35 | unsigned long tmp; | 35 | unsigned long tmp; |
36 | 36 | ||
37 | if (!nbits || start >= nbits) | 37 | if (unlikely(start >= nbits)) |
38 | return nbits; | 38 | return nbits; |
39 | 39 | ||
40 | tmp = addr[start / BITS_PER_LONG] ^ invert; | 40 | tmp = addr[start / BITS_PER_LONG] ^ invert; |
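The simplification relies on both operands being unsigned: if nbits is 0, then start >= nbits holds for every start, so the separate !nbits test was redundant, and the one remaining (rare) comparison can carry unlikely(). A quick userspace check of the boundary case:

#include <assert.h>

/* stand-in for the early-exit logic in _find_next_bit() */
static unsigned long find_stub(unsigned long start, unsigned long nbits)
{
	if (start >= nbits)	/* unsigned: also catches nbits == 0 */
		return nbits;
	return start;		/* the real code would search from here */
}

int main(void)
{
	assert(find_stub(0, 0) == 0);	/* old !nbits case: still returns nbits */
	assert(find_stub(7, 4) == 4);	/* start past the end: returns nbits */
	return 0;
}
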
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c index 5a840a605a16..e9449c801888 100644 --- a/tools/testing/selftests/vm/userfaultfd.c +++ b/tools/testing/selftests/vm/userfaultfd.c | |||
@@ -398,12 +398,12 @@ static void *uffd_poll_thread(void *arg) | |||
398 | uffd = msg.arg.fork.ufd; | 398 | uffd = msg.arg.fork.ufd; |
399 | pollfd[0].fd = uffd; | 399 | pollfd[0].fd = uffd; |
400 | break; | 400 | break; |
401 | case UFFD_EVENT_MADVDONTNEED: | 401 | case UFFD_EVENT_REMOVE: |
402 | uffd_reg.range.start = msg.arg.madv_dn.start; | 402 | uffd_reg.range.start = msg.arg.remove.start; |
403 | uffd_reg.range.len = msg.arg.madv_dn.end - | 403 | uffd_reg.range.len = msg.arg.remove.end - |
404 | msg.arg.madv_dn.start; | 404 | msg.arg.remove.start; |
405 | if (ioctl(uffd, UFFDIO_UNREGISTER, &uffd_reg.range)) | 405 | if (ioctl(uffd, UFFDIO_UNREGISTER, &uffd_reg.range)) |
406 | fprintf(stderr, "madv_dn failure\n"), exit(1); | 406 | fprintf(stderr, "remove failure\n"), exit(1); |
407 | break; | 407 | break; |
408 | case UFFD_EVENT_REMAP: | 408 | case UFFD_EVENT_REMAP: |
409 | area_dst = (char *)(unsigned long)msg.arg.remap.to; | 409 | area_dst = (char *)(unsigned long)msg.arg.remap.to; |
@@ -569,9 +569,9 @@ static int userfaultfd_open(int features) | |||
569 | * part is accessed after mremap. Since hugetlbfs does not support | 569 | * part is accessed after mremap. Since hugetlbfs does not support |
570 | * mremap, the entire monitored area is accessed in a single pass for | 570 | * mremap, the entire monitored area is accessed in a single pass for |
571 | * HUGETLB_TEST. | 571 | * HUGETLB_TEST. |
572 | * The release of the pages currently generates event only for | 572 | * The release of the pages currently generates event for shmem and |
573 | * anonymous memory (UFFD_EVENT_MADVDONTNEED), hence it is not checked | 573 | * anonymous memory (UFFD_EVENT_REMOVE), hence it is not checked |
574 | * for hugetlb and shmem. | 574 | * for hugetlb. |
575 | */ | 575 | */ |
576 | static int faulting_process(void) | 576 | static int faulting_process(void) |
577 | { | 577 | { |
@@ -610,7 +610,6 @@ static int faulting_process(void) | |||
610 | } | 610 | } |
611 | } | 611 | } |
612 | 612 | ||
613 | #ifndef SHMEM_TEST | ||
614 | if (release_pages(area_dst)) | 613 | if (release_pages(area_dst)) |
615 | return 1; | 614 | return 1; |
616 | 615 | ||
@@ -618,7 +617,6 @@ static int faulting_process(void) | |||
618 | if (my_bcmp(area_dst + nr * page_size, zeropage, page_size)) | 617 | if (my_bcmp(area_dst + nr * page_size, zeropage, page_size)) |
619 | fprintf(stderr, "nr %lu is not zero\n", nr), exit(1); | 618 | fprintf(stderr, "nr %lu is not zero\n", nr), exit(1); |
620 | } | 619 | } |
621 | #endif /* SHMEM_TEST */ | ||
622 | 620 | ||
623 | #endif /* HUGETLB_TEST */ | 621 | #endif /* HUGETLB_TEST */ |
624 | 622 | ||
@@ -715,14 +713,14 @@ static int userfaultfd_events_test(void) | |||
715 | pid_t pid; | 713 | pid_t pid; |
716 | char c; | 714 | char c; |
717 | 715 | ||
718 | printf("testing events (fork, remap, madv_dn): "); | 716 | printf("testing events (fork, remap, remove): "); |
719 | fflush(stdout); | 717 | fflush(stdout); |
720 | 718 | ||
721 | if (release_pages(area_dst)) | 719 | if (release_pages(area_dst)) |
722 | return 1; | 720 | return 1; |
723 | 721 | ||
724 | features = UFFD_FEATURE_EVENT_FORK | UFFD_FEATURE_EVENT_REMAP | | 722 | features = UFFD_FEATURE_EVENT_FORK | UFFD_FEATURE_EVENT_REMAP | |
725 | UFFD_FEATURE_EVENT_MADVDONTNEED; | 723 | UFFD_FEATURE_EVENT_REMOVE; |
726 | if (userfaultfd_open(features) < 0) | 724 | if (userfaultfd_open(features) < 0) |
727 | return 1; | 725 | return 1; |
728 | fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK); | 726 | fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK); |
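The selftest tracks the uAPI rename: releases that used to be reported only for anonymous memory as UFFD_EVENT_MADVDONTNEED are now the more general UFFD_EVENT_REMOVE, which covers shmem as well, and the feature bit is renamed to match. Requesting the event from userspace looks roughly like this (uffd is assumed to come from the userfaultfd() syscall):

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

static int enable_remove_events(int uffd)
{
	struct uffdio_api api = {
		.api = UFFD_API,
		.features = UFFD_FEATURE_EVENT_REMOVE,	/* was ..._EVENT_MADVDONTNEED */
	};

	/* handshake with the kernel; on return api.features holds what was granted */
	return ioctl(uffd, UFFDIO_API, &api);
}
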
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index cc4d6e0dd2a2..5b0dd4a9b2cb 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -2350,9 +2350,9 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me) | |||
2350 | } | 2350 | } |
2351 | EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin); | 2351 | EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin); |
2352 | 2352 | ||
2353 | static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 2353 | static int kvm_vcpu_fault(struct vm_fault *vmf) |
2354 | { | 2354 | { |
2355 | struct kvm_vcpu *vcpu = vma->vm_file->private_data; | 2355 | struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data; |
2356 | struct page *page; | 2356 | struct page *page; |
2357 | 2357 | ||
2358 | if (vmf->pgoff == 0) | 2358 | if (vmf->pgoff == 0) |