diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2019-07-17 11:58:04 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-07-17 11:58:04 -0400 |
commit | 57a8ec387e1441ea5e1232bc0749fb99a8cba7e7 (patch) | |
tree | b5fb03fc6bc5754de8b5b1f8b0e4f36d67c8315c | |
parent | 0a8ad0ffa4d80a544f6cbff703bf6394339afcdf (diff) | |
parent | 43e11fa2d1d3b6e35629fa556eb7d571edba2010 (diff) |
Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:
"VM:
- z3fold fixes and enhancements by Henry Burns and Vitaly Wool
- more accurate reclaimed slab caches calculations by Yafang Shao
- fix MAP_UNINITIALIZED UAPI symbol to not depend on config, by
Christoph Hellwig
- !CONFIG_MMU fixes by Christoph Hellwig
- new novmcoredd parameter to omit device dumps from vmcore, by
Kairui Song
- new test_meminit module for testing heap and pagealloc
initialization, by Alexander Potapenko
- ioremap improvements for huge mappings, by Anshuman Khandual
- generalize kprobe page fault handling, by Anshuman Khandual
- device-dax hotplug fixes and improvements, by Pavel Tatashin
- enable synchronous DAX fault on powerpc, by Aneesh Kumar K.V
- add pte_devmap() support for arm64, by Robin Murphy
- unify locked_vm accounting with a helper, by Daniel Jordan
- several misc fixes
core/lib:
- new typeof_member() macro including some users, by Alexey Dobriyan
- make BIT() and GENMASK() available in asm, by Masahiro Yamada
- changed LIST_POISON2 on x86_64 to 0xdead000000000122 for better
code generation, by Alexey Dobriyan
- rbtree code size optimizations, by Michel Lespinasse
- convert struct pid count to refcount_t, by Joel Fernandes
get_maintainer.pl:
- add --no-moderated switch to skip moderated ML's, by Joe Perches
misc:
- ptrace PTRACE_GET_SYSCALL_INFO interface
- coda updates
- gdb scripts, various"
[ Using merge message suggestion from Vlastimil Babka, with some editing - Linus ]
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (100 commits)
fs/select.c: use struct_size() in kmalloc()
mm: add account_locked_vm utility function
arm64: mm: implement pte_devmap support
mm: introduce ARCH_HAS_PTE_DEVMAP
mm: clean up is_device_*_page() definitions
mm/mmap: move common defines to mman-common.h
mm: move MAP_SYNC to asm-generic/mman-common.h
device-dax: "Hotremove" persistent memory that is used like normal RAM
mm/hotplug: make remove_memory() interface usable
device-dax: fix memory and resource leak if hotplug fails
include/linux/lz4.h: fix spelling and copy-paste errors in documentation
ipc/mqueue.c: only perform resource calculation if user valid
include/asm-generic/bug.h: fix "cut here" for WARN_ON for __WARN_TAINT architectures
scripts/gdb: add helpers to find and list devices
scripts/gdb: add lx-genpd-summary command
drivers/pps/pps.c: clear offset flags in PPS_SETPARAMS ioctl
kernel/pid.c: convert struct pid count to refcount_t
drivers/rapidio/devices/rio_mport_cdev.c: NUL terminate some strings
select: shift restore_saved_sigmask_unless() into poll_select_copy_remaining()
select: change do_poll() to return -ERESTARTNOHAND rather than -EINTR
...
151 files changed, 2674 insertions, 1225 deletions
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 34a363f91b46..a5f4004e8705 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt | |||
@@ -2877,6 +2877,17 @@ | |||
2877 | /sys/module/printk/parameters/console_suspend) to | 2877 | /sys/module/printk/parameters/console_suspend) to |
2878 | turn on/off it dynamically. | 2878 | turn on/off it dynamically. |
2879 | 2879 | ||
2880 | novmcoredd [KNL,KDUMP] | ||
2881 | Disable device dump. Device dump allows drivers to | ||
2882 | append dump data to vmcore so you can collect driver | ||
2883 | specified debug info. Drivers can append the data | ||
2884 | without any limit and this data is stored in memory, | ||
2885 | so this may cause significant memory stress. Disabling | ||
2886 | device dump can help save memory but the driver debug | ||
2887 | data will be no longer available. This parameter | ||
2888 | is only available when CONFIG_PROC_VMCORE_DEVICE_DUMP | ||
2889 | is set. | ||
2890 | |||
2880 | noaliencache [MM, NUMA, SLAB] Disables the allocation of alien | 2891 | noaliencache [MM, NUMA, SLAB] Disables the allocation of alien |
2881 | caches in the slab allocator. Saves per-node memory, | 2892 | caches in the slab allocator. Saves per-node memory, |
2882 | but will impact performance. | 2893 | but will impact performance. |
diff --git a/Documentation/devicetree/bindings/usb/s3c2410-usb.txt b/Documentation/devicetree/bindings/usb/s3c2410-usb.txt index e45b38ce2986..26c85afd0b53 100644 --- a/Documentation/devicetree/bindings/usb/s3c2410-usb.txt +++ b/Documentation/devicetree/bindings/usb/s3c2410-usb.txt | |||
@@ -4,7 +4,7 @@ OHCI | |||
4 | 4 | ||
5 | Required properties: | 5 | Required properties: |
6 | - compatible: should be "samsung,s3c2410-ohci" for USB host controller | 6 | - compatible: should be "samsung,s3c2410-ohci" for USB host controller |
7 | - reg: address and lenght of the controller memory mapped region | 7 | - reg: address and length of the controller memory mapped region |
8 | - interrupts: interrupt number for the USB OHCI controller | 8 | - interrupts: interrupt number for the USB OHCI controller |
9 | - clocks: Should reference the bus and host clocks | 9 | - clocks: Should reference the bus and host clocks |
10 | - clock-names: Should contain two strings | 10 | - clock-names: Should contain two strings |
diff --git a/Documentation/filesystems/coda.txt b/Documentation/filesystems/coda.txt index 61311356025d..545262c167c3 100644 --- a/Documentation/filesystems/coda.txt +++ b/Documentation/filesystems/coda.txt | |||
@@ -481,7 +481,10 @@ kernel support. | |||
481 | 481 | ||
482 | 482 | ||
483 | 483 | ||
484 | 484 | struct coda_timespec { | |
485 | int64_t tv_sec; /* seconds */ | ||
486 | long tv_nsec; /* nanoseconds */ | ||
487 | }; | ||
485 | 488 | ||
486 | struct coda_vattr { | 489 | struct coda_vattr { |
487 | enum coda_vtype va_type; /* vnode type (for create) */ | 490 | enum coda_vtype va_type; /* vnode type (for create) */ |
@@ -493,9 +496,9 @@ kernel support. | |||
493 | long va_fileid; /* file id */ | 496 | long va_fileid; /* file id */ |
494 | u_quad_t va_size; /* file size in bytes */ | 497 | u_quad_t va_size; /* file size in bytes */ |
495 | long va_blocksize; /* blocksize preferred for i/o */ | 498 | long va_blocksize; /* blocksize preferred for i/o */ |
496 | struct timespec va_atime; /* time of last access */ | 499 | struct coda_timespec va_atime; /* time of last access */ |
497 | struct timespec va_mtime; /* time of last modification */ | 500 | struct coda_timespec va_mtime; /* time of last modification */ |
498 | struct timespec va_ctime; /* time file changed */ | 501 | struct coda_timespec va_ctime; /* time file changed */ |
499 | u_long va_gen; /* generation number of file */ | 502 | u_long va_gen; /* generation number of file */ |
500 | u_long va_flags; /* flags defined for file */ | 503 | u_long va_flags; /* flags defined for file */ |
501 | dev_t va_rdev; /* device special file represents */ | 504 | dev_t va_rdev; /* device special file represents */ |
diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h index ccf9d65166bb..af2c0063dc75 100644 --- a/arch/alpha/include/asm/io.h +++ b/arch/alpha/include/asm/io.h | |||
@@ -93,11 +93,6 @@ static inline void * phys_to_virt(unsigned long address) | |||
93 | 93 | ||
94 | #define page_to_phys(page) page_to_pa(page) | 94 | #define page_to_phys(page) page_to_pa(page) |
95 | 95 | ||
96 | static inline dma_addr_t __deprecated isa_page_to_bus(struct page *page) | ||
97 | { | ||
98 | return page_to_phys(page); | ||
99 | } | ||
100 | |||
101 | /* Maximum PIO space address supported? */ | 96 | /* Maximum PIO space address supported? */ |
102 | #define IO_SPACE_LIMIT 0xffff | 97 | #define IO_SPACE_LIMIT 0xffff |
103 | 98 | ||
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h index da446180f17b..1d87c18a2976 100644 --- a/arch/arc/include/asm/pgtable.h +++ b/arch/arc/include/asm/pgtable.h | |||
@@ -32,7 +32,7 @@ | |||
32 | #ifndef _ASM_ARC_PGTABLE_H | 32 | #ifndef _ASM_ARC_PGTABLE_H |
33 | #define _ASM_ARC_PGTABLE_H | 33 | #define _ASM_ARC_PGTABLE_H |
34 | 34 | ||
35 | #include <linux/const.h> | 35 | #include <linux/bits.h> |
36 | #define __ARCH_USE_5LEVEL_HACK | 36 | #define __ARCH_USE_5LEVEL_HACK |
37 | #include <asm-generic/pgtable-nopmd.h> | 37 | #include <asm-generic/pgtable-nopmd.h> |
38 | #include <asm/page.h> | 38 | #include <asm/page.h> |
@@ -215,11 +215,11 @@ | |||
215 | #define BITS_FOR_PTE (PGDIR_SHIFT - PAGE_SHIFT) | 215 | #define BITS_FOR_PTE (PGDIR_SHIFT - PAGE_SHIFT) |
216 | #define BITS_FOR_PGD (32 - PGDIR_SHIFT) | 216 | #define BITS_FOR_PGD (32 - PGDIR_SHIFT) |
217 | 217 | ||
218 | #define PGDIR_SIZE _BITUL(PGDIR_SHIFT) /* vaddr span, not PDG sz */ | 218 | #define PGDIR_SIZE BIT(PGDIR_SHIFT) /* vaddr span, not PDG sz */ |
219 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | 219 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) |
220 | 220 | ||
221 | #define PTRS_PER_PTE _BITUL(BITS_FOR_PTE) | 221 | #define PTRS_PER_PTE BIT(BITS_FOR_PTE) |
222 | #define PTRS_PER_PGD _BITUL(BITS_FOR_PGD) | 222 | #define PTRS_PER_PGD BIT(BITS_FOR_PGD) |
223 | 223 | ||
224 | /* | 224 | /* |
225 | * Number of entries a user land program use. | 225 | * Number of entries a user land program use. |
diff --git a/arch/arc/plat-eznps/include/plat/ctop.h b/arch/arc/plat-eznps/include/plat/ctop.h index 309a994f64f0..a4a61531c7fb 100644 --- a/arch/arc/plat-eznps/include/plat/ctop.h +++ b/arch/arc/plat-eznps/include/plat/ctop.h | |||
@@ -10,6 +10,7 @@ | |||
10 | #error "Incorrect ctop.h include" | 10 | #error "Incorrect ctop.h include" |
11 | #endif | 11 | #endif |
12 | 12 | ||
13 | #include <linux/bits.h> | ||
13 | #include <linux/types.h> | 14 | #include <linux/types.h> |
14 | #include <soc/nps/common.h> | 15 | #include <soc/nps/common.h> |
15 | 16 | ||
@@ -51,19 +52,19 @@ | |||
51 | #define CTOP_INST_AXOR_DI_R2_R2_R3 0x4A664C06 | 52 | #define CTOP_INST_AXOR_DI_R2_R2_R3 0x4A664C06 |
52 | 53 | ||
53 | /* Do not use D$ for address in 2G-3G */ | 54 | /* Do not use D$ for address in 2G-3G */ |
54 | #define HW_COMPLY_KRN_NOT_D_CACHED _BITUL(28) | 55 | #define HW_COMPLY_KRN_NOT_D_CACHED BIT(28) |
55 | 56 | ||
56 | #define NPS_MSU_EN_CFG 0x80 | 57 | #define NPS_MSU_EN_CFG 0x80 |
57 | #define NPS_CRG_BLKID 0x480 | 58 | #define NPS_CRG_BLKID 0x480 |
58 | #define NPS_CRG_SYNC_BIT _BITUL(0) | 59 | #define NPS_CRG_SYNC_BIT BIT(0) |
59 | #define NPS_GIM_BLKID 0x5C0 | 60 | #define NPS_GIM_BLKID 0x5C0 |
60 | 61 | ||
61 | /* GIM registers and fields*/ | 62 | /* GIM registers and fields*/ |
62 | #define NPS_GIM_UART_LINE _BITUL(7) | 63 | #define NPS_GIM_UART_LINE BIT(7) |
63 | #define NPS_GIM_DBG_LAN_EAST_TX_DONE_LINE _BITUL(10) | 64 | #define NPS_GIM_DBG_LAN_EAST_TX_DONE_LINE BIT(10) |
64 | #define NPS_GIM_DBG_LAN_EAST_RX_RDY_LINE _BITUL(11) | 65 | #define NPS_GIM_DBG_LAN_EAST_RX_RDY_LINE BIT(11) |
65 | #define NPS_GIM_DBG_LAN_WEST_TX_DONE_LINE _BITUL(25) | 66 | #define NPS_GIM_DBG_LAN_WEST_TX_DONE_LINE BIT(25) |
66 | #define NPS_GIM_DBG_LAN_WEST_RX_RDY_LINE _BITUL(26) | 67 | #define NPS_GIM_DBG_LAN_WEST_RX_RDY_LINE BIT(26) |
67 | 68 | ||
68 | #ifndef __ASSEMBLY__ | 69 | #ifndef __ASSEMBLY__ |
69 | /* Functional registers definition */ | 70 | /* Functional registers definition */ |
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index f11c35cf0b74..7a0596fcb2e7 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h | |||
@@ -30,7 +30,6 @@ | |||
30 | * ISA I/O bus memory addresses are 1:1 with the physical address. | 30 | * ISA I/O bus memory addresses are 1:1 with the physical address. |
31 | */ | 31 | */ |
32 | #define isa_virt_to_bus virt_to_phys | 32 | #define isa_virt_to_bus virt_to_phys |
33 | #define isa_page_to_bus page_to_phys | ||
34 | #define isa_bus_to_virt phys_to_virt | 33 | #define isa_bus_to_virt phys_to_virt |
35 | 34 | ||
36 | /* | 35 | /* |
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 0e417233dad7..890eeaac3cbb 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c | |||
@@ -27,28 +27,6 @@ | |||
27 | 27 | ||
28 | #ifdef CONFIG_MMU | 28 | #ifdef CONFIG_MMU |
29 | 29 | ||
30 | #ifdef CONFIG_KPROBES | ||
31 | static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr) | ||
32 | { | ||
33 | int ret = 0; | ||
34 | |||
35 | if (!user_mode(regs)) { | ||
36 | /* kprobe_running() needs smp_processor_id() */ | ||
37 | preempt_disable(); | ||
38 | if (kprobe_running() && kprobe_fault_handler(regs, fsr)) | ||
39 | ret = 1; | ||
40 | preempt_enable(); | ||
41 | } | ||
42 | |||
43 | return ret; | ||
44 | } | ||
45 | #else | ||
46 | static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr) | ||
47 | { | ||
48 | return 0; | ||
49 | } | ||
50 | #endif | ||
51 | |||
52 | /* | 30 | /* |
53 | * This is useful to dump out the page tables associated with | 31 | * This is useful to dump out the page tables associated with |
54 | * 'addr' in mm 'mm'. | 32 | * 'addr' in mm 'mm'. |
@@ -265,7 +243,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |||
265 | vm_fault_t fault; | 243 | vm_fault_t fault; |
266 | unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; | 244 | unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; |
267 | 245 | ||
268 | if (notify_page_fault(regs, fsr)) | 246 | if (kprobe_page_fault(regs, fsr)) |
269 | return 0; | 247 | return 0; |
270 | 248 | ||
271 | tsk = current; | 249 | tsk = current; |
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 86f81b5afd95..e1ea69994e0f 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig | |||
@@ -24,6 +24,7 @@ config ARM64 | |||
24 | select ARCH_HAS_KCOV | 24 | select ARCH_HAS_KCOV |
25 | select ARCH_HAS_KEEPINITRD | 25 | select ARCH_HAS_KEEPINITRD |
26 | select ARCH_HAS_MEMBARRIER_SYNC_CORE | 26 | select ARCH_HAS_MEMBARRIER_SYNC_CORE |
27 | select ARCH_HAS_PTE_DEVMAP | ||
27 | select ARCH_HAS_PTE_SPECIAL | 28 | select ARCH_HAS_PTE_SPECIAL |
28 | select ARCH_HAS_SETUP_DMA_OPS | 29 | select ARCH_HAS_SETUP_DMA_OPS |
29 | select ARCH_HAS_SET_DIRECT_MAP | 30 | select ARCH_HAS_SET_DIRECT_MAP |
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index f318258a14be..92d2e9f28f28 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h | |||
@@ -16,6 +16,7 @@ | |||
16 | #define PTE_WRITE (PTE_DBM) /* same as DBM (51) */ | 16 | #define PTE_WRITE (PTE_DBM) /* same as DBM (51) */ |
17 | #define PTE_DIRTY (_AT(pteval_t, 1) << 55) | 17 | #define PTE_DIRTY (_AT(pteval_t, 1) << 55) |
18 | #define PTE_SPECIAL (_AT(pteval_t, 1) << 56) | 18 | #define PTE_SPECIAL (_AT(pteval_t, 1) << 56) |
19 | #define PTE_DEVMAP (_AT(pteval_t, 1) << 57) | ||
19 | #define PTE_PROT_NONE (_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */ | 20 | #define PTE_PROT_NONE (_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */ |
20 | 21 | ||
21 | #ifndef __ASSEMBLY__ | 22 | #ifndef __ASSEMBLY__ |
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 3052381baaeb..87a4b2ddc1a1 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h | |||
@@ -79,6 +79,7 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; | |||
79 | #define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE)) | 79 | #define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE)) |
80 | #define pte_user_exec(pte) (!(pte_val(pte) & PTE_UXN)) | 80 | #define pte_user_exec(pte) (!(pte_val(pte) & PTE_UXN)) |
81 | #define pte_cont(pte) (!!(pte_val(pte) & PTE_CONT)) | 81 | #define pte_cont(pte) (!!(pte_val(pte) & PTE_CONT)) |
82 | #define pte_devmap(pte) (!!(pte_val(pte) & PTE_DEVMAP)) | ||
82 | 83 | ||
83 | #define pte_cont_addr_end(addr, end) \ | 84 | #define pte_cont_addr_end(addr, end) \ |
84 | ({ unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK; \ | 85 | ({ unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK; \ |
@@ -206,6 +207,11 @@ static inline pmd_t pmd_mkcont(pmd_t pmd) | |||
206 | return __pmd(pmd_val(pmd) | PMD_SECT_CONT); | 207 | return __pmd(pmd_val(pmd) | PMD_SECT_CONT); |
207 | } | 208 | } |
208 | 209 | ||
210 | static inline pte_t pte_mkdevmap(pte_t pte) | ||
211 | { | ||
212 | return set_pte_bit(pte, __pgprot(PTE_DEVMAP)); | ||
213 | } | ||
214 | |||
209 | static inline void set_pte(pte_t *ptep, pte_t pte) | 215 | static inline void set_pte(pte_t *ptep, pte_t pte) |
210 | { | 216 | { |
211 | WRITE_ONCE(*ptep, pte); | 217 | WRITE_ONCE(*ptep, pte); |
@@ -388,6 +394,11 @@ static inline int pmd_protnone(pmd_t pmd) | |||
388 | 394 | ||
389 | #define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT)) | 395 | #define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT)) |
390 | 396 | ||
397 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
398 | #define pmd_devmap(pmd) pte_devmap(pmd_pte(pmd)) | ||
399 | #endif | ||
400 | #define pmd_mkdevmap(pmd) pte_pmd(pte_mkdevmap(pmd_pte(pmd))) | ||
401 | |||
391 | #define __pmd_to_phys(pmd) __pte_to_phys(pmd_pte(pmd)) | 402 | #define __pmd_to_phys(pmd) __pte_to_phys(pmd_pte(pmd)) |
392 | #define __phys_to_pmd_val(phys) __phys_to_pte_val(phys) | 403 | #define __phys_to_pmd_val(phys) __phys_to_pte_val(phys) |
393 | #define pmd_pfn(pmd) ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT) | 404 | #define pmd_pfn(pmd) ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT) |
@@ -673,6 +684,16 @@ static inline int pmdp_set_access_flags(struct vm_area_struct *vma, | |||
673 | { | 684 | { |
674 | return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty); | 685 | return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty); |
675 | } | 686 | } |
687 | |||
688 | static inline int pud_devmap(pud_t pud) | ||
689 | { | ||
690 | return 0; | ||
691 | } | ||
692 | |||
693 | static inline int pgd_devmap(pgd_t pgd) | ||
694 | { | ||
695 | return 0; | ||
696 | } | ||
676 | #endif | 697 | #endif |
677 | 698 | ||
678 | /* | 699 | /* |
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index a7522fca1105..06ebcfef73df 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h | |||
@@ -9,7 +9,7 @@ | |||
9 | #ifndef __ASM_SYSREG_H | 9 | #ifndef __ASM_SYSREG_H |
10 | #define __ASM_SYSREG_H | 10 | #define __ASM_SYSREG_H |
11 | 11 | ||
12 | #include <linux/const.h> | 12 | #include <linux/bits.h> |
13 | #include <linux/stringify.h> | 13 | #include <linux/stringify.h> |
14 | 14 | ||
15 | /* | 15 | /* |
@@ -478,31 +478,31 @@ | |||
478 | #define SYS_CNTV_CVAL_EL02 sys_reg(3, 5, 14, 3, 2) | 478 | #define SYS_CNTV_CVAL_EL02 sys_reg(3, 5, 14, 3, 2) |
479 | 479 | ||
480 | /* Common SCTLR_ELx flags. */ | 480 | /* Common SCTLR_ELx flags. */ |
481 | #define SCTLR_ELx_DSSBS (_BITUL(44)) | 481 | #define SCTLR_ELx_DSSBS (BIT(44)) |
482 | #define SCTLR_ELx_ENIA (_BITUL(31)) | 482 | #define SCTLR_ELx_ENIA (BIT(31)) |
483 | #define SCTLR_ELx_ENIB (_BITUL(30)) | 483 | #define SCTLR_ELx_ENIB (BIT(30)) |
484 | #define SCTLR_ELx_ENDA (_BITUL(27)) | 484 | #define SCTLR_ELx_ENDA (BIT(27)) |
485 | #define SCTLR_ELx_EE (_BITUL(25)) | 485 | #define SCTLR_ELx_EE (BIT(25)) |
486 | #define SCTLR_ELx_IESB (_BITUL(21)) | 486 | #define SCTLR_ELx_IESB (BIT(21)) |
487 | #define SCTLR_ELx_WXN (_BITUL(19)) | 487 | #define SCTLR_ELx_WXN (BIT(19)) |
488 | #define SCTLR_ELx_ENDB (_BITUL(13)) | 488 | #define SCTLR_ELx_ENDB (BIT(13)) |
489 | #define SCTLR_ELx_I (_BITUL(12)) | 489 | #define SCTLR_ELx_I (BIT(12)) |
490 | #define SCTLR_ELx_SA (_BITUL(3)) | 490 | #define SCTLR_ELx_SA (BIT(3)) |
491 | #define SCTLR_ELx_C (_BITUL(2)) | 491 | #define SCTLR_ELx_C (BIT(2)) |
492 | #define SCTLR_ELx_A (_BITUL(1)) | 492 | #define SCTLR_ELx_A (BIT(1)) |
493 | #define SCTLR_ELx_M (_BITUL(0)) | 493 | #define SCTLR_ELx_M (BIT(0)) |
494 | 494 | ||
495 | #define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \ | 495 | #define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \ |
496 | SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_IESB) | 496 | SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_IESB) |
497 | 497 | ||
498 | /* SCTLR_EL2 specific flags. */ | 498 | /* SCTLR_EL2 specific flags. */ |
499 | #define SCTLR_EL2_RES1 ((_BITUL(4)) | (_BITUL(5)) | (_BITUL(11)) | (_BITUL(16)) | \ | 499 | #define SCTLR_EL2_RES1 ((BIT(4)) | (BIT(5)) | (BIT(11)) | (BIT(16)) | \ |
500 | (_BITUL(18)) | (_BITUL(22)) | (_BITUL(23)) | (_BITUL(28)) | \ | 500 | (BIT(18)) | (BIT(22)) | (BIT(23)) | (BIT(28)) | \ |
501 | (_BITUL(29))) | 501 | (BIT(29))) |
502 | #define SCTLR_EL2_RES0 ((_BITUL(6)) | (_BITUL(7)) | (_BITUL(8)) | (_BITUL(9)) | \ | 502 | #define SCTLR_EL2_RES0 ((BIT(6)) | (BIT(7)) | (BIT(8)) | (BIT(9)) | \ |
503 | (_BITUL(10)) | (_BITUL(13)) | (_BITUL(14)) | (_BITUL(15)) | \ | 503 | (BIT(10)) | (BIT(13)) | (BIT(14)) | (BIT(15)) | \ |
504 | (_BITUL(17)) | (_BITUL(20)) | (_BITUL(24)) | (_BITUL(26)) | \ | 504 | (BIT(17)) | (BIT(20)) | (BIT(24)) | (BIT(26)) | \ |
505 | (_BITUL(27)) | (_BITUL(30)) | (_BITUL(31)) | \ | 505 | (BIT(27)) | (BIT(30)) | (BIT(31)) | \ |
506 | (0xffffefffUL << 32)) | 506 | (0xffffefffUL << 32)) |
507 | 507 | ||
508 | #ifdef CONFIG_CPU_BIG_ENDIAN | 508 | #ifdef CONFIG_CPU_BIG_ENDIAN |
@@ -524,23 +524,23 @@ | |||
524 | #endif | 524 | #endif |
525 | 525 | ||
526 | /* SCTLR_EL1 specific flags. */ | 526 | /* SCTLR_EL1 specific flags. */ |
527 | #define SCTLR_EL1_UCI (_BITUL(26)) | 527 | #define SCTLR_EL1_UCI (BIT(26)) |
528 | #define SCTLR_EL1_E0E (_BITUL(24)) | 528 | #define SCTLR_EL1_E0E (BIT(24)) |
529 | #define SCTLR_EL1_SPAN (_BITUL(23)) | 529 | #define SCTLR_EL1_SPAN (BIT(23)) |
530 | #define SCTLR_EL1_NTWE (_BITUL(18)) | 530 | #define SCTLR_EL1_NTWE (BIT(18)) |
531 | #define SCTLR_EL1_NTWI (_BITUL(16)) | 531 | #define SCTLR_EL1_NTWI (BIT(16)) |
532 | #define SCTLR_EL1_UCT (_BITUL(15)) | 532 | #define SCTLR_EL1_UCT (BIT(15)) |
533 | #define SCTLR_EL1_DZE (_BITUL(14)) | 533 | #define SCTLR_EL1_DZE (BIT(14)) |
534 | #define SCTLR_EL1_UMA (_BITUL(9)) | 534 | #define SCTLR_EL1_UMA (BIT(9)) |
535 | #define SCTLR_EL1_SED (_BITUL(8)) | 535 | #define SCTLR_EL1_SED (BIT(8)) |
536 | #define SCTLR_EL1_ITD (_BITUL(7)) | 536 | #define SCTLR_EL1_ITD (BIT(7)) |
537 | #define SCTLR_EL1_CP15BEN (_BITUL(5)) | 537 | #define SCTLR_EL1_CP15BEN (BIT(5)) |
538 | #define SCTLR_EL1_SA0 (_BITUL(4)) | 538 | #define SCTLR_EL1_SA0 (BIT(4)) |
539 | 539 | ||
540 | #define SCTLR_EL1_RES1 ((_BITUL(11)) | (_BITUL(20)) | (_BITUL(22)) | (_BITUL(28)) | \ | 540 | #define SCTLR_EL1_RES1 ((BIT(11)) | (BIT(20)) | (BIT(22)) | (BIT(28)) | \ |
541 | (_BITUL(29))) | 541 | (BIT(29))) |
542 | #define SCTLR_EL1_RES0 ((_BITUL(6)) | (_BITUL(10)) | (_BITUL(13)) | (_BITUL(17)) | \ | 542 | #define SCTLR_EL1_RES0 ((BIT(6)) | (BIT(10)) | (BIT(13)) | (BIT(17)) | \ |
543 | (_BITUL(27)) | (_BITUL(30)) | (_BITUL(31)) | \ | 543 | (BIT(27)) | (BIT(30)) | (BIT(31)) | \ |
544 | (0xffffefffUL << 32)) | 544 | (0xffffefffUL << 32)) |
545 | 545 | ||
546 | #ifdef CONFIG_CPU_BIG_ENDIAN | 546 | #ifdef CONFIG_CPU_BIG_ENDIAN |
@@ -756,13 +756,13 @@ | |||
756 | #define ZCR_ELx_LEN_SIZE 9 | 756 | #define ZCR_ELx_LEN_SIZE 9 |
757 | #define ZCR_ELx_LEN_MASK 0x1ff | 757 | #define ZCR_ELx_LEN_MASK 0x1ff |
758 | 758 | ||
759 | #define CPACR_EL1_ZEN_EL1EN (_BITUL(16)) /* enable EL1 access */ | 759 | #define CPACR_EL1_ZEN_EL1EN (BIT(16)) /* enable EL1 access */ |
760 | #define CPACR_EL1_ZEN_EL0EN (_BITUL(17)) /* enable EL0 access, if EL1EN set */ | 760 | #define CPACR_EL1_ZEN_EL0EN (BIT(17)) /* enable EL0 access, if EL1EN set */ |
761 | #define CPACR_EL1_ZEN (CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN) | 761 | #define CPACR_EL1_ZEN (CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN) |
762 | 762 | ||
763 | 763 | ||
764 | /* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */ | 764 | /* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */ |
765 | #define SYS_MPIDR_SAFE_VAL (_BITUL(31)) | 765 | #define SYS_MPIDR_SAFE_VAL (BIT(31)) |
766 | 766 | ||
767 | #ifdef __ASSEMBLY__ | 767 | #ifdef __ASSEMBLY__ |
768 | 768 | ||
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index c8c61b1eb479..9568c116ac7f 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c | |||
@@ -59,28 +59,6 @@ static inline const struct fault_info *esr_to_debug_fault_info(unsigned int esr) | |||
59 | return debug_fault_info + DBG_ESR_EVT(esr); | 59 | return debug_fault_info + DBG_ESR_EVT(esr); |
60 | } | 60 | } |
61 | 61 | ||
62 | #ifdef CONFIG_KPROBES | ||
63 | static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr) | ||
64 | { | ||
65 | int ret = 0; | ||
66 | |||
67 | /* kprobe_running() needs smp_processor_id() */ | ||
68 | if (!user_mode(regs)) { | ||
69 | preempt_disable(); | ||
70 | if (kprobe_running() && kprobe_fault_handler(regs, esr)) | ||
71 | ret = 1; | ||
72 | preempt_enable(); | ||
73 | } | ||
74 | |||
75 | return ret; | ||
76 | } | ||
77 | #else | ||
78 | static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr) | ||
79 | { | ||
80 | return 0; | ||
81 | } | ||
82 | #endif | ||
83 | |||
84 | static void data_abort_decode(unsigned int esr) | 62 | static void data_abort_decode(unsigned int esr) |
85 | { | 63 | { |
86 | pr_alert("Data abort info:\n"); | 64 | pr_alert("Data abort info:\n"); |
@@ -434,7 +412,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, | |||
434 | unsigned long vm_flags = VM_READ | VM_WRITE; | 412 | unsigned long vm_flags = VM_READ | VM_WRITE; |
435 | unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; | 413 | unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; |
436 | 414 | ||
437 | if (notify_page_fault(regs, esr)) | 415 | if (kprobe_page_fault(regs, esr)) |
438 | return 0; | 416 | return 0; |
439 | 417 | ||
440 | /* | 418 | /* |
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 1b49c08dfa2b..e661469cabdd 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c | |||
@@ -942,6 +942,11 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys) | |||
942 | return dt_virt; | 942 | return dt_virt; |
943 | } | 943 | } |
944 | 944 | ||
945 | int __init arch_ioremap_p4d_supported(void) | ||
946 | { | ||
947 | return 0; | ||
948 | } | ||
949 | |||
945 | int __init arch_ioremap_pud_supported(void) | 950 | int __init arch_ioremap_pud_supported(void) |
946 | { | 951 | { |
947 | /* | 952 | /* |
diff --git a/arch/hexagon/include/asm/syscall.h b/arch/hexagon/include/asm/syscall.h index 4f054b1ddef5..f6e454f18038 100644 --- a/arch/hexagon/include/asm/syscall.h +++ b/arch/hexagon/include/asm/syscall.h | |||
@@ -9,6 +9,8 @@ | |||
9 | #define _ASM_HEXAGON_SYSCALL_H | 9 | #define _ASM_HEXAGON_SYSCALL_H |
10 | 10 | ||
11 | #include <uapi/linux/audit.h> | 11 | #include <uapi/linux/audit.h> |
12 | #include <linux/err.h> | ||
13 | #include <asm/ptrace.h> | ||
12 | 14 | ||
13 | typedef long (*syscall_fn)(unsigned long, unsigned long, | 15 | typedef long (*syscall_fn)(unsigned long, unsigned long, |
14 | unsigned long, unsigned long, | 16 | unsigned long, unsigned long, |
@@ -31,6 +33,18 @@ static inline void syscall_get_arguments(struct task_struct *task, | |||
31 | memcpy(args, &(®s->r00)[0], 6 * sizeof(args[0])); | 33 | memcpy(args, &(®s->r00)[0], 6 * sizeof(args[0])); |
32 | } | 34 | } |
33 | 35 | ||
36 | static inline long syscall_get_error(struct task_struct *task, | ||
37 | struct pt_regs *regs) | ||
38 | { | ||
39 | return IS_ERR_VALUE(regs->r00) ? regs->r00 : 0; | ||
40 | } | ||
41 | |||
42 | static inline long syscall_get_return_value(struct task_struct *task, | ||
43 | struct pt_regs *regs) | ||
44 | { | ||
45 | return regs->r00; | ||
46 | } | ||
47 | |||
34 | static inline int syscall_get_arch(struct task_struct *task) | 48 | static inline int syscall_get_arch(struct task_struct *task) |
35 | { | 49 | { |
36 | return AUDIT_ARCH_HEXAGON; | 50 | return AUDIT_ARCH_HEXAGON; |
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index 3c3a283d3172..c2f299fe9e04 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c | |||
@@ -21,28 +21,6 @@ | |||
21 | 21 | ||
22 | extern int die(char *, struct pt_regs *, long); | 22 | extern int die(char *, struct pt_regs *, long); |
23 | 23 | ||
24 | #ifdef CONFIG_KPROBES | ||
25 | static inline int notify_page_fault(struct pt_regs *regs, int trap) | ||
26 | { | ||
27 | int ret = 0; | ||
28 | |||
29 | if (!user_mode(regs)) { | ||
30 | /* kprobe_running() needs smp_processor_id() */ | ||
31 | preempt_disable(); | ||
32 | if (kprobe_running() && kprobe_fault_handler(regs, trap)) | ||
33 | ret = 1; | ||
34 | preempt_enable(); | ||
35 | } | ||
36 | |||
37 | return ret; | ||
38 | } | ||
39 | #else | ||
40 | static inline int notify_page_fault(struct pt_regs *regs, int trap) | ||
41 | { | ||
42 | return 0; | ||
43 | } | ||
44 | #endif | ||
45 | |||
46 | /* | 24 | /* |
47 | * Return TRUE if ADDRESS points at a page in the kernel's mapped segment | 25 | * Return TRUE if ADDRESS points at a page in the kernel's mapped segment |
48 | * (inside region 5, on ia64) and that page is present. | 26 | * (inside region 5, on ia64) and that page is present. |
@@ -116,7 +94,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re | |||
116 | /* | 94 | /* |
117 | * This is to handle the kprobes on user space access instructions | 95 | * This is to handle the kprobes on user space access instructions |
118 | */ | 96 | */ |
119 | if (notify_page_fault(regs, TRAP_BRKPT)) | 97 | if (kprobe_page_fault(regs, TRAP_BRKPT)) |
120 | return; | 98 | return; |
121 | 99 | ||
122 | if (user_mode(regs)) | 100 | if (user_mode(regs)) |
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h index 29997e42480e..1790274c27eb 100644 --- a/arch/mips/include/asm/io.h +++ b/arch/mips/include/asm/io.h | |||
@@ -149,8 +149,6 @@ static inline void *isa_bus_to_virt(unsigned long address) | |||
149 | return phys_to_virt(address); | 149 | return phys_to_virt(address); |
150 | } | 150 | } |
151 | 151 | ||
152 | #define isa_page_to_bus page_to_phys | ||
153 | |||
154 | /* | 152 | /* |
155 | * However PCI ones are not necessarily 1:1 and therefore these interfaces | 153 | * However PCI ones are not necessarily 1:1 and therefore these interfaces |
156 | * are forbidden in portable PCI drivers. | 154 | * are forbidden in portable PCI drivers. |
diff --git a/arch/mips/include/asm/kprobes.h b/arch/mips/include/asm/kprobes.h index 3cf8e4d5fa28..68b1e5d458cf 100644 --- a/arch/mips/include/asm/kprobes.h +++ b/arch/mips/include/asm/kprobes.h | |||
@@ -41,6 +41,7 @@ do { \ | |||
41 | #define kretprobe_blacklist_size 0 | 41 | #define kretprobe_blacklist_size 0 |
42 | 42 | ||
43 | void arch_remove_kprobe(struct kprobe *p); | 43 | void arch_remove_kprobe(struct kprobe *p); |
44 | int kprobe_fault_handler(struct pt_regs *regs, int trapnr); | ||
44 | 45 | ||
45 | /* Architecture specific copy of original instruction*/ | 46 | /* Architecture specific copy of original instruction*/ |
46 | struct arch_specific_insn { | 47 | struct arch_specific_insn { |
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h index acf80ae0a430..83bb439597d8 100644 --- a/arch/mips/include/asm/syscall.h +++ b/arch/mips/include/asm/syscall.h | |||
@@ -89,6 +89,12 @@ static inline unsigned long mips_get_syscall_arg(unsigned long *arg, | |||
89 | unreachable(); | 89 | unreachable(); |
90 | } | 90 | } |
91 | 91 | ||
92 | static inline long syscall_get_error(struct task_struct *task, | ||
93 | struct pt_regs *regs) | ||
94 | { | ||
95 | return regs->regs[7] ? -regs->regs[2] : 0; | ||
96 | } | ||
97 | |||
92 | static inline long syscall_get_return_value(struct task_struct *task, | 98 | static inline long syscall_get_return_value(struct task_struct *task, |
93 | struct pt_regs *regs) | 99 | struct pt_regs *regs) |
94 | { | 100 | { |
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c index 81ba1d3c367c..6cfae2411c04 100644 --- a/arch/mips/kernel/kprobes.c +++ b/arch/mips/kernel/kprobes.c | |||
@@ -398,7 +398,7 @@ out: | |||
398 | return 1; | 398 | return 1; |
399 | } | 399 | } |
400 | 400 | ||
401 | static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr) | 401 | int kprobe_fault_handler(struct pt_regs *regs, int trapnr) |
402 | { | 402 | { |
403 | struct kprobe *cur = kprobe_running(); | 403 | struct kprobe *cur = kprobe_running(); |
404 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | 404 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
diff --git a/arch/nds32/include/asm/syscall.h b/arch/nds32/include/asm/syscall.h index 899b2fb4b52f..7b5180d78e20 100644 --- a/arch/nds32/include/asm/syscall.h +++ b/arch/nds32/include/asm/syscall.h | |||
@@ -26,7 +26,8 @@ struct pt_regs; | |||
26 | * | 26 | * |
27 | * It's only valid to call this when @task is known to be blocked. | 27 | * It's only valid to call this when @task is known to be blocked. |
28 | */ | 28 | */ |
29 | int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) | 29 | static inline int |
30 | syscall_get_nr(struct task_struct *task, struct pt_regs *regs) | ||
30 | { | 31 | { |
31 | return regs->syscallno; | 32 | return regs->syscallno; |
32 | } | 33 | } |
@@ -47,7 +48,8 @@ int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) | |||
47 | * system call instruction. This may not be the same as what the | 48 | * system call instruction. This may not be the same as what the |
48 | * register state looked like at system call entry tracing. | 49 | * register state looked like at system call entry tracing. |
49 | */ | 50 | */ |
50 | void syscall_rollback(struct task_struct *task, struct pt_regs *regs) | 51 | static inline void |
52 | syscall_rollback(struct task_struct *task, struct pt_regs *regs) | ||
51 | { | 53 | { |
52 | regs->uregs[0] = regs->orig_r0; | 54 | regs->uregs[0] = regs->orig_r0; |
53 | } | 55 | } |
@@ -62,7 +64,8 @@ void syscall_rollback(struct task_struct *task, struct pt_regs *regs) | |||
62 | * It's only valid to call this when @task is stopped for tracing on exit | 64 | * It's only valid to call this when @task is stopped for tracing on exit |
63 | * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. | 65 | * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. |
64 | */ | 66 | */ |
65 | long syscall_get_error(struct task_struct *task, struct pt_regs *regs) | 67 | static inline long |
68 | syscall_get_error(struct task_struct *task, struct pt_regs *regs) | ||
66 | { | 69 | { |
67 | unsigned long error = regs->uregs[0]; | 70 | unsigned long error = regs->uregs[0]; |
68 | return IS_ERR_VALUE(error) ? error : 0; | 71 | return IS_ERR_VALUE(error) ? error : 0; |
@@ -79,7 +82,8 @@ long syscall_get_error(struct task_struct *task, struct pt_regs *regs) | |||
79 | * It's only valid to call this when @task is stopped for tracing on exit | 82 | * It's only valid to call this when @task is stopped for tracing on exit |
80 | * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. | 83 | * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. |
81 | */ | 84 | */ |
82 | long syscall_get_return_value(struct task_struct *task, struct pt_regs *regs) | 85 | static inline long |
86 | syscall_get_return_value(struct task_struct *task, struct pt_regs *regs) | ||
83 | { | 87 | { |
84 | return regs->uregs[0]; | 88 | return regs->uregs[0]; |
85 | } | 89 | } |
@@ -99,8 +103,9 @@ long syscall_get_return_value(struct task_struct *task, struct pt_regs *regs) | |||
99 | * It's only valid to call this when @task is stopped for tracing on exit | 103 | * It's only valid to call this when @task is stopped for tracing on exit |
100 | * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. | 104 | * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. |
101 | */ | 105 | */ |
102 | void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, | 106 | static inline void |
103 | int error, long val) | 107 | syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, |
108 | int error, long val) | ||
104 | { | 109 | { |
105 | regs->uregs[0] = (long)error ? error : val; | 110 | regs->uregs[0] = (long)error ? error : val; |
106 | } | 111 | } |
@@ -118,8 +123,9 @@ void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, | |||
118 | * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. | 123 | * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. |
119 | */ | 124 | */ |
120 | #define SYSCALL_MAX_ARGS 6 | 125 | #define SYSCALL_MAX_ARGS 6 |
121 | void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, | 126 | static inline void |
122 | unsigned long *args) | 127 | syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, |
128 | unsigned long *args) | ||
123 | { | 129 | { |
124 | args[0] = regs->orig_r0; | 130 | args[0] = regs->orig_r0; |
125 | args++; | 131 | args++; |
@@ -138,8 +144,9 @@ void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, | |||
138 | * It's only valid to call this when @task is stopped for tracing on | 144 | * It's only valid to call this when @task is stopped for tracing on |
139 | * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. | 145 | * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. |
140 | */ | 146 | */ |
141 | void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, | 147 | static inline void |
142 | const unsigned long *args) | 148 | syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, |
149 | const unsigned long *args) | ||
143 | { | 150 | { |
144 | regs->orig_r0 = args[0]; | 151 | regs->orig_r0 = args[0]; |
145 | args++; | 152 | args++; |
diff --git a/arch/parisc/include/asm/syscall.h b/arch/parisc/include/asm/syscall.h index 80757e43cf2c..00b127a5e09b 100644 --- a/arch/parisc/include/asm/syscall.h +++ b/arch/parisc/include/asm/syscall.h | |||
@@ -29,6 +29,13 @@ static inline void syscall_get_arguments(struct task_struct *tsk, | |||
29 | args[0] = regs->gr[26]; | 29 | args[0] = regs->gr[26]; |
30 | } | 30 | } |
31 | 31 | ||
32 | static inline long syscall_get_error(struct task_struct *task, | ||
33 | struct pt_regs *regs) | ||
34 | { | ||
35 | unsigned long error = regs->gr[28]; | ||
36 | return IS_ERR_VALUE(error) ? error : 0; | ||
37 | } | ||
38 | |||
32 | static inline long syscall_get_return_value(struct task_struct *task, | 39 | static inline long syscall_get_return_value(struct task_struct *task, |
33 | struct pt_regs *regs) | 40 | struct pt_regs *regs) |
34 | { | 41 | { |
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index f516796dd819..d8dcd8820369 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig | |||
@@ -129,6 +129,7 @@ config PPC | |||
129 | select ARCH_HAS_MMIOWB if PPC64 | 129 | select ARCH_HAS_MMIOWB if PPC64 |
130 | select ARCH_HAS_PHYS_TO_DMA | 130 | select ARCH_HAS_PHYS_TO_DMA |
131 | select ARCH_HAS_PMEM_API if PPC64 | 131 | select ARCH_HAS_PMEM_API if PPC64 |
132 | select ARCH_HAS_PTE_DEVMAP if PPC_BOOK3S_64 | ||
132 | select ARCH_HAS_PTE_SPECIAL | 133 | select ARCH_HAS_PTE_SPECIAL |
133 | select ARCH_HAS_MEMBARRIER_CALLBACKS | 134 | select ARCH_HAS_MEMBARRIER_CALLBACKS |
134 | select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC64 | 135 | select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC64 |
@@ -136,7 +137,6 @@ config PPC | |||
136 | select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST | 137 | select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST |
137 | select ARCH_HAS_UACCESS_FLUSHCACHE if PPC64 | 138 | select ARCH_HAS_UACCESS_FLUSHCACHE if PPC64 |
138 | select ARCH_HAS_UBSAN_SANITIZE_ALL | 139 | select ARCH_HAS_UBSAN_SANITIZE_ALL |
139 | select ARCH_HAS_ZONE_DEVICE if PPC_BOOK3S_64 | ||
140 | select ARCH_HAVE_NMI_SAFE_CMPXCHG | 140 | select ARCH_HAVE_NMI_SAFE_CMPXCHG |
141 | select ARCH_KEEP_MEMBLOCK | 141 | select ARCH_KEEP_MEMBLOCK |
142 | select ARCH_MIGHT_HAVE_PC_PARPORT | 142 | select ARCH_MIGHT_HAVE_PC_PARPORT |
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 62e6ea0a7650..8308f32e9782 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h | |||
@@ -90,7 +90,6 @@ | |||
90 | #define _PAGE_SOFT_DIRTY _RPAGE_SW3 /* software: software dirty tracking */ | 90 | #define _PAGE_SOFT_DIRTY _RPAGE_SW3 /* software: software dirty tracking */ |
91 | #define _PAGE_SPECIAL _RPAGE_SW2 /* software: special page */ | 91 | #define _PAGE_SPECIAL _RPAGE_SW2 /* software: special page */ |
92 | #define _PAGE_DEVMAP _RPAGE_SW1 /* software: ZONE_DEVICE page */ | 92 | #define _PAGE_DEVMAP _RPAGE_SW1 /* software: ZONE_DEVICE page */ |
93 | #define __HAVE_ARCH_PTE_DEVMAP | ||
94 | 93 | ||
95 | /* | 94 | /* |
96 | * Drivers request for cache inhibited pte mapping using _PAGE_NO_CACHE | 95 | * Drivers request for cache inhibited pte mapping using _PAGE_NO_CACHE |
diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h index 81abcf6a737b..38d62acfdce7 100644 --- a/arch/powerpc/include/asm/syscall.h +++ b/arch/powerpc/include/asm/syscall.h | |||
@@ -35,6 +35,16 @@ static inline void syscall_rollback(struct task_struct *task, | |||
35 | regs->gpr[3] = regs->orig_gpr3; | 35 | regs->gpr[3] = regs->orig_gpr3; |
36 | } | 36 | } |
37 | 37 | ||
38 | static inline long syscall_get_error(struct task_struct *task, | ||
39 | struct pt_regs *regs) | ||
40 | { | ||
41 | /* | ||
42 | * If the system call failed, | ||
43 | * regs->gpr[3] contains a positive ERRORCODE. | ||
44 | */ | ||
45 | return (regs->ccr & 0x10000000UL) ? -regs->gpr[3] : 0; | ||
46 | } | ||
47 | |||
38 | static inline long syscall_get_return_value(struct task_struct *task, | 48 | static inline long syscall_get_return_value(struct task_struct *task, |
39 | struct pt_regs *regs) | 49 | struct pt_regs *regs) |
40 | { | 50 | { |
diff --git a/arch/powerpc/include/uapi/asm/mman.h b/arch/powerpc/include/uapi/asm/mman.h index 65065ce32814..c0c737215b00 100644 --- a/arch/powerpc/include/uapi/asm/mman.h +++ b/arch/powerpc/include/uapi/asm/mman.h | |||
@@ -21,15 +21,11 @@ | |||
21 | #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ | 21 | #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ |
22 | #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ | 22 | #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ |
23 | 23 | ||
24 | |||
24 | #define MCL_CURRENT 0x2000 /* lock all currently mapped pages */ | 25 | #define MCL_CURRENT 0x2000 /* lock all currently mapped pages */ |
25 | #define MCL_FUTURE 0x4000 /* lock all additions to address space */ | 26 | #define MCL_FUTURE 0x4000 /* lock all additions to address space */ |
26 | #define MCL_ONFAULT 0x8000 /* lock all pages that are faulted in */ | 27 | #define MCL_ONFAULT 0x8000 /* lock all pages that are faulted in */ |
27 | 28 | ||
28 | #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ | ||
29 | #define MAP_NONBLOCK 0x10000 /* do not block on IO */ | ||
30 | #define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ | ||
31 | #define MAP_HUGETLB 0x40000 /* create a huge page mapping */ | ||
32 | |||
33 | /* Override any generic PKEY permission defines */ | 29 | /* Override any generic PKEY permission defines */ |
34 | #define PKEY_DISABLE_EXECUTE 0x4 | 30 | #define PKEY_DISABLE_EXECUTE 0x4 |
35 | #undef PKEY_ACCESS_MASK | 31 | #undef PKEY_ACCESS_MASK |
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index 5bf05cc774e2..e99a14798ab0 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/anon_inodes.h> | 19 | #include <linux/anon_inodes.h> |
20 | #include <linux/iommu.h> | 20 | #include <linux/iommu.h> |
21 | #include <linux/file.h> | 21 | #include <linux/file.h> |
22 | #include <linux/mm.h> | ||
22 | 23 | ||
23 | #include <asm/kvm_ppc.h> | 24 | #include <asm/kvm_ppc.h> |
24 | #include <asm/kvm_book3s.h> | 25 | #include <asm/kvm_book3s.h> |
@@ -45,43 +46,6 @@ static unsigned long kvmppc_stt_pages(unsigned long tce_pages) | |||
45 | return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE; | 46 | return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE; |
46 | } | 47 | } |
47 | 48 | ||
48 | static long kvmppc_account_memlimit(unsigned long stt_pages, bool inc) | ||
49 | { | ||
50 | long ret = 0; | ||
51 | |||
52 | if (!current || !current->mm) | ||
53 | return ret; /* process exited */ | ||
54 | |||
55 | down_write(¤t->mm->mmap_sem); | ||
56 | |||
57 | if (inc) { | ||
58 | unsigned long locked, lock_limit; | ||
59 | |||
60 | locked = current->mm->locked_vm + stt_pages; | ||
61 | lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; | ||
62 | if (locked > lock_limit && !capable(CAP_IPC_LOCK)) | ||
63 | ret = -ENOMEM; | ||
64 | else | ||
65 | current->mm->locked_vm += stt_pages; | ||
66 | } else { | ||
67 | if (WARN_ON_ONCE(stt_pages > current->mm->locked_vm)) | ||
68 | stt_pages = current->mm->locked_vm; | ||
69 | |||
70 | current->mm->locked_vm -= stt_pages; | ||
71 | } | ||
72 | |||
73 | pr_debug("[%d] RLIMIT_MEMLOCK KVM %c%ld %ld/%ld%s\n", current->pid, | ||
74 | inc ? '+' : '-', | ||
75 | stt_pages << PAGE_SHIFT, | ||
76 | current->mm->locked_vm << PAGE_SHIFT, | ||
77 | rlimit(RLIMIT_MEMLOCK), | ||
78 | ret ? " - exceeded" : ""); | ||
79 | |||
80 | up_write(¤t->mm->mmap_sem); | ||
81 | |||
82 | return ret; | ||
83 | } | ||
84 | |||
85 | static void kvm_spapr_tce_iommu_table_free(struct rcu_head *head) | 49 | static void kvm_spapr_tce_iommu_table_free(struct rcu_head *head) |
86 | { | 50 | { |
87 | struct kvmppc_spapr_tce_iommu_table *stit = container_of(head, | 51 | struct kvmppc_spapr_tce_iommu_table *stit = container_of(head, |
@@ -291,7 +255,7 @@ static int kvm_spapr_tce_release(struct inode *inode, struct file *filp) | |||
291 | 255 | ||
292 | kvm_put_kvm(stt->kvm); | 256 | kvm_put_kvm(stt->kvm); |
293 | 257 | ||
294 | kvmppc_account_memlimit( | 258 | account_locked_vm(current->mm, |
295 | kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false); | 259 | kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false); |
296 | call_rcu(&stt->rcu, release_spapr_tce_table); | 260 | call_rcu(&stt->rcu, release_spapr_tce_table); |
297 | 261 | ||
@@ -316,7 +280,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, | |||
316 | return -EINVAL; | 280 | return -EINVAL; |
317 | 281 | ||
318 | npages = kvmppc_tce_pages(size); | 282 | npages = kvmppc_tce_pages(size); |
319 | ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true); | 283 | ret = account_locked_vm(current->mm, kvmppc_stt_pages(npages), true); |
320 | if (ret) | 284 | if (ret) |
321 | return ret; | 285 | return ret; |
322 | 286 | ||
@@ -362,7 +326,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, | |||
362 | 326 | ||
363 | kfree(stt); | 327 | kfree(stt); |
364 | fail_acct: | 328 | fail_acct: |
365 | kvmppc_account_memlimit(kvmppc_stt_pages(npages), false); | 329 | account_locked_vm(current->mm, kvmppc_stt_pages(npages), false); |
366 | return ret; | 330 | return ret; |
367 | } | 331 | } |
368 | 332 | ||
diff --git a/arch/powerpc/mm/book3s64/iommu_api.c b/arch/powerpc/mm/book3s64/iommu_api.c index 90ee3a89722c..b056cae3388b 100644 --- a/arch/powerpc/mm/book3s64/iommu_api.c +++ b/arch/powerpc/mm/book3s64/iommu_api.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/hugetlb.h> | 14 | #include <linux/hugetlb.h> |
15 | #include <linux/swap.h> | 15 | #include <linux/swap.h> |
16 | #include <linux/sizes.h> | 16 | #include <linux/sizes.h> |
17 | #include <linux/mm.h> | ||
17 | #include <asm/mmu_context.h> | 18 | #include <asm/mmu_context.h> |
18 | #include <asm/pte-walk.h> | 19 | #include <asm/pte-walk.h> |
19 | #include <linux/mm_inline.h> | 20 | #include <linux/mm_inline.h> |
@@ -46,40 +47,6 @@ struct mm_iommu_table_group_mem_t { | |||
46 | u64 dev_hpa; /* Device memory base address */ | 47 | u64 dev_hpa; /* Device memory base address */ |
47 | }; | 48 | }; |
48 | 49 | ||
49 | static long mm_iommu_adjust_locked_vm(struct mm_struct *mm, | ||
50 | unsigned long npages, bool incr) | ||
51 | { | ||
52 | long ret = 0, locked, lock_limit; | ||
53 | |||
54 | if (!npages) | ||
55 | return 0; | ||
56 | |||
57 | down_write(&mm->mmap_sem); | ||
58 | |||
59 | if (incr) { | ||
60 | locked = mm->locked_vm + npages; | ||
61 | lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; | ||
62 | if (locked > lock_limit && !capable(CAP_IPC_LOCK)) | ||
63 | ret = -ENOMEM; | ||
64 | else | ||
65 | mm->locked_vm += npages; | ||
66 | } else { | ||
67 | if (WARN_ON_ONCE(npages > mm->locked_vm)) | ||
68 | npages = mm->locked_vm; | ||
69 | mm->locked_vm -= npages; | ||
70 | } | ||
71 | |||
72 | pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n", | ||
73 | current ? current->pid : 0, | ||
74 | incr ? '+' : '-', | ||
75 | npages << PAGE_SHIFT, | ||
76 | mm->locked_vm << PAGE_SHIFT, | ||
77 | rlimit(RLIMIT_MEMLOCK)); | ||
78 | up_write(&mm->mmap_sem); | ||
79 | |||
80 | return ret; | ||
81 | } | ||
82 | |||
83 | bool mm_iommu_preregistered(struct mm_struct *mm) | 50 | bool mm_iommu_preregistered(struct mm_struct *mm) |
84 | { | 51 | { |
85 | return !list_empty(&mm->context.iommu_group_mem_list); | 52 | return !list_empty(&mm->context.iommu_group_mem_list); |
@@ -96,7 +63,7 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua, | |||
96 | unsigned long entry, chunk; | 63 | unsigned long entry, chunk; |
97 | 64 | ||
98 | if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) { | 65 | if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) { |
99 | ret = mm_iommu_adjust_locked_vm(mm, entries, true); | 66 | ret = account_locked_vm(mm, entries, true); |
100 | if (ret) | 67 | if (ret) |
101 | return ret; | 68 | return ret; |
102 | 69 | ||
@@ -211,7 +178,7 @@ free_exit: | |||
211 | kfree(mem); | 178 | kfree(mem); |
212 | 179 | ||
213 | unlock_exit: | 180 | unlock_exit: |
214 | mm_iommu_adjust_locked_vm(mm, locked_entries, false); | 181 | account_locked_vm(mm, locked_entries, false); |
215 | 182 | ||
216 | return ret; | 183 | return ret; |
217 | } | 184 | } |
@@ -311,7 +278,7 @@ long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem) | |||
311 | unlock_exit: | 278 | unlock_exit: |
312 | mutex_unlock(&mem_list_mutex); | 279 | mutex_unlock(&mem_list_mutex); |
313 | 280 | ||
314 | mm_iommu_adjust_locked_vm(mm, unlock_entries, false); | 281 | account_locked_vm(mm, unlock_entries, false); |
315 | 282 | ||
316 | return ret; | 283 | return ret; |
317 | } | 284 | } |
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index 65c2ba1e1783..b4ca9e95e678 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c | |||
@@ -1237,3 +1237,8 @@ int radix__ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size, | |||
1237 | return 0; | 1237 | return 0; |
1238 | } | 1238 | } |
1239 | } | 1239 | } |
1240 | |||
1241 | int __init arch_ioremap_p4d_supported(void) | ||
1242 | { | ||
1243 | return 0; | ||
1244 | } | ||
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index d989592b6fc8..8432c281de92 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c | |||
@@ -42,26 +42,6 @@ | |||
42 | #include <asm/debug.h> | 42 | #include <asm/debug.h> |
43 | #include <asm/kup.h> | 43 | #include <asm/kup.h> |
44 | 44 | ||
45 | static inline bool notify_page_fault(struct pt_regs *regs) | ||
46 | { | ||
47 | bool ret = false; | ||
48 | |||
49 | #ifdef CONFIG_KPROBES | ||
50 | /* kprobe_running() needs smp_processor_id() */ | ||
51 | if (!user_mode(regs)) { | ||
52 | preempt_disable(); | ||
53 | if (kprobe_running() && kprobe_fault_handler(regs, 11)) | ||
54 | ret = true; | ||
55 | preempt_enable(); | ||
56 | } | ||
57 | #endif /* CONFIG_KPROBES */ | ||
58 | |||
59 | if (unlikely(debugger_fault_handler(regs))) | ||
60 | ret = true; | ||
61 | |||
62 | return ret; | ||
63 | } | ||
64 | |||
65 | /* | 45 | /* |
66 | * Check whether the instruction inst is a store using | 46 | * Check whether the instruction inst is a store using |
67 | * an update addressing form which will update r1. | 47 | * an update addressing form which will update r1. |
@@ -461,8 +441,9 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address, | |||
461 | int is_write = page_fault_is_write(error_code); | 441 | int is_write = page_fault_is_write(error_code); |
462 | vm_fault_t fault, major = 0; | 442 | vm_fault_t fault, major = 0; |
463 | bool must_retry = false; | 443 | bool must_retry = false; |
444 | bool kprobe_fault = kprobe_page_fault(regs, 11); | ||
464 | 445 | ||
465 | if (notify_page_fault(regs)) | 446 | if (unlikely(debugger_fault_handler(regs) || kprobe_fault)) |
466 | return 0; | 447 | return 0; |
467 | 448 | ||
468 | if (unlikely(page_fault_is_bad(error_code))) { | 449 | if (unlikely(page_fault_is_bad(error_code))) { |
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h index 0cf6b53587db..60f907516335 100644 --- a/arch/s390/include/asm/ctl_reg.h +++ b/arch/s390/include/asm/ctl_reg.h | |||
@@ -8,27 +8,27 @@ | |||
8 | #ifndef __ASM_CTL_REG_H | 8 | #ifndef __ASM_CTL_REG_H |
9 | #define __ASM_CTL_REG_H | 9 | #define __ASM_CTL_REG_H |
10 | 10 | ||
11 | #include <linux/const.h> | 11 | #include <linux/bits.h> |
12 | 12 | ||
13 | #define CR0_CLOCK_COMPARATOR_SIGN _BITUL(63 - 10) | 13 | #define CR0_CLOCK_COMPARATOR_SIGN BIT(63 - 10) |
14 | #define CR0_EMERGENCY_SIGNAL_SUBMASK _BITUL(63 - 49) | 14 | #define CR0_EMERGENCY_SIGNAL_SUBMASK BIT(63 - 49) |
15 | #define CR0_EXTERNAL_CALL_SUBMASK _BITUL(63 - 50) | 15 | #define CR0_EXTERNAL_CALL_SUBMASK BIT(63 - 50) |
16 | #define CR0_CLOCK_COMPARATOR_SUBMASK _BITUL(63 - 52) | 16 | #define CR0_CLOCK_COMPARATOR_SUBMASK BIT(63 - 52) |
17 | #define CR0_CPU_TIMER_SUBMASK _BITUL(63 - 53) | 17 | #define CR0_CPU_TIMER_SUBMASK BIT(63 - 53) |
18 | #define CR0_SERVICE_SIGNAL_SUBMASK _BITUL(63 - 54) | 18 | #define CR0_SERVICE_SIGNAL_SUBMASK BIT(63 - 54) |
19 | #define CR0_UNUSED_56 _BITUL(63 - 56) | 19 | #define CR0_UNUSED_56 BIT(63 - 56) |
20 | #define CR0_INTERRUPT_KEY_SUBMASK _BITUL(63 - 57) | 20 | #define CR0_INTERRUPT_KEY_SUBMASK BIT(63 - 57) |
21 | #define CR0_MEASUREMENT_ALERT_SUBMASK _BITUL(63 - 58) | 21 | #define CR0_MEASUREMENT_ALERT_SUBMASK BIT(63 - 58) |
22 | 22 | ||
23 | #define CR2_GUARDED_STORAGE _BITUL(63 - 59) | 23 | #define CR2_GUARDED_STORAGE BIT(63 - 59) |
24 | 24 | ||
25 | #define CR14_UNUSED_32 _BITUL(63 - 32) | 25 | #define CR14_UNUSED_32 BIT(63 - 32) |
26 | #define CR14_UNUSED_33 _BITUL(63 - 33) | 26 | #define CR14_UNUSED_33 BIT(63 - 33) |
27 | #define CR14_CHANNEL_REPORT_SUBMASK _BITUL(63 - 35) | 27 | #define CR14_CHANNEL_REPORT_SUBMASK BIT(63 - 35) |
28 | #define CR14_RECOVERY_SUBMASK _BITUL(63 - 36) | 28 | #define CR14_RECOVERY_SUBMASK BIT(63 - 36) |
29 | #define CR14_DEGRADATION_SUBMASK _BITUL(63 - 37) | 29 | #define CR14_DEGRADATION_SUBMASK BIT(63 - 37) |
30 | #define CR14_EXTERNAL_DAMAGE_SUBMASK _BITUL(63 - 38) | 30 | #define CR14_EXTERNAL_DAMAGE_SUBMASK BIT(63 - 38) |
31 | #define CR14_WARNING_SUBMASK _BITUL(63 - 39) | 31 | #define CR14_WARNING_SUBMASK BIT(63 - 39) |
32 | 32 | ||
33 | #ifndef __ASSEMBLY__ | 33 | #ifndef __ASSEMBLY__ |
34 | 34 | ||
diff --git a/arch/s390/include/asm/nmi.h b/arch/s390/include/asm/nmi.h index 1e5dc4537bf2..b160da8fa14b 100644 --- a/arch/s390/include/asm/nmi.h +++ b/arch/s390/include/asm/nmi.h | |||
@@ -12,7 +12,7 @@ | |||
12 | #ifndef _ASM_S390_NMI_H | 12 | #ifndef _ASM_S390_NMI_H |
13 | #define _ASM_S390_NMI_H | 13 | #define _ASM_S390_NMI_H |
14 | 14 | ||
15 | #include <linux/const.h> | 15 | #include <linux/bits.h> |
16 | #include <linux/types.h> | 16 | #include <linux/types.h> |
17 | 17 | ||
18 | #define MCIC_SUBCLASS_MASK (1ULL<<63 | 1ULL<<62 | 1ULL<<61 | \ | 18 | #define MCIC_SUBCLASS_MASK (1ULL<<63 | 1ULL<<62 | 1ULL<<61 | \ |
@@ -20,15 +20,15 @@ | |||
20 | 1ULL<<55 | 1ULL<<54 | 1ULL<<53 | \ | 20 | 1ULL<<55 | 1ULL<<54 | 1ULL<<53 | \ |
21 | 1ULL<<52 | 1ULL<<47 | 1ULL<<46 | \ | 21 | 1ULL<<52 | 1ULL<<47 | 1ULL<<46 | \ |
22 | 1ULL<<45 | 1ULL<<44) | 22 | 1ULL<<45 | 1ULL<<44) |
23 | #define MCCK_CODE_SYSTEM_DAMAGE _BITUL(63) | 23 | #define MCCK_CODE_SYSTEM_DAMAGE BIT(63) |
24 | #define MCCK_CODE_EXT_DAMAGE _BITUL(63 - 5) | 24 | #define MCCK_CODE_EXT_DAMAGE BIT(63 - 5) |
25 | #define MCCK_CODE_CP _BITUL(63 - 9) | 25 | #define MCCK_CODE_CP BIT(63 - 9) |
26 | #define MCCK_CODE_CPU_TIMER_VALID _BITUL(63 - 46) | 26 | #define MCCK_CODE_CPU_TIMER_VALID BIT(63 - 46) |
27 | #define MCCK_CODE_PSW_MWP_VALID _BITUL(63 - 20) | 27 | #define MCCK_CODE_PSW_MWP_VALID BIT(63 - 20) |
28 | #define MCCK_CODE_PSW_IA_VALID _BITUL(63 - 23) | 28 | #define MCCK_CODE_PSW_IA_VALID BIT(63 - 23) |
29 | #define MCCK_CODE_CR_VALID _BITUL(63 - 29) | 29 | #define MCCK_CODE_CR_VALID BIT(63 - 29) |
30 | #define MCCK_CODE_GS_VALID _BITUL(63 - 36) | 30 | #define MCCK_CODE_GS_VALID BIT(63 - 36) |
31 | #define MCCK_CODE_FC_VALID _BITUL(63 - 43) | 31 | #define MCCK_CODE_FC_VALID BIT(63 - 43) |
32 | 32 | ||
33 | #ifndef __ASSEMBLY__ | 33 | #ifndef __ASSEMBLY__ |
34 | 34 | ||
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index 14883b1562e0..d56c519bc696 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h | |||
@@ -12,7 +12,7 @@ | |||
12 | #ifndef __ASM_S390_PROCESSOR_H | 12 | #ifndef __ASM_S390_PROCESSOR_H |
13 | #define __ASM_S390_PROCESSOR_H | 13 | #define __ASM_S390_PROCESSOR_H |
14 | 14 | ||
15 | #include <linux/const.h> | 15 | #include <linux/bits.h> |
16 | 16 | ||
17 | #define CIF_MCCK_PENDING 0 /* machine check handling is pending */ | 17 | #define CIF_MCCK_PENDING 0 /* machine check handling is pending */ |
18 | #define CIF_ASCE_PRIMARY 1 /* primary asce needs fixup / uaccess */ | 18 | #define CIF_ASCE_PRIMARY 1 /* primary asce needs fixup / uaccess */ |
@@ -24,15 +24,15 @@ | |||
24 | #define CIF_MCCK_GUEST 7 /* machine check happening in guest */ | 24 | #define CIF_MCCK_GUEST 7 /* machine check happening in guest */ |
25 | #define CIF_DEDICATED_CPU 8 /* this CPU is dedicated */ | 25 | #define CIF_DEDICATED_CPU 8 /* this CPU is dedicated */ |
26 | 26 | ||
27 | #define _CIF_MCCK_PENDING _BITUL(CIF_MCCK_PENDING) | 27 | #define _CIF_MCCK_PENDING BIT(CIF_MCCK_PENDING) |
28 | #define _CIF_ASCE_PRIMARY _BITUL(CIF_ASCE_PRIMARY) | 28 | #define _CIF_ASCE_PRIMARY BIT(CIF_ASCE_PRIMARY) |
29 | #define _CIF_ASCE_SECONDARY _BITUL(CIF_ASCE_SECONDARY) | 29 | #define _CIF_ASCE_SECONDARY BIT(CIF_ASCE_SECONDARY) |
30 | #define _CIF_NOHZ_DELAY _BITUL(CIF_NOHZ_DELAY) | 30 | #define _CIF_NOHZ_DELAY BIT(CIF_NOHZ_DELAY) |
31 | #define _CIF_FPU _BITUL(CIF_FPU) | 31 | #define _CIF_FPU BIT(CIF_FPU) |
32 | #define _CIF_IGNORE_IRQ _BITUL(CIF_IGNORE_IRQ) | 32 | #define _CIF_IGNORE_IRQ BIT(CIF_IGNORE_IRQ) |
33 | #define _CIF_ENABLED_WAIT _BITUL(CIF_ENABLED_WAIT) | 33 | #define _CIF_ENABLED_WAIT BIT(CIF_ENABLED_WAIT) |
34 | #define _CIF_MCCK_GUEST _BITUL(CIF_MCCK_GUEST) | 34 | #define _CIF_MCCK_GUEST BIT(CIF_MCCK_GUEST) |
35 | #define _CIF_DEDICATED_CPU _BITUL(CIF_DEDICATED_CPU) | 35 | #define _CIF_DEDICATED_CPU BIT(CIF_DEDICATED_CPU) |
36 | 36 | ||
37 | #ifndef __ASSEMBLY__ | 37 | #ifndef __ASSEMBLY__ |
38 | 38 | ||
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h index 6f70d81c40f2..f009a13afe71 100644 --- a/arch/s390/include/asm/ptrace.h +++ b/arch/s390/include/asm/ptrace.h | |||
@@ -7,7 +7,7 @@ | |||
7 | #ifndef _S390_PTRACE_H | 7 | #ifndef _S390_PTRACE_H |
8 | #define _S390_PTRACE_H | 8 | #define _S390_PTRACE_H |
9 | 9 | ||
10 | #include <linux/const.h> | 10 | #include <linux/bits.h> |
11 | #include <uapi/asm/ptrace.h> | 11 | #include <uapi/asm/ptrace.h> |
12 | 12 | ||
13 | #define PIF_SYSCALL 0 /* inside a system call */ | 13 | #define PIF_SYSCALL 0 /* inside a system call */ |
@@ -15,10 +15,10 @@ | |||
15 | #define PIF_SYSCALL_RESTART 2 /* restart the current system call */ | 15 | #define PIF_SYSCALL_RESTART 2 /* restart the current system call */ |
16 | #define PIF_GUEST_FAULT 3 /* indicates program check in sie64a */ | 16 | #define PIF_GUEST_FAULT 3 /* indicates program check in sie64a */ |
17 | 17 | ||
18 | #define _PIF_SYSCALL _BITUL(PIF_SYSCALL) | 18 | #define _PIF_SYSCALL BIT(PIF_SYSCALL) |
19 | #define _PIF_PER_TRAP _BITUL(PIF_PER_TRAP) | 19 | #define _PIF_PER_TRAP BIT(PIF_PER_TRAP) |
20 | #define _PIF_SYSCALL_RESTART _BITUL(PIF_SYSCALL_RESTART) | 20 | #define _PIF_SYSCALL_RESTART BIT(PIF_SYSCALL_RESTART) |
21 | #define _PIF_GUEST_FAULT _BITUL(PIF_GUEST_FAULT) | 21 | #define _PIF_GUEST_FAULT BIT(PIF_GUEST_FAULT) |
22 | 22 | ||
23 | #ifndef __ASSEMBLY__ | 23 | #ifndef __ASSEMBLY__ |
24 | 24 | ||
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h index 925889d360c1..82deb8fc8319 100644 --- a/arch/s390/include/asm/setup.h +++ b/arch/s390/include/asm/setup.h | |||
@@ -6,7 +6,7 @@ | |||
6 | #ifndef _ASM_S390_SETUP_H | 6 | #ifndef _ASM_S390_SETUP_H |
7 | #define _ASM_S390_SETUP_H | 7 | #define _ASM_S390_SETUP_H |
8 | 8 | ||
9 | #include <linux/const.h> | 9 | #include <linux/bits.h> |
10 | #include <uapi/asm/setup.h> | 10 | #include <uapi/asm/setup.h> |
11 | 11 | ||
12 | #define EP_OFFSET 0x10008 | 12 | #define EP_OFFSET 0x10008 |
@@ -21,25 +21,25 @@ | |||
21 | * Machine features detected in early.c | 21 | * Machine features detected in early.c |
22 | */ | 22 | */ |
23 | 23 | ||
24 | #define MACHINE_FLAG_VM _BITUL(0) | 24 | #define MACHINE_FLAG_VM BIT(0) |
25 | #define MACHINE_FLAG_KVM _BITUL(1) | 25 | #define MACHINE_FLAG_KVM BIT(1) |
26 | #define MACHINE_FLAG_LPAR _BITUL(2) | 26 | #define MACHINE_FLAG_LPAR BIT(2) |
27 | #define MACHINE_FLAG_DIAG9C _BITUL(3) | 27 | #define MACHINE_FLAG_DIAG9C BIT(3) |
28 | #define MACHINE_FLAG_ESOP _BITUL(4) | 28 | #define MACHINE_FLAG_ESOP BIT(4) |
29 | #define MACHINE_FLAG_IDTE _BITUL(5) | 29 | #define MACHINE_FLAG_IDTE BIT(5) |
30 | #define MACHINE_FLAG_DIAG44 _BITUL(6) | 30 | #define MACHINE_FLAG_DIAG44 BIT(6) |
31 | #define MACHINE_FLAG_EDAT1 _BITUL(7) | 31 | #define MACHINE_FLAG_EDAT1 BIT(7) |
32 | #define MACHINE_FLAG_EDAT2 _BITUL(8) | 32 | #define MACHINE_FLAG_EDAT2 BIT(8) |
33 | #define MACHINE_FLAG_TOPOLOGY _BITUL(10) | 33 | #define MACHINE_FLAG_TOPOLOGY BIT(10) |
34 | #define MACHINE_FLAG_TE _BITUL(11) | 34 | #define MACHINE_FLAG_TE BIT(11) |
35 | #define MACHINE_FLAG_TLB_LC _BITUL(12) | 35 | #define MACHINE_FLAG_TLB_LC BIT(12) |
36 | #define MACHINE_FLAG_VX _BITUL(13) | 36 | #define MACHINE_FLAG_VX BIT(13) |
37 | #define MACHINE_FLAG_TLB_GUEST _BITUL(14) | 37 | #define MACHINE_FLAG_TLB_GUEST BIT(14) |
38 | #define MACHINE_FLAG_NX _BITUL(15) | 38 | #define MACHINE_FLAG_NX BIT(15) |
39 | #define MACHINE_FLAG_GS _BITUL(16) | 39 | #define MACHINE_FLAG_GS BIT(16) |
40 | #define MACHINE_FLAG_SCC _BITUL(17) | 40 | #define MACHINE_FLAG_SCC BIT(17) |
41 | 41 | ||
42 | #define LPP_MAGIC _BITUL(31) | 42 | #define LPP_MAGIC BIT(31) |
43 | #define LPP_PID_MASK _AC(0xffffffff, UL) | 43 | #define LPP_PID_MASK _AC(0xffffffff, UL) |
44 | 44 | ||
45 | /* Offsets to entry points in kernel/head.S */ | 45 | /* Offsets to entry points in kernel/head.S */ |
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index ce4e17c9aad6..e582fbe59e20 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h | |||
@@ -8,7 +8,7 @@ | |||
8 | #ifndef _ASM_THREAD_INFO_H | 8 | #ifndef _ASM_THREAD_INFO_H |
9 | #define _ASM_THREAD_INFO_H | 9 | #define _ASM_THREAD_INFO_H |
10 | 10 | ||
11 | #include <linux/const.h> | 11 | #include <linux/bits.h> |
12 | 12 | ||
13 | /* | 13 | /* |
14 | * General size of kernel stacks | 14 | * General size of kernel stacks |
@@ -82,21 +82,21 @@ void arch_setup_new_exec(void); | |||
82 | #define TIF_SECCOMP 26 /* secure computing */ | 82 | #define TIF_SECCOMP 26 /* secure computing */ |
83 | #define TIF_SYSCALL_TRACEPOINT 27 /* syscall tracepoint instrumentation */ | 83 | #define TIF_SYSCALL_TRACEPOINT 27 /* syscall tracepoint instrumentation */ |
84 | 84 | ||
85 | #define _TIF_NOTIFY_RESUME _BITUL(TIF_NOTIFY_RESUME) | 85 | #define _TIF_NOTIFY_RESUME BIT(TIF_NOTIFY_RESUME) |
86 | #define _TIF_SIGPENDING _BITUL(TIF_SIGPENDING) | 86 | #define _TIF_SIGPENDING BIT(TIF_SIGPENDING) |
87 | #define _TIF_NEED_RESCHED _BITUL(TIF_NEED_RESCHED) | 87 | #define _TIF_NEED_RESCHED BIT(TIF_NEED_RESCHED) |
88 | #define _TIF_UPROBE _BITUL(TIF_UPROBE) | 88 | #define _TIF_UPROBE BIT(TIF_UPROBE) |
89 | #define _TIF_GUARDED_STORAGE _BITUL(TIF_GUARDED_STORAGE) | 89 | #define _TIF_GUARDED_STORAGE BIT(TIF_GUARDED_STORAGE) |
90 | #define _TIF_PATCH_PENDING _BITUL(TIF_PATCH_PENDING) | 90 | #define _TIF_PATCH_PENDING BIT(TIF_PATCH_PENDING) |
91 | #define _TIF_ISOLATE_BP _BITUL(TIF_ISOLATE_BP) | 91 | #define _TIF_ISOLATE_BP BIT(TIF_ISOLATE_BP) |
92 | #define _TIF_ISOLATE_BP_GUEST _BITUL(TIF_ISOLATE_BP_GUEST) | 92 | #define _TIF_ISOLATE_BP_GUEST BIT(TIF_ISOLATE_BP_GUEST) |
93 | 93 | ||
94 | #define _TIF_31BIT _BITUL(TIF_31BIT) | 94 | #define _TIF_31BIT BIT(TIF_31BIT) |
95 | #define _TIF_SINGLE_STEP _BITUL(TIF_SINGLE_STEP) | 95 | #define _TIF_SINGLE_STEP BIT(TIF_SINGLE_STEP) |
96 | 96 | ||
97 | #define _TIF_SYSCALL_TRACE _BITUL(TIF_SYSCALL_TRACE) | 97 | #define _TIF_SYSCALL_TRACE BIT(TIF_SYSCALL_TRACE) |
98 | #define _TIF_SYSCALL_AUDIT _BITUL(TIF_SYSCALL_AUDIT) | 98 | #define _TIF_SYSCALL_AUDIT BIT(TIF_SYSCALL_AUDIT) |
99 | #define _TIF_SECCOMP _BITUL(TIF_SECCOMP) | 99 | #define _TIF_SECCOMP BIT(TIF_SECCOMP) |
100 | #define _TIF_SYSCALL_TRACEPOINT _BITUL(TIF_SYSCALL_TRACEPOINT) | 100 | #define _TIF_SYSCALL_TRACEPOINT BIT(TIF_SYSCALL_TRACEPOINT) |
101 | 101 | ||
102 | #endif /* _ASM_THREAD_INFO_H */ | 102 | #endif /* _ASM_THREAD_INFO_H */ |
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 0ba174f779da..63507662828f 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c | |||
@@ -67,20 +67,6 @@ static int __init fault_init(void) | |||
67 | } | 67 | } |
68 | early_initcall(fault_init); | 68 | early_initcall(fault_init); |
69 | 69 | ||
70 | static inline int notify_page_fault(struct pt_regs *regs) | ||
71 | { | ||
72 | int ret = 0; | ||
73 | |||
74 | /* kprobe_running() needs smp_processor_id() */ | ||
75 | if (kprobes_built_in() && !user_mode(regs)) { | ||
76 | preempt_disable(); | ||
77 | if (kprobe_running() && kprobe_fault_handler(regs, 14)) | ||
78 | ret = 1; | ||
79 | preempt_enable(); | ||
80 | } | ||
81 | return ret; | ||
82 | } | ||
83 | |||
84 | /* | 70 | /* |
85 | * Find out which address space caused the exception. | 71 | * Find out which address space caused the exception. |
86 | */ | 72 | */ |
@@ -412,7 +398,7 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access) | |||
412 | */ | 398 | */ |
413 | clear_pt_regs_flag(regs, PIF_PER_TRAP); | 399 | clear_pt_regs_flag(regs, PIF_PER_TRAP); |
414 | 400 | ||
415 | if (notify_page_fault(regs)) | 401 | if (kprobe_page_fault(regs, 14)) |
416 | return 0; | 402 | return 0; |
417 | 403 | ||
418 | mm = tsk->mm; | 404 | mm = tsk->mm; |
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c index 3093bc372138..5f51456f4fc7 100644 --- a/arch/sh/mm/fault.c +++ b/arch/sh/mm/fault.c | |||
@@ -24,20 +24,6 @@ | |||
24 | #include <asm/tlbflush.h> | 24 | #include <asm/tlbflush.h> |
25 | #include <asm/traps.h> | 25 | #include <asm/traps.h> |
26 | 26 | ||
27 | static inline int notify_page_fault(struct pt_regs *regs, int trap) | ||
28 | { | ||
29 | int ret = 0; | ||
30 | |||
31 | if (kprobes_built_in() && !user_mode(regs)) { | ||
32 | preempt_disable(); | ||
33 | if (kprobe_running() && kprobe_fault_handler(regs, trap)) | ||
34 | ret = 1; | ||
35 | preempt_enable(); | ||
36 | } | ||
37 | |||
38 | return ret; | ||
39 | } | ||
40 | |||
41 | static void | 27 | static void |
42 | force_sig_info_fault(int si_signo, int si_code, unsigned long address) | 28 | force_sig_info_fault(int si_signo, int si_code, unsigned long address) |
43 | { | 29 | { |
@@ -412,14 +398,14 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, | |||
412 | if (unlikely(fault_in_kernel_space(address))) { | 398 | if (unlikely(fault_in_kernel_space(address))) { |
413 | if (vmalloc_fault(address) >= 0) | 399 | if (vmalloc_fault(address) >= 0) |
414 | return; | 400 | return; |
415 | if (notify_page_fault(regs, vec)) | 401 | if (kprobe_page_fault(regs, vec)) |
416 | return; | 402 | return; |
417 | 403 | ||
418 | bad_area_nosemaphore(regs, error_code, address); | 404 | bad_area_nosemaphore(regs, error_code, address); |
419 | return; | 405 | return; |
420 | } | 406 | } |
421 | 407 | ||
422 | if (unlikely(notify_page_fault(regs, vec))) | 408 | if (unlikely(kprobe_page_fault(regs, vec))) |
423 | return; | 409 | return; |
424 | 410 | ||
425 | /* Only enable interrupts if they were on before the fault */ | 411 | /* Only enable interrupts if they were on before the fault */ |
diff --git a/arch/sparc/include/uapi/asm/mman.h b/arch/sparc/include/uapi/asm/mman.h index f6f99ec65bb3..cec9f4109687 100644 --- a/arch/sparc/include/uapi/asm/mman.h +++ b/arch/sparc/include/uapi/asm/mman.h | |||
@@ -22,10 +22,4 @@ | |||
22 | #define MCL_FUTURE 0x4000 /* lock all additions to address space */ | 22 | #define MCL_FUTURE 0x4000 /* lock all additions to address space */ |
23 | #define MCL_ONFAULT 0x8000 /* lock all pages that are faulted in */ | 23 | #define MCL_ONFAULT 0x8000 /* lock all pages that are faulted in */ |
24 | 24 | ||
25 | #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ | ||
26 | #define MAP_NONBLOCK 0x10000 /* do not block on IO */ | ||
27 | #define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ | ||
28 | #define MAP_HUGETLB 0x40000 /* create a huge page mapping */ | ||
29 | |||
30 | |||
31 | #endif /* _UAPI__SPARC_MMAN_H__ */ | 25 | #endif /* _UAPI__SPARC_MMAN_H__ */ |
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index 83fda4d9c3b2..2371fb6b97e4 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c | |||
@@ -38,20 +38,6 @@ | |||
38 | 38 | ||
39 | int show_unhandled_signals = 1; | 39 | int show_unhandled_signals = 1; |
40 | 40 | ||
41 | static inline __kprobes int notify_page_fault(struct pt_regs *regs) | ||
42 | { | ||
43 | int ret = 0; | ||
44 | |||
45 | /* kprobe_running() needs smp_processor_id() */ | ||
46 | if (kprobes_built_in() && !user_mode(regs)) { | ||
47 | preempt_disable(); | ||
48 | if (kprobe_running() && kprobe_fault_handler(regs, 0)) | ||
49 | ret = 1; | ||
50 | preempt_enable(); | ||
51 | } | ||
52 | return ret; | ||
53 | } | ||
54 | |||
55 | static void __kprobes unhandled_fault(unsigned long address, | 41 | static void __kprobes unhandled_fault(unsigned long address, |
56 | struct task_struct *tsk, | 42 | struct task_struct *tsk, |
57 | struct pt_regs *regs) | 43 | struct pt_regs *regs) |
@@ -285,7 +271,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) | |||
285 | 271 | ||
286 | fault_code = get_thread_fault_code(); | 272 | fault_code = get_thread_fault_code(); |
287 | 273 | ||
288 | if (notify_page_fault(regs)) | 274 | if (kprobe_page_fault(regs, 0)) |
289 | goto exit_exception; | 275 | goto exit_exception; |
290 | 276 | ||
291 | si_code = SEGV_MAPERR; | 277 | si_code = SEGV_MAPERR; |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 57d3b5d96bd2..1342654e8057 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -70,6 +70,7 @@ config X86 | |||
70 | select ARCH_HAS_KCOV if X86_64 | 70 | select ARCH_HAS_KCOV if X86_64 |
71 | select ARCH_HAS_MEMBARRIER_SYNC_CORE | 71 | select ARCH_HAS_MEMBARRIER_SYNC_CORE |
72 | select ARCH_HAS_PMEM_API if X86_64 | 72 | select ARCH_HAS_PMEM_API if X86_64 |
73 | select ARCH_HAS_PTE_DEVMAP if X86_64 | ||
73 | select ARCH_HAS_PTE_SPECIAL | 74 | select ARCH_HAS_PTE_SPECIAL |
74 | select ARCH_HAS_REFCOUNT | 75 | select ARCH_HAS_REFCOUNT |
75 | select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64 | 76 | select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64 |
@@ -80,7 +81,6 @@ config X86 | |||
80 | select ARCH_HAS_STRICT_MODULE_RWX | 81 | select ARCH_HAS_STRICT_MODULE_RWX |
81 | select ARCH_HAS_SYNC_CORE_BEFORE_USERMODE | 82 | select ARCH_HAS_SYNC_CORE_BEFORE_USERMODE |
82 | select ARCH_HAS_UBSAN_SANITIZE_ALL | 83 | select ARCH_HAS_UBSAN_SANITIZE_ALL |
83 | select ARCH_HAS_ZONE_DEVICE if X86_64 | ||
84 | select ARCH_HAVE_NMI_SAFE_CMPXCHG | 84 | select ARCH_HAVE_NMI_SAFE_CMPXCHG |
85 | select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI | 85 | select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI |
86 | select ARCH_MIGHT_HAVE_PC_PARPORT | 86 | select ARCH_MIGHT_HAVE_PC_PARPORT |
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index a06a9f8294ea..6bed97ff6db2 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h | |||
@@ -165,7 +165,6 @@ static inline unsigned int isa_virt_to_bus(volatile void *address) | |||
165 | { | 165 | { |
166 | return (unsigned int)virt_to_phys(address); | 166 | return (unsigned int)virt_to_phys(address); |
167 | } | 167 | } |
168 | #define isa_page_to_bus(page) ((unsigned int)page_to_phys(page)) | ||
169 | #define isa_bus_to_virt phys_to_virt | 168 | #define isa_bus_to_virt phys_to_virt |
170 | 169 | ||
171 | /* | 170 | /* |
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 5e0509b41986..0bc530c4eb13 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h | |||
@@ -271,7 +271,7 @@ static inline int has_transparent_hugepage(void) | |||
271 | return boot_cpu_has(X86_FEATURE_PSE); | 271 | return boot_cpu_has(X86_FEATURE_PSE); |
272 | } | 272 | } |
273 | 273 | ||
274 | #ifdef __HAVE_ARCH_PTE_DEVMAP | 274 | #ifdef CONFIG_ARCH_HAS_PTE_DEVMAP |
275 | static inline int pmd_devmap(pmd_t pmd) | 275 | static inline int pmd_devmap(pmd_t pmd) |
276 | { | 276 | { |
277 | return !!(pmd_val(pmd) & _PAGE_DEVMAP); | 277 | return !!(pmd_val(pmd) & _PAGE_DEVMAP); |
@@ -732,7 +732,7 @@ static inline int pte_present(pte_t a) | |||
732 | return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE); | 732 | return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE); |
733 | } | 733 | } |
734 | 734 | ||
735 | #ifdef __HAVE_ARCH_PTE_DEVMAP | 735 | #ifdef CONFIG_ARCH_HAS_PTE_DEVMAP |
736 | static inline int pte_devmap(pte_t a) | 736 | static inline int pte_devmap(pte_t a) |
737 | { | 737 | { |
738 | return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP; | 738 | return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP; |
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index d6ff0bbdb394..b5e49e6bac63 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h | |||
@@ -103,7 +103,6 @@ | |||
103 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) | 103 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) |
104 | #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) | 104 | #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) |
105 | #define _PAGE_DEVMAP (_AT(u64, 1) << _PAGE_BIT_DEVMAP) | 105 | #define _PAGE_DEVMAP (_AT(u64, 1) << _PAGE_BIT_DEVMAP) |
106 | #define __HAVE_ARCH_PTE_DEVMAP | ||
107 | #else | 106 | #else |
108 | #define _PAGE_NX (_AT(pteval_t, 0)) | 107 | #define _PAGE_NX (_AT(pteval_t, 0)) |
109 | #define _PAGE_DEVMAP (_AT(pteval_t, 0)) | 108 | #define _PAGE_DEVMAP (_AT(pteval_t, 0)) |
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 794f364cb882..d1634c59ed56 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -46,23 +46,6 @@ kmmio_fault(struct pt_regs *regs, unsigned long addr) | |||
46 | return 0; | 46 | return 0; |
47 | } | 47 | } |
48 | 48 | ||
49 | static nokprobe_inline int kprobes_fault(struct pt_regs *regs) | ||
50 | { | ||
51 | if (!kprobes_built_in()) | ||
52 | return 0; | ||
53 | if (user_mode(regs)) | ||
54 | return 0; | ||
55 | /* | ||
56 | * To be potentially processing a kprobe fault and to be allowed to call | ||
57 | * kprobe_running(), we have to be non-preemptible. | ||
58 | */ | ||
59 | if (preemptible()) | ||
60 | return 0; | ||
61 | if (!kprobe_running()) | ||
62 | return 0; | ||
63 | return kprobe_fault_handler(regs, X86_TRAP_PF); | ||
64 | } | ||
65 | |||
66 | /* | 49 | /* |
67 | * Prefetch quirks: | 50 | * Prefetch quirks: |
68 | * | 51 | * |
@@ -1282,7 +1265,7 @@ do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code, | |||
1282 | return; | 1265 | return; |
1283 | 1266 | ||
1284 | /* kprobes don't want to hook the spurious faults: */ | 1267 | /* kprobes don't want to hook the spurious faults: */ |
1285 | if (kprobes_fault(regs)) | 1268 | if (kprobe_page_fault(regs, X86_TRAP_PF)) |
1286 | return; | 1269 | return; |
1287 | 1270 | ||
1288 | /* | 1271 | /* |
@@ -1313,7 +1296,7 @@ void do_user_addr_fault(struct pt_regs *regs, | |||
1313 | mm = tsk->mm; | 1296 | mm = tsk->mm; |
1314 | 1297 | ||
1315 | /* kprobes don't want to hook the spurious faults: */ | 1298 | /* kprobes don't want to hook the spurious faults: */ |
1316 | if (unlikely(kprobes_fault(regs))) | 1299 | if (unlikely(kprobe_page_fault(regs, X86_TRAP_PF))) |
1317 | return; | 1300 | return; |
1318 | 1301 | ||
1319 | /* | 1302 | /* |
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index e500f1df1140..63e99f15d7cf 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c | |||
@@ -459,6 +459,11 @@ void iounmap(volatile void __iomem *addr) | |||
459 | } | 459 | } |
460 | EXPORT_SYMBOL(iounmap); | 460 | EXPORT_SYMBOL(iounmap); |
461 | 461 | ||
462 | int __init arch_ioremap_p4d_supported(void) | ||
463 | { | ||
464 | return 0; | ||
465 | } | ||
466 | |||
462 | int __init arch_ioremap_pud_supported(void) | 467 | int __init arch_ioremap_pud_supported(void) |
463 | { | 468 | { |
464 | #ifdef CONFIG_X86_64 | 469 | #ifdef CONFIG_X86_64 |
diff --git a/arch/xtensa/include/uapi/asm/mman.h b/arch/xtensa/include/uapi/asm/mman.h index be726062412b..ebbb48842190 100644 --- a/arch/xtensa/include/uapi/asm/mman.h +++ b/arch/xtensa/include/uapi/asm/mman.h | |||
@@ -56,12 +56,8 @@ | |||
56 | #define MAP_STACK 0x40000 /* give out an address that is best suited for process/thread stacks */ | 56 | #define MAP_STACK 0x40000 /* give out an address that is best suited for process/thread stacks */ |
57 | #define MAP_HUGETLB 0x80000 /* create a huge page mapping */ | 57 | #define MAP_HUGETLB 0x80000 /* create a huge page mapping */ |
58 | #define MAP_FIXED_NOREPLACE 0x100000 /* MAP_FIXED which doesn't unmap underlying mapping */ | 58 | #define MAP_FIXED_NOREPLACE 0x100000 /* MAP_FIXED which doesn't unmap underlying mapping */ |
59 | #ifdef CONFIG_MMAP_ALLOW_UNINITIALIZED | 59 | #define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be |
60 | # define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be | ||
61 | * uninitialized */ | 60 | * uninitialized */ |
62 | #else | ||
63 | # define MAP_UNINITIALIZED 0x0 /* Don't support this flag */ | ||
64 | #endif | ||
65 | 61 | ||
66 | /* | 62 | /* |
67 | * Flags for msync | 63 | * Flags for msync |
diff --git a/drivers/dax/dax-private.h b/drivers/dax/dax-private.h index c915889d1769..6ccca3b890d6 100644 --- a/drivers/dax/dax-private.h +++ b/drivers/dax/dax-private.h | |||
@@ -43,6 +43,7 @@ struct dax_region { | |||
43 | * @target_node: effective numa node if dev_dax memory range is onlined | 43 | * @target_node: effective numa node if dev_dax memory range is onlined |
44 | * @dev - device core | 44 | * @dev - device core |
45 | * @pgmap - pgmap for memmap setup / lifetime (driver owned) | 45 | * @pgmap - pgmap for memmap setup / lifetime (driver owned) |
46 | * @dax_mem_res: physical address range of hotadded DAX memory | ||
46 | */ | 47 | */ |
47 | struct dev_dax { | 48 | struct dev_dax { |
48 | struct dax_region *region; | 49 | struct dax_region *region; |
@@ -50,6 +51,7 @@ struct dev_dax { | |||
50 | int target_node; | 51 | int target_node; |
51 | struct device dev; | 52 | struct device dev; |
52 | struct dev_pagemap pgmap; | 53 | struct dev_pagemap pgmap; |
54 | struct resource *dax_kmem_res; | ||
53 | }; | 55 | }; |
54 | 56 | ||
55 | static inline struct dev_dax *to_dev_dax(struct device *dev) | 57 | static inline struct dev_dax *to_dev_dax(struct device *dev) |
diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c index a02318c6d28a..3d0a7e702c94 100644 --- a/drivers/dax/kmem.c +++ b/drivers/dax/kmem.c | |||
@@ -66,23 +66,59 @@ int dev_dax_kmem_probe(struct device *dev) | |||
66 | new_res->name = dev_name(dev); | 66 | new_res->name = dev_name(dev); |
67 | 67 | ||
68 | rc = add_memory(numa_node, new_res->start, resource_size(new_res)); | 68 | rc = add_memory(numa_node, new_res->start, resource_size(new_res)); |
69 | if (rc) | 69 | if (rc) { |
70 | release_resource(new_res); | ||
71 | kfree(new_res); | ||
70 | return rc; | 72 | return rc; |
73 | } | ||
74 | dev_dax->dax_kmem_res = new_res; | ||
71 | 75 | ||
72 | return 0; | 76 | return 0; |
73 | } | 77 | } |
74 | 78 | ||
79 | #ifdef CONFIG_MEMORY_HOTREMOVE | ||
80 | static int dev_dax_kmem_remove(struct device *dev) | ||
81 | { | ||
82 | struct dev_dax *dev_dax = to_dev_dax(dev); | ||
83 | struct resource *res = dev_dax->dax_kmem_res; | ||
84 | resource_size_t kmem_start = res->start; | ||
85 | resource_size_t kmem_size = resource_size(res); | ||
86 | int rc; | ||
87 | |||
88 | /* | ||
89 | * We have one shot for removing memory, if some memory blocks were not | ||
90 | * offline prior to calling this function remove_memory() will fail, and | ||
91 | * there is no way to hotremove this memory until reboot because device | ||
92 | * unbind will succeed even if we return failure. | ||
93 | */ | ||
94 | rc = remove_memory(dev_dax->target_node, kmem_start, kmem_size); | ||
95 | if (rc) { | ||
96 | dev_err(dev, | ||
97 | "DAX region %pR cannot be hotremoved until the next reboot\n", | ||
98 | res); | ||
99 | return rc; | ||
100 | } | ||
101 | |||
102 | /* Release and free dax resources */ | ||
103 | release_resource(res); | ||
104 | kfree(res); | ||
105 | dev_dax->dax_kmem_res = NULL; | ||
106 | |||
107 | return 0; | ||
108 | } | ||
109 | #else | ||
75 | static int dev_dax_kmem_remove(struct device *dev) | 110 | static int dev_dax_kmem_remove(struct device *dev) |
76 | { | 111 | { |
77 | /* | 112 | /* |
78 | * Purposely leak the request_mem_region() for the device-dax | 113 | * Without hotremove purposely leak the request_mem_region() for the |
79 | * range and return '0' to ->remove() attempts. The removal of | 114 | * device-dax range and return '0' to ->remove() attempts. The removal |
80 | * the device from the driver always succeeds, but the region | 115 | * of the device from the driver always succeeds, but the region is |
81 | * is permanently pinned as reserved by the unreleased | 116 | * permanently pinned as reserved by the unreleased |
82 | * request_mem_region(). | 117 | * request_mem_region(). |
83 | */ | 118 | */ |
84 | return 0; | 119 | return 0; |
85 | } | 120 | } |
121 | #endif /* CONFIG_MEMORY_HOTREMOVE */ | ||
86 | 122 | ||
87 | static struct dax_device_driver device_dax_kmem_driver = { | 123 | static struct dax_device_driver device_dax_kmem_driver = { |
88 | .drv = { | 124 | .drv = { |
diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c index dcd80b088c7b..62f924489db5 100644 --- a/drivers/fpga/dfl-afu-dma-region.c +++ b/drivers/fpga/dfl-afu-dma-region.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/dma-mapping.h> | 12 | #include <linux/dma-mapping.h> |
13 | #include <linux/sched/signal.h> | 13 | #include <linux/sched/signal.h> |
14 | #include <linux/uaccess.h> | 14 | #include <linux/uaccess.h> |
15 | #include <linux/mm.h> | ||
15 | 16 | ||
16 | #include "dfl-afu.h" | 17 | #include "dfl-afu.h" |
17 | 18 | ||
@@ -32,52 +33,6 @@ void afu_dma_region_init(struct dfl_feature_platform_data *pdata) | |||
32 | } | 33 | } |
33 | 34 | ||
34 | /** | 35 | /** |
35 | * afu_dma_adjust_locked_vm - adjust locked memory | ||
36 | * @dev: port device | ||
37 | * @npages: number of pages | ||
38 | * @incr: increase or decrease locked memory | ||
39 | * | ||
40 | * Increase or decrease the locked memory size with npages input. | ||
41 | * | ||
42 | * Return 0 on success. | ||
43 | * Return -ENOMEM if locked memory size is over the limit and no CAP_IPC_LOCK. | ||
44 | */ | ||
45 | static int afu_dma_adjust_locked_vm(struct device *dev, long npages, bool incr) | ||
46 | { | ||
47 | unsigned long locked, lock_limit; | ||
48 | int ret = 0; | ||
49 | |||
50 | /* the task is exiting. */ | ||
51 | if (!current->mm) | ||
52 | return 0; | ||
53 | |||
54 | down_write(¤t->mm->mmap_sem); | ||
55 | |||
56 | if (incr) { | ||
57 | locked = current->mm->locked_vm + npages; | ||
58 | lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; | ||
59 | |||
60 | if (locked > lock_limit && !capable(CAP_IPC_LOCK)) | ||
61 | ret = -ENOMEM; | ||
62 | else | ||
63 | current->mm->locked_vm += npages; | ||
64 | } else { | ||
65 | if (WARN_ON_ONCE(npages > current->mm->locked_vm)) | ||
66 | npages = current->mm->locked_vm; | ||
67 | current->mm->locked_vm -= npages; | ||
68 | } | ||
69 | |||
70 | dev_dbg(dev, "[%d] RLIMIT_MEMLOCK %c%ld %ld/%ld%s\n", current->pid, | ||
71 | incr ? '+' : '-', npages << PAGE_SHIFT, | ||
72 | current->mm->locked_vm << PAGE_SHIFT, rlimit(RLIMIT_MEMLOCK), | ||
73 | ret ? "- exceeded" : ""); | ||
74 | |||
75 | up_write(¤t->mm->mmap_sem); | ||
76 | |||
77 | return ret; | ||
78 | } | ||
79 | |||
80 | /** | ||
81 | * afu_dma_pin_pages - pin pages of given dma memory region | 36 | * afu_dma_pin_pages - pin pages of given dma memory region |
82 | * @pdata: feature device platform data | 37 | * @pdata: feature device platform data |
83 | * @region: dma memory region to be pinned | 38 | * @region: dma memory region to be pinned |
@@ -92,7 +47,7 @@ static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata, | |||
92 | struct device *dev = &pdata->dev->dev; | 47 | struct device *dev = &pdata->dev->dev; |
93 | int ret, pinned; | 48 | int ret, pinned; |
94 | 49 | ||
95 | ret = afu_dma_adjust_locked_vm(dev, npages, true); | 50 | ret = account_locked_vm(current->mm, npages, true); |
96 | if (ret) | 51 | if (ret) |
97 | return ret; | 52 | return ret; |
98 | 53 | ||
@@ -121,7 +76,7 @@ put_pages: | |||
121 | free_pages: | 76 | free_pages: |
122 | kfree(region->pages); | 77 | kfree(region->pages); |
123 | unlock_vm: | 78 | unlock_vm: |
124 | afu_dma_adjust_locked_vm(dev, npages, false); | 79 | account_locked_vm(current->mm, npages, false); |
125 | return ret; | 80 | return ret; |
126 | } | 81 | } |
127 | 82 | ||
@@ -141,7 +96,7 @@ static void afu_dma_unpin_pages(struct dfl_feature_platform_data *pdata, | |||
141 | 96 | ||
142 | put_all_pages(region->pages, npages); | 97 | put_all_pages(region->pages, npages); |
143 | kfree(region->pages); | 98 | kfree(region->pages); |
144 | afu_dma_adjust_locked_vm(dev, npages, false); | 99 | account_locked_vm(current->mm, npages, false); |
145 | 100 | ||
146 | dev_dbg(dev, "%ld pages unpinned\n", npages); | 101 | dev_dbg(dev, "%ld pages unpinned\n", npages); |
147 | } | 102 | } |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c index 5e4f3a8c5784..e4332d5a5757 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c | |||
@@ -53,7 +53,7 @@ int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags) | |||
53 | pad = round_up(skb->len, 4) + 4 - skb->len; | 53 | pad = round_up(skb->len, 4) + 4 - skb->len; |
54 | 54 | ||
55 | /* First packet of a A-MSDU burst keeps track of the whole burst | 55 | /* First packet of a A-MSDU burst keeps track of the whole burst |
56 | * length, need to update lenght of it and the last packet. | 56 | * length, need to update length of it and the last packet. |
57 | */ | 57 | */ |
58 | skb_walk_frags(skb, iter) { | 58 | skb_walk_frags(skb, iter) { |
59 | last = iter; | 59 | last = iter; |
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c index 3a546ec10d90..22a65ad4e46e 100644 --- a/drivers/pps/pps.c +++ b/drivers/pps/pps.c | |||
@@ -152,6 +152,14 @@ static long pps_cdev_ioctl(struct file *file, | |||
152 | pps->params.mode |= PPS_CANWAIT; | 152 | pps->params.mode |= PPS_CANWAIT; |
153 | pps->params.api_version = PPS_API_VERS; | 153 | pps->params.api_version = PPS_API_VERS; |
154 | 154 | ||
155 | /* | ||
156 | * Clear unused fields of pps_kparams to avoid leaking | ||
157 | * uninitialized data of the PPS_SETPARAMS caller via | ||
158 | * PPS_GETPARAMS | ||
159 | */ | ||
160 | pps->params.assert_off_tu.flags = 0; | ||
161 | pps->params.clear_off_tu.flags = 0; | ||
162 | |||
155 | spin_unlock_irq(&pps->lock); | 163 | spin_unlock_irq(&pps->lock); |
156 | 164 | ||
157 | break; | 165 | break; |
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index ce7a90e68042..8155f59ece38 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c | |||
@@ -1686,6 +1686,7 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv, | |||
1686 | 1686 | ||
1687 | if (copy_from_user(&dev_info, arg, sizeof(dev_info))) | 1687 | if (copy_from_user(&dev_info, arg, sizeof(dev_info))) |
1688 | return -EFAULT; | 1688 | return -EFAULT; |
1689 | dev_info.name[sizeof(dev_info.name) - 1] = '\0'; | ||
1689 | 1690 | ||
1690 | rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name, | 1691 | rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name, |
1691 | dev_info.comptag, dev_info.destid, dev_info.hopcount); | 1692 | dev_info.comptag, dev_info.destid, dev_info.hopcount); |
@@ -1817,6 +1818,7 @@ static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg) | |||
1817 | 1818 | ||
1818 | if (copy_from_user(&dev_info, arg, sizeof(dev_info))) | 1819 | if (copy_from_user(&dev_info, arg, sizeof(dev_info))) |
1819 | return -EFAULT; | 1820 | return -EFAULT; |
1821 | dev_info.name[sizeof(dev_info.name) - 1] = '\0'; | ||
1820 | 1822 | ||
1821 | mport = priv->md->mport; | 1823 | mport = priv->md->mport; |
1822 | 1824 | ||
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index 7048c9198c21..8ce9ad21129f 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/vmalloc.h> | 19 | #include <linux/vmalloc.h> |
20 | #include <linux/sched/mm.h> | 20 | #include <linux/sched/mm.h> |
21 | #include <linux/sched/signal.h> | 21 | #include <linux/sched/signal.h> |
22 | #include <linux/mm.h> | ||
22 | 23 | ||
23 | #include <asm/iommu.h> | 24 | #include <asm/iommu.h> |
24 | #include <asm/tce.h> | 25 | #include <asm/tce.h> |
@@ -31,51 +32,6 @@ | |||
31 | static void tce_iommu_detach_group(void *iommu_data, | 32 | static void tce_iommu_detach_group(void *iommu_data, |
32 | struct iommu_group *iommu_group); | 33 | struct iommu_group *iommu_group); |
33 | 34 | ||
34 | static long try_increment_locked_vm(struct mm_struct *mm, long npages) | ||
35 | { | ||
36 | long ret = 0, locked, lock_limit; | ||
37 | |||
38 | if (WARN_ON_ONCE(!mm)) | ||
39 | return -EPERM; | ||
40 | |||
41 | if (!npages) | ||
42 | return 0; | ||
43 | |||
44 | down_write(&mm->mmap_sem); | ||
45 | locked = mm->locked_vm + npages; | ||
46 | lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; | ||
47 | if (locked > lock_limit && !capable(CAP_IPC_LOCK)) | ||
48 | ret = -ENOMEM; | ||
49 | else | ||
50 | mm->locked_vm += npages; | ||
51 | |||
52 | pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid, | ||
53 | npages << PAGE_SHIFT, | ||
54 | mm->locked_vm << PAGE_SHIFT, | ||
55 | rlimit(RLIMIT_MEMLOCK), | ||
56 | ret ? " - exceeded" : ""); | ||
57 | |||
58 | up_write(&mm->mmap_sem); | ||
59 | |||
60 | return ret; | ||
61 | } | ||
62 | |||
63 | static void decrement_locked_vm(struct mm_struct *mm, long npages) | ||
64 | { | ||
65 | if (!mm || !npages) | ||
66 | return; | ||
67 | |||
68 | down_write(&mm->mmap_sem); | ||
69 | if (WARN_ON_ONCE(npages > mm->locked_vm)) | ||
70 | npages = mm->locked_vm; | ||
71 | mm->locked_vm -= npages; | ||
72 | pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid, | ||
73 | npages << PAGE_SHIFT, | ||
74 | mm->locked_vm << PAGE_SHIFT, | ||
75 | rlimit(RLIMIT_MEMLOCK)); | ||
76 | up_write(&mm->mmap_sem); | ||
77 | } | ||
78 | |||
79 | /* | 35 | /* |
80 | * VFIO IOMMU fd for SPAPR_TCE IOMMU implementation | 36 | * VFIO IOMMU fd for SPAPR_TCE IOMMU implementation |
81 | * | 37 | * |
@@ -333,7 +289,7 @@ static int tce_iommu_enable(struct tce_container *container) | |||
333 | return ret; | 289 | return ret; |
334 | 290 | ||
335 | locked = table_group->tce32_size >> PAGE_SHIFT; | 291 | locked = table_group->tce32_size >> PAGE_SHIFT; |
336 | ret = try_increment_locked_vm(container->mm, locked); | 292 | ret = account_locked_vm(container->mm, locked, true); |
337 | if (ret) | 293 | if (ret) |
338 | return ret; | 294 | return ret; |
339 | 295 | ||
@@ -352,7 +308,7 @@ static void tce_iommu_disable(struct tce_container *container) | |||
352 | container->enabled = false; | 308 | container->enabled = false; |
353 | 309 | ||
354 | BUG_ON(!container->mm); | 310 | BUG_ON(!container->mm); |
355 | decrement_locked_vm(container->mm, container->locked_pages); | 311 | account_locked_vm(container->mm, container->locked_pages, false); |
356 | } | 312 | } |
357 | 313 | ||
358 | static void *tce_iommu_open(unsigned long arg) | 314 | static void *tce_iommu_open(unsigned long arg) |
@@ -656,7 +612,7 @@ static long tce_iommu_create_table(struct tce_container *container, | |||
656 | if (!table_size) | 612 | if (!table_size) |
657 | return -EINVAL; | 613 | return -EINVAL; |
658 | 614 | ||
659 | ret = try_increment_locked_vm(container->mm, table_size >> PAGE_SHIFT); | 615 | ret = account_locked_vm(container->mm, table_size >> PAGE_SHIFT, true); |
660 | if (ret) | 616 | if (ret) |
661 | return ret; | 617 | return ret; |
662 | 618 | ||
@@ -675,7 +631,7 @@ static void tce_iommu_free_table(struct tce_container *container, | |||
675 | unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT; | 631 | unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT; |
676 | 632 | ||
677 | iommu_tce_table_put(tbl); | 633 | iommu_tce_table_put(tbl); |
678 | decrement_locked_vm(container->mm, pages); | 634 | account_locked_vm(container->mm, pages, false); |
679 | } | 635 | } |
680 | 636 | ||
681 | static long tce_iommu_create_window(struct tce_container *container, | 637 | static long tce_iommu_create_window(struct tce_container *container, |
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index add34adfadc7..054391f30fa8 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c | |||
@@ -272,21 +272,8 @@ static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async) | |||
272 | 272 | ||
273 | ret = down_write_killable(&mm->mmap_sem); | 273 | ret = down_write_killable(&mm->mmap_sem); |
274 | if (!ret) { | 274 | if (!ret) { |
275 | if (npage > 0) { | 275 | ret = __account_locked_vm(mm, abs(npage), npage > 0, dma->task, |
276 | if (!dma->lock_cap) { | 276 | dma->lock_cap); |
277 | unsigned long limit; | ||
278 | |||
279 | limit = task_rlimit(dma->task, | ||
280 | RLIMIT_MEMLOCK) >> PAGE_SHIFT; | ||
281 | |||
282 | if (mm->locked_vm + npage > limit) | ||
283 | ret = -ENOMEM; | ||
284 | } | ||
285 | } | ||
286 | |||
287 | if (!ret) | ||
288 | mm->locked_vm += npage; | ||
289 | |||
290 | up_write(&mm->mmap_sem); | 277 | up_write(&mm->mmap_sem); |
291 | } | 278 | } |
292 | 279 | ||
@@ -2094,7 +2094,6 @@ SYSCALL_DEFINE6(io_pgetevents, | |||
2094 | const struct __aio_sigset __user *, usig) | 2094 | const struct __aio_sigset __user *, usig) |
2095 | { | 2095 | { |
2096 | struct __aio_sigset ksig = { NULL, }; | 2096 | struct __aio_sigset ksig = { NULL, }; |
2097 | sigset_t ksigmask, sigsaved; | ||
2098 | struct timespec64 ts; | 2097 | struct timespec64 ts; |
2099 | bool interrupted; | 2098 | bool interrupted; |
2100 | int ret; | 2099 | int ret; |
@@ -2105,14 +2104,14 @@ SYSCALL_DEFINE6(io_pgetevents, | |||
2105 | if (usig && copy_from_user(&ksig, usig, sizeof(ksig))) | 2104 | if (usig && copy_from_user(&ksig, usig, sizeof(ksig))) |
2106 | return -EFAULT; | 2105 | return -EFAULT; |
2107 | 2106 | ||
2108 | ret = set_user_sigmask(ksig.sigmask, &ksigmask, &sigsaved, ksig.sigsetsize); | 2107 | ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize); |
2109 | if (ret) | 2108 | if (ret) |
2110 | return ret; | 2109 | return ret; |
2111 | 2110 | ||
2112 | ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL); | 2111 | ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL); |
2113 | 2112 | ||
2114 | interrupted = signal_pending(current); | 2113 | interrupted = signal_pending(current); |
2115 | restore_user_sigmask(ksig.sigmask, &sigsaved, interrupted); | 2114 | restore_saved_sigmask_unless(interrupted); |
2116 | if (interrupted && !ret) | 2115 | if (interrupted && !ret) |
2117 | ret = -ERESTARTNOHAND; | 2116 | ret = -ERESTARTNOHAND; |
2118 | 2117 | ||
@@ -2130,7 +2129,6 @@ SYSCALL_DEFINE6(io_pgetevents_time32, | |||
2130 | const struct __aio_sigset __user *, usig) | 2129 | const struct __aio_sigset __user *, usig) |
2131 | { | 2130 | { |
2132 | struct __aio_sigset ksig = { NULL, }; | 2131 | struct __aio_sigset ksig = { NULL, }; |
2133 | sigset_t ksigmask, sigsaved; | ||
2134 | struct timespec64 ts; | 2132 | struct timespec64 ts; |
2135 | bool interrupted; | 2133 | bool interrupted; |
2136 | int ret; | 2134 | int ret; |
@@ -2142,14 +2140,14 @@ SYSCALL_DEFINE6(io_pgetevents_time32, | |||
2142 | return -EFAULT; | 2140 | return -EFAULT; |
2143 | 2141 | ||
2144 | 2142 | ||
2145 | ret = set_user_sigmask(ksig.sigmask, &ksigmask, &sigsaved, ksig.sigsetsize); | 2143 | ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize); |
2146 | if (ret) | 2144 | if (ret) |
2147 | return ret; | 2145 | return ret; |
2148 | 2146 | ||
2149 | ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL); | 2147 | ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL); |
2150 | 2148 | ||
2151 | interrupted = signal_pending(current); | 2149 | interrupted = signal_pending(current); |
2152 | restore_user_sigmask(ksig.sigmask, &sigsaved, interrupted); | 2150 | restore_saved_sigmask_unless(interrupted); |
2153 | if (interrupted && !ret) | 2151 | if (interrupted && !ret) |
2154 | ret = -ERESTARTNOHAND; | 2152 | ret = -ERESTARTNOHAND; |
2155 | 2153 | ||
@@ -2198,7 +2196,6 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents, | |||
2198 | const struct __compat_aio_sigset __user *, usig) | 2196 | const struct __compat_aio_sigset __user *, usig) |
2199 | { | 2197 | { |
2200 | struct __compat_aio_sigset ksig = { NULL, }; | 2198 | struct __compat_aio_sigset ksig = { NULL, }; |
2201 | sigset_t ksigmask, sigsaved; | ||
2202 | struct timespec64 t; | 2199 | struct timespec64 t; |
2203 | bool interrupted; | 2200 | bool interrupted; |
2204 | int ret; | 2201 | int ret; |
@@ -2209,14 +2206,14 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents, | |||
2209 | if (usig && copy_from_user(&ksig, usig, sizeof(ksig))) | 2206 | if (usig && copy_from_user(&ksig, usig, sizeof(ksig))) |
2210 | return -EFAULT; | 2207 | return -EFAULT; |
2211 | 2208 | ||
2212 | ret = set_compat_user_sigmask(ksig.sigmask, &ksigmask, &sigsaved, ksig.sigsetsize); | 2209 | ret = set_compat_user_sigmask(ksig.sigmask, ksig.sigsetsize); |
2213 | if (ret) | 2210 | if (ret) |
2214 | return ret; | 2211 | return ret; |
2215 | 2212 | ||
2216 | ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL); | 2213 | ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL); |
2217 | 2214 | ||
2218 | interrupted = signal_pending(current); | 2215 | interrupted = signal_pending(current); |
2219 | restore_user_sigmask(ksig.sigmask, &sigsaved, interrupted); | 2216 | restore_saved_sigmask_unless(interrupted); |
2220 | if (interrupted && !ret) | 2217 | if (interrupted && !ret) |
2221 | ret = -ERESTARTNOHAND; | 2218 | ret = -ERESTARTNOHAND; |
2222 | 2219 | ||
@@ -2234,7 +2231,6 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64, | |||
2234 | const struct __compat_aio_sigset __user *, usig) | 2231 | const struct __compat_aio_sigset __user *, usig) |
2235 | { | 2232 | { |
2236 | struct __compat_aio_sigset ksig = { NULL, }; | 2233 | struct __compat_aio_sigset ksig = { NULL, }; |
2237 | sigset_t ksigmask, sigsaved; | ||
2238 | struct timespec64 t; | 2234 | struct timespec64 t; |
2239 | bool interrupted; | 2235 | bool interrupted; |
2240 | int ret; | 2236 | int ret; |
@@ -2245,14 +2241,14 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64, | |||
2245 | if (usig && copy_from_user(&ksig, usig, sizeof(ksig))) | 2241 | if (usig && copy_from_user(&ksig, usig, sizeof(ksig))) |
2246 | return -EFAULT; | 2242 | return -EFAULT; |
2247 | 2243 | ||
2248 | ret = set_compat_user_sigmask(ksig.sigmask, &ksigmask, &sigsaved, ksig.sigsetsize); | 2244 | ret = set_compat_user_sigmask(ksig.sigmask, ksig.sigsetsize); |
2249 | if (ret) | 2245 | if (ret) |
2250 | return ret; | 2246 | return ret; |
2251 | 2247 | ||
2252 | ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL); | 2248 | ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL); |
2253 | 2249 | ||
2254 | interrupted = signal_pending(current); | 2250 | interrupted = signal_pending(current); |
2255 | restore_user_sigmask(ksig.sigmask, &sigsaved, interrupted); | 2251 | restore_saved_sigmask_unless(interrupted); |
2256 | if (interrupted && !ret) | 2252 | if (interrupted && !ret) |
2257 | ret = -ERESTARTNOHAND; | 2253 | ret = -ERESTARTNOHAND; |
2258 | 2254 | ||
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 8264b468f283..d4e11b2e04f6 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c | |||
@@ -1127,7 +1127,6 @@ out_free_interp: | |||
1127 | load_addr, interp_load_addr); | 1127 | load_addr, interp_load_addr); |
1128 | if (retval < 0) | 1128 | if (retval < 0) |
1129 | goto out; | 1129 | goto out; |
1130 | /* N.B. passed_fileno might not be initialized? */ | ||
1131 | current->mm->end_code = end_code; | 1130 | current->mm->end_code = end_code; |
1132 | current->mm->start_code = start_code; | 1131 | current->mm->start_code = start_code; |
1133 | current->mm->start_data = start_data; | 1132 | current->mm->start_data = start_data; |
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index 8c6b50f34466..831a2b25ba79 100644 --- a/fs/binfmt_flat.c +++ b/fs/binfmt_flat.c | |||
@@ -431,7 +431,6 @@ static int load_flat_file(struct linux_binprm *bprm, | |||
431 | unsigned long len, memp, memp_size, extra, rlim; | 431 | unsigned long len, memp, memp_size, extra, rlim; |
432 | __be32 __user *reloc; | 432 | __be32 __user *reloc; |
433 | u32 __user *rp; | 433 | u32 __user *rp; |
434 | struct inode *inode; | ||
435 | int i, rev, relocs; | 434 | int i, rev, relocs; |
436 | loff_t fpos; | 435 | loff_t fpos; |
437 | unsigned long start_code, end_code; | 436 | unsigned long start_code, end_code; |
@@ -439,7 +438,6 @@ static int load_flat_file(struct linux_binprm *bprm, | |||
439 | int ret; | 438 | int ret; |
440 | 439 | ||
441 | hdr = ((struct flat_hdr *) bprm->buf); /* exec-header */ | 440 | hdr = ((struct flat_hdr *) bprm->buf); /* exec-header */ |
442 | inode = file_inode(bprm->file); | ||
443 | 441 | ||
444 | text_len = ntohl(hdr->data_start); | 442 | text_len = ntohl(hdr->data_start); |
445 | data_len = ntohl(hdr->data_end) - ntohl(hdr->data_start); | 443 | data_len = ntohl(hdr->data_end) - ntohl(hdr->data_start); |
diff --git a/fs/coda/Makefile b/fs/coda/Makefile index 1ce66819da2a..78befb8369c9 100644 --- a/fs/coda/Makefile +++ b/fs/coda/Makefile | |||
@@ -6,7 +6,8 @@ | |||
6 | obj-$(CONFIG_CODA_FS) += coda.o | 6 | obj-$(CONFIG_CODA_FS) += coda.o |
7 | 7 | ||
8 | coda-objs := psdev.o cache.o cnode.o inode.o dir.o file.o upcall.o \ | 8 | coda-objs := psdev.o cache.o cnode.o inode.o dir.o file.o upcall.o \ |
9 | coda_linux.o symlink.o pioctl.o sysctl.o | 9 | coda_linux.o symlink.o pioctl.o |
10 | coda-$(CONFIG_SYSCTL) += sysctl.o | ||
10 | 11 | ||
11 | # If you want debugging output, please uncomment the following line. | 12 | # If you want debugging output, please uncomment the following line. |
12 | 13 | ||
diff --git a/fs/coda/cache.c b/fs/coda/cache.c index 201fc08a8b4f..3b8c4513118f 100644 --- a/fs/coda/cache.c +++ b/fs/coda/cache.c | |||
@@ -21,7 +21,7 @@ | |||
21 | #include <linux/spinlock.h> | 21 | #include <linux/spinlock.h> |
22 | 22 | ||
23 | #include <linux/coda.h> | 23 | #include <linux/coda.h> |
24 | #include <linux/coda_psdev.h> | 24 | #include "coda_psdev.h" |
25 | #include "coda_linux.h" | 25 | #include "coda_linux.h" |
26 | #include "coda_cache.h" | 26 | #include "coda_cache.h" |
27 | 27 | ||
diff --git a/fs/coda/cnode.c b/fs/coda/cnode.c index 845b5a66952a..06855f6c7902 100644 --- a/fs/coda/cnode.c +++ b/fs/coda/cnode.c | |||
@@ -8,8 +8,8 @@ | |||
8 | #include <linux/time.h> | 8 | #include <linux/time.h> |
9 | 9 | ||
10 | #include <linux/coda.h> | 10 | #include <linux/coda.h> |
11 | #include <linux/coda_psdev.h> | ||
12 | #include <linux/pagemap.h> | 11 | #include <linux/pagemap.h> |
12 | #include "coda_psdev.h" | ||
13 | #include "coda_linux.h" | 13 | #include "coda_linux.h" |
14 | 14 | ||
15 | static inline int coda_fideq(struct CodaFid *fid1, struct CodaFid *fid2) | 15 | static inline int coda_fideq(struct CodaFid *fid1, struct CodaFid *fid2) |
@@ -137,11 +137,6 @@ struct inode *coda_fid_to_inode(struct CodaFid *fid, struct super_block *sb) | |||
137 | struct inode *inode; | 137 | struct inode *inode; |
138 | unsigned long hash = coda_f2i(fid); | 138 | unsigned long hash = coda_f2i(fid); |
139 | 139 | ||
140 | if ( !sb ) { | ||
141 | pr_warn("%s: no sb!\n", __func__); | ||
142 | return NULL; | ||
143 | } | ||
144 | |||
145 | inode = ilookup5(sb, hash, coda_test_inode, fid); | 140 | inode = ilookup5(sb, hash, coda_test_inode, fid); |
146 | if ( !inode ) | 141 | if ( !inode ) |
147 | return NULL; | 142 | return NULL; |
@@ -153,6 +148,16 @@ struct inode *coda_fid_to_inode(struct CodaFid *fid, struct super_block *sb) | |||
153 | return inode; | 148 | return inode; |
154 | } | 149 | } |
155 | 150 | ||
151 | struct coda_file_info *coda_ftoc(struct file *file) | ||
152 | { | ||
153 | struct coda_file_info *cfi = file->private_data; | ||
154 | |||
155 | BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC); | ||
156 | |||
157 | return cfi; | ||
158 | |||
159 | } | ||
160 | |||
156 | /* the CONTROL inode is made without asking attributes from Venus */ | 161 | /* the CONTROL inode is made without asking attributes from Venus */ |
157 | struct inode *coda_cnode_makectl(struct super_block *sb) | 162 | struct inode *coda_cnode_makectl(struct super_block *sb) |
158 | { | 163 | { |
diff --git a/fs/coda/coda_fs_i.h b/fs/coda/coda_fs_i.h index d702ba1a2bf9..1763ff95d865 100644 --- a/fs/coda/coda_fs_i.h +++ b/fs/coda/coda_fs_i.h | |||
@@ -40,10 +40,9 @@ struct coda_file_info { | |||
40 | int cfi_magic; /* magic number */ | 40 | int cfi_magic; /* magic number */ |
41 | struct file *cfi_container; /* container file for this cnode */ | 41 | struct file *cfi_container; /* container file for this cnode */ |
42 | unsigned int cfi_mapcount; /* nr of times this file is mapped */ | 42 | unsigned int cfi_mapcount; /* nr of times this file is mapped */ |
43 | bool cfi_access_intent; /* is access intent supported */ | ||
43 | }; | 44 | }; |
44 | 45 | ||
45 | #define CODA_FTOC(file) ((struct coda_file_info *)((file)->private_data)) | ||
46 | |||
47 | /* flags */ | 46 | /* flags */ |
48 | #define C_VATTR 0x1 /* Validity of vattr in inode */ | 47 | #define C_VATTR 0x1 /* Validity of vattr in inode */ |
49 | #define C_FLUSH 0x2 /* used after a flush */ | 48 | #define C_FLUSH 0x2 /* used after a flush */ |
@@ -54,6 +53,7 @@ struct inode *coda_cnode_make(struct CodaFid *, struct super_block *); | |||
54 | struct inode *coda_iget(struct super_block *sb, struct CodaFid *fid, struct coda_vattr *attr); | 53 | struct inode *coda_iget(struct super_block *sb, struct CodaFid *fid, struct coda_vattr *attr); |
55 | struct inode *coda_cnode_makectl(struct super_block *sb); | 54 | struct inode *coda_cnode_makectl(struct super_block *sb); |
56 | struct inode *coda_fid_to_inode(struct CodaFid *fid, struct super_block *sb); | 55 | struct inode *coda_fid_to_inode(struct CodaFid *fid, struct super_block *sb); |
56 | struct coda_file_info *coda_ftoc(struct file *file); | ||
57 | void coda_replace_fid(struct inode *, struct CodaFid *, struct CodaFid *); | 57 | void coda_replace_fid(struct inode *, struct CodaFid *, struct CodaFid *); |
58 | 58 | ||
59 | #endif | 59 | #endif |
diff --git a/fs/coda/coda_int.h b/fs/coda/coda_int.h index bb0b3e0ed6c2..f82b59c9dd28 100644 --- a/fs/coda/coda_int.h +++ b/fs/coda/coda_int.h | |||
@@ -13,9 +13,19 @@ extern int coda_fake_statfs; | |||
13 | void coda_destroy_inodecache(void); | 13 | void coda_destroy_inodecache(void); |
14 | int __init coda_init_inodecache(void); | 14 | int __init coda_init_inodecache(void); |
15 | int coda_fsync(struct file *coda_file, loff_t start, loff_t end, int datasync); | 15 | int coda_fsync(struct file *coda_file, loff_t start, loff_t end, int datasync); |
16 | |||
17 | #ifdef CONFIG_SYSCTL | ||
16 | void coda_sysctl_init(void); | 18 | void coda_sysctl_init(void); |
17 | void coda_sysctl_clean(void); | 19 | void coda_sysctl_clean(void); |
20 | #else | ||
21 | static inline void coda_sysctl_init(void) | ||
22 | { | ||
23 | } | ||
18 | 24 | ||
25 | static inline void coda_sysctl_clean(void) | ||
26 | { | ||
27 | } | ||
28 | #endif | ||
19 | #endif /* _CODA_INT_ */ | 29 | #endif /* _CODA_INT_ */ |
20 | 30 | ||
21 | 31 | ||
diff --git a/fs/coda/coda_linux.c b/fs/coda/coda_linux.c index f3d543dd9a98..2e1a5a192074 100644 --- a/fs/coda/coda_linux.c +++ b/fs/coda/coda_linux.c | |||
@@ -18,7 +18,7 @@ | |||
18 | #include <linux/string.h> | 18 | #include <linux/string.h> |
19 | 19 | ||
20 | #include <linux/coda.h> | 20 | #include <linux/coda.h> |
21 | #include <linux/coda_psdev.h> | 21 | #include "coda_psdev.h" |
22 | #include "coda_linux.h" | 22 | #include "coda_linux.h" |
23 | 23 | ||
24 | /* initialize the debugging variables */ | 24 | /* initialize the debugging variables */ |
@@ -66,6 +66,25 @@ unsigned short coda_flags_to_cflags(unsigned short flags) | |||
66 | return coda_flags; | 66 | return coda_flags; |
67 | } | 67 | } |
68 | 68 | ||
69 | static struct timespec64 coda_to_timespec64(struct coda_timespec ts) | ||
70 | { | ||
71 | struct timespec64 ts64 = { | ||
72 | .tv_sec = ts.tv_sec, | ||
73 | .tv_nsec = ts.tv_nsec, | ||
74 | }; | ||
75 | |||
76 | return ts64; | ||
77 | } | ||
78 | |||
79 | static struct coda_timespec timespec64_to_coda(struct timespec64 ts64) | ||
80 | { | ||
81 | struct coda_timespec ts = { | ||
82 | .tv_sec = ts64.tv_sec, | ||
83 | .tv_nsec = ts64.tv_nsec, | ||
84 | }; | ||
85 | |||
86 | return ts; | ||
87 | } | ||
69 | 88 | ||
70 | /* utility functions below */ | 89 | /* utility functions below */ |
71 | void coda_vattr_to_iattr(struct inode *inode, struct coda_vattr *attr) | 90 | void coda_vattr_to_iattr(struct inode *inode, struct coda_vattr *attr) |
@@ -105,11 +124,11 @@ void coda_vattr_to_iattr(struct inode *inode, struct coda_vattr *attr) | |||
105 | if (attr->va_size != -1) | 124 | if (attr->va_size != -1) |
106 | inode->i_blocks = (attr->va_size + 511) >> 9; | 125 | inode->i_blocks = (attr->va_size + 511) >> 9; |
107 | if (attr->va_atime.tv_sec != -1) | 126 | if (attr->va_atime.tv_sec != -1) |
108 | inode->i_atime = timespec_to_timespec64(attr->va_atime); | 127 | inode->i_atime = coda_to_timespec64(attr->va_atime); |
109 | if (attr->va_mtime.tv_sec != -1) | 128 | if (attr->va_mtime.tv_sec != -1) |
110 | inode->i_mtime = timespec_to_timespec64(attr->va_mtime); | 129 | inode->i_mtime = coda_to_timespec64(attr->va_mtime); |
111 | if (attr->va_ctime.tv_sec != -1) | 130 | if (attr->va_ctime.tv_sec != -1) |
112 | inode->i_ctime = timespec_to_timespec64(attr->va_ctime); | 131 | inode->i_ctime = coda_to_timespec64(attr->va_ctime); |
113 | } | 132 | } |
114 | 133 | ||
115 | 134 | ||
@@ -130,12 +149,12 @@ void coda_iattr_to_vattr(struct iattr *iattr, struct coda_vattr *vattr) | |||
130 | vattr->va_uid = (vuid_t) -1; | 149 | vattr->va_uid = (vuid_t) -1; |
131 | vattr->va_gid = (vgid_t) -1; | 150 | vattr->va_gid = (vgid_t) -1; |
132 | vattr->va_size = (off_t) -1; | 151 | vattr->va_size = (off_t) -1; |
133 | vattr->va_atime.tv_sec = (time_t) -1; | 152 | vattr->va_atime.tv_sec = (int64_t) -1; |
134 | vattr->va_atime.tv_nsec = (time_t) -1; | 153 | vattr->va_atime.tv_nsec = (long) -1; |
135 | vattr->va_mtime.tv_sec = (time_t) -1; | 154 | vattr->va_mtime.tv_sec = (int64_t) -1; |
136 | vattr->va_mtime.tv_nsec = (time_t) -1; | 155 | vattr->va_mtime.tv_nsec = (long) -1; |
137 | vattr->va_ctime.tv_sec = (time_t) -1; | 156 | vattr->va_ctime.tv_sec = (int64_t) -1; |
138 | vattr->va_ctime.tv_nsec = (time_t) -1; | 157 | vattr->va_ctime.tv_nsec = (long) -1; |
139 | vattr->va_type = C_VNON; | 158 | vattr->va_type = C_VNON; |
140 | vattr->va_fileid = -1; | 159 | vattr->va_fileid = -1; |
141 | vattr->va_gen = -1; | 160 | vattr->va_gen = -1; |
@@ -175,13 +194,13 @@ void coda_iattr_to_vattr(struct iattr *iattr, struct coda_vattr *vattr) | |||
175 | vattr->va_size = iattr->ia_size; | 194 | vattr->va_size = iattr->ia_size; |
176 | } | 195 | } |
177 | if ( valid & ATTR_ATIME ) { | 196 | if ( valid & ATTR_ATIME ) { |
178 | vattr->va_atime = timespec64_to_timespec(iattr->ia_atime); | 197 | vattr->va_atime = timespec64_to_coda(iattr->ia_atime); |
179 | } | 198 | } |
180 | if ( valid & ATTR_MTIME ) { | 199 | if ( valid & ATTR_MTIME ) { |
181 | vattr->va_mtime = timespec64_to_timespec(iattr->ia_mtime); | 200 | vattr->va_mtime = timespec64_to_coda(iattr->ia_mtime); |
182 | } | 201 | } |
183 | if ( valid & ATTR_CTIME ) { | 202 | if ( valid & ATTR_CTIME ) { |
184 | vattr->va_ctime = timespec64_to_timespec(iattr->ia_ctime); | 203 | vattr->va_ctime = timespec64_to_coda(iattr->ia_ctime); |
185 | } | 204 | } |
186 | } | 205 | } |
187 | 206 | ||
diff --git a/fs/coda/coda_linux.h b/fs/coda/coda_linux.h index 126155cadfa9..d5ebd36fb2cc 100644 --- a/fs/coda/coda_linux.h +++ b/fs/coda/coda_linux.h | |||
@@ -59,22 +59,6 @@ void coda_vattr_to_iattr(struct inode *, struct coda_vattr *); | |||
59 | void coda_iattr_to_vattr(struct iattr *, struct coda_vattr *); | 59 | void coda_iattr_to_vattr(struct iattr *, struct coda_vattr *); |
60 | unsigned short coda_flags_to_cflags(unsigned short); | 60 | unsigned short coda_flags_to_cflags(unsigned short); |
61 | 61 | ||
62 | /* sysctl.h */ | ||
63 | void coda_sysctl_init(void); | ||
64 | void coda_sysctl_clean(void); | ||
65 | |||
66 | #define CODA_ALLOC(ptr, cast, size) do { \ | ||
67 | if (size < PAGE_SIZE) \ | ||
68 | ptr = kzalloc((unsigned long) size, GFP_KERNEL); \ | ||
69 | else \ | ||
70 | ptr = (cast)vzalloc((unsigned long) size); \ | ||
71 | if (!ptr) \ | ||
72 | pr_warn("kernel malloc returns 0 at %s:%d\n", __FILE__, __LINE__); \ | ||
73 | } while (0) | ||
74 | |||
75 | |||
76 | #define CODA_FREE(ptr, size) kvfree((ptr)) | ||
77 | |||
78 | /* inode to cnode access functions */ | 62 | /* inode to cnode access functions */ |
79 | 63 | ||
80 | static inline struct coda_inode_info *ITOC(struct inode *inode) | 64 | static inline struct coda_inode_info *ITOC(struct inode *inode) |
diff --git a/include/linux/coda_psdev.h b/fs/coda/coda_psdev.h index 15170954aa2b..52da08c770b0 100644 --- a/include/linux/coda_psdev.h +++ b/fs/coda/coda_psdev.h | |||
@@ -3,11 +3,31 @@ | |||
3 | #define __CODA_PSDEV_H | 3 | #define __CODA_PSDEV_H |
4 | 4 | ||
5 | #include <linux/backing-dev.h> | 5 | #include <linux/backing-dev.h> |
6 | #include <linux/magic.h> | ||
6 | #include <linux/mutex.h> | 7 | #include <linux/mutex.h> |
7 | #include <uapi/linux/coda_psdev.h> | 8 | |
9 | #define CODA_PSDEV_MAJOR 67 | ||
10 | #define MAX_CODADEVS 5 /* how many do we allow */ | ||
8 | 11 | ||
9 | struct kstatfs; | 12 | struct kstatfs; |
10 | 13 | ||
14 | /* messages between coda filesystem in kernel and Venus */ | ||
15 | struct upc_req { | ||
16 | struct list_head uc_chain; | ||
17 | caddr_t uc_data; | ||
18 | u_short uc_flags; | ||
19 | u_short uc_inSize; /* Size is at most 5000 bytes */ | ||
20 | u_short uc_outSize; | ||
21 | u_short uc_opcode; /* copied from data to save lookup */ | ||
22 | int uc_unique; | ||
23 | wait_queue_head_t uc_sleep; /* process' wait queue */ | ||
24 | }; | ||
25 | |||
26 | #define CODA_REQ_ASYNC 0x1 | ||
27 | #define CODA_REQ_READ 0x2 | ||
28 | #define CODA_REQ_WRITE 0x4 | ||
29 | #define CODA_REQ_ABORT 0x8 | ||
30 | |||
11 | /* communication pending/processing queues */ | 31 | /* communication pending/processing queues */ |
12 | struct venus_comm { | 32 | struct venus_comm { |
13 | u_long vc_seq; | 33 | u_long vc_seq; |
@@ -19,7 +39,6 @@ struct venus_comm { | |||
19 | struct mutex vc_mutex; | 39 | struct mutex vc_mutex; |
20 | }; | 40 | }; |
21 | 41 | ||
22 | |||
23 | static inline struct venus_comm *coda_vcp(struct super_block *sb) | 42 | static inline struct venus_comm *coda_vcp(struct super_block *sb) |
24 | { | 43 | { |
25 | return (struct venus_comm *)((sb)->s_fs_info); | 44 | return (struct venus_comm *)((sb)->s_fs_info); |
@@ -30,39 +49,43 @@ int venus_rootfid(struct super_block *sb, struct CodaFid *fidp); | |||
30 | int venus_getattr(struct super_block *sb, struct CodaFid *fid, | 49 | int venus_getattr(struct super_block *sb, struct CodaFid *fid, |
31 | struct coda_vattr *attr); | 50 | struct coda_vattr *attr); |
32 | int venus_setattr(struct super_block *, struct CodaFid *, struct coda_vattr *); | 51 | int venus_setattr(struct super_block *, struct CodaFid *, struct coda_vattr *); |
33 | int venus_lookup(struct super_block *sb, struct CodaFid *fid, | 52 | int venus_lookup(struct super_block *sb, struct CodaFid *fid, |
34 | const char *name, int length, int *type, | 53 | const char *name, int length, int *type, |
35 | struct CodaFid *resfid); | 54 | struct CodaFid *resfid); |
36 | int venus_close(struct super_block *sb, struct CodaFid *fid, int flags, | 55 | int venus_close(struct super_block *sb, struct CodaFid *fid, int flags, |
37 | kuid_t uid); | 56 | kuid_t uid); |
38 | int venus_open(struct super_block *sb, struct CodaFid *fid, int flags, | 57 | int venus_open(struct super_block *sb, struct CodaFid *fid, int flags, |
39 | struct file **f); | 58 | struct file **f); |
40 | int venus_mkdir(struct super_block *sb, struct CodaFid *dirfid, | 59 | int venus_mkdir(struct super_block *sb, struct CodaFid *dirfid, |
41 | const char *name, int length, | 60 | const char *name, int length, |
42 | struct CodaFid *newfid, struct coda_vattr *attrs); | 61 | struct CodaFid *newfid, struct coda_vattr *attrs); |
43 | int venus_create(struct super_block *sb, struct CodaFid *dirfid, | 62 | int venus_create(struct super_block *sb, struct CodaFid *dirfid, |
44 | const char *name, int length, int excl, int mode, | 63 | const char *name, int length, int excl, int mode, |
45 | struct CodaFid *newfid, struct coda_vattr *attrs) ; | 64 | struct CodaFid *newfid, struct coda_vattr *attrs); |
46 | int venus_rmdir(struct super_block *sb, struct CodaFid *dirfid, | 65 | int venus_rmdir(struct super_block *sb, struct CodaFid *dirfid, |
47 | const char *name, int length); | 66 | const char *name, int length); |
48 | int venus_remove(struct super_block *sb, struct CodaFid *dirfid, | 67 | int venus_remove(struct super_block *sb, struct CodaFid *dirfid, |
49 | const char *name, int length); | 68 | const char *name, int length); |
50 | int venus_readlink(struct super_block *sb, struct CodaFid *fid, | 69 | int venus_readlink(struct super_block *sb, struct CodaFid *fid, |
51 | char *buffer, int *length); | 70 | char *buffer, int *length); |
52 | int venus_rename(struct super_block *, struct CodaFid *new_fid, | 71 | int venus_rename(struct super_block *sb, struct CodaFid *new_fid, |
53 | struct CodaFid *old_fid, size_t old_length, | 72 | struct CodaFid *old_fid, size_t old_length, |
54 | size_t new_length, const char *old_name, | 73 | size_t new_length, const char *old_name, |
55 | const char *new_name); | 74 | const char *new_name); |
56 | int venus_link(struct super_block *sb, struct CodaFid *fid, | 75 | int venus_link(struct super_block *sb, struct CodaFid *fid, |
57 | struct CodaFid *dirfid, const char *name, int len ); | 76 | struct CodaFid *dirfid, const char *name, int len ); |
58 | int venus_symlink(struct super_block *sb, struct CodaFid *fid, | 77 | int venus_symlink(struct super_block *sb, struct CodaFid *fid, |
59 | const char *name, int len, const char *symname, int symlen); | 78 | const char *name, int len, const char *symname, int symlen); |
60 | int venus_access(struct super_block *sb, struct CodaFid *fid, int mask); | 79 | int venus_access(struct super_block *sb, struct CodaFid *fid, int mask); |
61 | int venus_pioctl(struct super_block *sb, struct CodaFid *fid, | 80 | int venus_pioctl(struct super_block *sb, struct CodaFid *fid, |
62 | unsigned int cmd, struct PioctlData *data); | 81 | unsigned int cmd, struct PioctlData *data); |
63 | int coda_downcall(struct venus_comm *vcp, int opcode, union outputArgs *out); | 82 | int coda_downcall(struct venus_comm *vcp, int opcode, union outputArgs *out, |
83 | size_t nbytes); | ||
64 | int venus_fsync(struct super_block *sb, struct CodaFid *fid); | 84 | int venus_fsync(struct super_block *sb, struct CodaFid *fid); |
65 | int venus_statfs(struct dentry *dentry, struct kstatfs *sfs); | 85 | int venus_statfs(struct dentry *dentry, struct kstatfs *sfs); |
86 | int venus_access_intent(struct super_block *sb, struct CodaFid *fid, | ||
87 | bool *access_intent_supported, | ||
88 | size_t count, loff_t ppos, int type); | ||
66 | 89 | ||
67 | /* | 90 | /* |
68 | * Statistics | 91 | * Statistics |
diff --git a/fs/coda/dir.c b/fs/coda/dir.c index 00876ddadb43..ca40c2556ba6 100644 --- a/fs/coda/dir.c +++ b/fs/coda/dir.c | |||
@@ -23,7 +23,7 @@ | |||
23 | #include <linux/uaccess.h> | 23 | #include <linux/uaccess.h> |
24 | 24 | ||
25 | #include <linux/coda.h> | 25 | #include <linux/coda.h> |
26 | #include <linux/coda_psdev.h> | 26 | #include "coda_psdev.h" |
27 | #include "coda_linux.h" | 27 | #include "coda_linux.h" |
28 | #include "coda_cache.h" | 28 | #include "coda_cache.h" |
29 | 29 | ||
@@ -47,8 +47,8 @@ static struct dentry *coda_lookup(struct inode *dir, struct dentry *entry, unsig | |||
47 | int type = 0; | 47 | int type = 0; |
48 | 48 | ||
49 | if (length > CODA_MAXNAMLEN) { | 49 | if (length > CODA_MAXNAMLEN) { |
50 | pr_err("name too long: lookup, %s (%*s)\n", | 50 | pr_err("name too long: lookup, %s %zu\n", |
51 | coda_i2s(dir), (int)length, name); | 51 | coda_i2s(dir), length); |
52 | return ERR_PTR(-ENAMETOOLONG); | 52 | return ERR_PTR(-ENAMETOOLONG); |
53 | } | 53 | } |
54 | 54 | ||
@@ -356,8 +356,7 @@ static int coda_venus_readdir(struct file *coda_file, struct dir_context *ctx) | |||
356 | ino_t ino; | 356 | ino_t ino; |
357 | int ret; | 357 | int ret; |
358 | 358 | ||
359 | cfi = CODA_FTOC(coda_file); | 359 | cfi = coda_ftoc(coda_file); |
360 | BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC); | ||
361 | host_file = cfi->cfi_container; | 360 | host_file = cfi->cfi_container; |
362 | 361 | ||
363 | cii = ITOC(file_inode(coda_file)); | 362 | cii = ITOC(file_inode(coda_file)); |
@@ -426,8 +425,7 @@ static int coda_readdir(struct file *coda_file, struct dir_context *ctx) | |||
426 | struct file *host_file; | 425 | struct file *host_file; |
427 | int ret; | 426 | int ret; |
428 | 427 | ||
429 | cfi = CODA_FTOC(coda_file); | 428 | cfi = coda_ftoc(coda_file); |
430 | BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC); | ||
431 | host_file = cfi->cfi_container; | 429 | host_file = cfi->cfi_container; |
432 | 430 | ||
433 | if (host_file->f_op->iterate || host_file->f_op->iterate_shared) { | 431 | if (host_file->f_op->iterate || host_file->f_op->iterate_shared) { |
diff --git a/fs/coda/file.c b/fs/coda/file.c index 1cbc1f2298ee..128d63df5bfb 100644 --- a/fs/coda/file.c +++ b/fs/coda/file.c | |||
@@ -20,22 +20,43 @@ | |||
20 | #include <linux/string.h> | 20 | #include <linux/string.h> |
21 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
22 | #include <linux/uaccess.h> | 22 | #include <linux/uaccess.h> |
23 | #include <linux/uio.h> | ||
23 | 24 | ||
24 | #include <linux/coda.h> | 25 | #include <linux/coda.h> |
25 | #include <linux/coda_psdev.h> | 26 | #include "coda_psdev.h" |
26 | |||
27 | #include "coda_linux.h" | 27 | #include "coda_linux.h" |
28 | #include "coda_int.h" | 28 | #include "coda_int.h" |
29 | 29 | ||
30 | struct coda_vm_ops { | ||
31 | atomic_t refcnt; | ||
32 | struct file *coda_file; | ||
33 | const struct vm_operations_struct *host_vm_ops; | ||
34 | struct vm_operations_struct vm_ops; | ||
35 | }; | ||
36 | |||
30 | static ssize_t | 37 | static ssize_t |
31 | coda_file_read_iter(struct kiocb *iocb, struct iov_iter *to) | 38 | coda_file_read_iter(struct kiocb *iocb, struct iov_iter *to) |
32 | { | 39 | { |
33 | struct file *coda_file = iocb->ki_filp; | 40 | struct file *coda_file = iocb->ki_filp; |
34 | struct coda_file_info *cfi = CODA_FTOC(coda_file); | 41 | struct inode *coda_inode = file_inode(coda_file); |
42 | struct coda_file_info *cfi = coda_ftoc(coda_file); | ||
43 | loff_t ki_pos = iocb->ki_pos; | ||
44 | size_t count = iov_iter_count(to); | ||
45 | ssize_t ret; | ||
35 | 46 | ||
36 | BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC); | 47 | ret = venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode), |
48 | &cfi->cfi_access_intent, | ||
49 | count, ki_pos, CODA_ACCESS_TYPE_READ); | ||
50 | if (ret) | ||
51 | goto finish_read; | ||
37 | 52 | ||
38 | return vfs_iter_read(cfi->cfi_container, to, &iocb->ki_pos, 0); | 53 | ret = vfs_iter_read(cfi->cfi_container, to, &iocb->ki_pos, 0); |
54 | |||
55 | finish_read: | ||
56 | venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode), | ||
57 | &cfi->cfi_access_intent, | ||
58 | count, ki_pos, CODA_ACCESS_TYPE_READ_FINISH); | ||
59 | return ret; | ||
39 | } | 60 | } |
40 | 61 | ||
41 | static ssize_t | 62 | static ssize_t |
@@ -43,13 +64,18 @@ coda_file_write_iter(struct kiocb *iocb, struct iov_iter *to) | |||
43 | { | 64 | { |
44 | struct file *coda_file = iocb->ki_filp; | 65 | struct file *coda_file = iocb->ki_filp; |
45 | struct inode *coda_inode = file_inode(coda_file); | 66 | struct inode *coda_inode = file_inode(coda_file); |
46 | struct coda_file_info *cfi = CODA_FTOC(coda_file); | 67 | struct coda_file_info *cfi = coda_ftoc(coda_file); |
47 | struct file *host_file; | 68 | struct file *host_file = cfi->cfi_container; |
69 | loff_t ki_pos = iocb->ki_pos; | ||
70 | size_t count = iov_iter_count(to); | ||
48 | ssize_t ret; | 71 | ssize_t ret; |
49 | 72 | ||
50 | BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC); | 73 | ret = venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode), |
74 | &cfi->cfi_access_intent, | ||
75 | count, ki_pos, CODA_ACCESS_TYPE_WRITE); | ||
76 | if (ret) | ||
77 | goto finish_write; | ||
51 | 78 | ||
52 | host_file = cfi->cfi_container; | ||
53 | file_start_write(host_file); | 79 | file_start_write(host_file); |
54 | inode_lock(coda_inode); | 80 | inode_lock(coda_inode); |
55 | ret = vfs_iter_write(cfi->cfi_container, to, &iocb->ki_pos, 0); | 81 | ret = vfs_iter_write(cfi->cfi_container, to, &iocb->ki_pos, 0); |
@@ -58,26 +84,73 @@ coda_file_write_iter(struct kiocb *iocb, struct iov_iter *to) | |||
58 | coda_inode->i_mtime = coda_inode->i_ctime = current_time(coda_inode); | 84 | coda_inode->i_mtime = coda_inode->i_ctime = current_time(coda_inode); |
59 | inode_unlock(coda_inode); | 85 | inode_unlock(coda_inode); |
60 | file_end_write(host_file); | 86 | file_end_write(host_file); |
87 | |||
88 | finish_write: | ||
89 | venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode), | ||
90 | &cfi->cfi_access_intent, | ||
91 | count, ki_pos, CODA_ACCESS_TYPE_WRITE_FINISH); | ||
61 | return ret; | 92 | return ret; |
62 | } | 93 | } |
63 | 94 | ||
95 | static void | ||
96 | coda_vm_open(struct vm_area_struct *vma) | ||
97 | { | ||
98 | struct coda_vm_ops *cvm_ops = | ||
99 | container_of(vma->vm_ops, struct coda_vm_ops, vm_ops); | ||
100 | |||
101 | atomic_inc(&cvm_ops->refcnt); | ||
102 | |||
103 | if (cvm_ops->host_vm_ops && cvm_ops->host_vm_ops->open) | ||
104 | cvm_ops->host_vm_ops->open(vma); | ||
105 | } | ||
106 | |||
107 | static void | ||
108 | coda_vm_close(struct vm_area_struct *vma) | ||
109 | { | ||
110 | struct coda_vm_ops *cvm_ops = | ||
111 | container_of(vma->vm_ops, struct coda_vm_ops, vm_ops); | ||
112 | |||
113 | if (cvm_ops->host_vm_ops && cvm_ops->host_vm_ops->close) | ||
114 | cvm_ops->host_vm_ops->close(vma); | ||
115 | |||
116 | if (atomic_dec_and_test(&cvm_ops->refcnt)) { | ||
117 | vma->vm_ops = cvm_ops->host_vm_ops; | ||
118 | fput(cvm_ops->coda_file); | ||
119 | kfree(cvm_ops); | ||
120 | } | ||
121 | } | ||
122 | |||
64 | static int | 123 | static int |
65 | coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma) | 124 | coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma) |
66 | { | 125 | { |
67 | struct coda_file_info *cfi; | 126 | struct inode *coda_inode = file_inode(coda_file); |
127 | struct coda_file_info *cfi = coda_ftoc(coda_file); | ||
128 | struct file *host_file = cfi->cfi_container; | ||
129 | struct inode *host_inode = file_inode(host_file); | ||
68 | struct coda_inode_info *cii; | 130 | struct coda_inode_info *cii; |
69 | struct file *host_file; | 131 | struct coda_vm_ops *cvm_ops; |
70 | struct inode *coda_inode, *host_inode; | 132 | loff_t ppos; |
71 | 133 | size_t count; | |
72 | cfi = CODA_FTOC(coda_file); | 134 | int ret; |
73 | BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC); | ||
74 | host_file = cfi->cfi_container; | ||
75 | 135 | ||
76 | if (!host_file->f_op->mmap) | 136 | if (!host_file->f_op->mmap) |
77 | return -ENODEV; | 137 | return -ENODEV; |
78 | 138 | ||
79 | coda_inode = file_inode(coda_file); | 139 | if (WARN_ON(coda_file != vma->vm_file)) |
80 | host_inode = file_inode(host_file); | 140 | return -EIO; |
141 | |||
142 | count = vma->vm_end - vma->vm_start; | ||
143 | ppos = vma->vm_pgoff * PAGE_SIZE; | ||
144 | |||
145 | ret = venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode), | ||
146 | &cfi->cfi_access_intent, | ||
147 | count, ppos, CODA_ACCESS_TYPE_MMAP); | ||
148 | if (ret) | ||
149 | return ret; | ||
150 | |||
151 | cvm_ops = kmalloc(sizeof(struct coda_vm_ops), GFP_KERNEL); | ||
152 | if (!cvm_ops) | ||
153 | return -ENOMEM; | ||
81 | 154 | ||
82 | cii = ITOC(coda_inode); | 155 | cii = ITOC(coda_inode); |
83 | spin_lock(&cii->c_lock); | 156 | spin_lock(&cii->c_lock); |
@@ -89,6 +162,7 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma) | |||
89 | * the container file on us! */ | 162 | * the container file on us! */ |
90 | else if (coda_inode->i_mapping != host_inode->i_mapping) { | 163 | else if (coda_inode->i_mapping != host_inode->i_mapping) { |
91 | spin_unlock(&cii->c_lock); | 164 | spin_unlock(&cii->c_lock); |
165 | kfree(cvm_ops); | ||
92 | return -EBUSY; | 166 | return -EBUSY; |
93 | } | 167 | } |
94 | 168 | ||
@@ -97,7 +171,29 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma) | |||
97 | cfi->cfi_mapcount++; | 171 | cfi->cfi_mapcount++; |
98 | spin_unlock(&cii->c_lock); | 172 | spin_unlock(&cii->c_lock); |
99 | 173 | ||
100 | return call_mmap(host_file, vma); | 174 | vma->vm_file = get_file(host_file); |
175 | ret = call_mmap(vma->vm_file, vma); | ||
176 | |||
177 | if (ret) { | ||
178 | /* if call_mmap fails, our caller will put coda_file so we | ||
179 | * should drop the reference to the host_file that we got. | ||
180 | */ | ||
181 | fput(host_file); | ||
182 | kfree(cvm_ops); | ||
183 | } else { | ||
184 | /* here we add redirects for the open/close vm_operations */ | ||
185 | cvm_ops->host_vm_ops = vma->vm_ops; | ||
186 | if (vma->vm_ops) | ||
187 | cvm_ops->vm_ops = *vma->vm_ops; | ||
188 | |||
189 | cvm_ops->vm_ops.open = coda_vm_open; | ||
190 | cvm_ops->vm_ops.close = coda_vm_close; | ||
191 | cvm_ops->coda_file = coda_file; | ||
192 | atomic_set(&cvm_ops->refcnt, 1); | ||
193 | |||
194 | vma->vm_ops = &cvm_ops->vm_ops; | ||
195 | } | ||
196 | return ret; | ||
101 | } | 197 | } |
102 | 198 | ||
103 | int coda_open(struct inode *coda_inode, struct file *coda_file) | 199 | int coda_open(struct inode *coda_inode, struct file *coda_file) |
@@ -127,6 +223,8 @@ int coda_open(struct inode *coda_inode, struct file *coda_file) | |||
127 | cfi->cfi_magic = CODA_MAGIC; | 223 | cfi->cfi_magic = CODA_MAGIC; |
128 | cfi->cfi_mapcount = 0; | 224 | cfi->cfi_mapcount = 0; |
129 | cfi->cfi_container = host_file; | 225 | cfi->cfi_container = host_file; |
226 | /* assume access intents are supported unless we hear otherwise */ | ||
227 | cfi->cfi_access_intent = true; | ||
130 | 228 | ||
131 | BUG_ON(coda_file->private_data != NULL); | 229 | BUG_ON(coda_file->private_data != NULL); |
132 | coda_file->private_data = cfi; | 230 | coda_file->private_data = cfi; |
@@ -142,8 +240,7 @@ int coda_release(struct inode *coda_inode, struct file *coda_file) | |||
142 | struct inode *host_inode; | 240 | struct inode *host_inode; |
143 | int err; | 241 | int err; |
144 | 242 | ||
145 | cfi = CODA_FTOC(coda_file); | 243 | cfi = coda_ftoc(coda_file); |
146 | BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC); | ||
147 | 244 | ||
148 | err = venus_close(coda_inode->i_sb, coda_i2f(coda_inode), | 245 | err = venus_close(coda_inode->i_sb, coda_i2f(coda_inode), |
149 | coda_flags, coda_file->f_cred->fsuid); | 246 | coda_flags, coda_file->f_cred->fsuid); |
@@ -185,8 +282,7 @@ int coda_fsync(struct file *coda_file, loff_t start, loff_t end, int datasync) | |||
185 | return err; | 282 | return err; |
186 | inode_lock(coda_inode); | 283 | inode_lock(coda_inode); |
187 | 284 | ||
188 | cfi = CODA_FTOC(coda_file); | 285 | cfi = coda_ftoc(coda_file); |
189 | BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC); | ||
190 | host_file = cfi->cfi_container; | 286 | host_file = cfi->cfi_container; |
191 | 287 | ||
192 | err = vfs_fsync(host_file, datasync); | 288 | err = vfs_fsync(host_file, datasync); |
@@ -207,4 +303,3 @@ const struct file_operations coda_file_operations = { | |||
207 | .fsync = coda_fsync, | 303 | .fsync = coda_fsync, |
208 | .splice_read = generic_file_splice_read, | 304 | .splice_read = generic_file_splice_read, |
209 | }; | 305 | }; |
210 | |||
diff --git a/fs/coda/inode.c b/fs/coda/inode.c index 23f6ebd08e80..321f56e487cb 100644 --- a/fs/coda/inode.c +++ b/fs/coda/inode.c | |||
@@ -27,7 +27,7 @@ | |||
27 | #include <linux/vmalloc.h> | 27 | #include <linux/vmalloc.h> |
28 | 28 | ||
29 | #include <linux/coda.h> | 29 | #include <linux/coda.h> |
30 | #include <linux/coda_psdev.h> | 30 | #include "coda_psdev.h" |
31 | #include "coda_linux.h" | 31 | #include "coda_linux.h" |
32 | #include "coda_cache.h" | 32 | #include "coda_cache.h" |
33 | 33 | ||
@@ -236,6 +236,7 @@ static void coda_put_super(struct super_block *sb) | |||
236 | vcp->vc_sb = NULL; | 236 | vcp->vc_sb = NULL; |
237 | sb->s_fs_info = NULL; | 237 | sb->s_fs_info = NULL; |
238 | mutex_unlock(&vcp->vc_mutex); | 238 | mutex_unlock(&vcp->vc_mutex); |
239 | mutex_destroy(&vcp->vc_mutex); | ||
239 | 240 | ||
240 | pr_info("Bye bye.\n"); | 241 | pr_info("Bye bye.\n"); |
241 | } | 242 | } |
diff --git a/fs/coda/pioctl.c b/fs/coda/pioctl.c index e0c17b7dccce..644d48c12ce8 100644 --- a/fs/coda/pioctl.c +++ b/fs/coda/pioctl.c | |||
@@ -20,8 +20,7 @@ | |||
20 | #include <linux/uaccess.h> | 20 | #include <linux/uaccess.h> |
21 | 21 | ||
22 | #include <linux/coda.h> | 22 | #include <linux/coda.h> |
23 | #include <linux/coda_psdev.h> | 23 | #include "coda_psdev.h" |
24 | |||
25 | #include "coda_linux.h" | 24 | #include "coda_linux.h" |
26 | 25 | ||
27 | /* pioctl ops */ | 26 | /* pioctl ops */ |
diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c index 0ceef32e6fae..240669f51eac 100644 --- a/fs/coda/psdev.c +++ b/fs/coda/psdev.c | |||
@@ -38,8 +38,7 @@ | |||
38 | #include <linux/uaccess.h> | 38 | #include <linux/uaccess.h> |
39 | 39 | ||
40 | #include <linux/coda.h> | 40 | #include <linux/coda.h> |
41 | #include <linux/coda_psdev.h> | 41 | #include "coda_psdev.h" |
42 | |||
43 | #include "coda_linux.h" | 42 | #include "coda_linux.h" |
44 | 43 | ||
45 | #include "coda_int.h" | 44 | #include "coda_int.h" |
@@ -100,8 +99,12 @@ static ssize_t coda_psdev_write(struct file *file, const char __user *buf, | |||
100 | ssize_t retval = 0, count = 0; | 99 | ssize_t retval = 0, count = 0; |
101 | int error; | 100 | int error; |
102 | 101 | ||
102 | /* make sure there is enough to copy out the (opcode, unique) values */ | ||
103 | if (nbytes < (2 * sizeof(u_int32_t))) | ||
104 | return -EINVAL; | ||
105 | |||
103 | /* Peek at the opcode, uniquefier */ | 106 | /* Peek at the opcode, uniquefier */ |
104 | if (copy_from_user(&hdr, buf, 2 * sizeof(u_long))) | 107 | if (copy_from_user(&hdr, buf, 2 * sizeof(u_int32_t))) |
105 | return -EFAULT; | 108 | return -EFAULT; |
106 | 109 | ||
107 | if (DOWNCALL(hdr.opcode)) { | 110 | if (DOWNCALL(hdr.opcode)) { |
@@ -119,17 +122,21 @@ static ssize_t coda_psdev_write(struct file *file, const char __user *buf, | |||
119 | hdr.opcode, hdr.unique); | 122 | hdr.opcode, hdr.unique); |
120 | nbytes = size; | 123 | nbytes = size; |
121 | } | 124 | } |
122 | CODA_ALLOC(dcbuf, union outputArgs *, nbytes); | 125 | dcbuf = kvmalloc(nbytes, GFP_KERNEL); |
126 | if (!dcbuf) { | ||
127 | retval = -ENOMEM; | ||
128 | goto out; | ||
129 | } | ||
123 | if (copy_from_user(dcbuf, buf, nbytes)) { | 130 | if (copy_from_user(dcbuf, buf, nbytes)) { |
124 | CODA_FREE(dcbuf, nbytes); | 131 | kvfree(dcbuf); |
125 | retval = -EFAULT; | 132 | retval = -EFAULT; |
126 | goto out; | 133 | goto out; |
127 | } | 134 | } |
128 | 135 | ||
129 | /* what downcall errors does Venus handle ? */ | 136 | /* what downcall errors does Venus handle ? */ |
130 | error = coda_downcall(vcp, hdr.opcode, dcbuf); | 137 | error = coda_downcall(vcp, hdr.opcode, dcbuf, nbytes); |
131 | 138 | ||
132 | CODA_FREE(dcbuf, nbytes); | 139 | kvfree(dcbuf); |
133 | if (error) { | 140 | if (error) { |
134 | pr_warn("%s: coda_downcall error: %d\n", | 141 | pr_warn("%s: coda_downcall error: %d\n", |
135 | __func__, error); | 142 | __func__, error); |
@@ -182,8 +189,11 @@ static ssize_t coda_psdev_write(struct file *file, const char __user *buf, | |||
182 | if (req->uc_opcode == CODA_OPEN_BY_FD) { | 189 | if (req->uc_opcode == CODA_OPEN_BY_FD) { |
183 | struct coda_open_by_fd_out *outp = | 190 | struct coda_open_by_fd_out *outp = |
184 | (struct coda_open_by_fd_out *)req->uc_data; | 191 | (struct coda_open_by_fd_out *)req->uc_data; |
185 | if (!outp->oh.result) | 192 | if (!outp->oh.result) { |
186 | outp->fh = fget(outp->fd); | 193 | outp->fh = fget(outp->fd); |
194 | if (!outp->fh) | ||
195 | return -EBADF; | ||
196 | } | ||
187 | } | 197 | } |
188 | 198 | ||
189 | wake_up(&req->uc_sleep); | 199 | wake_up(&req->uc_sleep); |
@@ -252,7 +262,7 @@ static ssize_t coda_psdev_read(struct file * file, char __user * buf, | |||
252 | goto out; | 262 | goto out; |
253 | } | 263 | } |
254 | 264 | ||
255 | CODA_FREE(req->uc_data, sizeof(struct coda_in_hdr)); | 265 | kvfree(req->uc_data); |
256 | kfree(req); | 266 | kfree(req); |
257 | out: | 267 | out: |
258 | mutex_unlock(&vcp->vc_mutex); | 268 | mutex_unlock(&vcp->vc_mutex); |
@@ -314,7 +324,7 @@ static int coda_psdev_release(struct inode * inode, struct file * file) | |||
314 | 324 | ||
315 | /* Async requests need to be freed here */ | 325 | /* Async requests need to be freed here */ |
316 | if (req->uc_flags & CODA_REQ_ASYNC) { | 326 | if (req->uc_flags & CODA_REQ_ASYNC) { |
317 | CODA_FREE(req->uc_data, sizeof(struct coda_in_hdr)); | 327 | kvfree(req->uc_data); |
318 | kfree(req); | 328 | kfree(req); |
319 | continue; | 329 | continue; |
320 | } | 330 | } |
@@ -347,13 +357,13 @@ static const struct file_operations coda_psdev_fops = { | |||
347 | .llseek = noop_llseek, | 357 | .llseek = noop_llseek, |
348 | }; | 358 | }; |
349 | 359 | ||
350 | static int init_coda_psdev(void) | 360 | static int __init init_coda_psdev(void) |
351 | { | 361 | { |
352 | int i, err = 0; | 362 | int i, err = 0; |
353 | if (register_chrdev(CODA_PSDEV_MAJOR, "coda", &coda_psdev_fops)) { | 363 | if (register_chrdev(CODA_PSDEV_MAJOR, "coda", &coda_psdev_fops)) { |
354 | pr_err("%s: unable to get major %d\n", | 364 | pr_err("%s: unable to get major %d\n", |
355 | __func__, CODA_PSDEV_MAJOR); | 365 | __func__, CODA_PSDEV_MAJOR); |
356 | return -EIO; | 366 | return -EIO; |
357 | } | 367 | } |
358 | coda_psdev_class = class_create(THIS_MODULE, "coda"); | 368 | coda_psdev_class = class_create(THIS_MODULE, "coda"); |
359 | if (IS_ERR(coda_psdev_class)) { | 369 | if (IS_ERR(coda_psdev_class)) { |
@@ -378,7 +388,7 @@ MODULE_AUTHOR("Jan Harkes, Peter J. Braam"); | |||
378 | MODULE_DESCRIPTION("Coda Distributed File System VFS interface"); | 388 | MODULE_DESCRIPTION("Coda Distributed File System VFS interface"); |
379 | MODULE_ALIAS_CHARDEV_MAJOR(CODA_PSDEV_MAJOR); | 389 | MODULE_ALIAS_CHARDEV_MAJOR(CODA_PSDEV_MAJOR); |
380 | MODULE_LICENSE("GPL"); | 390 | MODULE_LICENSE("GPL"); |
381 | MODULE_VERSION("6.6"); | 391 | MODULE_VERSION("7.0"); |
382 | 392 | ||
383 | static int __init init_coda(void) | 393 | static int __init init_coda(void) |
384 | { | 394 | { |
diff --git a/fs/coda/symlink.c b/fs/coda/symlink.c index 202297d156df..8907d0508198 100644 --- a/fs/coda/symlink.c +++ b/fs/coda/symlink.c | |||
@@ -17,8 +17,7 @@ | |||
17 | #include <linux/pagemap.h> | 17 | #include <linux/pagemap.h> |
18 | 18 | ||
19 | #include <linux/coda.h> | 19 | #include <linux/coda.h> |
20 | #include <linux/coda_psdev.h> | 20 | #include "coda_psdev.h" |
21 | |||
22 | #include "coda_linux.h" | 21 | #include "coda_linux.h" |
23 | 22 | ||
24 | static int coda_symlink_filler(struct file *file, struct page *page) | 23 | static int coda_symlink_filler(struct file *file, struct page *page) |
diff --git a/fs/coda/sysctl.c b/fs/coda/sysctl.c index 0301d45000a8..fda3b702b1c5 100644 --- a/fs/coda/sysctl.c +++ b/fs/coda/sysctl.c | |||
@@ -12,7 +12,6 @@ | |||
12 | 12 | ||
13 | #include "coda_int.h" | 13 | #include "coda_int.h" |
14 | 14 | ||
15 | #ifdef CONFIG_SYSCTL | ||
16 | static struct ctl_table_header *fs_table_header; | 15 | static struct ctl_table_header *fs_table_header; |
17 | 16 | ||
18 | static struct ctl_table coda_table[] = { | 17 | static struct ctl_table coda_table[] = { |
@@ -62,13 +61,3 @@ void coda_sysctl_clean(void) | |||
62 | fs_table_header = NULL; | 61 | fs_table_header = NULL; |
63 | } | 62 | } |
64 | } | 63 | } |
65 | |||
66 | #else | ||
67 | void coda_sysctl_init(void) | ||
68 | { | ||
69 | } | ||
70 | |||
71 | void coda_sysctl_clean(void) | ||
72 | { | ||
73 | } | ||
74 | #endif | ||
diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c index 1175a1722411..eb3b1898da46 100644 --- a/fs/coda/upcall.c +++ b/fs/coda/upcall.c | |||
@@ -33,7 +33,7 @@ | |||
33 | #include <linux/vfs.h> | 33 | #include <linux/vfs.h> |
34 | 34 | ||
35 | #include <linux/coda.h> | 35 | #include <linux/coda.h> |
36 | #include <linux/coda_psdev.h> | 36 | #include "coda_psdev.h" |
37 | #include "coda_linux.h" | 37 | #include "coda_linux.h" |
38 | #include "coda_cache.h" | 38 | #include "coda_cache.h" |
39 | 39 | ||
@@ -46,7 +46,7 @@ static void *alloc_upcall(int opcode, int size) | |||
46 | { | 46 | { |
47 | union inputArgs *inp; | 47 | union inputArgs *inp; |
48 | 48 | ||
49 | CODA_ALLOC(inp, union inputArgs *, size); | 49 | inp = kvzalloc(size, GFP_KERNEL); |
50 | if (!inp) | 50 | if (!inp) |
51 | return ERR_PTR(-ENOMEM); | 51 | return ERR_PTR(-ENOMEM); |
52 | 52 | ||
@@ -85,7 +85,7 @@ int venus_rootfid(struct super_block *sb, struct CodaFid *fidp) | |||
85 | if (!error) | 85 | if (!error) |
86 | *fidp = outp->coda_root.VFid; | 86 | *fidp = outp->coda_root.VFid; |
87 | 87 | ||
88 | CODA_FREE(inp, insize); | 88 | kvfree(inp); |
89 | return error; | 89 | return error; |
90 | } | 90 | } |
91 | 91 | ||
@@ -104,7 +104,7 @@ int venus_getattr(struct super_block *sb, struct CodaFid *fid, | |||
104 | if (!error) | 104 | if (!error) |
105 | *attr = outp->coda_getattr.attr; | 105 | *attr = outp->coda_getattr.attr; |
106 | 106 | ||
107 | CODA_FREE(inp, insize); | 107 | kvfree(inp); |
108 | return error; | 108 | return error; |
109 | } | 109 | } |
110 | 110 | ||
@@ -123,7 +123,7 @@ int venus_setattr(struct super_block *sb, struct CodaFid *fid, | |||
123 | 123 | ||
124 | error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); | 124 | error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); |
125 | 125 | ||
126 | CODA_FREE(inp, insize); | 126 | kvfree(inp); |
127 | return error; | 127 | return error; |
128 | } | 128 | } |
129 | 129 | ||
@@ -153,7 +153,7 @@ int venus_lookup(struct super_block *sb, struct CodaFid *fid, | |||
153 | *type = outp->coda_lookup.vtype; | 153 | *type = outp->coda_lookup.vtype; |
154 | } | 154 | } |
155 | 155 | ||
156 | CODA_FREE(inp, insize); | 156 | kvfree(inp); |
157 | return error; | 157 | return error; |
158 | } | 158 | } |
159 | 159 | ||
@@ -173,7 +173,7 @@ int venus_close(struct super_block *sb, struct CodaFid *fid, int flags, | |||
173 | 173 | ||
174 | error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); | 174 | error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); |
175 | 175 | ||
176 | CODA_FREE(inp, insize); | 176 | kvfree(inp); |
177 | return error; | 177 | return error; |
178 | } | 178 | } |
179 | 179 | ||
@@ -194,7 +194,7 @@ int venus_open(struct super_block *sb, struct CodaFid *fid, | |||
194 | if (!error) | 194 | if (!error) |
195 | *fh = outp->coda_open_by_fd.fh; | 195 | *fh = outp->coda_open_by_fd.fh; |
196 | 196 | ||
197 | CODA_FREE(inp, insize); | 197 | kvfree(inp); |
198 | return error; | 198 | return error; |
199 | } | 199 | } |
200 | 200 | ||
@@ -224,7 +224,7 @@ int venus_mkdir(struct super_block *sb, struct CodaFid *dirfid, | |||
224 | *newfid = outp->coda_mkdir.VFid; | 224 | *newfid = outp->coda_mkdir.VFid; |
225 | } | 225 | } |
226 | 226 | ||
227 | CODA_FREE(inp, insize); | 227 | kvfree(inp); |
228 | return error; | 228 | return error; |
229 | } | 229 | } |
230 | 230 | ||
@@ -262,7 +262,7 @@ int venus_rename(struct super_block *sb, struct CodaFid *old_fid, | |||
262 | 262 | ||
263 | error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); | 263 | error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); |
264 | 264 | ||
265 | CODA_FREE(inp, insize); | 265 | kvfree(inp); |
266 | return error; | 266 | return error; |
267 | } | 267 | } |
268 | 268 | ||
@@ -295,7 +295,7 @@ int venus_create(struct super_block *sb, struct CodaFid *dirfid, | |||
295 | *newfid = outp->coda_create.VFid; | 295 | *newfid = outp->coda_create.VFid; |
296 | } | 296 | } |
297 | 297 | ||
298 | CODA_FREE(inp, insize); | 298 | kvfree(inp); |
299 | return error; | 299 | return error; |
300 | } | 300 | } |
301 | 301 | ||
@@ -318,7 +318,7 @@ int venus_rmdir(struct super_block *sb, struct CodaFid *dirfid, | |||
318 | 318 | ||
319 | error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); | 319 | error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); |
320 | 320 | ||
321 | CODA_FREE(inp, insize); | 321 | kvfree(inp); |
322 | return error; | 322 | return error; |
323 | } | 323 | } |
324 | 324 | ||
@@ -340,7 +340,7 @@ int venus_remove(struct super_block *sb, struct CodaFid *dirfid, | |||
340 | 340 | ||
341 | error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); | 341 | error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); |
342 | 342 | ||
343 | CODA_FREE(inp, insize); | 343 | kvfree(inp); |
344 | return error; | 344 | return error; |
345 | } | 345 | } |
346 | 346 | ||
@@ -370,7 +370,7 @@ int venus_readlink(struct super_block *sb, struct CodaFid *fid, | |||
370 | *(buffer + retlen) = '\0'; | 370 | *(buffer + retlen) = '\0'; |
371 | } | 371 | } |
372 | 372 | ||
373 | CODA_FREE(inp, insize); | 373 | kvfree(inp); |
374 | return error; | 374 | return error; |
375 | } | 375 | } |
376 | 376 | ||
@@ -398,7 +398,7 @@ int venus_link(struct super_block *sb, struct CodaFid *fid, | |||
398 | 398 | ||
399 | error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); | 399 | error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); |
400 | 400 | ||
401 | CODA_FREE(inp, insize); | 401 | kvfree(inp); |
402 | return error; | 402 | return error; |
403 | } | 403 | } |
404 | 404 | ||
@@ -433,7 +433,7 @@ int venus_symlink(struct super_block *sb, struct CodaFid *fid, | |||
433 | 433 | ||
434 | error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); | 434 | error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); |
435 | 435 | ||
436 | CODA_FREE(inp, insize); | 436 | kvfree(inp); |
437 | return error; | 437 | return error; |
438 | } | 438 | } |
439 | 439 | ||
@@ -449,7 +449,7 @@ int venus_fsync(struct super_block *sb, struct CodaFid *fid) | |||
449 | inp->coda_fsync.VFid = *fid; | 449 | inp->coda_fsync.VFid = *fid; |
450 | error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); | 450 | error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); |
451 | 451 | ||
452 | CODA_FREE(inp, insize); | 452 | kvfree(inp); |
453 | return error; | 453 | return error; |
454 | } | 454 | } |
455 | 455 | ||
@@ -467,7 +467,7 @@ int venus_access(struct super_block *sb, struct CodaFid *fid, int mask) | |||
467 | 467 | ||
468 | error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); | 468 | error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); |
469 | 469 | ||
470 | CODA_FREE(inp, insize); | 470 | kvfree(inp); |
471 | return error; | 471 | return error; |
472 | } | 472 | } |
473 | 473 | ||
@@ -543,7 +543,7 @@ int venus_pioctl(struct super_block *sb, struct CodaFid *fid, | |||
543 | } | 543 | } |
544 | 544 | ||
545 | exit: | 545 | exit: |
546 | CODA_FREE(inp, insize); | 546 | kvfree(inp); |
547 | return error; | 547 | return error; |
548 | } | 548 | } |
549 | 549 | ||
@@ -553,7 +553,7 @@ int venus_statfs(struct dentry *dentry, struct kstatfs *sfs) | |||
553 | union outputArgs *outp; | 553 | union outputArgs *outp; |
554 | int insize, outsize, error; | 554 | int insize, outsize, error; |
555 | 555 | ||
556 | insize = max_t(unsigned int, INSIZE(statfs), OUTSIZE(statfs)); | 556 | insize = SIZE(statfs); |
557 | UPARG(CODA_STATFS); | 557 | UPARG(CODA_STATFS); |
558 | 558 | ||
559 | error = coda_upcall(coda_vcp(dentry->d_sb), insize, &outsize, inp); | 559 | error = coda_upcall(coda_vcp(dentry->d_sb), insize, &outsize, inp); |
@@ -565,10 +565,51 @@ int venus_statfs(struct dentry *dentry, struct kstatfs *sfs) | |||
565 | sfs->f_ffree = outp->coda_statfs.stat.f_ffree; | 565 | sfs->f_ffree = outp->coda_statfs.stat.f_ffree; |
566 | } | 566 | } |
567 | 567 | ||
568 | CODA_FREE(inp, insize); | 568 | kvfree(inp); |
569 | return error; | 569 | return error; |
570 | } | 570 | } |
571 | 571 | ||
572 | int venus_access_intent(struct super_block *sb, struct CodaFid *fid, | ||
573 | bool *access_intent_supported, | ||
574 | size_t count, loff_t ppos, int type) | ||
575 | { | ||
576 | union inputArgs *inp; | ||
577 | union outputArgs *outp; | ||
578 | int insize, outsize, error; | ||
579 | bool finalizer = | ||
580 | type == CODA_ACCESS_TYPE_READ_FINISH || | ||
581 | type == CODA_ACCESS_TYPE_WRITE_FINISH; | ||
582 | |||
583 | if (!*access_intent_supported && !finalizer) | ||
584 | return 0; | ||
585 | |||
586 | insize = SIZE(access_intent); | ||
587 | UPARG(CODA_ACCESS_INTENT); | ||
588 | |||
589 | inp->coda_access_intent.VFid = *fid; | ||
590 | inp->coda_access_intent.count = count; | ||
591 | inp->coda_access_intent.pos = ppos; | ||
592 | inp->coda_access_intent.type = type; | ||
593 | |||
594 | error = coda_upcall(coda_vcp(sb), insize, | ||
595 | finalizer ? NULL : &outsize, inp); | ||
596 | |||
597 | /* | ||
598 | * we have to free the request buffer for synchronous upcalls | ||
599 | * or when asynchronous upcalls fail, but not when asynchronous | ||
600 | * upcalls succeed | ||
601 | */ | ||
602 | if (!finalizer || error) | ||
603 | kvfree(inp); | ||
604 | |||
605 | /* Chunked access is not supported or an old Coda client */ | ||
606 | if (error == -EOPNOTSUPP) { | ||
607 | *access_intent_supported = false; | ||
608 | error = 0; | ||
609 | } | ||
610 | return error; | ||
611 | } | ||
612 | |||
572 | /* | 613 | /* |
573 | * coda_upcall and coda_downcall routines. | 614 | * coda_upcall and coda_downcall routines. |
574 | */ | 615 | */ |
@@ -598,10 +639,12 @@ static void coda_unblock_signals(sigset_t *old) | |||
598 | * has seen them, | 639 | * has seen them, |
599 | * - CODA_CLOSE or CODA_RELEASE upcall (to avoid reference count problems) | 640 | * - CODA_CLOSE or CODA_RELEASE upcall (to avoid reference count problems) |
600 | * - CODA_STORE (to avoid data loss) | 641 | * - CODA_STORE (to avoid data loss) |
642 | * - CODA_ACCESS_INTENT (to avoid reference count problems) | ||
601 | */ | 643 | */ |
602 | #define CODA_INTERRUPTIBLE(r) (!coda_hard && \ | 644 | #define CODA_INTERRUPTIBLE(r) (!coda_hard && \ |
603 | (((r)->uc_opcode != CODA_CLOSE && \ | 645 | (((r)->uc_opcode != CODA_CLOSE && \ |
604 | (r)->uc_opcode != CODA_STORE && \ | 646 | (r)->uc_opcode != CODA_STORE && \ |
647 | (r)->uc_opcode != CODA_ACCESS_INTENT && \ | ||
605 | (r)->uc_opcode != CODA_RELEASE) || \ | 648 | (r)->uc_opcode != CODA_RELEASE) || \ |
606 | (r)->uc_flags & CODA_REQ_READ)) | 649 | (r)->uc_flags & CODA_REQ_READ)) |
607 | 650 | ||
@@ -687,21 +730,25 @@ static int coda_upcall(struct venus_comm *vcp, | |||
687 | goto exit; | 730 | goto exit; |
688 | } | 731 | } |
689 | 732 | ||
733 | buffer->ih.unique = ++vcp->vc_seq; | ||
734 | |||
690 | req->uc_data = (void *)buffer; | 735 | req->uc_data = (void *)buffer; |
691 | req->uc_flags = 0; | 736 | req->uc_flags = outSize ? 0 : CODA_REQ_ASYNC; |
692 | req->uc_inSize = inSize; | 737 | req->uc_inSize = inSize; |
693 | req->uc_outSize = *outSize ? *outSize : inSize; | 738 | req->uc_outSize = (outSize && *outSize) ? *outSize : inSize; |
694 | req->uc_opcode = ((union inputArgs *)buffer)->ih.opcode; | 739 | req->uc_opcode = buffer->ih.opcode; |
695 | req->uc_unique = ++vcp->vc_seq; | 740 | req->uc_unique = buffer->ih.unique; |
696 | init_waitqueue_head(&req->uc_sleep); | 741 | init_waitqueue_head(&req->uc_sleep); |
697 | 742 | ||
698 | /* Fill in the common input args. */ | ||
699 | ((union inputArgs *)buffer)->ih.unique = req->uc_unique; | ||
700 | |||
701 | /* Append msg to pending queue and poke Venus. */ | 743 | /* Append msg to pending queue and poke Venus. */ |
702 | list_add_tail(&req->uc_chain, &vcp->vc_pending); | 744 | list_add_tail(&req->uc_chain, &vcp->vc_pending); |
703 | |||
704 | wake_up_interruptible(&vcp->vc_waitq); | 745 | wake_up_interruptible(&vcp->vc_waitq); |
746 | |||
747 | if (req->uc_flags & CODA_REQ_ASYNC) { | ||
748 | mutex_unlock(&vcp->vc_mutex); | ||
749 | return 0; | ||
750 | } | ||
751 | |||
705 | /* We can be interrupted while we wait for Venus to process | 752 | /* We can be interrupted while we wait for Venus to process |
706 | * our request. If the interrupt occurs before Venus has read | 753 | * our request. If the interrupt occurs before Venus has read |
707 | * the request, we dequeue and return. If it occurs after the | 754 | * the request, we dequeue and return. If it occurs after the |
@@ -743,20 +790,20 @@ static int coda_upcall(struct venus_comm *vcp, | |||
743 | sig_req = kmalloc(sizeof(struct upc_req), GFP_KERNEL); | 790 | sig_req = kmalloc(sizeof(struct upc_req), GFP_KERNEL); |
744 | if (!sig_req) goto exit; | 791 | if (!sig_req) goto exit; |
745 | 792 | ||
746 | CODA_ALLOC((sig_req->uc_data), char *, sizeof(struct coda_in_hdr)); | 793 | sig_inputArgs = kvzalloc(sizeof(struct coda_in_hdr), GFP_KERNEL); |
747 | if (!sig_req->uc_data) { | 794 | if (!sig_inputArgs) { |
748 | kfree(sig_req); | 795 | kfree(sig_req); |
749 | goto exit; | 796 | goto exit; |
750 | } | 797 | } |
751 | 798 | ||
752 | error = -EINTR; | 799 | error = -EINTR; |
753 | sig_inputArgs = (union inputArgs *)sig_req->uc_data; | ||
754 | sig_inputArgs->ih.opcode = CODA_SIGNAL; | 800 | sig_inputArgs->ih.opcode = CODA_SIGNAL; |
755 | sig_inputArgs->ih.unique = req->uc_unique; | 801 | sig_inputArgs->ih.unique = req->uc_unique; |
756 | 802 | ||
757 | sig_req->uc_flags = CODA_REQ_ASYNC; | 803 | sig_req->uc_flags = CODA_REQ_ASYNC; |
758 | sig_req->uc_opcode = sig_inputArgs->ih.opcode; | 804 | sig_req->uc_opcode = sig_inputArgs->ih.opcode; |
759 | sig_req->uc_unique = sig_inputArgs->ih.unique; | 805 | sig_req->uc_unique = sig_inputArgs->ih.unique; |
806 | sig_req->uc_data = (void *)sig_inputArgs; | ||
760 | sig_req->uc_inSize = sizeof(struct coda_in_hdr); | 807 | sig_req->uc_inSize = sizeof(struct coda_in_hdr); |
761 | sig_req->uc_outSize = sizeof(struct coda_in_hdr); | 808 | sig_req->uc_outSize = sizeof(struct coda_in_hdr); |
762 | 809 | ||
@@ -804,12 +851,44 @@ exit: | |||
804 | * | 851 | * |
805 | * CODA_REPLACE -- replace one CodaFid with another throughout the name cache */ | 852 | * CODA_REPLACE -- replace one CodaFid with another throughout the name cache */ |
806 | 853 | ||
807 | int coda_downcall(struct venus_comm *vcp, int opcode, union outputArgs *out) | 854 | int coda_downcall(struct venus_comm *vcp, int opcode, union outputArgs *out, |
855 | size_t nbytes) | ||
808 | { | 856 | { |
809 | struct inode *inode = NULL; | 857 | struct inode *inode = NULL; |
810 | struct CodaFid *fid = NULL, *newfid; | 858 | struct CodaFid *fid = NULL, *newfid; |
811 | struct super_block *sb; | 859 | struct super_block *sb; |
812 | 860 | ||
861 | /* | ||
862 | * Make sure we have received enough data from the cache | ||
863 | * manager to populate the necessary fields in the buffer | ||
864 | */ | ||
865 | switch (opcode) { | ||
866 | case CODA_PURGEUSER: | ||
867 | if (nbytes < sizeof(struct coda_purgeuser_out)) | ||
868 | return -EINVAL; | ||
869 | break; | ||
870 | |||
871 | case CODA_ZAPDIR: | ||
872 | if (nbytes < sizeof(struct coda_zapdir_out)) | ||
873 | return -EINVAL; | ||
874 | break; | ||
875 | |||
876 | case CODA_ZAPFILE: | ||
877 | if (nbytes < sizeof(struct coda_zapfile_out)) | ||
878 | return -EINVAL; | ||
879 | break; | ||
880 | |||
881 | case CODA_PURGEFID: | ||
882 | if (nbytes < sizeof(struct coda_purgefid_out)) | ||
883 | return -EINVAL; | ||
884 | break; | ||
885 | |||
886 | case CODA_REPLACE: | ||
887 | if (nbytes < sizeof(struct coda_replace_out)) | ||
888 | return -EINVAL; | ||
889 | break; | ||
890 | } | ||
891 | |||
813 | /* Handle invalidation requests. */ | 892 | /* Handle invalidation requests. */ |
814 | mutex_lock(&vcp->vc_mutex); | 893 | mutex_lock(&vcp->vc_mutex); |
815 | sb = vcp->vc_sb; | 894 | sb = vcp->vc_sb; |
@@ -879,4 +958,3 @@ unlock_out: | |||
879 | iput(inode); | 958 | iput(inode); |
880 | return 0; | 959 | return 0; |
881 | } | 960 | } |
882 | |||
diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 4c74c768ae43..0f9c073d78d5 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c | |||
@@ -2313,19 +2313,17 @@ SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events, | |||
2313 | size_t, sigsetsize) | 2313 | size_t, sigsetsize) |
2314 | { | 2314 | { |
2315 | int error; | 2315 | int error; |
2316 | sigset_t ksigmask, sigsaved; | ||
2317 | 2316 | ||
2318 | /* | 2317 | /* |
2319 | * If the caller wants a certain signal mask to be set during the wait, | 2318 | * If the caller wants a certain signal mask to be set during the wait, |
2320 | * we apply it here. | 2319 | * we apply it here. |
2321 | */ | 2320 | */ |
2322 | error = set_user_sigmask(sigmask, &ksigmask, &sigsaved, sigsetsize); | 2321 | error = set_user_sigmask(sigmask, sigsetsize); |
2323 | if (error) | 2322 | if (error) |
2324 | return error; | 2323 | return error; |
2325 | 2324 | ||
2326 | error = do_epoll_wait(epfd, events, maxevents, timeout); | 2325 | error = do_epoll_wait(epfd, events, maxevents, timeout); |
2327 | 2326 | restore_saved_sigmask_unless(error == -EINTR); | |
2328 | restore_user_sigmask(sigmask, &sigsaved, error == -EINTR); | ||
2329 | 2327 | ||
2330 | return error; | 2328 | return error; |
2331 | } | 2329 | } |
@@ -2338,19 +2336,17 @@ COMPAT_SYSCALL_DEFINE6(epoll_pwait, int, epfd, | |||
2338 | compat_size_t, sigsetsize) | 2336 | compat_size_t, sigsetsize) |
2339 | { | 2337 | { |
2340 | long err; | 2338 | long err; |
2341 | sigset_t ksigmask, sigsaved; | ||
2342 | 2339 | ||
2343 | /* | 2340 | /* |
2344 | * If the caller wants a certain signal mask to be set during the wait, | 2341 | * If the caller wants a certain signal mask to be set during the wait, |
2345 | * we apply it here. | 2342 | * we apply it here. |
2346 | */ | 2343 | */ |
2347 | err = set_compat_user_sigmask(sigmask, &ksigmask, &sigsaved, sigsetsize); | 2344 | err = set_compat_user_sigmask(sigmask, sigsetsize); |
2348 | if (err) | 2345 | if (err) |
2349 | return err; | 2346 | return err; |
2350 | 2347 | ||
2351 | err = do_epoll_wait(epfd, events, maxevents, timeout); | 2348 | err = do_epoll_wait(epfd, events, maxevents, timeout); |
2352 | 2349 | restore_saved_sigmask_unless(err == -EINTR); | |
2353 | restore_user_sigmask(sigmask, &sigsaved, err == -EINTR); | ||
2354 | 2350 | ||
2355 | return err; | 2351 | return err; |
2356 | } | 2352 | } |
diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c index d5403b4004c9..bb0b27d88e50 100644 --- a/fs/hfsplus/xattr.c +++ b/fs/hfsplus/xattr.c | |||
@@ -407,7 +407,7 @@ static int copy_name(char *buffer, const char *xattr_name, int name_len) | |||
407 | int offset = 0; | 407 | int offset = 0; |
408 | 408 | ||
409 | if (!is_known_namespace(xattr_name)) { | 409 | if (!is_known_namespace(xattr_name)) { |
410 | strncpy(buffer, XATTR_MAC_OSX_PREFIX, XATTR_MAC_OSX_PREFIX_LEN); | 410 | memcpy(buffer, XATTR_MAC_OSX_PREFIX, XATTR_MAC_OSX_PREFIX_LEN); |
411 | offset += XATTR_MAC_OSX_PREFIX_LEN; | 411 | offset += XATTR_MAC_OSX_PREFIX_LEN; |
412 | len += XATTR_MAC_OSX_PREFIX_LEN; | 412 | len += XATTR_MAC_OSX_PREFIX_LEN; |
413 | } | 413 | } |
diff --git a/fs/io_uring.c b/fs/io_uring.c index d682049c07b2..e2a66e12fbc6 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c | |||
@@ -2400,7 +2400,6 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, | |||
2400 | const sigset_t __user *sig, size_t sigsz) | 2400 | const sigset_t __user *sig, size_t sigsz) |
2401 | { | 2401 | { |
2402 | struct io_cq_ring *ring = ctx->cq_ring; | 2402 | struct io_cq_ring *ring = ctx->cq_ring; |
2403 | sigset_t ksigmask, sigsaved; | ||
2404 | int ret; | 2403 | int ret; |
2405 | 2404 | ||
2406 | if (io_cqring_events(ring) >= min_events) | 2405 | if (io_cqring_events(ring) >= min_events) |
@@ -2410,21 +2409,17 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, | |||
2410 | #ifdef CONFIG_COMPAT | 2409 | #ifdef CONFIG_COMPAT |
2411 | if (in_compat_syscall()) | 2410 | if (in_compat_syscall()) |
2412 | ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig, | 2411 | ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig, |
2413 | &ksigmask, &sigsaved, sigsz); | 2412 | sigsz); |
2414 | else | 2413 | else |
2415 | #endif | 2414 | #endif |
2416 | ret = set_user_sigmask(sig, &ksigmask, | 2415 | ret = set_user_sigmask(sig, sigsz); |
2417 | &sigsaved, sigsz); | ||
2418 | 2416 | ||
2419 | if (ret) | 2417 | if (ret) |
2420 | return ret; | 2418 | return ret; |
2421 | } | 2419 | } |
2422 | 2420 | ||
2423 | ret = wait_event_interruptible(ctx->wait, io_cqring_events(ring) >= min_events); | 2421 | ret = wait_event_interruptible(ctx->wait, io_cqring_events(ring) >= min_events); |
2424 | 2422 | restore_saved_sigmask_unless(ret == -ERESTARTSYS); | |
2425 | if (sig) | ||
2426 | restore_user_sigmask(sig, &sigsaved, ret == -ERESTARTSYS); | ||
2427 | |||
2428 | if (ret == -ERESTARTSYS) | 2423 | if (ret == -ERESTARTSYS) |
2429 | ret = -EINTR; | 2424 | ret = -EINTR; |
2430 | 2425 | ||
diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig index 47d2651fd9dc..cb5629bd5fff 100644 --- a/fs/proc/Kconfig +++ b/fs/proc/Kconfig | |||
@@ -58,7 +58,8 @@ config PROC_VMCORE_DEVICE_DUMP | |||
58 | snapshot. | 58 | snapshot. |
59 | 59 | ||
60 | If you say Y here, the collected device dumps will be added | 60 | If you say Y here, the collected device dumps will be added |
61 | as ELF notes to /proc/vmcore. | 61 | as ELF notes to /proc/vmcore. You can still disable device |
62 | dump using the kernel command line option 'novmcoredd'. | ||
62 | 63 | ||
63 | config PROC_SYSCTL | 64 | config PROC_SYSCTL |
64 | bool "Sysctl support (/proc/sys)" if EXPERT | 65 | bool "Sysctl support (/proc/sys)" if EXPERT |
diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 5f8d215b3fd0..dbe43a50caf2 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c | |||
@@ -200,7 +200,8 @@ static loff_t proc_reg_llseek(struct file *file, loff_t offset, int whence) | |||
200 | struct proc_dir_entry *pde = PDE(file_inode(file)); | 200 | struct proc_dir_entry *pde = PDE(file_inode(file)); |
201 | loff_t rv = -EINVAL; | 201 | loff_t rv = -EINVAL; |
202 | if (use_pde(pde)) { | 202 | if (use_pde(pde)) { |
203 | loff_t (*llseek)(struct file *, loff_t, int); | 203 | typeof_member(struct file_operations, llseek) llseek; |
204 | |||
204 | llseek = pde->proc_fops->llseek; | 205 | llseek = pde->proc_fops->llseek; |
205 | if (!llseek) | 206 | if (!llseek) |
206 | llseek = default_llseek; | 207 | llseek = default_llseek; |
@@ -212,10 +213,11 @@ static loff_t proc_reg_llseek(struct file *file, loff_t offset, int whence) | |||
212 | 213 | ||
213 | static ssize_t proc_reg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) | 214 | static ssize_t proc_reg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) |
214 | { | 215 | { |
215 | ssize_t (*read)(struct file *, char __user *, size_t, loff_t *); | ||
216 | struct proc_dir_entry *pde = PDE(file_inode(file)); | 216 | struct proc_dir_entry *pde = PDE(file_inode(file)); |
217 | ssize_t rv = -EIO; | 217 | ssize_t rv = -EIO; |
218 | if (use_pde(pde)) { | 218 | if (use_pde(pde)) { |
219 | typeof_member(struct file_operations, read) read; | ||
220 | |||
219 | read = pde->proc_fops->read; | 221 | read = pde->proc_fops->read; |
220 | if (read) | 222 | if (read) |
221 | rv = read(file, buf, count, ppos); | 223 | rv = read(file, buf, count, ppos); |
@@ -226,10 +228,11 @@ static ssize_t proc_reg_read(struct file *file, char __user *buf, size_t count, | |||
226 | 228 | ||
227 | static ssize_t proc_reg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) | 229 | static ssize_t proc_reg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) |
228 | { | 230 | { |
229 | ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *); | ||
230 | struct proc_dir_entry *pde = PDE(file_inode(file)); | 231 | struct proc_dir_entry *pde = PDE(file_inode(file)); |
231 | ssize_t rv = -EIO; | 232 | ssize_t rv = -EIO; |
232 | if (use_pde(pde)) { | 233 | if (use_pde(pde)) { |
234 | typeof_member(struct file_operations, write) write; | ||
235 | |||
233 | write = pde->proc_fops->write; | 236 | write = pde->proc_fops->write; |
234 | if (write) | 237 | if (write) |
235 | rv = write(file, buf, count, ppos); | 238 | rv = write(file, buf, count, ppos); |
@@ -242,8 +245,9 @@ static __poll_t proc_reg_poll(struct file *file, struct poll_table_struct *pts) | |||
242 | { | 245 | { |
243 | struct proc_dir_entry *pde = PDE(file_inode(file)); | 246 | struct proc_dir_entry *pde = PDE(file_inode(file)); |
244 | __poll_t rv = DEFAULT_POLLMASK; | 247 | __poll_t rv = DEFAULT_POLLMASK; |
245 | __poll_t (*poll)(struct file *, struct poll_table_struct *); | ||
246 | if (use_pde(pde)) { | 248 | if (use_pde(pde)) { |
249 | typeof_member(struct file_operations, poll) poll; | ||
250 | |||
247 | poll = pde->proc_fops->poll; | 251 | poll = pde->proc_fops->poll; |
248 | if (poll) | 252 | if (poll) |
249 | rv = poll(file, pts); | 253 | rv = poll(file, pts); |
@@ -256,8 +260,9 @@ static long proc_reg_unlocked_ioctl(struct file *file, unsigned int cmd, unsigne | |||
256 | { | 260 | { |
257 | struct proc_dir_entry *pde = PDE(file_inode(file)); | 261 | struct proc_dir_entry *pde = PDE(file_inode(file)); |
258 | long rv = -ENOTTY; | 262 | long rv = -ENOTTY; |
259 | long (*ioctl)(struct file *, unsigned int, unsigned long); | ||
260 | if (use_pde(pde)) { | 263 | if (use_pde(pde)) { |
264 | typeof_member(struct file_operations, unlocked_ioctl) ioctl; | ||
265 | |||
261 | ioctl = pde->proc_fops->unlocked_ioctl; | 266 | ioctl = pde->proc_fops->unlocked_ioctl; |
262 | if (ioctl) | 267 | if (ioctl) |
263 | rv = ioctl(file, cmd, arg); | 268 | rv = ioctl(file, cmd, arg); |
@@ -271,8 +276,9 @@ static long proc_reg_compat_ioctl(struct file *file, unsigned int cmd, unsigned | |||
271 | { | 276 | { |
272 | struct proc_dir_entry *pde = PDE(file_inode(file)); | 277 | struct proc_dir_entry *pde = PDE(file_inode(file)); |
273 | long rv = -ENOTTY; | 278 | long rv = -ENOTTY; |
274 | long (*compat_ioctl)(struct file *, unsigned int, unsigned long); | ||
275 | if (use_pde(pde)) { | 279 | if (use_pde(pde)) { |
280 | typeof_member(struct file_operations, compat_ioctl) compat_ioctl; | ||
281 | |||
276 | compat_ioctl = pde->proc_fops->compat_ioctl; | 282 | compat_ioctl = pde->proc_fops->compat_ioctl; |
277 | if (compat_ioctl) | 283 | if (compat_ioctl) |
278 | rv = compat_ioctl(file, cmd, arg); | 284 | rv = compat_ioctl(file, cmd, arg); |
@@ -286,8 +292,9 @@ static int proc_reg_mmap(struct file *file, struct vm_area_struct *vma) | |||
286 | { | 292 | { |
287 | struct proc_dir_entry *pde = PDE(file_inode(file)); | 293 | struct proc_dir_entry *pde = PDE(file_inode(file)); |
288 | int rv = -EIO; | 294 | int rv = -EIO; |
289 | int (*mmap)(struct file *, struct vm_area_struct *); | ||
290 | if (use_pde(pde)) { | 295 | if (use_pde(pde)) { |
296 | typeof_member(struct file_operations, mmap) mmap; | ||
297 | |||
291 | mmap = pde->proc_fops->mmap; | 298 | mmap = pde->proc_fops->mmap; |
292 | if (mmap) | 299 | if (mmap) |
293 | rv = mmap(file, vma); | 300 | rv = mmap(file, vma); |
@@ -305,7 +312,7 @@ proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr, | |||
305 | unsigned long rv = -EIO; | 312 | unsigned long rv = -EIO; |
306 | 313 | ||
307 | if (use_pde(pde)) { | 314 | if (use_pde(pde)) { |
308 | typeof(proc_reg_get_unmapped_area) *get_area; | 315 | typeof_member(struct file_operations, get_unmapped_area) get_area; |
309 | 316 | ||
310 | get_area = pde->proc_fops->get_unmapped_area; | 317 | get_area = pde->proc_fops->get_unmapped_area; |
311 | #ifdef CONFIG_MMU | 318 | #ifdef CONFIG_MMU |
@@ -326,8 +333,8 @@ static int proc_reg_open(struct inode *inode, struct file *file) | |||
326 | { | 333 | { |
327 | struct proc_dir_entry *pde = PDE(inode); | 334 | struct proc_dir_entry *pde = PDE(inode); |
328 | int rv = 0; | 335 | int rv = 0; |
329 | int (*open)(struct inode *, struct file *); | 336 | typeof_member(struct file_operations, open) open; |
330 | int (*release)(struct inode *, struct file *); | 337 | typeof_member(struct file_operations, release) release; |
331 | struct pde_opener *pdeo; | 338 | struct pde_opener *pdeo; |
332 | 339 | ||
333 | /* | 340 | /* |
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index c74570736b24..36ad1b0d6259 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c | |||
@@ -499,6 +499,10 @@ static struct inode *proc_sys_make_inode(struct super_block *sb, | |||
499 | 499 | ||
500 | if (root->set_ownership) | 500 | if (root->set_ownership) |
501 | root->set_ownership(head, table, &inode->i_uid, &inode->i_gid); | 501 | root->set_ownership(head, table, &inode->i_uid, &inode->i_gid); |
502 | else { | ||
503 | inode->i_uid = GLOBAL_ROOT_UID; | ||
504 | inode->i_gid = GLOBAL_ROOT_GID; | ||
505 | } | ||
502 | 506 | ||
503 | return inode; | 507 | return inode; |
504 | } | 508 | } |
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c index 57957c91c6df..7bcc92add72c 100644 --- a/fs/proc/vmcore.c +++ b/fs/proc/vmcore.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/init.h> | 21 | #include <linux/init.h> |
22 | #include <linux/crash_dump.h> | 22 | #include <linux/crash_dump.h> |
23 | #include <linux/list.h> | 23 | #include <linux/list.h> |
24 | #include <linux/moduleparam.h> | ||
24 | #include <linux/mutex.h> | 25 | #include <linux/mutex.h> |
25 | #include <linux/vmalloc.h> | 26 | #include <linux/vmalloc.h> |
26 | #include <linux/pagemap.h> | 27 | #include <linux/pagemap.h> |
@@ -54,6 +55,9 @@ static struct proc_dir_entry *proc_vmcore; | |||
54 | /* Device Dump list and mutex to synchronize access to list */ | 55 | /* Device Dump list and mutex to synchronize access to list */ |
55 | static LIST_HEAD(vmcoredd_list); | 56 | static LIST_HEAD(vmcoredd_list); |
56 | static DEFINE_MUTEX(vmcoredd_mutex); | 57 | static DEFINE_MUTEX(vmcoredd_mutex); |
58 | |||
59 | static bool vmcoredd_disabled; | ||
60 | core_param(novmcoredd, vmcoredd_disabled, bool, 0); | ||
57 | #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */ | 61 | #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */ |
58 | 62 | ||
59 | /* Device Dump Size */ | 63 | /* Device Dump Size */ |
@@ -1452,6 +1456,11 @@ int vmcore_add_device_dump(struct vmcoredd_data *data) | |||
1452 | size_t data_size; | 1456 | size_t data_size; |
1453 | int ret; | 1457 | int ret; |
1454 | 1458 | ||
1459 | if (vmcoredd_disabled) { | ||
1460 | pr_err_once("Device dump is disabled\n"); | ||
1461 | return -EINVAL; | ||
1462 | } | ||
1463 | |||
1455 | if (!data || !strlen(data->dump_name) || | 1464 | if (!data || !strlen(data->dump_name) || |
1456 | !data->vmcoredd_callback || !data->size) | 1465 | !data->vmcoredd_callback || !data->size) |
1457 | return -EINVAL; | 1466 | return -EINVAL; |
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index 36346dc4cec0..4517a1394c6f 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c | |||
@@ -94,7 +94,7 @@ static int journal_join(struct reiserfs_transaction_handle *th, | |||
94 | struct super_block *sb); | 94 | struct super_block *sb); |
95 | static void release_journal_dev(struct super_block *super, | 95 | static void release_journal_dev(struct super_block *super, |
96 | struct reiserfs_journal *journal); | 96 | struct reiserfs_journal *journal); |
97 | static int dirty_one_transaction(struct super_block *s, | 97 | static void dirty_one_transaction(struct super_block *s, |
98 | struct reiserfs_journal_list *jl); | 98 | struct reiserfs_journal_list *jl); |
99 | static void flush_async_commits(struct work_struct *work); | 99 | static void flush_async_commits(struct work_struct *work); |
100 | static void queue_log_writer(struct super_block *s); | 100 | static void queue_log_writer(struct super_block *s); |
@@ -1682,12 +1682,11 @@ next: | |||
1682 | } | 1682 | } |
1683 | 1683 | ||
1684 | /* used by flush_commit_list */ | 1684 | /* used by flush_commit_list */ |
1685 | static int dirty_one_transaction(struct super_block *s, | 1685 | static void dirty_one_transaction(struct super_block *s, |
1686 | struct reiserfs_journal_list *jl) | 1686 | struct reiserfs_journal_list *jl) |
1687 | { | 1687 | { |
1688 | struct reiserfs_journal_cnode *cn; | 1688 | struct reiserfs_journal_cnode *cn; |
1689 | struct reiserfs_journal_list *pjl; | 1689 | struct reiserfs_journal_list *pjl; |
1690 | int ret = 0; | ||
1691 | 1690 | ||
1692 | jl->j_state |= LIST_DIRTY; | 1691 | jl->j_state |= LIST_DIRTY; |
1693 | cn = jl->j_realblock; | 1692 | cn = jl->j_realblock; |
@@ -1716,7 +1715,6 @@ static int dirty_one_transaction(struct super_block *s, | |||
1716 | } | 1715 | } |
1717 | cn = cn->next; | 1716 | cn = cn->next; |
1718 | } | 1717 | } |
1719 | return ret; | ||
1720 | } | 1718 | } |
1721 | 1719 | ||
1722 | static int kupdate_transactions(struct super_block *s, | 1720 | static int kupdate_transactions(struct super_block *s, |
diff --git a/fs/select.c b/fs/select.c index a4d8f6e8b63c..53a0c149f528 100644 --- a/fs/select.c +++ b/fs/select.c | |||
@@ -294,12 +294,14 @@ enum poll_time_type { | |||
294 | PT_OLD_TIMESPEC = 3, | 294 | PT_OLD_TIMESPEC = 3, |
295 | }; | 295 | }; |
296 | 296 | ||
297 | static int poll_select_copy_remaining(struct timespec64 *end_time, | 297 | static int poll_select_finish(struct timespec64 *end_time, |
298 | void __user *p, | 298 | void __user *p, |
299 | enum poll_time_type pt_type, int ret) | 299 | enum poll_time_type pt_type, int ret) |
300 | { | 300 | { |
301 | struct timespec64 rts; | 301 | struct timespec64 rts; |
302 | 302 | ||
303 | restore_saved_sigmask_unless(ret == -ERESTARTNOHAND); | ||
304 | |||
303 | if (!p) | 305 | if (!p) |
304 | return ret; | 306 | return ret; |
305 | 307 | ||
@@ -714,9 +716,7 @@ static int kern_select(int n, fd_set __user *inp, fd_set __user *outp, | |||
714 | } | 716 | } |
715 | 717 | ||
716 | ret = core_sys_select(n, inp, outp, exp, to); | 718 | ret = core_sys_select(n, inp, outp, exp, to); |
717 | ret = poll_select_copy_remaining(&end_time, tvp, PT_TIMEVAL, ret); | 719 | return poll_select_finish(&end_time, tvp, PT_TIMEVAL, ret); |
718 | |||
719 | return ret; | ||
720 | } | 720 | } |
721 | 721 | ||
722 | SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp, | 722 | SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp, |
@@ -730,7 +730,6 @@ static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp, | |||
730 | const sigset_t __user *sigmask, size_t sigsetsize, | 730 | const sigset_t __user *sigmask, size_t sigsetsize, |
731 | enum poll_time_type type) | 731 | enum poll_time_type type) |
732 | { | 732 | { |
733 | sigset_t ksigmask, sigsaved; | ||
734 | struct timespec64 ts, end_time, *to = NULL; | 733 | struct timespec64 ts, end_time, *to = NULL; |
735 | int ret; | 734 | int ret; |
736 | 735 | ||
@@ -753,15 +752,12 @@ static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp, | |||
753 | return -EINVAL; | 752 | return -EINVAL; |
754 | } | 753 | } |
755 | 754 | ||
756 | ret = set_user_sigmask(sigmask, &ksigmask, &sigsaved, sigsetsize); | 755 | ret = set_user_sigmask(sigmask, sigsetsize); |
757 | if (ret) | 756 | if (ret) |
758 | return ret; | 757 | return ret; |
759 | 758 | ||
760 | ret = core_sys_select(n, inp, outp, exp, to); | 759 | ret = core_sys_select(n, inp, outp, exp, to); |
761 | restore_user_sigmask(sigmask, &sigsaved, ret == -ERESTARTNOHAND); | 760 | return poll_select_finish(&end_time, tsp, type, ret); |
762 | ret = poll_select_copy_remaining(&end_time, tsp, type, ret); | ||
763 | |||
764 | return ret; | ||
765 | } | 761 | } |
766 | 762 | ||
767 | /* | 763 | /* |
@@ -926,7 +922,7 @@ static int do_poll(struct poll_list *list, struct poll_wqueues *wait, | |||
926 | if (!count) { | 922 | if (!count) { |
927 | count = wait->error; | 923 | count = wait->error; |
928 | if (signal_pending(current)) | 924 | if (signal_pending(current)) |
929 | count = -EINTR; | 925 | count = -ERESTARTNOHAND; |
930 | } | 926 | } |
931 | if (count || timed_out) | 927 | if (count || timed_out) |
932 | break; | 928 | break; |
@@ -965,7 +961,7 @@ static int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds, | |||
965 | struct timespec64 *end_time) | 961 | struct timespec64 *end_time) |
966 | { | 962 | { |
967 | struct poll_wqueues table; | 963 | struct poll_wqueues table; |
968 | int err = -EFAULT, fdcount, len, size; | 964 | int err = -EFAULT, fdcount, len; |
969 | /* Allocate small arguments on the stack to save memory and be | 965 | /* Allocate small arguments on the stack to save memory and be |
970 | faster - use long to make sure the buffer is aligned properly | 966 | faster - use long to make sure the buffer is aligned properly |
971 | on 64 bit archs to avoid unaligned access */ | 967 | on 64 bit archs to avoid unaligned access */ |
@@ -993,8 +989,8 @@ static int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds, | |||
993 | break; | 989 | break; |
994 | 990 | ||
995 | len = min(todo, POLLFD_PER_PAGE); | 991 | len = min(todo, POLLFD_PER_PAGE); |
996 | size = sizeof(struct poll_list) + sizeof(struct pollfd) * len; | 992 | walk = walk->next = kmalloc(struct_size(walk, entries, len), |
997 | walk = walk->next = kmalloc(size, GFP_KERNEL); | 993 | GFP_KERNEL); |
998 | if (!walk) { | 994 | if (!walk) { |
999 | err = -ENOMEM; | 995 | err = -ENOMEM; |
1000 | goto out_fds; | 996 | goto out_fds; |
@@ -1041,7 +1037,7 @@ static long do_restart_poll(struct restart_block *restart_block) | |||
1041 | 1037 | ||
1042 | ret = do_sys_poll(ufds, nfds, to); | 1038 | ret = do_sys_poll(ufds, nfds, to); |
1043 | 1039 | ||
1044 | if (ret == -EINTR) { | 1040 | if (ret == -ERESTARTNOHAND) { |
1045 | restart_block->fn = do_restart_poll; | 1041 | restart_block->fn = do_restart_poll; |
1046 | ret = -ERESTART_RESTARTBLOCK; | 1042 | ret = -ERESTART_RESTARTBLOCK; |
1047 | } | 1043 | } |
@@ -1062,7 +1058,7 @@ SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds, | |||
1062 | 1058 | ||
1063 | ret = do_sys_poll(ufds, nfds, to); | 1059 | ret = do_sys_poll(ufds, nfds, to); |
1064 | 1060 | ||
1065 | if (ret == -EINTR) { | 1061 | if (ret == -ERESTARTNOHAND) { |
1066 | struct restart_block *restart_block; | 1062 | struct restart_block *restart_block; |
1067 | 1063 | ||
1068 | restart_block = ¤t->restart_block; | 1064 | restart_block = ¤t->restart_block; |
@@ -1086,7 +1082,6 @@ SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds, | |||
1086 | struct __kernel_timespec __user *, tsp, const sigset_t __user *, sigmask, | 1082 | struct __kernel_timespec __user *, tsp, const sigset_t __user *, sigmask, |
1087 | size_t, sigsetsize) | 1083 | size_t, sigsetsize) |
1088 | { | 1084 | { |
1089 | sigset_t ksigmask, sigsaved; | ||
1090 | struct timespec64 ts, end_time, *to = NULL; | 1085 | struct timespec64 ts, end_time, *to = NULL; |
1091 | int ret; | 1086 | int ret; |
1092 | 1087 | ||
@@ -1099,20 +1094,12 @@ SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds, | |||
1099 | return -EINVAL; | 1094 | return -EINVAL; |
1100 | } | 1095 | } |
1101 | 1096 | ||
1102 | ret = set_user_sigmask(sigmask, &ksigmask, &sigsaved, sigsetsize); | 1097 | ret = set_user_sigmask(sigmask, sigsetsize); |
1103 | if (ret) | 1098 | if (ret) |
1104 | return ret; | 1099 | return ret; |
1105 | 1100 | ||
1106 | ret = do_sys_poll(ufds, nfds, to); | 1101 | ret = do_sys_poll(ufds, nfds, to); |
1107 | 1102 | return poll_select_finish(&end_time, tsp, PT_TIMESPEC, ret); | |
1108 | restore_user_sigmask(sigmask, &sigsaved, ret == -EINTR); | ||
1109 | /* We can restart this syscall, usually */ | ||
1110 | if (ret == -EINTR) | ||
1111 | ret = -ERESTARTNOHAND; | ||
1112 | |||
1113 | ret = poll_select_copy_remaining(&end_time, tsp, PT_TIMESPEC, ret); | ||
1114 | |||
1115 | return ret; | ||
1116 | } | 1103 | } |
1117 | 1104 | ||
1118 | #if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT) | 1105 | #if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT) |
@@ -1121,7 +1108,6 @@ SYSCALL_DEFINE5(ppoll_time32, struct pollfd __user *, ufds, unsigned int, nfds, | |||
1121 | struct old_timespec32 __user *, tsp, const sigset_t __user *, sigmask, | 1108 | struct old_timespec32 __user *, tsp, const sigset_t __user *, sigmask, |
1122 | size_t, sigsetsize) | 1109 | size_t, sigsetsize) |
1123 | { | 1110 | { |
1124 | sigset_t ksigmask, sigsaved; | ||
1125 | struct timespec64 ts, end_time, *to = NULL; | 1111 | struct timespec64 ts, end_time, *to = NULL; |
1126 | int ret; | 1112 | int ret; |
1127 | 1113 | ||
@@ -1134,20 +1120,12 @@ SYSCALL_DEFINE5(ppoll_time32, struct pollfd __user *, ufds, unsigned int, nfds, | |||
1134 | return -EINVAL; | 1120 | return -EINVAL; |
1135 | } | 1121 | } |
1136 | 1122 | ||
1137 | ret = set_user_sigmask(sigmask, &ksigmask, &sigsaved, sigsetsize); | 1123 | ret = set_user_sigmask(sigmask, sigsetsize); |
1138 | if (ret) | 1124 | if (ret) |
1139 | return ret; | 1125 | return ret; |
1140 | 1126 | ||
1141 | ret = do_sys_poll(ufds, nfds, to); | 1127 | ret = do_sys_poll(ufds, nfds, to); |
1142 | 1128 | return poll_select_finish(&end_time, tsp, PT_OLD_TIMESPEC, ret); | |
1143 | restore_user_sigmask(sigmask, &sigsaved, ret == -EINTR); | ||
1144 | /* We can restart this syscall, usually */ | ||
1145 | if (ret == -EINTR) | ||
1146 | ret = -ERESTARTNOHAND; | ||
1147 | |||
1148 | ret = poll_select_copy_remaining(&end_time, tsp, PT_OLD_TIMESPEC, ret); | ||
1149 | |||
1150 | return ret; | ||
1151 | } | 1129 | } |
1152 | #endif | 1130 | #endif |
1153 | 1131 | ||
@@ -1284,9 +1262,7 @@ static int do_compat_select(int n, compat_ulong_t __user *inp, | |||
1284 | } | 1262 | } |
1285 | 1263 | ||
1286 | ret = compat_core_sys_select(n, inp, outp, exp, to); | 1264 | ret = compat_core_sys_select(n, inp, outp, exp, to); |
1287 | ret = poll_select_copy_remaining(&end_time, tvp, PT_OLD_TIMEVAL, ret); | 1265 | return poll_select_finish(&end_time, tvp, PT_OLD_TIMEVAL, ret); |
1288 | |||
1289 | return ret; | ||
1290 | } | 1266 | } |
1291 | 1267 | ||
1292 | COMPAT_SYSCALL_DEFINE5(select, int, n, compat_ulong_t __user *, inp, | 1268 | COMPAT_SYSCALL_DEFINE5(select, int, n, compat_ulong_t __user *, inp, |
@@ -1319,7 +1295,6 @@ static long do_compat_pselect(int n, compat_ulong_t __user *inp, | |||
1319 | void __user *tsp, compat_sigset_t __user *sigmask, | 1295 | void __user *tsp, compat_sigset_t __user *sigmask, |
1320 | compat_size_t sigsetsize, enum poll_time_type type) | 1296 | compat_size_t sigsetsize, enum poll_time_type type) |
1321 | { | 1297 | { |
1322 | sigset_t ksigmask, sigsaved; | ||
1323 | struct timespec64 ts, end_time, *to = NULL; | 1298 | struct timespec64 ts, end_time, *to = NULL; |
1324 | int ret; | 1299 | int ret; |
1325 | 1300 | ||
@@ -1342,15 +1317,12 @@ static long do_compat_pselect(int n, compat_ulong_t __user *inp, | |||
1342 | return -EINVAL; | 1317 | return -EINVAL; |
1343 | } | 1318 | } |
1344 | 1319 | ||
1345 | ret = set_compat_user_sigmask(sigmask, &ksigmask, &sigsaved, sigsetsize); | 1320 | ret = set_compat_user_sigmask(sigmask, sigsetsize); |
1346 | if (ret) | 1321 | if (ret) |
1347 | return ret; | 1322 | return ret; |
1348 | 1323 | ||
1349 | ret = compat_core_sys_select(n, inp, outp, exp, to); | 1324 | ret = compat_core_sys_select(n, inp, outp, exp, to); |
1350 | restore_user_sigmask(sigmask, &sigsaved, ret == -ERESTARTNOHAND); | 1325 | return poll_select_finish(&end_time, tsp, type, ret); |
1351 | ret = poll_select_copy_remaining(&end_time, tsp, type, ret); | ||
1352 | |||
1353 | return ret; | ||
1354 | } | 1326 | } |
1355 | 1327 | ||
1356 | COMPAT_SYSCALL_DEFINE6(pselect6_time64, int, n, compat_ulong_t __user *, inp, | 1328 | COMPAT_SYSCALL_DEFINE6(pselect6_time64, int, n, compat_ulong_t __user *, inp, |
@@ -1402,7 +1374,6 @@ COMPAT_SYSCALL_DEFINE5(ppoll_time32, struct pollfd __user *, ufds, | |||
1402 | unsigned int, nfds, struct old_timespec32 __user *, tsp, | 1374 | unsigned int, nfds, struct old_timespec32 __user *, tsp, |
1403 | const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize) | 1375 | const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize) |
1404 | { | 1376 | { |
1405 | sigset_t ksigmask, sigsaved; | ||
1406 | struct timespec64 ts, end_time, *to = NULL; | 1377 | struct timespec64 ts, end_time, *to = NULL; |
1407 | int ret; | 1378 | int ret; |
1408 | 1379 | ||
@@ -1415,20 +1386,12 @@ COMPAT_SYSCALL_DEFINE5(ppoll_time32, struct pollfd __user *, ufds, | |||
1415 | return -EINVAL; | 1386 | return -EINVAL; |
1416 | } | 1387 | } |
1417 | 1388 | ||
1418 | ret = set_compat_user_sigmask(sigmask, &ksigmask, &sigsaved, sigsetsize); | 1389 | ret = set_compat_user_sigmask(sigmask, sigsetsize); |
1419 | if (ret) | 1390 | if (ret) |
1420 | return ret; | 1391 | return ret; |
1421 | 1392 | ||
1422 | ret = do_sys_poll(ufds, nfds, to); | 1393 | ret = do_sys_poll(ufds, nfds, to); |
1423 | 1394 | return poll_select_finish(&end_time, tsp, PT_OLD_TIMESPEC, ret); | |
1424 | restore_user_sigmask(sigmask, &sigsaved, ret == -EINTR); | ||
1425 | /* We can restart this syscall, usually */ | ||
1426 | if (ret == -EINTR) | ||
1427 | ret = -ERESTARTNOHAND; | ||
1428 | |||
1429 | ret = poll_select_copy_remaining(&end_time, tsp, PT_OLD_TIMESPEC, ret); | ||
1430 | |||
1431 | return ret; | ||
1432 | } | 1395 | } |
1433 | #endif | 1396 | #endif |
1434 | 1397 | ||
@@ -1437,7 +1400,6 @@ COMPAT_SYSCALL_DEFINE5(ppoll_time64, struct pollfd __user *, ufds, | |||
1437 | unsigned int, nfds, struct __kernel_timespec __user *, tsp, | 1400 | unsigned int, nfds, struct __kernel_timespec __user *, tsp, |
1438 | const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize) | 1401 | const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize) |
1439 | { | 1402 | { |
1440 | sigset_t ksigmask, sigsaved; | ||
1441 | struct timespec64 ts, end_time, *to = NULL; | 1403 | struct timespec64 ts, end_time, *to = NULL; |
1442 | int ret; | 1404 | int ret; |
1443 | 1405 | ||
@@ -1450,20 +1412,12 @@ COMPAT_SYSCALL_DEFINE5(ppoll_time64, struct pollfd __user *, ufds, | |||
1450 | return -EINVAL; | 1412 | return -EINVAL; |
1451 | } | 1413 | } |
1452 | 1414 | ||
1453 | ret = set_compat_user_sigmask(sigmask, &ksigmask, &sigsaved, sigsetsize); | 1415 | ret = set_compat_user_sigmask(sigmask, sigsetsize); |
1454 | if (ret) | 1416 | if (ret) |
1455 | return ret; | 1417 | return ret; |
1456 | 1418 | ||
1457 | ret = do_sys_poll(ufds, nfds, to); | 1419 | ret = do_sys_poll(ufds, nfds, to); |
1458 | 1420 | return poll_select_finish(&end_time, tsp, PT_TIMESPEC, ret); | |
1459 | restore_user_sigmask(sigmask, &sigsaved, ret == -EINTR); | ||
1460 | /* We can restart this syscall, usually */ | ||
1461 | if (ret == -EINTR) | ||
1462 | ret = -ERESTARTNOHAND; | ||
1463 | |||
1464 | ret = poll_select_copy_remaining(&end_time, tsp, PT_TIMESPEC, ret); | ||
1465 | |||
1466 | return ret; | ||
1467 | } | 1421 | } |
1468 | 1422 | ||
1469 | #endif | 1423 | #endif |
diff --git a/fs/ufs/super.c b/fs/ufs/super.c index 3d247c0d92aa..4ed0dca52ec8 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c | |||
@@ -1407,11 +1407,9 @@ static int ufs_statfs(struct dentry *dentry, struct kstatfs *buf) | |||
1407 | struct super_block *sb = dentry->d_sb; | 1407 | struct super_block *sb = dentry->d_sb; |
1408 | struct ufs_sb_private_info *uspi= UFS_SB(sb)->s_uspi; | 1408 | struct ufs_sb_private_info *uspi= UFS_SB(sb)->s_uspi; |
1409 | unsigned flags = UFS_SB(sb)->s_flags; | 1409 | unsigned flags = UFS_SB(sb)->s_flags; |
1410 | struct ufs_super_block_third *usb3; | ||
1411 | u64 id = huge_encode_dev(sb->s_bdev->bd_dev); | 1410 | u64 id = huge_encode_dev(sb->s_bdev->bd_dev); |
1412 | 1411 | ||
1413 | mutex_lock(&UFS_SB(sb)->s_lock); | 1412 | mutex_lock(&UFS_SB(sb)->s_lock); |
1414 | usb3 = ubh_get_usb_third(uspi); | ||
1415 | 1413 | ||
1416 | if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) | 1414 | if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) |
1417 | buf->f_type = UFS2_MAGIC; | 1415 | buf->f_type = UFS2_MAGIC; |
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index 0e9bd9c83870..aa6c093d9ce9 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h | |||
@@ -104,8 +104,10 @@ extern void warn_slowpath_null(const char *file, const int line); | |||
104 | warn_slowpath_fmt_taint(__FILE__, __LINE__, taint, arg) | 104 | warn_slowpath_fmt_taint(__FILE__, __LINE__, taint, arg) |
105 | #else | 105 | #else |
106 | extern __printf(1, 2) void __warn_printk(const char *fmt, ...); | 106 | extern __printf(1, 2) void __warn_printk(const char *fmt, ...); |
107 | #define __WARN() __WARN_TAINT(TAINT_WARN) | 107 | #define __WARN() do { \ |
108 | #define __WARN_printf(arg...) do { __warn_printk(arg); __WARN(); } while (0) | 108 | printk(KERN_WARNING CUT_HERE); __WARN_TAINT(TAINT_WARN); \ |
109 | } while (0) | ||
110 | #define __WARN_printf(arg...) __WARN_printf_taint(TAINT_WARN, arg) | ||
109 | #define __WARN_printf_taint(taint, arg...) \ | 111 | #define __WARN_printf_taint(taint, arg...) \ |
110 | do { __warn_printk(arg); __WARN_TAINT(taint); } while (0) | 112 | do { __warn_printk(arg); __WARN_TAINT(taint); } while (0) |
111 | #endif | 113 | #endif |
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h index 0dd47a6db2cf..a950a22c4890 100644 --- a/include/asm-generic/cacheflush.h +++ b/include/asm-generic/cacheflush.h | |||
@@ -5,24 +5,70 @@ | |||
5 | /* Keep includes the same across arches. */ | 5 | /* Keep includes the same across arches. */ |
6 | #include <linux/mm.h> | 6 | #include <linux/mm.h> |
7 | 7 | ||
8 | #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 | ||
9 | |||
8 | /* | 10 | /* |
9 | * The cache doesn't need to be flushed when TLB entries change when | 11 | * The cache doesn't need to be flushed when TLB entries change when |
10 | * the cache is mapped to physical memory, not virtual memory | 12 | * the cache is mapped to physical memory, not virtual memory |
11 | */ | 13 | */ |
12 | #define flush_cache_all() do { } while (0) | 14 | static inline void flush_cache_all(void) |
13 | #define flush_cache_mm(mm) do { } while (0) | 15 | { |
14 | #define flush_cache_dup_mm(mm) do { } while (0) | 16 | } |
15 | #define flush_cache_range(vma, start, end) do { } while (0) | 17 | |
16 | #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) | 18 | static inline void flush_cache_mm(struct mm_struct *mm) |
17 | #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 | 19 | { |
18 | #define flush_dcache_page(page) do { } while (0) | 20 | } |
19 | #define flush_dcache_mmap_lock(mapping) do { } while (0) | 21 | |
20 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) | 22 | static inline void flush_cache_dup_mm(struct mm_struct *mm) |
21 | #define flush_icache_range(start, end) do { } while (0) | 23 | { |
22 | #define flush_icache_page(vma,pg) do { } while (0) | 24 | } |
23 | #define flush_icache_user_range(vma,pg,adr,len) do { } while (0) | 25 | |
24 | #define flush_cache_vmap(start, end) do { } while (0) | 26 | static inline void flush_cache_range(struct vm_area_struct *vma, |
25 | #define flush_cache_vunmap(start, end) do { } while (0) | 27 | unsigned long start, |
28 | unsigned long end) | ||
29 | { | ||
30 | } | ||
31 | |||
32 | static inline void flush_cache_page(struct vm_area_struct *vma, | ||
33 | unsigned long vmaddr, | ||
34 | unsigned long pfn) | ||
35 | { | ||
36 | } | ||
37 | |||
38 | static inline void flush_dcache_page(struct page *page) | ||
39 | { | ||
40 | } | ||
41 | |||
42 | static inline void flush_dcache_mmap_lock(struct address_space *mapping) | ||
43 | { | ||
44 | } | ||
45 | |||
46 | static inline void flush_dcache_mmap_unlock(struct address_space *mapping) | ||
47 | { | ||
48 | } | ||
49 | |||
50 | static inline void flush_icache_range(unsigned long start, unsigned long end) | ||
51 | { | ||
52 | } | ||
53 | |||
54 | static inline void flush_icache_page(struct vm_area_struct *vma, | ||
55 | struct page *page) | ||
56 | { | ||
57 | } | ||
58 | |||
59 | static inline void flush_icache_user_range(struct vm_area_struct *vma, | ||
60 | struct page *page, | ||
61 | unsigned long addr, int len) | ||
62 | { | ||
63 | } | ||
64 | |||
65 | static inline void flush_cache_vmap(unsigned long start, unsigned long end) | ||
66 | { | ||
67 | } | ||
68 | |||
69 | static inline void flush_cache_vunmap(unsigned long start, unsigned long end) | ||
70 | { | ||
71 | } | ||
26 | 72 | ||
27 | #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ | 73 | #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ |
28 | do { \ | 74 | do { \ |
diff --git a/include/linux/bits.h b/include/linux/bits.h index 2b7b532c1d51..669d69441a62 100644 --- a/include/linux/bits.h +++ b/include/linux/bits.h | |||
@@ -1,13 +1,15 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #ifndef __LINUX_BITS_H | 2 | #ifndef __LINUX_BITS_H |
3 | #define __LINUX_BITS_H | 3 | #define __LINUX_BITS_H |
4 | |||
5 | #include <linux/const.h> | ||
4 | #include <asm/bitsperlong.h> | 6 | #include <asm/bitsperlong.h> |
5 | 7 | ||
6 | #define BIT(nr) (1UL << (nr)) | 8 | #define BIT(nr) (UL(1) << (nr)) |
7 | #define BIT_ULL(nr) (1ULL << (nr)) | 9 | #define BIT_ULL(nr) (ULL(1) << (nr)) |
8 | #define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) | 10 | #define BIT_MASK(nr) (UL(1) << ((nr) % BITS_PER_LONG)) |
9 | #define BIT_WORD(nr) ((nr) / BITS_PER_LONG) | 11 | #define BIT_WORD(nr) ((nr) / BITS_PER_LONG) |
10 | #define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG)) | 12 | #define BIT_ULL_MASK(nr) (ULL(1) << ((nr) % BITS_PER_LONG_LONG)) |
11 | #define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG) | 13 | #define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG) |
12 | #define BITS_PER_BYTE 8 | 14 | #define BITS_PER_BYTE 8 |
13 | 15 | ||
@@ -17,10 +19,11 @@ | |||
17 | * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. | 19 | * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. |
18 | */ | 20 | */ |
19 | #define GENMASK(h, l) \ | 21 | #define GENMASK(h, l) \ |
20 | (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) | 22 | (((~UL(0)) - (UL(1) << (l)) + 1) & \ |
23 | (~UL(0) >> (BITS_PER_LONG - 1 - (h)))) | ||
21 | 24 | ||
22 | #define GENMASK_ULL(h, l) \ | 25 | #define GENMASK_ULL(h, l) \ |
23 | (((~0ULL) - (1ULL << (l)) + 1) & \ | 26 | (((~ULL(0)) - (ULL(1) << (l)) + 1) & \ |
24 | (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) | 27 | (~ULL(0) >> (BITS_PER_LONG_LONG - 1 - (h)))) |
25 | 28 | ||
26 | #endif /* __LINUX_BITS_H */ | 29 | #endif /* __LINUX_BITS_H */ |
diff --git a/include/linux/coda.h b/include/linux/coda.h index d30209b9cef8..0ca0c83fdb1c 100644 --- a/include/linux/coda.h +++ b/include/linux/coda.h | |||
@@ -58,8 +58,7 @@ Mellon the rights to redistribute these changes without encumbrance. | |||
58 | #ifndef _CODA_HEADER_ | 58 | #ifndef _CODA_HEADER_ |
59 | #define _CODA_HEADER_ | 59 | #define _CODA_HEADER_ |
60 | 60 | ||
61 | #if defined(__linux__) | ||
62 | typedef unsigned long long u_quad_t; | 61 | typedef unsigned long long u_quad_t; |
63 | #endif | 62 | |
64 | #include <uapi/linux/coda.h> | 63 | #include <uapi/linux/coda.h> |
65 | #endif | 64 | #endif |
diff --git a/include/linux/compat.h b/include/linux/compat.h index ebddcb6cfcf8..16dafd9f4b86 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h | |||
@@ -138,8 +138,7 @@ typedef struct { | |||
138 | compat_sigset_word sig[_COMPAT_NSIG_WORDS]; | 138 | compat_sigset_word sig[_COMPAT_NSIG_WORDS]; |
139 | } compat_sigset_t; | 139 | } compat_sigset_t; |
140 | 140 | ||
141 | int set_compat_user_sigmask(const compat_sigset_t __user *usigmask, | 141 | int set_compat_user_sigmask(const compat_sigset_t __user *umask, |
142 | sigset_t *set, sigset_t *oldset, | ||
143 | size_t sigsetsize); | 142 | size_t sigsetsize); |
144 | 143 | ||
145 | struct compat_sigaction { | 144 | struct compat_sigaction { |
diff --git a/include/linux/io.h b/include/linux/io.h index 9876e5801a9d..accac822336a 100644 --- a/include/linux/io.h +++ b/include/linux/io.h | |||
@@ -33,6 +33,7 @@ static inline int ioremap_page_range(unsigned long addr, unsigned long end, | |||
33 | 33 | ||
34 | #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP | 34 | #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP |
35 | void __init ioremap_huge_init(void); | 35 | void __init ioremap_huge_init(void); |
36 | int arch_ioremap_p4d_supported(void); | ||
36 | int arch_ioremap_pud_supported(void); | 37 | int arch_ioremap_pud_supported(void); |
37 | int arch_ioremap_pmd_supported(void); | 38 | int arch_ioremap_pmd_supported(void); |
38 | #else | 39 | #else |
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 0c9bc231107f..4fa360a13c1e 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
@@ -88,6 +88,8 @@ | |||
88 | */ | 88 | */ |
89 | #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) | 89 | #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) |
90 | 90 | ||
91 | #define typeof_member(T, m) typeof(((T*)0)->m) | ||
92 | |||
91 | #define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP | 93 | #define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP |
92 | 94 | ||
93 | #define DIV_ROUND_DOWN_ULL(ll, d) \ | 95 | #define DIV_ROUND_DOWN_ULL(ll, d) \ |
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 443d9800ca3f..04bdaf01112c 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h | |||
@@ -458,4 +458,23 @@ static inline bool is_kprobe_optinsn_slot(unsigned long addr) | |||
458 | } | 458 | } |
459 | #endif | 459 | #endif |
460 | 460 | ||
461 | /* Returns true if kprobes handled the fault */ | ||
462 | static nokprobe_inline bool kprobe_page_fault(struct pt_regs *regs, | ||
463 | unsigned int trap) | ||
464 | { | ||
465 | if (!kprobes_built_in()) | ||
466 | return false; | ||
467 | if (user_mode(regs)) | ||
468 | return false; | ||
469 | /* | ||
470 | * To be potentially processing a kprobe fault and to be allowed | ||
471 | * to call kprobe_running(), we have to be non-preemptible. | ||
472 | */ | ||
473 | if (preemptible()) | ||
474 | return false; | ||
475 | if (!kprobe_running()) | ||
476 | return false; | ||
477 | return kprobe_fault_handler(regs, trap); | ||
478 | } | ||
479 | |||
461 | #endif /* _LINUX_KPROBES_H */ | 480 | #endif /* _LINUX_KPROBES_H */ |
diff --git a/include/linux/lz4.h b/include/linux/lz4.h index 394e3d9213b8..b16e15b9587a 100644 --- a/include/linux/lz4.h +++ b/include/linux/lz4.h | |||
@@ -278,7 +278,7 @@ int LZ4_decompress_fast(const char *source, char *dest, int originalSize); | |||
278 | * @compressedSize: is the precise full size of the compressed block | 278 | * @compressedSize: is the precise full size of the compressed block |
279 | * @maxDecompressedSize: is the size of 'dest' buffer | 279 | * @maxDecompressedSize: is the size of 'dest' buffer |
280 | * | 280 | * |
281 | * Decompresses data fom 'source' into 'dest'. | 281 | * Decompresses data from 'source' into 'dest'. |
282 | * If the source stream is detected malformed, the function will | 282 | * If the source stream is detected malformed, the function will |
283 | * stop decoding and return a negative result. | 283 | * stop decoding and return a negative result. |
284 | * This function is protected against buffer overflow exploits, | 284 | * This function is protected against buffer overflow exploits, |
@@ -522,7 +522,7 @@ int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode, | |||
522 | const char *dictionary, int dictSize); | 522 | const char *dictionary, int dictSize); |
523 | 523 | ||
524 | /** | 524 | /** |
525 | * LZ4_decompress_fast_continue() - Decompress blocks in streaming mode | 525 | * LZ4_decompress_safe_continue() - Decompress blocks in streaming mode |
526 | * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure | 526 | * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure |
527 | * @source: source address of the compressed data | 527 | * @source: source address of the compressed data |
528 | * @dest: output buffer address of the uncompressed data | 528 | * @dest: output buffer address of the uncompressed data |
@@ -530,7 +530,7 @@ int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode, | |||
530 | * @compressedSize: is the precise full size of the compressed block | 530 | * @compressedSize: is the precise full size of the compressed block |
531 | * @maxDecompressedSize: is the size of 'dest' buffer | 531 | * @maxDecompressedSize: is the size of 'dest' buffer |
532 | * | 532 | * |
533 | * These decoding function allows decompression of multiple blocks | 533 | * This decoding function allows decompression of multiple blocks |
534 | * in "streaming" mode. | 534 | * in "streaming" mode. |
535 | * Previously decoded blocks *must* remain available at the memory position | 535 | * Previously decoded blocks *must* remain available at the memory position |
536 | * where they were decoded (up to 64 KB) | 536 | * where they were decoded (up to 64 KB) |
@@ -569,7 +569,7 @@ int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode, | |||
569 | * which must be already allocated with 'originalSize' bytes | 569 | * which must be already allocated with 'originalSize' bytes |
570 | * @originalSize: is the original and therefore uncompressed size | 570 | * @originalSize: is the original and therefore uncompressed size |
571 | * | 571 | * |
572 | * These decoding function allows decompression of multiple blocks | 572 | * This decoding function allows decompression of multiple blocks |
573 | * in "streaming" mode. | 573 | * in "streaming" mode. |
574 | * Previously decoded blocks *must* remain available at the memory position | 574 | * Previously decoded blocks *must* remain available at the memory position |
575 | * where they were decoded (up to 64 KB) | 575 | * where they were decoded (up to 64 KB) |
@@ -610,10 +610,10 @@ int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode, | |||
610 | * @dictStart: pointer to the start of the dictionary in memory | 610 | * @dictStart: pointer to the start of the dictionary in memory |
611 | * @dictSize: size of dictionary | 611 | * @dictSize: size of dictionary |
612 | * | 612 | * |
613 | * These decoding function works the same as | 613 | * This decoding function works the same as |
614 | * a combination of LZ4_setStreamDecode() followed by | 614 | * a combination of LZ4_setStreamDecode() followed by |
615 | * LZ4_decompress_safe_continue() | 615 | * LZ4_decompress_safe_continue() |
616 | * It is stand-alone, and don'tn eed a LZ4_streamDecode_t structure. | 616 | * It is stand-alone, and doesn't need an LZ4_streamDecode_t structure. |
617 | * | 617 | * |
618 | * Return: number of bytes decompressed into destination buffer | 618 | * Return: number of bytes decompressed into destination buffer |
619 | * (necessarily <= maxDecompressedSize) | 619 | * (necessarily <= maxDecompressedSize) |
@@ -633,10 +633,10 @@ int LZ4_decompress_safe_usingDict(const char *source, char *dest, | |||
633 | * @dictStart: pointer to the start of the dictionary in memory | 633 | * @dictStart: pointer to the start of the dictionary in memory |
634 | * @dictSize: size of dictionary | 634 | * @dictSize: size of dictionary |
635 | * | 635 | * |
636 | * These decoding function works the same as | 636 | * This decoding function works the same as |
637 | * a combination of LZ4_setStreamDecode() followed by | 637 | * a combination of LZ4_setStreamDecode() followed by |
638 | * LZ4_decompress_safe_continue() | 638 | * LZ4_decompress_fast_continue() |
639 | * It is stand-alone, and don'tn eed a LZ4_streamDecode_t structure. | 639 | * It is stand-alone, and doesn't need an LZ4_streamDecode_t structure. |
640 | * | 640 | * |
641 | * Return: number of bytes decompressed into destination buffer | 641 | * Return: number of bytes decompressed into destination buffer |
642 | * (necessarily <= maxDecompressedSize) | 642 | * (necessarily <= maxDecompressedSize) |
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index ae892eef8b82..988fde33cd7f 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h | |||
@@ -324,7 +324,7 @@ static inline void pgdat_resize_init(struct pglist_data *pgdat) {} | |||
324 | extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages); | 324 | extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages); |
325 | extern void try_offline_node(int nid); | 325 | extern void try_offline_node(int nid); |
326 | extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); | 326 | extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); |
327 | extern void remove_memory(int nid, u64 start, u64 size); | 327 | extern int remove_memory(int nid, u64 start, u64 size); |
328 | extern void __remove_memory(int nid, u64 start, u64 size); | 328 | extern void __remove_memory(int nid, u64 start, u64 size); |
329 | 329 | ||
330 | #else | 330 | #else |
@@ -341,7 +341,11 @@ static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages) | |||
341 | return -EINVAL; | 341 | return -EINVAL; |
342 | } | 342 | } |
343 | 343 | ||
344 | static inline void remove_memory(int nid, u64 start, u64 size) {} | 344 | static inline int remove_memory(int nid, u64 start, u64 size) |
345 | { | ||
346 | return -EBUSY; | ||
347 | } | ||
348 | |||
345 | static inline void __remove_memory(int nid, u64 start, u64 size) {} | 349 | static inline void __remove_memory(int nid, u64 start, u64 size) {} |
346 | #endif /* CONFIG_MEMORY_HOTREMOVE */ | 350 | #endif /* CONFIG_MEMORY_HOTREMOVE */ |
347 | 351 | ||
diff --git a/include/linux/mm.h b/include/linux/mm.h index 0389c34ac529..bd6512559bed 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -547,7 +547,7 @@ static inline void vma_set_anonymous(struct vm_area_struct *vma) | |||
547 | struct mmu_gather; | 547 | struct mmu_gather; |
548 | struct inode; | 548 | struct inode; |
549 | 549 | ||
550 | #if !defined(__HAVE_ARCH_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE) | 550 | #if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE) |
551 | static inline int pmd_devmap(pmd_t pmd) | 551 | static inline int pmd_devmap(pmd_t pmd) |
552 | { | 552 | { |
553 | return 0; | 553 | return 0; |
@@ -956,41 +956,28 @@ static inline bool put_devmap_managed_page(struct page *page) | |||
956 | return false; | 956 | return false; |
957 | } | 957 | } |
958 | 958 | ||
959 | static inline bool is_device_private_page(const struct page *page) | ||
960 | { | ||
961 | return is_zone_device_page(page) && | ||
962 | page->pgmap->type == MEMORY_DEVICE_PRIVATE; | ||
963 | } | ||
964 | |||
965 | #ifdef CONFIG_PCI_P2PDMA | ||
966 | static inline bool is_pci_p2pdma_page(const struct page *page) | ||
967 | { | ||
968 | return is_zone_device_page(page) && | ||
969 | page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA; | ||
970 | } | ||
971 | #else /* CONFIG_PCI_P2PDMA */ | ||
972 | static inline bool is_pci_p2pdma_page(const struct page *page) | ||
973 | { | ||
974 | return false; | ||
975 | } | ||
976 | #endif /* CONFIG_PCI_P2PDMA */ | ||
977 | |||
978 | #else /* CONFIG_DEV_PAGEMAP_OPS */ | 959 | #else /* CONFIG_DEV_PAGEMAP_OPS */ |
979 | static inline bool put_devmap_managed_page(struct page *page) | 960 | static inline bool put_devmap_managed_page(struct page *page) |
980 | { | 961 | { |
981 | return false; | 962 | return false; |
982 | } | 963 | } |
964 | #endif /* CONFIG_DEV_PAGEMAP_OPS */ | ||
983 | 965 | ||
984 | static inline bool is_device_private_page(const struct page *page) | 966 | static inline bool is_device_private_page(const struct page *page) |
985 | { | 967 | { |
986 | return false; | 968 | return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) && |
969 | IS_ENABLED(CONFIG_DEVICE_PRIVATE) && | ||
970 | is_zone_device_page(page) && | ||
971 | page->pgmap->type == MEMORY_DEVICE_PRIVATE; | ||
987 | } | 972 | } |
988 | 973 | ||
989 | static inline bool is_pci_p2pdma_page(const struct page *page) | 974 | static inline bool is_pci_p2pdma_page(const struct page *page) |
990 | { | 975 | { |
991 | return false; | 976 | return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) && |
977 | IS_ENABLED(CONFIG_PCI_P2PDMA) && | ||
978 | is_zone_device_page(page) && | ||
979 | page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA; | ||
992 | } | 980 | } |
993 | #endif /* CONFIG_DEV_PAGEMAP_OPS */ | ||
994 | 981 | ||
995 | /* 127: arbitrary random number, small enough to assemble well */ | 982 | /* 127: arbitrary random number, small enough to assemble well */ |
996 | #define page_ref_zero_or_close_to_overflow(page) \ | 983 | #define page_ref_zero_or_close_to_overflow(page) \ |
@@ -1556,6 +1543,10 @@ long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, | |||
1556 | int get_user_pages_fast(unsigned long start, int nr_pages, | 1543 | int get_user_pages_fast(unsigned long start, int nr_pages, |
1557 | unsigned int gup_flags, struct page **pages); | 1544 | unsigned int gup_flags, struct page **pages); |
1558 | 1545 | ||
1546 | int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc); | ||
1547 | int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc, | ||
1548 | struct task_struct *task, bool bypass_rlim); | ||
1549 | |||
1559 | /* Container for pinned pfns / pages */ | 1550 | /* Container for pinned pfns / pages */ |
1560 | struct frame_vector { | 1551 | struct frame_vector { |
1561 | unsigned int nr_allocated; /* Number of frames we have space for */ | 1552 | unsigned int nr_allocated; /* Number of frames we have space for */ |
@@ -1763,7 +1754,7 @@ static inline void sync_mm_rss(struct mm_struct *mm) | |||
1763 | } | 1754 | } |
1764 | #endif | 1755 | #endif |
1765 | 1756 | ||
1766 | #ifndef __HAVE_ARCH_PTE_DEVMAP | 1757 | #ifndef CONFIG_ARCH_HAS_PTE_DEVMAP |
1767 | static inline int pte_devmap(pte_t pte) | 1758 | static inline int pte_devmap(pte_t pte) |
1768 | { | 1759 | { |
1769 | return 0; | 1760 | return 0; |
@@ -2767,7 +2758,13 @@ extern int randomize_va_space; | |||
2767 | #endif | 2758 | #endif |
2768 | 2759 | ||
2769 | const char * arch_vma_name(struct vm_area_struct *vma); | 2760 | const char * arch_vma_name(struct vm_area_struct *vma); |
2761 | #ifdef CONFIG_MMU | ||
2770 | void print_vma_addr(char *prefix, unsigned long rip); | 2762 | void print_vma_addr(char *prefix, unsigned long rip); |
2763 | #else | ||
2764 | static inline void print_vma_addr(char *prefix, unsigned long rip) | ||
2765 | { | ||
2766 | } | ||
2767 | #endif | ||
2771 | 2768 | ||
2772 | void *sparse_buffer_alloc(unsigned long size); | 2769 | void *sparse_buffer_alloc(unsigned long size); |
2773 | struct page *sparse_mem_map_populate(unsigned long pnum, int nid, | 2770 | struct page *sparse_mem_map_populate(unsigned long pnum, int nid, |
diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h index 01e8037023f7..2d9148221e9a 100644 --- a/include/linux/pfn_t.h +++ b/include/linux/pfn_t.h | |||
@@ -97,7 +97,7 @@ static inline pud_t pfn_t_pud(pfn_t pfn, pgprot_t pgprot) | |||
97 | #endif | 97 | #endif |
98 | #endif | 98 | #endif |
99 | 99 | ||
100 | #ifdef __HAVE_ARCH_PTE_DEVMAP | 100 | #ifdef CONFIG_ARCH_HAS_PTE_DEVMAP |
101 | static inline bool pfn_t_devmap(pfn_t pfn) | 101 | static inline bool pfn_t_devmap(pfn_t pfn) |
102 | { | 102 | { |
103 | const u64 flags = PFN_DEV|PFN_MAP; | 103 | const u64 flags = PFN_DEV|PFN_MAP; |
@@ -115,7 +115,7 @@ pmd_t pmd_mkdevmap(pmd_t pmd); | |||
115 | defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) | 115 | defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) |
116 | pud_t pud_mkdevmap(pud_t pud); | 116 | pud_t pud_mkdevmap(pud_t pud); |
117 | #endif | 117 | #endif |
118 | #endif /* __HAVE_ARCH_PTE_DEVMAP */ | 118 | #endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */ |
119 | 119 | ||
120 | #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL | 120 | #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL |
121 | static inline bool pfn_t_special(pfn_t pfn) | 121 | static inline bool pfn_t_special(pfn_t pfn) |
diff --git a/include/linux/pid.h b/include/linux/pid.h index 1484db6ca8d1..2a83e434db9d 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h | |||
@@ -4,6 +4,7 @@ | |||
4 | 4 | ||
5 | #include <linux/rculist.h> | 5 | #include <linux/rculist.h> |
6 | #include <linux/wait.h> | 6 | #include <linux/wait.h> |
7 | #include <linux/refcount.h> | ||
7 | 8 | ||
8 | enum pid_type | 9 | enum pid_type |
9 | { | 10 | { |
@@ -57,7 +58,7 @@ struct upid { | |||
57 | 58 | ||
58 | struct pid | 59 | struct pid |
59 | { | 60 | { |
60 | atomic_t count; | 61 | refcount_t count; |
61 | unsigned int level; | 62 | unsigned int level; |
62 | /* lists of tasks that use this pid */ | 63 | /* lists of tasks that use this pid */ |
63 | struct hlist_head tasks[PIDTYPE_MAX]; | 64 | struct hlist_head tasks[PIDTYPE_MAX]; |
@@ -74,7 +75,7 @@ extern const struct file_operations pidfd_fops; | |||
74 | static inline struct pid *get_pid(struct pid *pid) | 75 | static inline struct pid *get_pid(struct pid *pid) |
75 | { | 76 | { |
76 | if (pid) | 77 | if (pid) |
77 | atomic_inc(&pid->count); | 78 | refcount_inc(&pid->count); |
78 | return pid; | 79 | return pid; |
79 | } | 80 | } |
80 | 81 | ||
diff --git a/include/linux/poison.h b/include/linux/poison.h index d6d980a681c7..df34330b4e34 100644 --- a/include/linux/poison.h +++ b/include/linux/poison.h | |||
@@ -21,7 +21,7 @@ | |||
21 | * non-initialized list entries. | 21 | * non-initialized list entries. |
22 | */ | 22 | */ |
23 | #define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA) | 23 | #define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA) |
24 | #define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA) | 24 | #define LIST_POISON2 ((void *) 0x122 + POISON_POINTER_DELTA) |
25 | 25 | ||
26 | /********** include/linux/timer.h **********/ | 26 | /********** include/linux/timer.h **********/ |
27 | /* | 27 | /* |
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h index e6337fce08f2..1fd61a9af45c 100644 --- a/include/linux/rbtree.h +++ b/include/linux/rbtree.h | |||
@@ -32,25 +32,9 @@ struct rb_root { | |||
32 | struct rb_node *rb_node; | 32 | struct rb_node *rb_node; |
33 | }; | 33 | }; |
34 | 34 | ||
35 | /* | ||
36 | * Leftmost-cached rbtrees. | ||
37 | * | ||
38 | * We do not cache the rightmost node based on footprint | ||
39 | * size vs number of potential users that could benefit | ||
40 | * from O(1) rb_last(). Just not worth it, users that want | ||
41 | * this feature can always implement the logic explicitly. | ||
42 | * Furthermore, users that want to cache both pointers may | ||
43 | * find it a bit asymmetric, but that's ok. | ||
44 | */ | ||
45 | struct rb_root_cached { | ||
46 | struct rb_root rb_root; | ||
47 | struct rb_node *rb_leftmost; | ||
48 | }; | ||
49 | |||
50 | #define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3)) | 35 | #define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3)) |
51 | 36 | ||
52 | #define RB_ROOT (struct rb_root) { NULL, } | 37 | #define RB_ROOT (struct rb_root) { NULL, } |
53 | #define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL } | ||
54 | #define rb_entry(ptr, type, member) container_of(ptr, type, member) | 38 | #define rb_entry(ptr, type, member) container_of(ptr, type, member) |
55 | 39 | ||
56 | #define RB_EMPTY_ROOT(root) (READ_ONCE((root)->rb_node) == NULL) | 40 | #define RB_EMPTY_ROOT(root) (READ_ONCE((root)->rb_node) == NULL) |
@@ -72,12 +56,6 @@ extern struct rb_node *rb_prev(const struct rb_node *); | |||
72 | extern struct rb_node *rb_first(const struct rb_root *); | 56 | extern struct rb_node *rb_first(const struct rb_root *); |
73 | extern struct rb_node *rb_last(const struct rb_root *); | 57 | extern struct rb_node *rb_last(const struct rb_root *); |
74 | 58 | ||
75 | extern void rb_insert_color_cached(struct rb_node *, | ||
76 | struct rb_root_cached *, bool); | ||
77 | extern void rb_erase_cached(struct rb_node *node, struct rb_root_cached *); | ||
78 | /* Same as rb_first(), but O(1) */ | ||
79 | #define rb_first_cached(root) (root)->rb_leftmost | ||
80 | |||
81 | /* Postorder iteration - always visit the parent after its children */ | 59 | /* Postorder iteration - always visit the parent after its children */ |
82 | extern struct rb_node *rb_first_postorder(const struct rb_root *); | 60 | extern struct rb_node *rb_first_postorder(const struct rb_root *); |
83 | extern struct rb_node *rb_next_postorder(const struct rb_node *); | 61 | extern struct rb_node *rb_next_postorder(const struct rb_node *); |
@@ -87,8 +65,6 @@ extern void rb_replace_node(struct rb_node *victim, struct rb_node *new, | |||
87 | struct rb_root *root); | 65 | struct rb_root *root); |
88 | extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new, | 66 | extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new, |
89 | struct rb_root *root); | 67 | struct rb_root *root); |
90 | extern void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new, | ||
91 | struct rb_root_cached *root); | ||
92 | 68 | ||
93 | static inline void rb_link_node(struct rb_node *node, struct rb_node *parent, | 69 | static inline void rb_link_node(struct rb_node *node, struct rb_node *parent, |
94 | struct rb_node **rb_link) | 70 | struct rb_node **rb_link) |
@@ -136,4 +112,50 @@ static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent | |||
136 | typeof(*pos), field); 1; }); \ | 112 | typeof(*pos), field); 1; }); \ |
137 | pos = n) | 113 | pos = n) |
138 | 114 | ||
115 | /* | ||
116 | * Leftmost-cached rbtrees. | ||
117 | * | ||
118 | * We do not cache the rightmost node based on footprint | ||
119 | * size vs number of potential users that could benefit | ||
120 | * from O(1) rb_last(). Just not worth it, users that want | ||
121 | * this feature can always implement the logic explicitly. | ||
122 | * Furthermore, users that want to cache both pointers may | ||
123 | * find it a bit asymmetric, but that's ok. | ||
124 | */ | ||
125 | struct rb_root_cached { | ||
126 | struct rb_root rb_root; | ||
127 | struct rb_node *rb_leftmost; | ||
128 | }; | ||
129 | |||
130 | #define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL } | ||
131 | |||
132 | /* Same as rb_first(), but O(1) */ | ||
133 | #define rb_first_cached(root) (root)->rb_leftmost | ||
134 | |||
135 | static inline void rb_insert_color_cached(struct rb_node *node, | ||
136 | struct rb_root_cached *root, | ||
137 | bool leftmost) | ||
138 | { | ||
139 | if (leftmost) | ||
140 | root->rb_leftmost = node; | ||
141 | rb_insert_color(node, &root->rb_root); | ||
142 | } | ||
143 | |||
144 | static inline void rb_erase_cached(struct rb_node *node, | ||
145 | struct rb_root_cached *root) | ||
146 | { | ||
147 | if (root->rb_leftmost == node) | ||
148 | root->rb_leftmost = rb_next(node); | ||
149 | rb_erase(node, &root->rb_root); | ||
150 | } | ||
151 | |||
152 | static inline void rb_replace_node_cached(struct rb_node *victim, | ||
153 | struct rb_node *new, | ||
154 | struct rb_root_cached *root) | ||
155 | { | ||
156 | if (root->rb_leftmost == victim) | ||
157 | root->rb_leftmost = new; | ||
158 | rb_replace_node(victim, new, &root->rb_root); | ||
159 | } | ||
160 | |||
139 | #endif /* _LINUX_RBTREE_H */ | 161 | #endif /* _LINUX_RBTREE_H */ |
diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h index 0f902ccb48b0..179faab29f52 100644 --- a/include/linux/rbtree_augmented.h +++ b/include/linux/rbtree_augmented.h | |||
@@ -30,10 +30,9 @@ struct rb_augment_callbacks { | |||
30 | void (*rotate)(struct rb_node *old, struct rb_node *new); | 30 | void (*rotate)(struct rb_node *old, struct rb_node *new); |
31 | }; | 31 | }; |
32 | 32 | ||
33 | extern void __rb_insert_augmented(struct rb_node *node, | 33 | extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root, |
34 | struct rb_root *root, | ||
35 | bool newleft, struct rb_node **leftmost, | ||
36 | void (*augment_rotate)(struct rb_node *old, struct rb_node *new)); | 34 | void (*augment_rotate)(struct rb_node *old, struct rb_node *new)); |
35 | |||
37 | /* | 36 | /* |
38 | * Fixup the rbtree and update the augmented information when rebalancing. | 37 | * Fixup the rbtree and update the augmented information when rebalancing. |
39 | * | 38 | * |
@@ -48,7 +47,7 @@ static inline void | |||
48 | rb_insert_augmented(struct rb_node *node, struct rb_root *root, | 47 | rb_insert_augmented(struct rb_node *node, struct rb_root *root, |
49 | const struct rb_augment_callbacks *augment) | 48 | const struct rb_augment_callbacks *augment) |
50 | { | 49 | { |
51 | __rb_insert_augmented(node, root, false, NULL, augment->rotate); | 50 | __rb_insert_augmented(node, root, augment->rotate); |
52 | } | 51 | } |
53 | 52 | ||
54 | static inline void | 53 | static inline void |
@@ -56,8 +55,9 @@ rb_insert_augmented_cached(struct rb_node *node, | |||
56 | struct rb_root_cached *root, bool newleft, | 55 | struct rb_root_cached *root, bool newleft, |
57 | const struct rb_augment_callbacks *augment) | 56 | const struct rb_augment_callbacks *augment) |
58 | { | 57 | { |
59 | __rb_insert_augmented(node, &root->rb_root, | 58 | if (newleft) |
60 | newleft, &root->rb_leftmost, augment->rotate); | 59 | root->rb_leftmost = node; |
60 | rb_insert_augmented(node, &root->rb_root, augment); | ||
61 | } | 61 | } |
62 | 62 | ||
63 | #define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \ | 63 | #define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \ |
@@ -150,7 +150,6 @@ extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root, | |||
150 | 150 | ||
151 | static __always_inline struct rb_node * | 151 | static __always_inline struct rb_node * |
152 | __rb_erase_augmented(struct rb_node *node, struct rb_root *root, | 152 | __rb_erase_augmented(struct rb_node *node, struct rb_root *root, |
153 | struct rb_node **leftmost, | ||
154 | const struct rb_augment_callbacks *augment) | 153 | const struct rb_augment_callbacks *augment) |
155 | { | 154 | { |
156 | struct rb_node *child = node->rb_right; | 155 | struct rb_node *child = node->rb_right; |
@@ -158,9 +157,6 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root, | |||
158 | struct rb_node *parent, *rebalance; | 157 | struct rb_node *parent, *rebalance; |
159 | unsigned long pc; | 158 | unsigned long pc; |
160 | 159 | ||
161 | if (leftmost && node == *leftmost) | ||
162 | *leftmost = rb_next(node); | ||
163 | |||
164 | if (!tmp) { | 160 | if (!tmp) { |
165 | /* | 161 | /* |
166 | * Case 1: node to erase has no more than 1 child (easy!) | 162 | * Case 1: node to erase has no more than 1 child (easy!) |
@@ -260,8 +256,7 @@ static __always_inline void | |||
260 | rb_erase_augmented(struct rb_node *node, struct rb_root *root, | 256 | rb_erase_augmented(struct rb_node *node, struct rb_root *root, |
261 | const struct rb_augment_callbacks *augment) | 257 | const struct rb_augment_callbacks *augment) |
262 | { | 258 | { |
263 | struct rb_node *rebalance = __rb_erase_augmented(node, root, | 259 | struct rb_node *rebalance = __rb_erase_augmented(node, root, augment); |
264 | NULL, augment); | ||
265 | if (rebalance) | 260 | if (rebalance) |
266 | __rb_erase_color(rebalance, root, augment->rotate); | 261 | __rb_erase_color(rebalance, root, augment->rotate); |
267 | } | 262 | } |
@@ -270,11 +265,9 @@ static __always_inline void | |||
270 | rb_erase_augmented_cached(struct rb_node *node, struct rb_root_cached *root, | 265 | rb_erase_augmented_cached(struct rb_node *node, struct rb_root_cached *root, |
271 | const struct rb_augment_callbacks *augment) | 266 | const struct rb_augment_callbacks *augment) |
272 | { | 267 | { |
273 | struct rb_node *rebalance = __rb_erase_augmented(node, &root->rb_root, | 268 | if (root->rb_leftmost == node) |
274 | &root->rb_leftmost, | 269 | root->rb_leftmost = rb_next(node); |
275 | augment); | 270 | rb_erase_augmented(node, &root->rb_root, augment); |
276 | if (rebalance) | ||
277 | __rb_erase_color(rebalance, &root->rb_root, augment->rotate); | ||
278 | } | 271 | } |
279 | 272 | ||
280 | #endif /* _LINUX_RBTREE_AUGMENTED_H */ | 273 | #endif /* _LINUX_RBTREE_AUGMENTED_H */ |
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index 532458698bde..efd8ce7675ed 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h | |||
@@ -15,10 +15,10 @@ | |||
15 | */ | 15 | */ |
16 | 16 | ||
17 | struct sighand_struct { | 17 | struct sighand_struct { |
18 | refcount_t count; | ||
19 | struct k_sigaction action[_NSIG]; | ||
20 | spinlock_t siglock; | 18 | spinlock_t siglock; |
19 | refcount_t count; | ||
21 | wait_queue_head_t signalfd_wqh; | 20 | wait_queue_head_t signalfd_wqh; |
21 | struct k_sigaction action[_NSIG]; | ||
22 | }; | 22 | }; |
23 | 23 | ||
24 | /* | 24 | /* |
@@ -420,7 +420,6 @@ void task_join_group_stop(struct task_struct *task); | |||
420 | static inline void set_restore_sigmask(void) | 420 | static inline void set_restore_sigmask(void) |
421 | { | 421 | { |
422 | set_thread_flag(TIF_RESTORE_SIGMASK); | 422 | set_thread_flag(TIF_RESTORE_SIGMASK); |
423 | WARN_ON(!test_thread_flag(TIF_SIGPENDING)); | ||
424 | } | 423 | } |
425 | 424 | ||
426 | static inline void clear_tsk_restore_sigmask(struct task_struct *task) | 425 | static inline void clear_tsk_restore_sigmask(struct task_struct *task) |
@@ -451,7 +450,6 @@ static inline bool test_and_clear_restore_sigmask(void) | |||
451 | static inline void set_restore_sigmask(void) | 450 | static inline void set_restore_sigmask(void) |
452 | { | 451 | { |
453 | current->restore_sigmask = true; | 452 | current->restore_sigmask = true; |
454 | WARN_ON(!test_thread_flag(TIF_SIGPENDING)); | ||
455 | } | 453 | } |
456 | static inline void clear_tsk_restore_sigmask(struct task_struct *task) | 454 | static inline void clear_tsk_restore_sigmask(struct task_struct *task) |
457 | { | 455 | { |
@@ -484,6 +482,16 @@ static inline void restore_saved_sigmask(void) | |||
484 | __set_current_blocked(¤t->saved_sigmask); | 482 | __set_current_blocked(¤t->saved_sigmask); |
485 | } | 483 | } |
486 | 484 | ||
485 | extern int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize); | ||
486 | |||
487 | static inline void restore_saved_sigmask_unless(bool interrupted) | ||
488 | { | ||
489 | if (interrupted) | ||
490 | WARN_ON(!test_thread_flag(TIF_SIGPENDING)); | ||
491 | else | ||
492 | restore_saved_sigmask(); | ||
493 | } | ||
494 | |||
487 | static inline sigset_t *sigmask_to_save(void) | 495 | static inline sigset_t *sigmask_to_save(void) |
488 | { | 496 | { |
489 | sigset_t *res = ¤t->blocked; | 497 | sigset_t *res = ¤t->blocked; |
diff --git a/include/linux/signal.h b/include/linux/signal.h index 78c2bb376954..b5d99482d3fe 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h | |||
@@ -273,10 +273,6 @@ extern int group_send_sig_info(int sig, struct kernel_siginfo *info, | |||
273 | struct task_struct *p, enum pid_type type); | 273 | struct task_struct *p, enum pid_type type); |
274 | extern int __group_send_sig_info(int, struct kernel_siginfo *, struct task_struct *); | 274 | extern int __group_send_sig_info(int, struct kernel_siginfo *, struct task_struct *); |
275 | extern int sigprocmask(int, sigset_t *, sigset_t *); | 275 | extern int sigprocmask(int, sigset_t *, sigset_t *); |
276 | extern int set_user_sigmask(const sigset_t __user *usigmask, sigset_t *set, | ||
277 | sigset_t *oldset, size_t sigsetsize); | ||
278 | extern void restore_user_sigmask(const void __user *usigmask, | ||
279 | sigset_t *sigsaved, bool interrupted); | ||
280 | extern void set_current_blocked(sigset_t *); | 276 | extern void set_current_blocked(sigset_t *); |
281 | extern void __set_current_blocked(const sigset_t *); | 277 | extern void __set_current_blocked(const sigset_t *); |
282 | extern int show_unhandled_signals; | 278 | extern int show_unhandled_signals; |
diff --git a/include/linux/swapops.h b/include/linux/swapops.h index 15bdb6fe71e5..877fd239b6ff 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h | |||
@@ -6,6 +6,8 @@ | |||
6 | #include <linux/bug.h> | 6 | #include <linux/bug.h> |
7 | #include <linux/mm_types.h> | 7 | #include <linux/mm_types.h> |
8 | 8 | ||
9 | #ifdef CONFIG_MMU | ||
10 | |||
9 | /* | 11 | /* |
10 | * swapcache pages are stored in the swapper_space radix tree. We want to | 12 | * swapcache pages are stored in the swapper_space radix tree. We want to |
11 | * get good packing density in that tree, so the index should be dense in | 13 | * get good packing density in that tree, so the index should be dense in |
@@ -50,13 +52,11 @@ static inline pgoff_t swp_offset(swp_entry_t entry) | |||
50 | return entry.val & SWP_OFFSET_MASK; | 52 | return entry.val & SWP_OFFSET_MASK; |
51 | } | 53 | } |
52 | 54 | ||
53 | #ifdef CONFIG_MMU | ||
54 | /* check whether a pte points to a swap entry */ | 55 | /* check whether a pte points to a swap entry */ |
55 | static inline int is_swap_pte(pte_t pte) | 56 | static inline int is_swap_pte(pte_t pte) |
56 | { | 57 | { |
57 | return !pte_none(pte) && !pte_present(pte); | 58 | return !pte_none(pte) && !pte_present(pte); |
58 | } | 59 | } |
59 | #endif | ||
60 | 60 | ||
61 | /* | 61 | /* |
62 | * Convert the arch-dependent pte representation of a swp_entry_t into an | 62 | * Convert the arch-dependent pte representation of a swp_entry_t into an |
@@ -360,4 +360,5 @@ static inline int non_swap_entry(swp_entry_t entry) | |||
360 | } | 360 | } |
361 | #endif | 361 | #endif |
362 | 362 | ||
363 | #endif /* CONFIG_MMU */ | ||
363 | #endif /* _LINUX_SWAPOPS_H */ | 364 | #endif /* _LINUX_SWAPOPS_H */ |
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index 8446573cc682..36fb3bbed6b2 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h | |||
@@ -54,13 +54,15 @@ struct linux_binprm; | |||
54 | /* | 54 | /* |
55 | * ptrace report for syscall entry and exit looks identical. | 55 | * ptrace report for syscall entry and exit looks identical. |
56 | */ | 56 | */ |
57 | static inline int ptrace_report_syscall(struct pt_regs *regs) | 57 | static inline int ptrace_report_syscall(struct pt_regs *regs, |
58 | unsigned long message) | ||
58 | { | 59 | { |
59 | int ptrace = current->ptrace; | 60 | int ptrace = current->ptrace; |
60 | 61 | ||
61 | if (!(ptrace & PT_PTRACED)) | 62 | if (!(ptrace & PT_PTRACED)) |
62 | return 0; | 63 | return 0; |
63 | 64 | ||
65 | current->ptrace_message = message; | ||
64 | ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0)); | 66 | ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0)); |
65 | 67 | ||
66 | /* | 68 | /* |
@@ -73,6 +75,7 @@ static inline int ptrace_report_syscall(struct pt_regs *regs) | |||
73 | current->exit_code = 0; | 75 | current->exit_code = 0; |
74 | } | 76 | } |
75 | 77 | ||
78 | current->ptrace_message = 0; | ||
76 | return fatal_signal_pending(current); | 79 | return fatal_signal_pending(current); |
77 | } | 80 | } |
78 | 81 | ||
@@ -98,7 +101,7 @@ static inline int ptrace_report_syscall(struct pt_regs *regs) | |||
98 | static inline __must_check int tracehook_report_syscall_entry( | 101 | static inline __must_check int tracehook_report_syscall_entry( |
99 | struct pt_regs *regs) | 102 | struct pt_regs *regs) |
100 | { | 103 | { |
101 | return ptrace_report_syscall(regs); | 104 | return ptrace_report_syscall(regs, PTRACE_EVENTMSG_SYSCALL_ENTRY); |
102 | } | 105 | } |
103 | 106 | ||
104 | /** | 107 | /** |
@@ -123,7 +126,7 @@ static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step) | |||
123 | if (step) | 126 | if (step) |
124 | user_single_step_report(regs); | 127 | user_single_step_report(regs); |
125 | else | 128 | else |
126 | ptrace_report_syscall(regs); | 129 | ptrace_report_syscall(regs, PTRACE_EVENTMSG_SYSCALL_EXIT); |
127 | } | 130 | } |
128 | 131 | ||
129 | /** | 132 | /** |
diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h index abd238d0f7a4..63b1f506ea67 100644 --- a/include/uapi/asm-generic/mman-common.h +++ b/include/uapi/asm-generic/mman-common.h | |||
@@ -19,15 +19,18 @@ | |||
19 | #define MAP_TYPE 0x0f /* Mask for type of mapping */ | 19 | #define MAP_TYPE 0x0f /* Mask for type of mapping */ |
20 | #define MAP_FIXED 0x10 /* Interpret addr exactly */ | 20 | #define MAP_FIXED 0x10 /* Interpret addr exactly */ |
21 | #define MAP_ANONYMOUS 0x20 /* don't use a file */ | 21 | #define MAP_ANONYMOUS 0x20 /* don't use a file */ |
22 | #ifdef CONFIG_MMAP_ALLOW_UNINITIALIZED | ||
23 | # define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be uninitialized */ | ||
24 | #else | ||
25 | # define MAP_UNINITIALIZED 0x0 /* Don't support this flag */ | ||
26 | #endif | ||
27 | 22 | ||
28 | /* 0x0100 - 0x80000 flags are defined in asm-generic/mman.h */ | 23 | /* 0x0100 - 0x4000 flags are defined in asm-generic/mman.h */ |
24 | #define MAP_POPULATE 0x008000 /* populate (prefault) pagetables */ | ||
25 | #define MAP_NONBLOCK 0x010000 /* do not block on IO */ | ||
26 | #define MAP_STACK 0x020000 /* give out an address that is best suited for process/thread stacks */ | ||
27 | #define MAP_HUGETLB 0x040000 /* create a huge page mapping */ | ||
28 | #define MAP_SYNC 0x080000 /* perform synchronous page faults for the mapping */ | ||
29 | #define MAP_FIXED_NOREPLACE 0x100000 /* MAP_FIXED which doesn't unmap underlying mapping */ | 29 | #define MAP_FIXED_NOREPLACE 0x100000 /* MAP_FIXED which doesn't unmap underlying mapping */ |
30 | 30 | ||
31 | #define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be | ||
32 | * uninitialized */ | ||
33 | |||
31 | /* | 34 | /* |
32 | * Flags for mlock | 35 | * Flags for mlock |
33 | */ | 36 | */ |
diff --git a/include/uapi/asm-generic/mman.h b/include/uapi/asm-generic/mman.h index 653687d9771b..57e8195d0b53 100644 --- a/include/uapi/asm-generic/mman.h +++ b/include/uapi/asm-generic/mman.h | |||
@@ -9,13 +9,11 @@ | |||
9 | #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ | 9 | #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ |
10 | #define MAP_LOCKED 0x2000 /* pages are locked */ | 10 | #define MAP_LOCKED 0x2000 /* pages are locked */ |
11 | #define MAP_NORESERVE 0x4000 /* don't check for reservations */ | 11 | #define MAP_NORESERVE 0x4000 /* don't check for reservations */ |
12 | #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ | ||
13 | #define MAP_NONBLOCK 0x10000 /* do not block on IO */ | ||
14 | #define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ | ||
15 | #define MAP_HUGETLB 0x40000 /* create a huge page mapping */ | ||
16 | #define MAP_SYNC 0x80000 /* perform synchronous page faults for the mapping */ | ||
17 | 12 | ||
18 | /* Bits [26:31] are reserved, see mman-common.h for MAP_HUGETLB usage */ | 13 | /* |
14 | * Bits [26:31] are reserved, see asm-generic/hugetlb_encode.h | ||
15 | * for MAP_HUGETLB usage | ||
16 | */ | ||
19 | 17 | ||
20 | #define MCL_CURRENT 1 /* lock all current mappings */ | 18 | #define MCL_CURRENT 1 /* lock all current mappings */ |
21 | #define MCL_FUTURE 2 /* lock all future mappings */ | 19 | #define MCL_FUTURE 2 /* lock all future mappings */ |
diff --git a/include/uapi/linux/coda.h b/include/uapi/linux/coda.h index 695fade33c64..aa34c2dcae8d 100644 --- a/include/uapi/linux/coda.h +++ b/include/uapi/linux/coda.h | |||
@@ -86,10 +86,6 @@ typedef unsigned long long u_quad_t; | |||
86 | 86 | ||
87 | #define inline | 87 | #define inline |
88 | 88 | ||
89 | struct timespec { | ||
90 | long ts_sec; | ||
91 | long ts_nsec; | ||
92 | }; | ||
93 | #else /* DJGPP but not KERNEL */ | 89 | #else /* DJGPP but not KERNEL */ |
94 | #include <sys/time.h> | 90 | #include <sys/time.h> |
95 | typedef unsigned long long u_quad_t; | 91 | typedef unsigned long long u_quad_t; |
@@ -110,13 +106,6 @@ typedef unsigned long long u_quad_t; | |||
110 | #define cdev_t dev_t | 106 | #define cdev_t dev_t |
111 | #endif | 107 | #endif |
112 | 108 | ||
113 | #ifdef __CYGWIN32__ | ||
114 | struct timespec { | ||
115 | time_t tv_sec; /* seconds */ | ||
116 | long tv_nsec; /* nanoseconds */ | ||
117 | }; | ||
118 | #endif | ||
119 | |||
120 | #ifndef __BIT_TYPES_DEFINED__ | 109 | #ifndef __BIT_TYPES_DEFINED__ |
121 | #define __BIT_TYPES_DEFINED__ | 110 | #define __BIT_TYPES_DEFINED__ |
122 | typedef signed char int8_t; | 111 | typedef signed char int8_t; |
@@ -211,6 +200,11 @@ struct CodaFid { | |||
211 | */ | 200 | */ |
212 | enum coda_vtype { C_VNON, C_VREG, C_VDIR, C_VBLK, C_VCHR, C_VLNK, C_VSOCK, C_VFIFO, C_VBAD }; | 201 | enum coda_vtype { C_VNON, C_VREG, C_VDIR, C_VBLK, C_VCHR, C_VLNK, C_VSOCK, C_VFIFO, C_VBAD }; |
213 | 202 | ||
203 | struct coda_timespec { | ||
204 | int64_t tv_sec; /* seconds */ | ||
205 | long tv_nsec; /* nanoseconds */ | ||
206 | }; | ||
207 | |||
214 | struct coda_vattr { | 208 | struct coda_vattr { |
215 | long va_type; /* vnode type (for create) */ | 209 | long va_type; /* vnode type (for create) */ |
216 | u_short va_mode; /* files access mode and type */ | 210 | u_short va_mode; /* files access mode and type */ |
@@ -220,9 +214,9 @@ struct coda_vattr { | |||
220 | long va_fileid; /* file id */ | 214 | long va_fileid; /* file id */ |
221 | u_quad_t va_size; /* file size in bytes */ | 215 | u_quad_t va_size; /* file size in bytes */ |
222 | long va_blocksize; /* blocksize preferred for i/o */ | 216 | long va_blocksize; /* blocksize preferred for i/o */ |
223 | struct timespec va_atime; /* time of last access */ | 217 | struct coda_timespec va_atime; /* time of last access */ |
224 | struct timespec va_mtime; /* time of last modification */ | 218 | struct coda_timespec va_mtime; /* time of last modification */ |
225 | struct timespec va_ctime; /* time file changed */ | 219 | struct coda_timespec va_ctime; /* time file changed */ |
226 | u_long va_gen; /* generation number of file */ | 220 | u_long va_gen; /* generation number of file */ |
227 | u_long va_flags; /* flags defined for file */ | 221 | u_long va_flags; /* flags defined for file */ |
228 | cdev_t va_rdev; /* device special file represents */ | 222 | cdev_t va_rdev; /* device special file represents */ |
@@ -277,7 +271,8 @@ struct coda_statfs { | |||
277 | #define CODA_STATFS 34 | 271 | #define CODA_STATFS 34 |
278 | #define CODA_STORE 35 | 272 | #define CODA_STORE 35 |
279 | #define CODA_RELEASE 36 | 273 | #define CODA_RELEASE 36 |
280 | #define CODA_NCALLS 37 | 274 | #define CODA_ACCESS_INTENT 37 |
275 | #define CODA_NCALLS 38 | ||
281 | 276 | ||
282 | #define DOWNCALL(opcode) (opcode >= CODA_REPLACE && opcode <= CODA_PURGEFID) | 277 | #define DOWNCALL(opcode) (opcode >= CODA_REPLACE && opcode <= CODA_PURGEFID) |
283 | 278 | ||
@@ -287,7 +282,12 @@ struct coda_statfs { | |||
287 | 282 | ||
288 | #define CIOC_KERNEL_VERSION _IOWR('c', 10, size_t) | 283 | #define CIOC_KERNEL_VERSION _IOWR('c', 10, size_t) |
289 | 284 | ||
290 | #define CODA_KERNEL_VERSION 3 /* 128-bit file identifiers */ | 285 | // CODA_KERNEL_VERSION 0 /* don't care about kernel version number */ |
286 | // CODA_KERNEL_VERSION 1 /* The old venus 4.6 compatible interface */ | ||
287 | // CODA_KERNEL_VERSION 2 /* venus_lookup gets an extra parameter */ | ||
288 | // CODA_KERNEL_VERSION 3 /* 128-bit file identifiers */ | ||
289 | // CODA_KERNEL_VERSION 4 /* 64-bit timespec */ | ||
290 | #define CODA_KERNEL_VERSION 5 /* access intent support */ | ||
291 | 291 | ||
292 | /* | 292 | /* |
293 | * Venus <-> Coda RPC arguments | 293 | * Venus <-> Coda RPC arguments |
@@ -295,8 +295,8 @@ struct coda_statfs { | |||
295 | struct coda_in_hdr { | 295 | struct coda_in_hdr { |
296 | u_int32_t opcode; | 296 | u_int32_t opcode; |
297 | u_int32_t unique; /* Keep multiple outstanding msgs distinct */ | 297 | u_int32_t unique; /* Keep multiple outstanding msgs distinct */ |
298 | pid_t pid; | 298 | __kernel_pid_t pid; |
299 | pid_t pgid; | 299 | __kernel_pid_t pgid; |
300 | vuid_t uid; | 300 | vuid_t uid; |
301 | }; | 301 | }; |
302 | 302 | ||
@@ -642,6 +642,25 @@ struct coda_statfs_out { | |||
642 | struct coda_statfs stat; | 642 | struct coda_statfs stat; |
643 | }; | 643 | }; |
644 | 644 | ||
645 | #define CODA_ACCESS_TYPE_READ 1 | ||
646 | #define CODA_ACCESS_TYPE_WRITE 2 | ||
647 | #define CODA_ACCESS_TYPE_MMAP 3 | ||
648 | #define CODA_ACCESS_TYPE_READ_FINISH 4 | ||
649 | #define CODA_ACCESS_TYPE_WRITE_FINISH 5 | ||
650 | |||
651 | /* coda_access_intent: NO_OUT */ | ||
652 | struct coda_access_intent_in { | ||
653 | struct coda_in_hdr ih; | ||
654 | struct CodaFid VFid; | ||
655 | int count; | ||
656 | int pos; | ||
657 | int type; | ||
658 | }; | ||
659 | |||
660 | struct coda_access_intent_out { | ||
661 | struct coda_out_hdr out; | ||
662 | }; | ||
663 | |||
645 | /* | 664 | /* |
646 | * Occasionally, we don't cache the fid returned by CODA_LOOKUP. | 665 | * Occasionally, we don't cache the fid returned by CODA_LOOKUP. |
647 | * For instance, if the fid is inconsistent. | 666 | * For instance, if the fid is inconsistent. |
@@ -673,6 +692,7 @@ union inputArgs { | |||
673 | struct coda_open_by_fd_in coda_open_by_fd; | 692 | struct coda_open_by_fd_in coda_open_by_fd; |
674 | struct coda_open_by_path_in coda_open_by_path; | 693 | struct coda_open_by_path_in coda_open_by_path; |
675 | struct coda_statfs_in coda_statfs; | 694 | struct coda_statfs_in coda_statfs; |
695 | struct coda_access_intent_in coda_access_intent; | ||
676 | }; | 696 | }; |
677 | 697 | ||
678 | union outputArgs { | 698 | union outputArgs { |
diff --git a/include/uapi/linux/coda_psdev.h b/include/uapi/linux/coda_psdev.h deleted file mode 100644 index aa6623efd2dd..000000000000 --- a/include/uapi/linux/coda_psdev.h +++ /dev/null | |||
@@ -1,28 +0,0 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ | ||
2 | #ifndef _UAPI__CODA_PSDEV_H | ||
3 | #define _UAPI__CODA_PSDEV_H | ||
4 | |||
5 | #include <linux/magic.h> | ||
6 | |||
7 | #define CODA_PSDEV_MAJOR 67 | ||
8 | #define MAX_CODADEVS 5 /* how many do we allow */ | ||
9 | |||
10 | |||
11 | /* messages between coda filesystem in kernel and Venus */ | ||
12 | struct upc_req { | ||
13 | struct list_head uc_chain; | ||
14 | caddr_t uc_data; | ||
15 | u_short uc_flags; | ||
16 | u_short uc_inSize; /* Size is at most 5000 bytes */ | ||
17 | u_short uc_outSize; | ||
18 | u_short uc_opcode; /* copied from data to save lookup */ | ||
19 | int uc_unique; | ||
20 | wait_queue_head_t uc_sleep; /* process' wait queue */ | ||
21 | }; | ||
22 | |||
23 | #define CODA_REQ_ASYNC 0x1 | ||
24 | #define CODA_REQ_READ 0x2 | ||
25 | #define CODA_REQ_WRITE 0x4 | ||
26 | #define CODA_REQ_ABORT 0x8 | ||
27 | |||
28 | #endif /* _UAPI__CODA_PSDEV_H */ | ||
diff --git a/include/uapi/linux/ptrace.h b/include/uapi/linux/ptrace.h index d5a1b8a492b9..a71b6e3b03eb 100644 --- a/include/uapi/linux/ptrace.h +++ b/include/uapi/linux/ptrace.h | |||
@@ -73,6 +73,41 @@ struct seccomp_metadata { | |||
73 | __u64 flags; /* Output: filter's flags */ | 73 | __u64 flags; /* Output: filter's flags */ |
74 | }; | 74 | }; |
75 | 75 | ||
76 | #define PTRACE_GET_SYSCALL_INFO 0x420e | ||
77 | #define PTRACE_SYSCALL_INFO_NONE 0 | ||
78 | #define PTRACE_SYSCALL_INFO_ENTRY 1 | ||
79 | #define PTRACE_SYSCALL_INFO_EXIT 2 | ||
80 | #define PTRACE_SYSCALL_INFO_SECCOMP 3 | ||
81 | |||
82 | struct ptrace_syscall_info { | ||
83 | __u8 op; /* PTRACE_SYSCALL_INFO_* */ | ||
84 | __u32 arch __attribute__((__aligned__(sizeof(__u32)))); | ||
85 | __u64 instruction_pointer; | ||
86 | __u64 stack_pointer; | ||
87 | union { | ||
88 | struct { | ||
89 | __u64 nr; | ||
90 | __u64 args[6]; | ||
91 | } entry; | ||
92 | struct { | ||
93 | __s64 rval; | ||
94 | __u8 is_error; | ||
95 | } exit; | ||
96 | struct { | ||
97 | __u64 nr; | ||
98 | __u64 args[6]; | ||
99 | __u32 ret_data; | ||
100 | } seccomp; | ||
101 | }; | ||
102 | }; | ||
103 | |||
104 | /* | ||
105 | * These values are stored in task->ptrace_message | ||
106 | * by tracehook_report_syscall_* to describe the current syscall-stop. | ||
107 | */ | ||
108 | #define PTRACE_EVENTMSG_SYSCALL_ENTRY 1 | ||
109 | #define PTRACE_EVENTMSG_SYSCALL_EXIT 2 | ||
110 | |||
76 | /* Read signals from a shared (process wide) queue */ | 111 | /* Read signals from a shared (process wide) queue */ |
77 | #define PTRACE_PEEKSIGINFO_SHARED (1 << 0) | 112 | #define PTRACE_PEEKSIGINFO_SHARED (1 << 0) |
78 | 113 | ||
diff --git a/init/Kconfig b/init/Kconfig index 381cdfee6e0e..bd7d650d4a99 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -1827,7 +1827,7 @@ config SLAB_FREELIST_HARDENED | |||
1827 | help | 1827 | help |
1828 | Many kernel heap attacks try to target slab cache metadata and | 1828 | Many kernel heap attacks try to target slab cache metadata and |
1829 | other infrastructure. This options makes minor performance | 1829 | other infrastructure. This options makes minor performance |
1830 | sacrifies to harden the kernel slab allocator against common | 1830 | sacrifices to harden the kernel slab allocator against common |
1831 | freelist exploit methods. | 1831 | freelist exploit methods. |
1832 | 1832 | ||
1833 | config SHUFFLE_PAGE_ALLOCATOR | 1833 | config SHUFFLE_PAGE_ALLOCATOR |
@@ -1859,7 +1859,7 @@ config SLUB_CPU_PARTIAL | |||
1859 | depends on SLUB && SMP | 1859 | depends on SLUB && SMP |
1860 | bool "SLUB per cpu partial cache" | 1860 | bool "SLUB per cpu partial cache" |
1861 | help | 1861 | help |
1862 | Per cpu partial caches accellerate objects allocation and freeing | 1862 | Per cpu partial caches accelerate objects allocation and freeing |
1863 | that is local to a processor at the price of more indeterminism | 1863 | that is local to a processor at the price of more indeterminism |
1864 | in the latency of the free. On overflow these caches will be cleared | 1864 | in the latency of the free. On overflow these caches will be cleared |
1865 | which requires the taking of locks that may cause latency spikes. | 1865 | which requires the taking of locks that may cause latency spikes. |
diff --git a/ipc/mqueue.c b/ipc/mqueue.c index 216cad1ff0d0..65c351564ad0 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c | |||
@@ -438,7 +438,6 @@ static void mqueue_evict_inode(struct inode *inode) | |||
438 | { | 438 | { |
439 | struct mqueue_inode_info *info; | 439 | struct mqueue_inode_info *info; |
440 | struct user_struct *user; | 440 | struct user_struct *user; |
441 | unsigned long mq_bytes, mq_treesize; | ||
442 | struct ipc_namespace *ipc_ns; | 441 | struct ipc_namespace *ipc_ns; |
443 | struct msg_msg *msg, *nmsg; | 442 | struct msg_msg *msg, *nmsg; |
444 | LIST_HEAD(tmp_msg); | 443 | LIST_HEAD(tmp_msg); |
@@ -461,16 +460,18 @@ static void mqueue_evict_inode(struct inode *inode) | |||
461 | free_msg(msg); | 460 | free_msg(msg); |
462 | } | 461 | } |
463 | 462 | ||
464 | /* Total amount of bytes accounted for the mqueue */ | ||
465 | mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) + | ||
466 | min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) * | ||
467 | sizeof(struct posix_msg_tree_node); | ||
468 | |||
469 | mq_bytes = mq_treesize + (info->attr.mq_maxmsg * | ||
470 | info->attr.mq_msgsize); | ||
471 | |||
472 | user = info->user; | 463 | user = info->user; |
473 | if (user) { | 464 | if (user) { |
465 | unsigned long mq_bytes, mq_treesize; | ||
466 | |||
467 | /* Total amount of bytes accounted for the mqueue */ | ||
468 | mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) + | ||
469 | min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) * | ||
470 | sizeof(struct posix_msg_tree_node); | ||
471 | |||
472 | mq_bytes = mq_treesize + (info->attr.mq_maxmsg * | ||
473 | info->attr.mq_msgsize); | ||
474 | |||
474 | spin_lock(&mq_lock); | 475 | spin_lock(&mq_lock); |
475 | user->mq_bytes -= mq_bytes; | 476 | user->mq_bytes -= mq_bytes; |
476 | /* | 477 | /* |
diff --git a/kernel/pid.c b/kernel/pid.c index 16263b526560..0a9f2e437217 100644 --- a/kernel/pid.c +++ b/kernel/pid.c | |||
@@ -37,14 +37,14 @@ | |||
37 | #include <linux/init_task.h> | 37 | #include <linux/init_task.h> |
38 | #include <linux/syscalls.h> | 38 | #include <linux/syscalls.h> |
39 | #include <linux/proc_ns.h> | 39 | #include <linux/proc_ns.h> |
40 | #include <linux/proc_fs.h> | 40 | #include <linux/refcount.h> |
41 | #include <linux/anon_inodes.h> | 41 | #include <linux/anon_inodes.h> |
42 | #include <linux/sched/signal.h> | 42 | #include <linux/sched/signal.h> |
43 | #include <linux/sched/task.h> | 43 | #include <linux/sched/task.h> |
44 | #include <linux/idr.h> | 44 | #include <linux/idr.h> |
45 | 45 | ||
46 | struct pid init_struct_pid = { | 46 | struct pid init_struct_pid = { |
47 | .count = ATOMIC_INIT(1), | 47 | .count = REFCOUNT_INIT(1), |
48 | .tasks = { | 48 | .tasks = { |
49 | { .first = NULL }, | 49 | { .first = NULL }, |
50 | { .first = NULL }, | 50 | { .first = NULL }, |
@@ -108,8 +108,7 @@ void put_pid(struct pid *pid) | |||
108 | return; | 108 | return; |
109 | 109 | ||
110 | ns = pid->numbers[pid->level].ns; | 110 | ns = pid->numbers[pid->level].ns; |
111 | if ((atomic_read(&pid->count) == 1) || | 111 | if (refcount_dec_and_test(&pid->count)) { |
112 | atomic_dec_and_test(&pid->count)) { | ||
113 | kmem_cache_free(ns->pid_cachep, pid); | 112 | kmem_cache_free(ns->pid_cachep, pid); |
114 | put_pid_ns(ns); | 113 | put_pid_ns(ns); |
115 | } | 114 | } |
@@ -212,7 +211,7 @@ struct pid *alloc_pid(struct pid_namespace *ns) | |||
212 | } | 211 | } |
213 | 212 | ||
214 | get_pid_ns(ns); | 213 | get_pid_ns(ns); |
215 | atomic_set(&pid->count, 1); | 214 | refcount_set(&pid->count, 1); |
216 | for (type = 0; type < PIDTYPE_MAX; ++type) | 215 | for (type = 0; type < PIDTYPE_MAX; ++type) |
217 | INIT_HLIST_HEAD(&pid->tasks[type]); | 216 | INIT_HLIST_HEAD(&pid->tasks[type]); |
218 | 217 | ||
diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 83a531cea2f3..cb9ddcc08119 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c | |||
@@ -32,6 +32,8 @@ | |||
32 | #include <linux/compat.h> | 32 | #include <linux/compat.h> |
33 | #include <linux/sched/signal.h> | 33 | #include <linux/sched/signal.h> |
34 | 34 | ||
35 | #include <asm/syscall.h> /* for syscall_get_* */ | ||
36 | |||
35 | /* | 37 | /* |
36 | * Access another process' address space via ptrace. | 38 | * Access another process' address space via ptrace. |
37 | * Source/target buffer must be kernel space, | 39 | * Source/target buffer must be kernel space, |
@@ -897,7 +899,100 @@ static int ptrace_regset(struct task_struct *task, int req, unsigned int type, | |||
897 | * to ensure no machine forgets it. | 899 | * to ensure no machine forgets it. |
898 | */ | 900 | */ |
899 | EXPORT_SYMBOL_GPL(task_user_regset_view); | 901 | EXPORT_SYMBOL_GPL(task_user_regset_view); |
900 | #endif | 902 | |
903 | static unsigned long | ||
904 | ptrace_get_syscall_info_entry(struct task_struct *child, struct pt_regs *regs, | ||
905 | struct ptrace_syscall_info *info) | ||
906 | { | ||
907 | unsigned long args[ARRAY_SIZE(info->entry.args)]; | ||
908 | int i; | ||
909 | |||
910 | info->op = PTRACE_SYSCALL_INFO_ENTRY; | ||
911 | info->entry.nr = syscall_get_nr(child, regs); | ||
912 | syscall_get_arguments(child, regs, args); | ||
913 | for (i = 0; i < ARRAY_SIZE(args); i++) | ||
914 | info->entry.args[i] = args[i]; | ||
915 | |||
916 | /* args is the last field in struct ptrace_syscall_info.entry */ | ||
917 | return offsetofend(struct ptrace_syscall_info, entry.args); | ||
918 | } | ||
919 | |||
920 | static unsigned long | ||
921 | ptrace_get_syscall_info_seccomp(struct task_struct *child, struct pt_regs *regs, | ||
922 | struct ptrace_syscall_info *info) | ||
923 | { | ||
924 | /* | ||
925 | * As struct ptrace_syscall_info.entry is currently a subset | ||
926 | * of struct ptrace_syscall_info.seccomp, it makes sense to | ||
927 | * initialize that subset using ptrace_get_syscall_info_entry(). | ||
928 | * This can be reconsidered in the future if these structures | ||
929 | * diverge significantly enough. | ||
930 | */ | ||
931 | ptrace_get_syscall_info_entry(child, regs, info); | ||
932 | info->op = PTRACE_SYSCALL_INFO_SECCOMP; | ||
933 | info->seccomp.ret_data = child->ptrace_message; | ||
934 | |||
935 | /* ret_data is the last field in struct ptrace_syscall_info.seccomp */ | ||
936 | return offsetofend(struct ptrace_syscall_info, seccomp.ret_data); | ||
937 | } | ||
938 | |||
939 | static unsigned long | ||
940 | ptrace_get_syscall_info_exit(struct task_struct *child, struct pt_regs *regs, | ||
941 | struct ptrace_syscall_info *info) | ||
942 | { | ||
943 | info->op = PTRACE_SYSCALL_INFO_EXIT; | ||
944 | info->exit.rval = syscall_get_error(child, regs); | ||
945 | info->exit.is_error = !!info->exit.rval; | ||
946 | if (!info->exit.is_error) | ||
947 | info->exit.rval = syscall_get_return_value(child, regs); | ||
948 | |||
949 | /* is_error is the last field in struct ptrace_syscall_info.exit */ | ||
950 | return offsetofend(struct ptrace_syscall_info, exit.is_error); | ||
951 | } | ||
952 | |||
953 | static int | ||
954 | ptrace_get_syscall_info(struct task_struct *child, unsigned long user_size, | ||
955 | void __user *datavp) | ||
956 | { | ||
957 | struct pt_regs *regs = task_pt_regs(child); | ||
958 | struct ptrace_syscall_info info = { | ||
959 | .op = PTRACE_SYSCALL_INFO_NONE, | ||
960 | .arch = syscall_get_arch(child), | ||
961 | .instruction_pointer = instruction_pointer(regs), | ||
962 | .stack_pointer = user_stack_pointer(regs), | ||
963 | }; | ||
964 | unsigned long actual_size = offsetof(struct ptrace_syscall_info, entry); | ||
965 | unsigned long write_size; | ||
966 | |||
967 | /* | ||
968 | * This does not need lock_task_sighand() to access | ||
969 | * child->last_siginfo because ptrace_freeze_traced() | ||
970 | * called earlier by ptrace_check_attach() ensures that | ||
971 | * the tracee cannot go away and clear its last_siginfo. | ||
972 | */ | ||
973 | switch (child->last_siginfo ? child->last_siginfo->si_code : 0) { | ||
974 | case SIGTRAP | 0x80: | ||
975 | switch (child->ptrace_message) { | ||
976 | case PTRACE_EVENTMSG_SYSCALL_ENTRY: | ||
977 | actual_size = ptrace_get_syscall_info_entry(child, regs, | ||
978 | &info); | ||
979 | break; | ||
980 | case PTRACE_EVENTMSG_SYSCALL_EXIT: | ||
981 | actual_size = ptrace_get_syscall_info_exit(child, regs, | ||
982 | &info); | ||
983 | break; | ||
984 | } | ||
985 | break; | ||
986 | case SIGTRAP | (PTRACE_EVENT_SECCOMP << 8): | ||
987 | actual_size = ptrace_get_syscall_info_seccomp(child, regs, | ||
988 | &info); | ||
989 | break; | ||
990 | } | ||
991 | |||
992 | write_size = min(actual_size, user_size); | ||
993 | return copy_to_user(datavp, &info, write_size) ? -EFAULT : actual_size; | ||
994 | } | ||
995 | #endif /* CONFIG_HAVE_ARCH_TRACEHOOK */ | ||
901 | 996 | ||
902 | int ptrace_request(struct task_struct *child, long request, | 997 | int ptrace_request(struct task_struct *child, long request, |
903 | unsigned long addr, unsigned long data) | 998 | unsigned long addr, unsigned long data) |
@@ -1114,6 +1209,10 @@ int ptrace_request(struct task_struct *child, long request, | |||
1114 | ret = __put_user(kiov.iov_len, &uiov->iov_len); | 1209 | ret = __put_user(kiov.iov_len, &uiov->iov_len); |
1115 | break; | 1210 | break; |
1116 | } | 1211 | } |
1212 | |||
1213 | case PTRACE_GET_SYSCALL_INFO: | ||
1214 | ret = ptrace_get_syscall_info(child, addr, datavp); | ||
1215 | break; | ||
1117 | #endif | 1216 | #endif |
1118 | 1217 | ||
1119 | case PTRACE_SECCOMP_GET_FILTER: | 1218 | case PTRACE_SECCOMP_GET_FILTER: |
diff --git a/kernel/signal.c b/kernel/signal.c index dabe100d2091..91b789dd6e72 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -2951,80 +2951,49 @@ EXPORT_SYMBOL(sigprocmask); | |||
2951 | * | 2951 | * |
2952 | * This is useful for syscalls such as ppoll, pselect, io_pgetevents and | 2952 | * This is useful for syscalls such as ppoll, pselect, io_pgetevents and |
2953 | * epoll_pwait where a new sigmask is passed from userland for the syscalls. | 2953 | * epoll_pwait where a new sigmask is passed from userland for the syscalls. |
2954 | * | ||
2955 | * Note that it does set_restore_sigmask() in advance, so it must be always | ||
2956 | * paired with restore_saved_sigmask_unless() before return from syscall. | ||
2954 | */ | 2957 | */ |
2955 | int set_user_sigmask(const sigset_t __user *usigmask, sigset_t *set, | 2958 | int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize) |
2956 | sigset_t *oldset, size_t sigsetsize) | ||
2957 | { | 2959 | { |
2958 | if (!usigmask) | 2960 | sigset_t kmask; |
2959 | return 0; | ||
2960 | 2961 | ||
2962 | if (!umask) | ||
2963 | return 0; | ||
2961 | if (sigsetsize != sizeof(sigset_t)) | 2964 | if (sigsetsize != sizeof(sigset_t)) |
2962 | return -EINVAL; | 2965 | return -EINVAL; |
2963 | if (copy_from_user(set, usigmask, sizeof(sigset_t))) | 2966 | if (copy_from_user(&kmask, umask, sizeof(sigset_t))) |
2964 | return -EFAULT; | 2967 | return -EFAULT; |
2965 | 2968 | ||
2966 | *oldset = current->blocked; | 2969 | set_restore_sigmask(); |
2967 | set_current_blocked(set); | 2970 | current->saved_sigmask = current->blocked; |
2971 | set_current_blocked(&kmask); | ||
2968 | 2972 | ||
2969 | return 0; | 2973 | return 0; |
2970 | } | 2974 | } |
2971 | EXPORT_SYMBOL(set_user_sigmask); | ||
2972 | 2975 | ||
2973 | #ifdef CONFIG_COMPAT | 2976 | #ifdef CONFIG_COMPAT |
2974 | int set_compat_user_sigmask(const compat_sigset_t __user *usigmask, | 2977 | int set_compat_user_sigmask(const compat_sigset_t __user *umask, |
2975 | sigset_t *set, sigset_t *oldset, | ||
2976 | size_t sigsetsize) | 2978 | size_t sigsetsize) |
2977 | { | 2979 | { |
2978 | if (!usigmask) | 2980 | sigset_t kmask; |
2979 | return 0; | ||
2980 | 2981 | ||
2982 | if (!umask) | ||
2983 | return 0; | ||
2981 | if (sigsetsize != sizeof(compat_sigset_t)) | 2984 | if (sigsetsize != sizeof(compat_sigset_t)) |
2982 | return -EINVAL; | 2985 | return -EINVAL; |
2983 | if (get_compat_sigset(set, usigmask)) | 2986 | if (get_compat_sigset(&kmask, umask)) |
2984 | return -EFAULT; | 2987 | return -EFAULT; |
2985 | 2988 | ||
2986 | *oldset = current->blocked; | 2989 | set_restore_sigmask(); |
2987 | set_current_blocked(set); | 2990 | current->saved_sigmask = current->blocked; |
2991 | set_current_blocked(&kmask); | ||
2988 | 2992 | ||
2989 | return 0; | 2993 | return 0; |
2990 | } | 2994 | } |
2991 | EXPORT_SYMBOL(set_compat_user_sigmask); | ||
2992 | #endif | 2995 | #endif |
2993 | 2996 | ||
2994 | /* | ||
2995 | * restore_user_sigmask: | ||
2996 | * usigmask: sigmask passed in from userland. | ||
2997 | * sigsaved: saved sigmask when the syscall started and changed the sigmask to | ||
2998 | * usigmask. | ||
2999 | * | ||
3000 | * This is useful for syscalls such as ppoll, pselect, io_pgetevents and | ||
3001 | * epoll_pwait where a new sigmask is passed in from userland for the syscalls. | ||
3002 | */ | ||
3003 | void restore_user_sigmask(const void __user *usigmask, sigset_t *sigsaved, | ||
3004 | bool interrupted) | ||
3005 | { | ||
3006 | |||
3007 | if (!usigmask) | ||
3008 | return; | ||
3009 | /* | ||
3010 | * When signals are pending, do not restore them here. | ||
3011 | * Restoring sigmask here can lead to delivering signals that the above | ||
3012 | * syscalls are intended to block because of the sigmask passed in. | ||
3013 | */ | ||
3014 | if (interrupted) { | ||
3015 | current->saved_sigmask = *sigsaved; | ||
3016 | set_restore_sigmask(); | ||
3017 | return; | ||
3018 | } | ||
3019 | |||
3020 | /* | ||
3021 | * This is needed because the fast syscall return path does not restore | ||
3022 | * saved_sigmask when signals are not pending. | ||
3023 | */ | ||
3024 | set_current_blocked(sigsaved); | ||
3025 | } | ||
3026 | EXPORT_SYMBOL(restore_user_sigmask); | ||
3027 | |||
3028 | /** | 2997 | /** |
3029 | * sys_rt_sigprocmask - change the list of currently blocked signals | 2998 | * sys_rt_sigprocmask - change the list of currently blocked signals |
3030 | * @how: whether to add, remove, or set signals | 2999 | * @how: whether to add, remove, or set signals |
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 1c1ad1e14f21..43186ccfa139 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -188,17 +188,17 @@ extern int no_unaligned_warning; | |||
188 | * enum sysctl_writes_mode - supported sysctl write modes | 188 | * enum sysctl_writes_mode - supported sysctl write modes |
189 | * | 189 | * |
190 | * @SYSCTL_WRITES_LEGACY: each write syscall must fully contain the sysctl value | 190 | * @SYSCTL_WRITES_LEGACY: each write syscall must fully contain the sysctl value |
191 | * to be written, and multiple writes on the same sysctl file descriptor | 191 | * to be written, and multiple writes on the same sysctl file descriptor |
192 | * will rewrite the sysctl value, regardless of file position. No warning | 192 | * will rewrite the sysctl value, regardless of file position. No warning |
193 | * is issued when the initial position is not 0. | 193 | * is issued when the initial position is not 0. |
194 | * @SYSCTL_WRITES_WARN: same as above but warn when the initial file position is | 194 | * @SYSCTL_WRITES_WARN: same as above but warn when the initial file position is |
195 | * not 0. | 195 | * not 0. |
196 | * @SYSCTL_WRITES_STRICT: writes to numeric sysctl entries must always be at | 196 | * @SYSCTL_WRITES_STRICT: writes to numeric sysctl entries must always be at |
197 | * file position 0 and the value must be fully contained in the buffer | 197 | * file position 0 and the value must be fully contained in the buffer |
198 | * sent to the write syscall. If dealing with strings respect the file | 198 | * sent to the write syscall. If dealing with strings respect the file |
199 | * position, but restrict this to the max length of the buffer, anything | 199 | * position, but restrict this to the max length of the buffer, anything |
200 | * passed the max lenght will be ignored. Multiple writes will append | 200 | * passed the max length will be ignored. Multiple writes will append |
201 | * to the buffer. | 201 | * to the buffer. |
202 | * | 202 | * |
203 | * These write modes control how current file position affects the behavior of | 203 | * These write modes control how current file position affects the behavior of |
204 | * updating sysctl values through the proc interface on each write. | 204 | * updating sysctl values through the proc interface on each write. |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index a858b55e8ac7..bc6673ab3a08 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -2076,6 +2076,14 @@ config TEST_STACKINIT | |||
2076 | 2076 | ||
2077 | If unsure, say N. | 2077 | If unsure, say N. |
2078 | 2078 | ||
2079 | config TEST_MEMINIT | ||
2080 | tristate "Test heap/page initialization" | ||
2081 | help | ||
2082 | Test if the kernel is zero-initializing heap and page allocations. | ||
2083 | This can be useful to test init_on_alloc and init_on_free features. | ||
2084 | |||
2085 | If unsure, say N. | ||
2086 | |||
2079 | endif # RUNTIME_TESTING_MENU | 2087 | endif # RUNTIME_TESTING_MENU |
2080 | 2088 | ||
2081 | config MEMTEST | 2089 | config MEMTEST |
diff --git a/lib/Makefile b/lib/Makefile index fdd56bc219b8..59067f51f3ab 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
@@ -92,6 +92,7 @@ obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o | |||
92 | obj-$(CONFIG_TEST_OBJAGG) += test_objagg.o | 92 | obj-$(CONFIG_TEST_OBJAGG) += test_objagg.o |
93 | obj-$(CONFIG_TEST_STACKINIT) += test_stackinit.o | 93 | obj-$(CONFIG_TEST_STACKINIT) += test_stackinit.o |
94 | obj-$(CONFIG_TEST_BLACKHOLE_DEV) += test_blackhole_dev.o | 94 | obj-$(CONFIG_TEST_BLACKHOLE_DEV) += test_blackhole_dev.o |
95 | obj-$(CONFIG_TEST_MEMINIT) += test_meminit.o | ||
95 | 96 | ||
96 | obj-$(CONFIG_TEST_LIVEPATCH) += livepatch/ | 97 | obj-$(CONFIG_TEST_LIVEPATCH) += livepatch/ |
97 | 98 | ||
diff --git a/lib/ioremap.c b/lib/ioremap.c index 063213685563..0a2ffadc6d71 100644 --- a/lib/ioremap.c +++ b/lib/ioremap.c | |||
@@ -30,6 +30,8 @@ early_param("nohugeiomap", set_nohugeiomap); | |||
30 | void __init ioremap_huge_init(void) | 30 | void __init ioremap_huge_init(void) |
31 | { | 31 | { |
32 | if (!ioremap_huge_disabled) { | 32 | if (!ioremap_huge_disabled) { |
33 | if (arch_ioremap_p4d_supported()) | ||
34 | ioremap_p4d_capable = 1; | ||
33 | if (arch_ioremap_pud_supported()) | 35 | if (arch_ioremap_pud_supported()) |
34 | ioremap_pud_capable = 1; | 36 | ioremap_pud_capable = 1; |
35 | if (arch_ioremap_pmd_supported()) | 37 | if (arch_ioremap_pmd_supported()) |
@@ -86,6 +88,9 @@ static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr, | |||
86 | if ((end - addr) != PMD_SIZE) | 88 | if ((end - addr) != PMD_SIZE) |
87 | return 0; | 89 | return 0; |
88 | 90 | ||
91 | if (!IS_ALIGNED(addr, PMD_SIZE)) | ||
92 | return 0; | ||
93 | |||
89 | if (!IS_ALIGNED(phys_addr, PMD_SIZE)) | 94 | if (!IS_ALIGNED(phys_addr, PMD_SIZE)) |
90 | return 0; | 95 | return 0; |
91 | 96 | ||
@@ -126,6 +131,9 @@ static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr, | |||
126 | if ((end - addr) != PUD_SIZE) | 131 | if ((end - addr) != PUD_SIZE) |
127 | return 0; | 132 | return 0; |
128 | 133 | ||
134 | if (!IS_ALIGNED(addr, PUD_SIZE)) | ||
135 | return 0; | ||
136 | |||
129 | if (!IS_ALIGNED(phys_addr, PUD_SIZE)) | 137 | if (!IS_ALIGNED(phys_addr, PUD_SIZE)) |
130 | return 0; | 138 | return 0; |
131 | 139 | ||
@@ -166,6 +174,9 @@ static int ioremap_try_huge_p4d(p4d_t *p4d, unsigned long addr, | |||
166 | if ((end - addr) != P4D_SIZE) | 174 | if ((end - addr) != P4D_SIZE) |
167 | return 0; | 175 | return 0; |
168 | 176 | ||
177 | if (!IS_ALIGNED(addr, P4D_SIZE)) | ||
178 | return 0; | ||
179 | |||
169 | if (!IS_ALIGNED(phys_addr, P4D_SIZE)) | 180 | if (!IS_ALIGNED(phys_addr, P4D_SIZE)) |
170 | return 0; | 181 | return 0; |
171 | 182 | ||
diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h index 08c60d10747f..3bb6260d8f42 100644 --- a/lib/mpi/longlong.h +++ b/lib/mpi/longlong.h | |||
@@ -397,8 +397,8 @@ do { \ | |||
397 | #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ | 397 | #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ |
398 | __asm__ ("addl %5,%1\n" \ | 398 | __asm__ ("addl %5,%1\n" \ |
399 | "adcl %3,%0" \ | 399 | "adcl %3,%0" \ |
400 | : "=r" ((USItype)(sh)), \ | 400 | : "=r" (sh), \ |
401 | "=&r" ((USItype)(sl)) \ | 401 | "=&r" (sl) \ |
402 | : "%0" ((USItype)(ah)), \ | 402 | : "%0" ((USItype)(ah)), \ |
403 | "g" ((USItype)(bh)), \ | 403 | "g" ((USItype)(bh)), \ |
404 | "%1" ((USItype)(al)), \ | 404 | "%1" ((USItype)(al)), \ |
@@ -406,22 +406,22 @@ do { \ | |||
406 | #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ | 406 | #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ |
407 | __asm__ ("subl %5,%1\n" \ | 407 | __asm__ ("subl %5,%1\n" \ |
408 | "sbbl %3,%0" \ | 408 | "sbbl %3,%0" \ |
409 | : "=r" ((USItype)(sh)), \ | 409 | : "=r" (sh), \ |
410 | "=&r" ((USItype)(sl)) \ | 410 | "=&r" (sl) \ |
411 | : "0" ((USItype)(ah)), \ | 411 | : "0" ((USItype)(ah)), \ |
412 | "g" ((USItype)(bh)), \ | 412 | "g" ((USItype)(bh)), \ |
413 | "1" ((USItype)(al)), \ | 413 | "1" ((USItype)(al)), \ |
414 | "g" ((USItype)(bl))) | 414 | "g" ((USItype)(bl))) |
415 | #define umul_ppmm(w1, w0, u, v) \ | 415 | #define umul_ppmm(w1, w0, u, v) \ |
416 | __asm__ ("mull %3" \ | 416 | __asm__ ("mull %3" \ |
417 | : "=a" ((USItype)(w0)), \ | 417 | : "=a" (w0), \ |
418 | "=d" ((USItype)(w1)) \ | 418 | "=d" (w1) \ |
419 | : "%0" ((USItype)(u)), \ | 419 | : "%0" ((USItype)(u)), \ |
420 | "rm" ((USItype)(v))) | 420 | "rm" ((USItype)(v))) |
421 | #define udiv_qrnnd(q, r, n1, n0, d) \ | 421 | #define udiv_qrnnd(q, r, n1, n0, d) \ |
422 | __asm__ ("divl %4" \ | 422 | __asm__ ("divl %4" \ |
423 | : "=a" ((USItype)(q)), \ | 423 | : "=a" (q), \ |
424 | "=d" ((USItype)(r)) \ | 424 | "=d" (r) \ |
425 | : "0" ((USItype)(n0)), \ | 425 | : "0" ((USItype)(n0)), \ |
426 | "1" ((USItype)(n1)), \ | 426 | "1" ((USItype)(n1)), \ |
427 | "rm" ((USItype)(d))) | 427 | "rm" ((USItype)(d))) |
diff --git a/lib/rbtree.c b/lib/rbtree.c index 1ef6e25d031c..abc86c6a3177 100644 --- a/lib/rbtree.c +++ b/lib/rbtree.c | |||
@@ -83,14 +83,10 @@ __rb_rotate_set_parents(struct rb_node *old, struct rb_node *new, | |||
83 | 83 | ||
84 | static __always_inline void | 84 | static __always_inline void |
85 | __rb_insert(struct rb_node *node, struct rb_root *root, | 85 | __rb_insert(struct rb_node *node, struct rb_root *root, |
86 | bool newleft, struct rb_node **leftmost, | ||
87 | void (*augment_rotate)(struct rb_node *old, struct rb_node *new)) | 86 | void (*augment_rotate)(struct rb_node *old, struct rb_node *new)) |
88 | { | 87 | { |
89 | struct rb_node *parent = rb_red_parent(node), *gparent, *tmp; | 88 | struct rb_node *parent = rb_red_parent(node), *gparent, *tmp; |
90 | 89 | ||
91 | if (newleft) | ||
92 | *leftmost = node; | ||
93 | |||
94 | while (true) { | 90 | while (true) { |
95 | /* | 91 | /* |
96 | * Loop invariant: node is red. | 92 | * Loop invariant: node is red. |
@@ -437,38 +433,19 @@ static const struct rb_augment_callbacks dummy_callbacks = { | |||
437 | 433 | ||
438 | void rb_insert_color(struct rb_node *node, struct rb_root *root) | 434 | void rb_insert_color(struct rb_node *node, struct rb_root *root) |
439 | { | 435 | { |
440 | __rb_insert(node, root, false, NULL, dummy_rotate); | 436 | __rb_insert(node, root, dummy_rotate); |
441 | } | 437 | } |
442 | EXPORT_SYMBOL(rb_insert_color); | 438 | EXPORT_SYMBOL(rb_insert_color); |
443 | 439 | ||
444 | void rb_erase(struct rb_node *node, struct rb_root *root) | 440 | void rb_erase(struct rb_node *node, struct rb_root *root) |
445 | { | 441 | { |
446 | struct rb_node *rebalance; | 442 | struct rb_node *rebalance; |
447 | rebalance = __rb_erase_augmented(node, root, | 443 | rebalance = __rb_erase_augmented(node, root, &dummy_callbacks); |
448 | NULL, &dummy_callbacks); | ||
449 | if (rebalance) | 444 | if (rebalance) |
450 | ____rb_erase_color(rebalance, root, dummy_rotate); | 445 | ____rb_erase_color(rebalance, root, dummy_rotate); |
451 | } | 446 | } |
452 | EXPORT_SYMBOL(rb_erase); | 447 | EXPORT_SYMBOL(rb_erase); |
453 | 448 | ||
454 | void rb_insert_color_cached(struct rb_node *node, | ||
455 | struct rb_root_cached *root, bool leftmost) | ||
456 | { | ||
457 | __rb_insert(node, &root->rb_root, leftmost, | ||
458 | &root->rb_leftmost, dummy_rotate); | ||
459 | } | ||
460 | EXPORT_SYMBOL(rb_insert_color_cached); | ||
461 | |||
462 | void rb_erase_cached(struct rb_node *node, struct rb_root_cached *root) | ||
463 | { | ||
464 | struct rb_node *rebalance; | ||
465 | rebalance = __rb_erase_augmented(node, &root->rb_root, | ||
466 | &root->rb_leftmost, &dummy_callbacks); | ||
467 | if (rebalance) | ||
468 | ____rb_erase_color(rebalance, &root->rb_root, dummy_rotate); | ||
469 | } | ||
470 | EXPORT_SYMBOL(rb_erase_cached); | ||
471 | |||
472 | /* | 449 | /* |
473 | * Augmented rbtree manipulation functions. | 450 | * Augmented rbtree manipulation functions. |
474 | * | 451 | * |
@@ -477,10 +454,9 @@ EXPORT_SYMBOL(rb_erase_cached); | |||
477 | */ | 454 | */ |
478 | 455 | ||
479 | void __rb_insert_augmented(struct rb_node *node, struct rb_root *root, | 456 | void __rb_insert_augmented(struct rb_node *node, struct rb_root *root, |
480 | bool newleft, struct rb_node **leftmost, | ||
481 | void (*augment_rotate)(struct rb_node *old, struct rb_node *new)) | 457 | void (*augment_rotate)(struct rb_node *old, struct rb_node *new)) |
482 | { | 458 | { |
483 | __rb_insert(node, root, newleft, leftmost, augment_rotate); | 459 | __rb_insert(node, root, augment_rotate); |
484 | } | 460 | } |
485 | EXPORT_SYMBOL(__rb_insert_augmented); | 461 | EXPORT_SYMBOL(__rb_insert_augmented); |
486 | 462 | ||
@@ -591,16 +567,6 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new, | |||
591 | } | 567 | } |
592 | EXPORT_SYMBOL(rb_replace_node); | 568 | EXPORT_SYMBOL(rb_replace_node); |
593 | 569 | ||
594 | void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new, | ||
595 | struct rb_root_cached *root) | ||
596 | { | ||
597 | rb_replace_node(victim, new, &root->rb_root); | ||
598 | |||
599 | if (root->rb_leftmost == victim) | ||
600 | root->rb_leftmost = new; | ||
601 | } | ||
602 | EXPORT_SYMBOL(rb_replace_node_cached); | ||
603 | |||
604 | void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new, | 570 | void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new, |
605 | struct rb_root *root) | 571 | struct rb_root *root) |
606 | { | 572 | { |
diff --git a/lib/string.c b/lib/string.c index 6016eb3ac73d..461fb620f85f 100644 --- a/lib/string.c +++ b/lib/string.c | |||
@@ -400,6 +400,9 @@ EXPORT_SYMBOL(strncmp); | |||
400 | * strchr - Find the first occurrence of a character in a string | 400 | * strchr - Find the first occurrence of a character in a string |
401 | * @s: The string to be searched | 401 | * @s: The string to be searched |
402 | * @c: The character to search for | 402 | * @c: The character to search for |
403 | * | ||
404 | * Note that the %NUL-terminator is considered part of the string, and can | ||
405 | * be searched for. | ||
403 | */ | 406 | */ |
404 | char *strchr(const char *s, int c) | 407 | char *strchr(const char *s, int c) |
405 | { | 408 | { |
@@ -453,12 +456,18 @@ EXPORT_SYMBOL(strrchr); | |||
453 | * @s: The string to be searched | 456 | * @s: The string to be searched |
454 | * @count: The number of characters to be searched | 457 | * @count: The number of characters to be searched |
455 | * @c: The character to search for | 458 | * @c: The character to search for |
459 | * | ||
460 | * Note that the %NUL-terminator is considered part of the string, and can | ||
461 | * be searched for. | ||
456 | */ | 462 | */ |
457 | char *strnchr(const char *s, size_t count, int c) | 463 | char *strnchr(const char *s, size_t count, int c) |
458 | { | 464 | { |
459 | for (; count-- && *s != '\0'; ++s) | 465 | while (count--) { |
460 | if (*s == (char)c) | 466 | if (*s == (char)c) |
461 | return (char *)s; | 467 | return (char *)s; |
468 | if (*s++ == '\0') | ||
469 | break; | ||
470 | } | ||
462 | return NULL; | 471 | return NULL; |
463 | } | 472 | } |
464 | EXPORT_SYMBOL(strnchr); | 473 | EXPORT_SYMBOL(strnchr); |
diff --git a/lib/string_helpers.c b/lib/string_helpers.c index 3a90a9e2b94a..963050c0283e 100644 --- a/lib/string_helpers.c +++ b/lib/string_helpers.c | |||
@@ -231,35 +231,36 @@ static bool unescape_special(char **src, char **dst) | |||
231 | * @src: source buffer (escaped) | 231 | * @src: source buffer (escaped) |
232 | * @dst: destination buffer (unescaped) | 232 | * @dst: destination buffer (unescaped) |
233 | * @size: size of the destination buffer (0 to unlimit) | 233 | * @size: size of the destination buffer (0 to unlimit) |
234 | * @flags: combination of the flags (bitwise OR): | 234 | * @flags: combination of the flags. |
235 | * %UNESCAPE_SPACE: | 235 | * |
236 | * Description: | ||
237 | * The function unquotes characters in the given string. | ||
238 | * | ||
239 | * Because the size of the output will be the same as or less than the size of | ||
240 | * the input, the transformation may be performed in place. | ||
241 | * | ||
242 | * Caller must provide valid source and destination pointers. Be aware that | ||
243 | * destination buffer will always be NULL-terminated. Source string must be | ||
244 | * NULL-terminated as well. The supported flags are:: | ||
245 | * | ||
246 | * UNESCAPE_SPACE: | ||
236 | * '\f' - form feed | 247 | * '\f' - form feed |
237 | * '\n' - new line | 248 | * '\n' - new line |
238 | * '\r' - carriage return | 249 | * '\r' - carriage return |
239 | * '\t' - horizontal tab | 250 | * '\t' - horizontal tab |
240 | * '\v' - vertical tab | 251 | * '\v' - vertical tab |
241 | * %UNESCAPE_OCTAL: | 252 | * UNESCAPE_OCTAL: |
242 | * '\NNN' - byte with octal value NNN (1 to 3 digits) | 253 | * '\NNN' - byte with octal value NNN (1 to 3 digits) |
243 | * %UNESCAPE_HEX: | 254 | * UNESCAPE_HEX: |
244 | * '\xHH' - byte with hexadecimal value HH (1 to 2 digits) | 255 | * '\xHH' - byte with hexadecimal value HH (1 to 2 digits) |
245 | * %UNESCAPE_SPECIAL: | 256 | * UNESCAPE_SPECIAL: |
246 | * '\"' - double quote | 257 | * '\"' - double quote |
247 | * '\\' - backslash | 258 | * '\\' - backslash |
248 | * '\a' - alert (BEL) | 259 | * '\a' - alert (BEL) |
249 | * '\e' - escape | 260 | * '\e' - escape |
250 | * %UNESCAPE_ANY: | 261 | * UNESCAPE_ANY: |
251 | * all previous together | 262 | * all previous together |
252 | * | 263 | * |
253 | * Description: | ||
254 | * The function unquotes characters in the given string. | ||
255 | * | ||
256 | * Because the size of the output will be the same as or less than the size of | ||
257 | * the input, the transformation may be performed in place. | ||
258 | * | ||
259 | * Caller must provide valid source and destination pointers. Be aware that | ||
260 | * destination buffer will always be NULL-terminated. Source string must be | ||
261 | * NULL-terminated as well. | ||
262 | * | ||
263 | * Return: | 264 | * Return: |
264 | * The amount of the characters processed to the destination buffer excluding | 265 | * The amount of the characters processed to the destination buffer excluding |
265 | * trailing '\0' is returned. | 266 | * trailing '\0' is returned. |
@@ -441,7 +442,29 @@ static bool escape_hex(unsigned char c, char **dst, char *end) | |||
441 | * @isz: source buffer size | 442 | * @isz: source buffer size |
442 | * @dst: destination buffer (escaped) | 443 | * @dst: destination buffer (escaped) |
443 | * @osz: destination buffer size | 444 | * @osz: destination buffer size |
444 | * @flags: combination of the flags (bitwise OR): | 445 | * @flags: combination of the flags |
446 | * @only: NULL-terminated string containing characters used to limit | ||
447 | * the selected escape class. If characters are included in @only | ||
448 | * that would not normally be escaped by the classes selected | ||
449 | * in @flags, they will be copied to @dst unescaped. | ||
450 | * | ||
451 | * Description: | ||
452 | * The process of escaping byte buffer includes several parts. They are applied | ||
453 | * in the following sequence. | ||
454 | * | ||
455 | * 1. The character is matched to the printable class, if asked, and in | ||
456 | * case of match it passes through to the output. | ||
457 | * 2. The character is not matched to the one from @only string and thus | ||
458 | * must go as-is to the output. | ||
459 | * 3. The character is checked if it falls into the class given by @flags. | ||
460 | * %ESCAPE_OCTAL and %ESCAPE_HEX are going last since they cover any | ||
461 | * character. Note that they actually can't go together, otherwise | ||
462 | * %ESCAPE_HEX will be ignored. | ||
463 | * | ||
464 | * Caller must provide valid source and destination pointers. Be aware that | ||
465 | * destination buffer will not be NULL-terminated, thus caller have to append | ||
466 | * it if needs. The supported flags are:: | ||
467 | * | ||
445 | * %ESCAPE_SPACE: (special white space, not space itself) | 468 | * %ESCAPE_SPACE: (special white space, not space itself) |
446 | * '\f' - form feed | 469 | * '\f' - form feed |
447 | * '\n' - new line | 470 | * '\n' - new line |
@@ -464,26 +487,6 @@ static bool escape_hex(unsigned char c, char **dst, char *end) | |||
464 | * all previous together | 487 | * all previous together |
465 | * %ESCAPE_HEX: | 488 | * %ESCAPE_HEX: |
466 | * '\xHH' - byte with hexadecimal value HH (2 digits) | 489 | * '\xHH' - byte with hexadecimal value HH (2 digits) |
467 | * @only: NULL-terminated string containing characters used to limit | ||
468 | * the selected escape class. If characters are included in @only | ||
469 | * that would not normally be escaped by the classes selected | ||
470 | * in @flags, they will be copied to @dst unescaped. | ||
471 | * | ||
472 | * Description: | ||
473 | * The process of escaping byte buffer includes several parts. They are applied | ||
474 | * in the following sequence. | ||
475 | * 1. The character is matched to the printable class, if asked, and in | ||
476 | * case of match it passes through to the output. | ||
477 | * 2. The character is not matched to the one from @only string and thus | ||
478 | * must go as-is to the output. | ||
479 | * 3. The character is checked if it falls into the class given by @flags. | ||
480 | * %ESCAPE_OCTAL and %ESCAPE_HEX are going last since they cover any | ||
481 | * character. Note that they actually can't go together, otherwise | ||
482 | * %ESCAPE_HEX will be ignored. | ||
483 | * | ||
484 | * Caller must provide valid source and destination pointers. Be aware that | ||
485 | * destination buffer will not be NULL-terminated, thus caller have to append | ||
486 | * it if needs. | ||
487 | * | 490 | * |
488 | * Return: | 491 | * Return: |
489 | * The total size of the escaped output that would be generated for | 492 | * The total size of the escaped output that would be generated for |
diff --git a/lib/test_meminit.c b/lib/test_meminit.c new file mode 100644 index 000000000000..62d19f270cad --- /dev/null +++ b/lib/test_meminit.c | |||
@@ -0,0 +1,364 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * Test cases for SL[AOU]B/page initialization at alloc/free time. | ||
4 | */ | ||
5 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
6 | |||
7 | #include <linux/init.h> | ||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/mm.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/slab.h> | ||
12 | #include <linux/string.h> | ||
13 | #include <linux/vmalloc.h> | ||
14 | |||
#define GARBAGE_INT (0x09A7BA9E)
#define GARBAGE_BYTE (0x9E)

/*
 * Print a per-function summary; relies on |failures| and |num_tests|
 * being in scope at the expansion site.
 */
#define REPORT_FAILURES_IN_FN() \
	do { \
		if (!failures) \
			pr_info("all %d tests in %s passed\n", \
				num_tests, __func__); \
		else \
			pr_info("%s failed %d out of %d times\n", \
				__func__, failures, num_tests); \
	} while (0)
27 | |||
28 | /* Calculate the number of uninitialized bytes in the buffer. */ | ||
29 | static int __init count_nonzero_bytes(void *ptr, size_t size) | ||
30 | { | ||
31 | int i, ret = 0; | ||
32 | unsigned char *p = (unsigned char *)ptr; | ||
33 | |||
34 | for (i = 0; i < size; i++) | ||
35 | if (p[i]) | ||
36 | ret++; | ||
37 | return ret; | ||
38 | } | ||
39 | |||
40 | /* Fill a buffer with garbage, skipping |skip| first bytes. */ | ||
41 | static void __init fill_with_garbage_skip(void *ptr, int size, size_t skip) | ||
42 | { | ||
43 | unsigned int *p = (unsigned int *)((char *)ptr + skip); | ||
44 | int i = 0; | ||
45 | |||
46 | WARN_ON(skip > size); | ||
47 | size -= skip; | ||
48 | |||
49 | while (size >= sizeof(*p)) { | ||
50 | p[i] = GARBAGE_INT; | ||
51 | i++; | ||
52 | size -= sizeof(*p); | ||
53 | } | ||
54 | if (size) | ||
55 | memset(&p[i], GARBAGE_BYTE, size); | ||
56 | } | ||
57 | |||
58 | static void __init fill_with_garbage(void *ptr, size_t size) | ||
59 | { | ||
60 | fill_with_garbage_skip(ptr, size, 0); | ||
61 | } | ||
62 | |||
63 | static int __init do_alloc_pages_order(int order, int *total_failures) | ||
64 | { | ||
65 | struct page *page; | ||
66 | void *buf; | ||
67 | size_t size = PAGE_SIZE << order; | ||
68 | |||
69 | page = alloc_pages(GFP_KERNEL, order); | ||
70 | buf = page_address(page); | ||
71 | fill_with_garbage(buf, size); | ||
72 | __free_pages(page, order); | ||
73 | |||
74 | page = alloc_pages(GFP_KERNEL, order); | ||
75 | buf = page_address(page); | ||
76 | if (count_nonzero_bytes(buf, size)) | ||
77 | (*total_failures)++; | ||
78 | fill_with_garbage(buf, size); | ||
79 | __free_pages(page, order); | ||
80 | return 1; | ||
81 | } | ||
82 | |||
83 | /* Test the page allocator by calling alloc_pages with different orders. */ | ||
84 | static int __init test_pages(int *total_failures) | ||
85 | { | ||
86 | int failures = 0, num_tests = 0; | ||
87 | int i; | ||
88 | |||
89 | for (i = 0; i < 10; i++) | ||
90 | num_tests += do_alloc_pages_order(i, &failures); | ||
91 | |||
92 | REPORT_FAILURES_IN_FN(); | ||
93 | *total_failures += failures; | ||
94 | return num_tests; | ||
95 | } | ||
96 | |||
97 | /* Test kmalloc() with given parameters. */ | ||
98 | static int __init do_kmalloc_size(size_t size, int *total_failures) | ||
99 | { | ||
100 | void *buf; | ||
101 | |||
102 | buf = kmalloc(size, GFP_KERNEL); | ||
103 | fill_with_garbage(buf, size); | ||
104 | kfree(buf); | ||
105 | |||
106 | buf = kmalloc(size, GFP_KERNEL); | ||
107 | if (count_nonzero_bytes(buf, size)) | ||
108 | (*total_failures)++; | ||
109 | fill_with_garbage(buf, size); | ||
110 | kfree(buf); | ||
111 | return 1; | ||
112 | } | ||
113 | |||
114 | /* Test vmalloc() with given parameters. */ | ||
115 | static int __init do_vmalloc_size(size_t size, int *total_failures) | ||
116 | { | ||
117 | void *buf; | ||
118 | |||
119 | buf = vmalloc(size); | ||
120 | fill_with_garbage(buf, size); | ||
121 | vfree(buf); | ||
122 | |||
123 | buf = vmalloc(size); | ||
124 | if (count_nonzero_bytes(buf, size)) | ||
125 | (*total_failures)++; | ||
126 | fill_with_garbage(buf, size); | ||
127 | vfree(buf); | ||
128 | return 1; | ||
129 | } | ||
130 | |||
131 | /* Test kmalloc()/vmalloc() by allocating objects of different sizes. */ | ||
132 | static int __init test_kvmalloc(int *total_failures) | ||
133 | { | ||
134 | int failures = 0, num_tests = 0; | ||
135 | int i, size; | ||
136 | |||
137 | for (i = 0; i < 20; i++) { | ||
138 | size = 1 << i; | ||
139 | num_tests += do_kmalloc_size(size, &failures); | ||
140 | num_tests += do_vmalloc_size(size, &failures); | ||
141 | } | ||
142 | |||
143 | REPORT_FAILURES_IN_FN(); | ||
144 | *total_failures += failures; | ||
145 | return num_tests; | ||
146 | } | ||
147 | |||
#define CTOR_BYTES (sizeof(unsigned int))
#define CTOR_PATTERN (0x41414141)
/* Cache constructor: stamp CTOR_PATTERN into the first CTOR_BYTES of |obj|. */
static void test_ctor(void *obj)
{
	unsigned int *header = obj;

	*header = CTOR_PATTERN;
}
155 | |||
156 | /* | ||
157 | * Check the invariants for the buffer allocated from a slab cache. | ||
158 | * If the cache has a test constructor, the first 4 bytes of the object must | ||
159 | * always remain equal to CTOR_PATTERN. | ||
160 | * If the cache isn't an RCU-typesafe one, or if the allocation is done with | ||
161 | * __GFP_ZERO, then the object contents must be zeroed after allocation. | ||
162 | * If the cache is an RCU-typesafe one, the object contents must never be | ||
163 | * zeroed after the first use. This is checked by memcmp() in | ||
164 | * do_kmem_cache_size(). | ||
165 | */ | ||
166 | static bool __init check_buf(void *buf, int size, bool want_ctor, | ||
167 | bool want_rcu, bool want_zero) | ||
168 | { | ||
169 | int bytes; | ||
170 | bool fail = false; | ||
171 | |||
172 | bytes = count_nonzero_bytes(buf, size); | ||
173 | WARN_ON(want_ctor && want_zero); | ||
174 | if (want_zero) | ||
175 | return bytes; | ||
176 | if (want_ctor) { | ||
177 | if (*(unsigned int *)buf != CTOR_PATTERN) | ||
178 | fail = 1; | ||
179 | } else { | ||
180 | if (bytes) | ||
181 | fail = !want_rcu; | ||
182 | } | ||
183 | return fail; | ||
184 | } | ||
185 | |||
186 | /* | ||
187 | * Test kmem_cache with given parameters: | ||
188 | * want_ctor - use a constructor; | ||
189 | * want_rcu - use SLAB_TYPESAFE_BY_RCU; | ||
190 | * want_zero - use __GFP_ZERO. | ||
191 | */ | ||
192 | static int __init do_kmem_cache_size(size_t size, bool want_ctor, | ||
193 | bool want_rcu, bool want_zero, | ||
194 | int *total_failures) | ||
195 | { | ||
196 | struct kmem_cache *c; | ||
197 | int iter; | ||
198 | bool fail = false; | ||
199 | gfp_t alloc_mask = GFP_KERNEL | (want_zero ? __GFP_ZERO : 0); | ||
200 | void *buf, *buf_copy; | ||
201 | |||
202 | c = kmem_cache_create("test_cache", size, 1, | ||
203 | want_rcu ? SLAB_TYPESAFE_BY_RCU : 0, | ||
204 | want_ctor ? test_ctor : NULL); | ||
205 | for (iter = 0; iter < 10; iter++) { | ||
206 | buf = kmem_cache_alloc(c, alloc_mask); | ||
207 | /* Check that buf is zeroed, if it must be. */ | ||
208 | fail = check_buf(buf, size, want_ctor, want_rcu, want_zero); | ||
209 | fill_with_garbage_skip(buf, size, want_ctor ? CTOR_BYTES : 0); | ||
210 | |||
211 | if (!want_rcu) { | ||
212 | kmem_cache_free(c, buf); | ||
213 | continue; | ||
214 | } | ||
215 | |||
216 | /* | ||
217 | * If this is an RCU cache, use a critical section to ensure we | ||
218 | * can touch objects after they're freed. | ||
219 | */ | ||
220 | rcu_read_lock(); | ||
221 | /* | ||
222 | * Copy the buffer to check that it's not wiped on | ||
223 | * free(). | ||
224 | */ | ||
225 | buf_copy = kmalloc(size, GFP_KERNEL); | ||
226 | if (buf_copy) | ||
227 | memcpy(buf_copy, buf, size); | ||
228 | |||
229 | kmem_cache_free(c, buf); | ||
230 | /* | ||
231 | * Check that |buf| is intact after kmem_cache_free(). | ||
232 | * |want_zero| is false, because we wrote garbage to | ||
233 | * the buffer already. | ||
234 | */ | ||
235 | fail |= check_buf(buf, size, want_ctor, want_rcu, | ||
236 | false); | ||
237 | if (buf_copy) { | ||
238 | fail |= (bool)memcmp(buf, buf_copy, size); | ||
239 | kfree(buf_copy); | ||
240 | } | ||
241 | rcu_read_unlock(); | ||
242 | } | ||
243 | kmem_cache_destroy(c); | ||
244 | |||
245 | *total_failures += fail; | ||
246 | return 1; | ||
247 | } | ||
248 | |||
249 | /* | ||
250 | * Check that the data written to an RCU-allocated object survives | ||
251 | * reallocation. | ||
252 | */ | ||
253 | static int __init do_kmem_cache_rcu_persistent(int size, int *total_failures) | ||
254 | { | ||
255 | struct kmem_cache *c; | ||
256 | void *buf, *buf_contents, *saved_ptr; | ||
257 | void **used_objects; | ||
258 | int i, iter, maxiter = 1024; | ||
259 | bool fail = false; | ||
260 | |||
261 | c = kmem_cache_create("test_cache", size, size, SLAB_TYPESAFE_BY_RCU, | ||
262 | NULL); | ||
263 | buf = kmem_cache_alloc(c, GFP_KERNEL); | ||
264 | saved_ptr = buf; | ||
265 | fill_with_garbage(buf, size); | ||
266 | buf_contents = kmalloc(size, GFP_KERNEL); | ||
267 | if (!buf_contents) | ||
268 | goto out; | ||
269 | used_objects = kmalloc_array(maxiter, sizeof(void *), GFP_KERNEL); | ||
270 | if (!used_objects) { | ||
271 | kfree(buf_contents); | ||
272 | goto out; | ||
273 | } | ||
274 | memcpy(buf_contents, buf, size); | ||
275 | kmem_cache_free(c, buf); | ||
276 | /* | ||
277 | * Run for a fixed number of iterations. If we never hit saved_ptr, | ||
278 | * assume the test passes. | ||
279 | */ | ||
280 | for (iter = 0; iter < maxiter; iter++) { | ||
281 | buf = kmem_cache_alloc(c, GFP_KERNEL); | ||
282 | used_objects[iter] = buf; | ||
283 | if (buf == saved_ptr) { | ||
284 | fail = memcmp(buf_contents, buf, size); | ||
285 | for (i = 0; i <= iter; i++) | ||
286 | kmem_cache_free(c, used_objects[i]); | ||
287 | goto free_out; | ||
288 | } | ||
289 | } | ||
290 | |||
291 | free_out: | ||
292 | kmem_cache_destroy(c); | ||
293 | kfree(buf_contents); | ||
294 | kfree(used_objects); | ||
295 | out: | ||
296 | *total_failures += fail; | ||
297 | return 1; | ||
298 | } | ||
299 | |||
300 | /* | ||
301 | * Test kmem_cache allocation by creating caches of different sizes, with and | ||
302 | * without constructors, with and without SLAB_TYPESAFE_BY_RCU. | ||
303 | */ | ||
304 | static int __init test_kmemcache(int *total_failures) | ||
305 | { | ||
306 | int failures = 0, num_tests = 0; | ||
307 | int i, flags, size; | ||
308 | bool ctor, rcu, zero; | ||
309 | |||
310 | for (i = 0; i < 10; i++) { | ||
311 | size = 8 << i; | ||
312 | for (flags = 0; flags < 8; flags++) { | ||
313 | ctor = flags & 1; | ||
314 | rcu = flags & 2; | ||
315 | zero = flags & 4; | ||
316 | if (ctor & zero) | ||
317 | continue; | ||
318 | num_tests += do_kmem_cache_size(size, ctor, rcu, zero, | ||
319 | &failures); | ||
320 | } | ||
321 | } | ||
322 | REPORT_FAILURES_IN_FN(); | ||
323 | *total_failures += failures; | ||
324 | return num_tests; | ||
325 | } | ||
326 | |||
327 | /* Test the behavior of SLAB_TYPESAFE_BY_RCU caches of different sizes. */ | ||
328 | static int __init test_rcu_persistent(int *total_failures) | ||
329 | { | ||
330 | int failures = 0, num_tests = 0; | ||
331 | int i, size; | ||
332 | |||
333 | for (i = 0; i < 10; i++) { | ||
334 | size = 8 << i; | ||
335 | num_tests += do_kmem_cache_rcu_persistent(size, &failures); | ||
336 | } | ||
337 | REPORT_FAILURES_IN_FN(); | ||
338 | *total_failures += failures; | ||
339 | return num_tests; | ||
340 | } | ||
341 | |||
342 | /* | ||
343 | * Run the tests. Each test function returns the number of executed tests and | ||
344 | * updates |failures| with the number of failed tests. | ||
345 | */ | ||
346 | static int __init test_meminit_init(void) | ||
347 | { | ||
348 | int failures = 0, num_tests = 0; | ||
349 | |||
350 | num_tests += test_pages(&failures); | ||
351 | num_tests += test_kvmalloc(&failures); | ||
352 | num_tests += test_kmemcache(&failures); | ||
353 | num_tests += test_rcu_persistent(&failures); | ||
354 | |||
355 | if (failures == 0) | ||
356 | pr_info("all %d tests passed!\n", num_tests); | ||
357 | else | ||
358 | pr_info("failures: %d out of %d\n", failures, num_tests); | ||
359 | |||
360 | return failures ? -EINVAL : 0; | ||
361 | } | ||
362 | module_init(test_meminit_init); | ||
363 | |||
364 | MODULE_LICENSE("GPL"); | ||
diff --git a/lib/test_overflow.c b/lib/test_overflow.c index fc680562d8b6..7a4b6f6c5473 100644 --- a/lib/test_overflow.c +++ b/lib/test_overflow.c | |||
@@ -486,16 +486,17 @@ static int __init test_overflow_shift(void) | |||
486 | * Deal with the various forms of allocator arguments. See comments above | 486 | * Deal with the various forms of allocator arguments. See comments above |
487 | * the DEFINE_TEST_ALLOC() instances for mapping of the "bits". | 487 | * the DEFINE_TEST_ALLOC() instances for mapping of the "bits". |
488 | */ | 488 | */ |
489 | #define alloc010(alloc, arg, sz) alloc(sz, GFP_KERNEL) | 489 | #define alloc_GFP (GFP_KERNEL | __GFP_NOWARN) |
490 | #define alloc011(alloc, arg, sz) alloc(sz, GFP_KERNEL, NUMA_NO_NODE) | 490 | #define alloc010(alloc, arg, sz) alloc(sz, alloc_GFP) |
491 | #define alloc011(alloc, arg, sz) alloc(sz, alloc_GFP, NUMA_NO_NODE) | ||
491 | #define alloc000(alloc, arg, sz) alloc(sz) | 492 | #define alloc000(alloc, arg, sz) alloc(sz) |
492 | #define alloc001(alloc, arg, sz) alloc(sz, NUMA_NO_NODE) | 493 | #define alloc001(alloc, arg, sz) alloc(sz, NUMA_NO_NODE) |
493 | #define alloc110(alloc, arg, sz) alloc(arg, sz, GFP_KERNEL) | 494 | #define alloc110(alloc, arg, sz) alloc(arg, sz, alloc_GFP) |
494 | #define free0(free, arg, ptr) free(ptr) | 495 | #define free0(free, arg, ptr) free(ptr) |
495 | #define free1(free, arg, ptr) free(arg, ptr) | 496 | #define free1(free, arg, ptr) free(arg, ptr) |
496 | 497 | ||
497 | /* Wrap around to 8K */ | 498 | /* Wrap around to 16K */ |
498 | #define TEST_SIZE (9 << PAGE_SHIFT) | 499 | #define TEST_SIZE (5 * 4096) |
499 | 500 | ||
500 | #define DEFINE_TEST_ALLOC(func, free_func, want_arg, want_gfp, want_node)\ | 501 | #define DEFINE_TEST_ALLOC(func, free_func, want_arg, want_gfp, want_node)\ |
501 | static int __init test_ ## func (void *arg) \ | 502 | static int __init test_ ## func (void *arg) \ |
diff --git a/lib/test_string.c b/lib/test_string.c index bf8def01ed20..7b31f4a505bf 100644 --- a/lib/test_string.c +++ b/lib/test_string.c | |||
@@ -36,7 +36,7 @@ static __init int memset16_selftest(void) | |||
36 | fail: | 36 | fail: |
37 | kfree(p); | 37 | kfree(p); |
38 | if (i < 256) | 38 | if (i < 256) |
39 | return (i << 24) | (j << 16) | k; | 39 | return (i << 24) | (j << 16) | k | 0x8000; |
40 | return 0; | 40 | return 0; |
41 | } | 41 | } |
42 | 42 | ||
@@ -72,7 +72,7 @@ static __init int memset32_selftest(void) | |||
72 | fail: | 72 | fail: |
73 | kfree(p); | 73 | kfree(p); |
74 | if (i < 256) | 74 | if (i < 256) |
75 | return (i << 24) | (j << 16) | k; | 75 | return (i << 24) | (j << 16) | k | 0x8000; |
76 | return 0; | 76 | return 0; |
77 | } | 77 | } |
78 | 78 | ||
@@ -108,7 +108,74 @@ static __init int memset64_selftest(void) | |||
108 | fail: | 108 | fail: |
109 | kfree(p); | 109 | kfree(p); |
110 | if (i < 256) | 110 | if (i < 256) |
111 | return (i << 24) | (j << 16) | k; | 111 | return (i << 24) | (j << 16) | k | 0x8000; |
112 | return 0; | ||
113 | } | ||
114 | |||
115 | static __init int strchr_selftest(void) | ||
116 | { | ||
117 | const char *test_string = "abcdefghijkl"; | ||
118 | const char *empty_string = ""; | ||
119 | char *result; | ||
120 | int i; | ||
121 | |||
122 | for (i = 0; i < strlen(test_string) + 1; i++) { | ||
123 | result = strchr(test_string, test_string[i]); | ||
124 | if (result - test_string != i) | ||
125 | return i + 'a'; | ||
126 | } | ||
127 | |||
128 | result = strchr(empty_string, '\0'); | ||
129 | if (result != empty_string) | ||
130 | return 0x101; | ||
131 | |||
132 | result = strchr(empty_string, 'a'); | ||
133 | if (result) | ||
134 | return 0x102; | ||
135 | |||
136 | result = strchr(test_string, 'z'); | ||
137 | if (result) | ||
138 | return 0x103; | ||
139 | |||
140 | return 0; | ||
141 | } | ||
142 | |||
143 | static __init int strnchr_selftest(void) | ||
144 | { | ||
145 | const char *test_string = "abcdefghijkl"; | ||
146 | const char *empty_string = ""; | ||
147 | char *result; | ||
148 | int i, j; | ||
149 | |||
150 | for (i = 0; i < strlen(test_string) + 1; i++) { | ||
151 | for (j = 0; j < strlen(test_string) + 2; j++) { | ||
152 | result = strnchr(test_string, j, test_string[i]); | ||
153 | if (j <= i) { | ||
154 | if (!result) | ||
155 | continue; | ||
156 | return ((i + 'a') << 8) | j; | ||
157 | } | ||
158 | if (result - test_string != i) | ||
159 | return ((i + 'a') << 8) | j; | ||
160 | } | ||
161 | } | ||
162 | |||
163 | result = strnchr(empty_string, 0, '\0'); | ||
164 | if (result) | ||
165 | return 0x10001; | ||
166 | |||
167 | result = strnchr(empty_string, 1, '\0'); | ||
168 | if (result != empty_string) | ||
169 | return 0x10002; | ||
170 | |||
171 | result = strnchr(empty_string, 1, 'a'); | ||
172 | if (result) | ||
173 | return 0x10003; | ||
174 | |||
175 | result = strnchr(NULL, 0, '\0'); | ||
176 | if (result) | ||
177 | return 0x10004; | ||
178 | |||
112 | return 0; | 179 | return 0; |
113 | } | 180 | } |
114 | 181 | ||
@@ -131,6 +198,16 @@ static __init int string_selftest_init(void) | |||
131 | if (subtest) | 198 | if (subtest) |
132 | goto fail; | 199 | goto fail; |
133 | 200 | ||
201 | test = 4; | ||
202 | subtest = strchr_selftest(); | ||
203 | if (subtest) | ||
204 | goto fail; | ||
205 | |||
206 | test = 5; | ||
207 | subtest = strnchr_selftest(); | ||
208 | if (subtest) | ||
209 | goto fail; | ||
210 | |||
134 | pr_info("String selftests succeeded\n"); | 211 | pr_info("String selftests succeeded\n"); |
135 | return 0; | 212 | return 0; |
136 | fail: | 213 | fail: |
diff --git a/mm/Kconfig b/mm/Kconfig index 495d7368ced8..56cec636a1fc 100644 --- a/mm/Kconfig +++ b/mm/Kconfig | |||
@@ -649,8 +649,7 @@ config IDLE_PAGE_TRACKING | |||
649 | See Documentation/admin-guide/mm/idle_page_tracking.rst for | 649 | See Documentation/admin-guide/mm/idle_page_tracking.rst for |
650 | more details. | 650 | more details. |
651 | 651 | ||
652 | # arch_add_memory() comprehends device memory | 652 | config ARCH_HAS_PTE_DEVMAP |
653 | config ARCH_HAS_ZONE_DEVICE | ||
654 | bool | 653 | bool |
655 | 654 | ||
656 | config ZONE_DEVICE | 655 | config ZONE_DEVICE |
@@ -658,7 +657,7 @@ config ZONE_DEVICE | |||
658 | depends on MEMORY_HOTPLUG | 657 | depends on MEMORY_HOTPLUG |
659 | depends on MEMORY_HOTREMOVE | 658 | depends on MEMORY_HOTREMOVE |
660 | depends on SPARSEMEM_VMEMMAP | 659 | depends on SPARSEMEM_VMEMMAP |
661 | depends on ARCH_HAS_ZONE_DEVICE | 660 | depends on ARCH_HAS_PTE_DEVMAP |
662 | select XARRAY_MULTI | 661 | select XARRAY_MULTI |
663 | 662 | ||
664 | help | 663 | help |
@@ -278,6 +278,12 @@ int __init cma_declare_contiguous(phys_addr_t base, | |||
278 | */ | 278 | */ |
279 | alignment = max(alignment, (phys_addr_t)PAGE_SIZE << | 279 | alignment = max(alignment, (phys_addr_t)PAGE_SIZE << |
280 | max_t(unsigned long, MAX_ORDER - 1, pageblock_order)); | 280 | max_t(unsigned long, MAX_ORDER - 1, pageblock_order)); |
281 | if (fixed && base & (alignment - 1)) { | ||
282 | ret = -EINVAL; | ||
283 | pr_err("Region at %pa must be aligned to %pa bytes\n", | ||
284 | &base, &alignment); | ||
285 | goto err; | ||
286 | } | ||
281 | base = ALIGN(base, alignment); | 287 | base = ALIGN(base, alignment); |
282 | size = ALIGN(size, alignment); | 288 | size = ALIGN(size, alignment); |
283 | limit &= ~(alignment - 1); | 289 | limit &= ~(alignment - 1); |
@@ -308,6 +314,13 @@ int __init cma_declare_contiguous(phys_addr_t base, | |||
308 | if (limit == 0 || limit > memblock_end) | 314 | if (limit == 0 || limit > memblock_end) |
309 | limit = memblock_end; | 315 | limit = memblock_end; |
310 | 316 | ||
317 | if (base + size > limit) { | ||
318 | ret = -EINVAL; | ||
319 | pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n", | ||
320 | &size, &base, &limit); | ||
321 | goto err; | ||
322 | } | ||
323 | |||
311 | /* Reserve memory */ | 324 | /* Reserve memory */ |
312 | if (fixed) { | 325 | if (fixed) { |
313 | if (memblock_is_region_reserved(base, size) || | 326 | if (memblock_is_region_reserved(base, size) || |
@@ -494,7 +507,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, | |||
494 | * @pages: Allocated pages. | 507 | * @pages: Allocated pages. |
495 | * @count: Number of allocated pages. | 508 | * @count: Number of allocated pages. |
496 | * | 509 | * |
497 | * This function releases memory allocated by alloc_cma(). | 510 | * This function releases memory allocated by cma_alloc(). |
498 | * It returns false when provided pages do not belong to contiguous area and | 511 | * It returns false when provided pages do not belong to contiguous area and |
499 | * true otherwise. | 512 | * true otherwise. |
500 | */ | 513 | */ |
@@ -1895,7 +1895,7 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, | |||
1895 | } | 1895 | } |
1896 | #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */ | 1896 | #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */ |
1897 | 1897 | ||
1898 | #if defined(__HAVE_ARCH_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE) | 1898 | #if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE) |
1899 | static int __gup_device_huge(unsigned long pfn, unsigned long addr, | 1899 | static int __gup_device_huge(unsigned long pfn, unsigned long addr, |
1900 | unsigned long end, struct page **pages, int *nr) | 1900 | unsigned long end, struct page **pages, int *nr) |
1901 | { | 1901 | { |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 249671873aa9..cdbb7a84cb6e 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -695,12 +695,15 @@ void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val) | |||
695 | if (mem_cgroup_disabled()) | 695 | if (mem_cgroup_disabled()) |
696 | return; | 696 | return; |
697 | 697 | ||
698 | __this_cpu_add(memcg->vmstats_local->stat[idx], val); | ||
699 | |||
700 | x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]); | 698 | x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]); |
701 | if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) { | 699 | if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) { |
702 | struct mem_cgroup *mi; | 700 | struct mem_cgroup *mi; |
703 | 701 | ||
702 | /* | ||
703 | * Batch local counters to keep them in sync with | ||
704 | * the hierarchical ones. | ||
705 | */ | ||
706 | __this_cpu_add(memcg->vmstats_local->stat[idx], x); | ||
704 | for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) | 707 | for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) |
705 | atomic_long_add(x, &mi->vmstats[idx]); | 708 | atomic_long_add(x, &mi->vmstats[idx]); |
706 | x = 0; | 709 | x = 0; |
@@ -749,13 +752,15 @@ void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, | |||
749 | /* Update memcg */ | 752 | /* Update memcg */ |
750 | __mod_memcg_state(memcg, idx, val); | 753 | __mod_memcg_state(memcg, idx, val); |
751 | 754 | ||
752 | /* Update lruvec */ | ||
753 | __this_cpu_add(pn->lruvec_stat_local->count[idx], val); | ||
754 | |||
755 | x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]); | 755 | x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]); |
756 | if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) { | 756 | if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) { |
757 | struct mem_cgroup_per_node *pi; | 757 | struct mem_cgroup_per_node *pi; |
758 | 758 | ||
759 | /* | ||
760 | * Batch local counters to keep them in sync with | ||
761 | * the hierarchical ones. | ||
762 | */ | ||
763 | __this_cpu_add(pn->lruvec_stat_local->count[idx], x); | ||
759 | for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id)) | 764 | for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id)) |
760 | atomic_long_add(x, &pi->lruvec_stat[idx]); | 765 | atomic_long_add(x, &pi->lruvec_stat[idx]); |
761 | x = 0; | 766 | x = 0; |
@@ -777,12 +782,15 @@ void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx, | |||
777 | if (mem_cgroup_disabled()) | 782 | if (mem_cgroup_disabled()) |
778 | return; | 783 | return; |
779 | 784 | ||
780 | __this_cpu_add(memcg->vmstats_local->events[idx], count); | ||
781 | |||
782 | x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]); | 785 | x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]); |
783 | if (unlikely(x > MEMCG_CHARGE_BATCH)) { | 786 | if (unlikely(x > MEMCG_CHARGE_BATCH)) { |
784 | struct mem_cgroup *mi; | 787 | struct mem_cgroup *mi; |
785 | 788 | ||
789 | /* | ||
790 | * Batch local counters to keep them in sync with | ||
791 | * the hierarchical ones. | ||
792 | */ | ||
793 | __this_cpu_add(memcg->vmstats_local->events[idx], x); | ||
786 | for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) | 794 | for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) |
787 | atomic_long_add(x, &mi->vmevents[idx]); | 795 | atomic_long_add(x, &mi->vmevents[idx]); |
788 | x = 0; | 796 | x = 0; |
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 6166ba5a15f3..4ebe696138e8 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
@@ -1734,9 +1734,10 @@ static int check_memblock_offlined_cb(struct memory_block *mem, void *arg) | |||
1734 | endpa = PFN_PHYS(section_nr_to_pfn(mem->end_section_nr + 1))-1; | 1734 | endpa = PFN_PHYS(section_nr_to_pfn(mem->end_section_nr + 1))-1; |
1735 | pr_warn("removing memory fails, because memory [%pa-%pa] is onlined\n", | 1735 | pr_warn("removing memory fails, because memory [%pa-%pa] is onlined\n", |
1736 | &beginpa, &endpa); | 1736 | &beginpa, &endpa); |
1737 | } | ||
1738 | 1737 | ||
1739 | return ret; | 1738 | return -EBUSY; |
1739 | } | ||
1740 | return 0; | ||
1740 | } | 1741 | } |
1741 | 1742 | ||
1742 | static int check_cpu_on_node(pg_data_t *pgdat) | 1743 | static int check_cpu_on_node(pg_data_t *pgdat) |
@@ -1819,19 +1820,9 @@ static void __release_memory_resource(resource_size_t start, | |||
1819 | } | 1820 | } |
1820 | } | 1821 | } |
1821 | 1822 | ||
1822 | /** | 1823 | static int __ref try_remove_memory(int nid, u64 start, u64 size) |
1823 | * remove_memory | ||
1824 | * @nid: the node ID | ||
1825 | * @start: physical address of the region to remove | ||
1826 | * @size: size of the region to remove | ||
1827 | * | ||
1828 | * NOTE: The caller must call lock_device_hotplug() to serialize hotplug | ||
1829 | * and online/offline operations before this call, as required by | ||
1830 | * try_offline_node(). | ||
1831 | */ | ||
1832 | void __ref __remove_memory(int nid, u64 start, u64 size) | ||
1833 | { | 1824 | { |
1834 | int ret; | 1825 | int rc = 0; |
1835 | 1826 | ||
1836 | BUG_ON(check_hotplug_memory_range(start, size)); | 1827 | BUG_ON(check_hotplug_memory_range(start, size)); |
1837 | 1828 | ||
@@ -1839,13 +1830,13 @@ void __ref __remove_memory(int nid, u64 start, u64 size) | |||
1839 | 1830 | ||
1840 | /* | 1831 | /* |
1841 | * All memory blocks must be offlined before removing memory. Check | 1832 | * All memory blocks must be offlined before removing memory. Check |
1842 | * whether all memory blocks in question are offline and trigger a BUG() | 1833 | * whether all memory blocks in question are offline and return error |
1843 | * if this is not the case. | 1834 | * if this is not the case. |
1844 | */ | 1835 | */ |
1845 | ret = walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1), NULL, | 1836 | rc = walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1), NULL, |
1846 | check_memblock_offlined_cb); | 1837 | check_memblock_offlined_cb); |
1847 | if (ret) | 1838 | if (rc) |
1848 | BUG(); | 1839 | goto done; |
1849 | 1840 | ||
1850 | /* remove memmap entry */ | 1841 | /* remove memmap entry */ |
1851 | firmware_map_remove(start, start + size, "System RAM"); | 1842 | firmware_map_remove(start, start + size, "System RAM"); |
@@ -1857,14 +1848,45 @@ void __ref __remove_memory(int nid, u64 start, u64 size) | |||
1857 | 1848 | ||
1858 | try_offline_node(nid); | 1849 | try_offline_node(nid); |
1859 | 1850 | ||
1851 | done: | ||
1860 | mem_hotplug_done(); | 1852 | mem_hotplug_done(); |
1853 | return rc; | ||
1861 | } | 1854 | } |
1862 | 1855 | ||
1863 | void remove_memory(int nid, u64 start, u64 size) | 1856 | /** |
1857 | * remove_memory | ||
1858 | * @nid: the node ID | ||
1859 | * @start: physical address of the region to remove | ||
1860 | * @size: size of the region to remove | ||
1861 | * | ||
1862 | * NOTE: The caller must call lock_device_hotplug() to serialize hotplug | ||
1863 | * and online/offline operations before this call, as required by | ||
1864 | * try_offline_node(). | ||
1865 | */ | ||
1866 | void __remove_memory(int nid, u64 start, u64 size) | ||
1867 | { | ||
1868 | |||
1869 | /* | ||
1870 | * trigger BUG() is some memory is not offlined prior to calling this | ||
1871 | * function | ||
1872 | */ | ||
1873 | if (try_remove_memory(nid, start, size)) | ||
1874 | BUG(); | ||
1875 | } | ||
1876 | |||
1877 | /* | ||
1878 | * Remove memory if every memory block is offline, otherwise return -EBUSY is | ||
1879 | * some memory is not offline | ||
1880 | */ | ||
1881 | int remove_memory(int nid, u64 start, u64 size) | ||
1864 | { | 1882 | { |
1883 | int rc; | ||
1884 | |||
1865 | lock_device_hotplug(); | 1885 | lock_device_hotplug(); |
1866 | __remove_memory(nid, start, size); | 1886 | rc = try_remove_memory(nid, start, size); |
1867 | unlock_device_hotplug(); | 1887 | unlock_device_hotplug(); |
1888 | |||
1889 | return rc; | ||
1868 | } | 1890 | } |
1869 | EXPORT_SYMBOL_GPL(remove_memory); | 1891 | EXPORT_SYMBOL_GPL(remove_memory); |
1870 | #endif /* CONFIG_MEMORY_HOTREMOVE */ | 1892 | #endif /* CONFIG_MEMORY_HOTREMOVE */ |
diff --git a/mm/nommu.c b/mm/nommu.c index eb3e2e558da1..fed1b6e9c89b 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
@@ -1261,7 +1261,9 @@ unsigned long do_mmap(struct file *file, | |||
1261 | add_nommu_region(region); | 1261 | add_nommu_region(region); |
1262 | 1262 | ||
1263 | /* clear anonymous mappings that don't ask for uninitialized data */ | 1263 | /* clear anonymous mappings that don't ask for uninitialized data */ |
1264 | if (!vma->vm_file && !(flags & MAP_UNINITIALIZED)) | 1264 | if (!vma->vm_file && |
1265 | (!IS_ENABLED(CONFIG_MMAP_ALLOW_UNINITIALIZED) || | ||
1266 | !(flags & MAP_UNINITIALIZED))) | ||
1265 | memset((void *)region->vm_start, 0, | 1267 | memset((void *)region->vm_start, 0, |
1266 | region->vm_end - region->vm_start); | 1268 | region->vm_end - region->vm_start); |
1267 | 1269 | ||
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 8fd7f45a04eb..e515bfcf7f28 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -4102,7 +4102,6 @@ static int | |||
4102 | __perform_reclaim(gfp_t gfp_mask, unsigned int order, | 4102 | __perform_reclaim(gfp_t gfp_mask, unsigned int order, |
4103 | const struct alloc_context *ac) | 4103 | const struct alloc_context *ac) |
4104 | { | 4104 | { |
4105 | struct reclaim_state reclaim_state; | ||
4106 | int progress; | 4105 | int progress; |
4107 | unsigned int noreclaim_flag; | 4106 | unsigned int noreclaim_flag; |
4108 | unsigned long pflags; | 4107 | unsigned long pflags; |
@@ -4114,13 +4113,10 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order, | |||
4114 | psi_memstall_enter(&pflags); | 4113 | psi_memstall_enter(&pflags); |
4115 | fs_reclaim_acquire(gfp_mask); | 4114 | fs_reclaim_acquire(gfp_mask); |
4116 | noreclaim_flag = memalloc_noreclaim_save(); | 4115 | noreclaim_flag = memalloc_noreclaim_save(); |
4117 | reclaim_state.reclaimed_slab = 0; | ||
4118 | current->reclaim_state = &reclaim_state; | ||
4119 | 4116 | ||
4120 | progress = try_to_free_pages(ac->zonelist, order, gfp_mask, | 4117 | progress = try_to_free_pages(ac->zonelist, order, gfp_mask, |
4121 | ac->nodemask); | 4118 | ac->nodemask); |
4122 | 4119 | ||
4123 | current->reclaim_state = NULL; | ||
4124 | memalloc_noreclaim_restore(noreclaim_flag); | 4120 | memalloc_noreclaim_restore(noreclaim_flag); |
4125 | fs_reclaim_release(gfp_mask); | 4121 | fs_reclaim_release(gfp_mask); |
4126 | psi_memstall_leave(&pflags); | 4122 | psi_memstall_leave(&pflags); |
diff --git a/mm/shmem.c b/mm/shmem.c index f4dce9c8670d..99497cb32e71 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -400,7 +400,7 @@ static bool shmem_confirm_swap(struct address_space *mapping, | |||
400 | 400 | ||
401 | static int shmem_huge __read_mostly; | 401 | static int shmem_huge __read_mostly; |
402 | 402 | ||
403 | #if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS) | 403 | #if defined(CONFIG_SYSFS) |
404 | static int shmem_parse_huge(const char *str) | 404 | static int shmem_parse_huge(const char *str) |
405 | { | 405 | { |
406 | if (!strcmp(str, "never")) | 406 | if (!strcmp(str, "never")) |
@@ -417,7 +417,9 @@ static int shmem_parse_huge(const char *str) | |||
417 | return SHMEM_HUGE_FORCE; | 417 | return SHMEM_HUGE_FORCE; |
418 | return -EINVAL; | 418 | return -EINVAL; |
419 | } | 419 | } |
420 | #endif | ||
420 | 421 | ||
422 | #if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS) | ||
421 | static const char *shmem_format_huge(int huge) | 423 | static const char *shmem_format_huge(int huge) |
422 | { | 424 | { |
423 | switch (huge) { | 425 | switch (huge) { |
diff --git a/mm/slab_common.c b/mm/slab_common.c index 6c49dbb3769e..807490fe217a 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c | |||
@@ -1028,7 +1028,8 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, | |||
1028 | } | 1028 | } |
1029 | 1029 | ||
1030 | struct kmem_cache * | 1030 | struct kmem_cache * |
1031 | kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1] __ro_after_init; | 1031 | kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1] __ro_after_init = |
1032 | { /* initialization for https://bugs.llvm.org/show_bug.cgi?id=42570 */ }; | ||
1032 | EXPORT_SYMBOL(kmalloc_caches); | 1033 | EXPORT_SYMBOL(kmalloc_caches); |
1033 | 1034 | ||
1034 | /* | 1035 | /* |
@@ -7,6 +7,7 @@ | |||
7 | #include <linux/err.h> | 7 | #include <linux/err.h> |
8 | #include <linux/sched.h> | 8 | #include <linux/sched.h> |
9 | #include <linux/sched/mm.h> | 9 | #include <linux/sched/mm.h> |
10 | #include <linux/sched/signal.h> | ||
10 | #include <linux/sched/task_stack.h> | 11 | #include <linux/sched/task_stack.h> |
11 | #include <linux/security.h> | 12 | #include <linux/security.h> |
12 | #include <linux/swap.h> | 13 | #include <linux/swap.h> |
@@ -300,6 +301,80 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack) | |||
300 | } | 301 | } |
301 | #endif | 302 | #endif |
302 | 303 | ||
304 | /** | ||
305 | * __account_locked_vm - account locked pages to an mm's locked_vm | ||
306 | * @mm: mm to account against | ||
307 | * @pages: number of pages to account | ||
308 | * @inc: %true if @pages should be considered positive, %false if not | ||
309 | * @task: task used to check RLIMIT_MEMLOCK | ||
310 | * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped | ||
311 | * | ||
312 | * Assumes @task and @mm are valid (i.e. at least one reference on each), and | ||
313 | * that mmap_sem is held as writer. | ||
314 | * | ||
315 | * Return: | ||
316 | * * 0 on success | ||
317 | * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded. | ||
318 | */ | ||
319 | int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc, | ||
320 | struct task_struct *task, bool bypass_rlim) | ||
321 | { | ||
322 | unsigned long locked_vm, limit; | ||
323 | int ret = 0; | ||
324 | |||
325 | lockdep_assert_held_write(&mm->mmap_sem); | ||
326 | |||
327 | locked_vm = mm->locked_vm; | ||
328 | if (inc) { | ||
329 | if (!bypass_rlim) { | ||
330 | limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT; | ||
331 | if (locked_vm + pages > limit) | ||
332 | ret = -ENOMEM; | ||
333 | } | ||
334 | if (!ret) | ||
335 | mm->locked_vm = locked_vm + pages; | ||
336 | } else { | ||
337 | WARN_ON_ONCE(pages > locked_vm); | ||
338 | mm->locked_vm = locked_vm - pages; | ||
339 | } | ||
340 | |||
341 | pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid, | ||
342 | (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT, | ||
343 | locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK), | ||
344 | ret ? " - exceeded" : ""); | ||
345 | |||
346 | return ret; | ||
347 | } | ||
348 | EXPORT_SYMBOL_GPL(__account_locked_vm); | ||
349 | |||
350 | /** | ||
351 | * account_locked_vm - account locked pages to an mm's locked_vm | ||
352 | * @mm: mm to account against, may be NULL | ||
353 | * @pages: number of pages to account | ||
354 | * @inc: %true if @pages should be considered positive, %false if not | ||
355 | * | ||
356 | * Assumes a non-NULL @mm is valid (i.e. at least one reference on it). | ||
357 | * | ||
358 | * Return: | ||
359 | * * 0 on success, or if mm is NULL | ||
360 | * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded. | ||
361 | */ | ||
362 | int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc) | ||
363 | { | ||
364 | int ret; | ||
365 | |||
366 | if (pages == 0 || !mm) | ||
367 | return 0; | ||
368 | |||
369 | down_write(&mm->mmap_sem); | ||
370 | ret = __account_locked_vm(mm, pages, inc, current, | ||
371 | capable(CAP_IPC_LOCK)); | ||
372 | up_write(&mm->mmap_sem); | ||
373 | |||
374 | return ret; | ||
375 | } | ||
376 | EXPORT_SYMBOL_GPL(account_locked_vm); | ||
377 | |||
303 | unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr, | 378 | unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr, |
304 | unsigned long len, unsigned long prot, | 379 | unsigned long len, unsigned long prot, |
305 | unsigned long flag, unsigned long pgoff) | 380 | unsigned long flag, unsigned long pgoff) |
diff --git a/mm/vmscan.c b/mm/vmscan.c index f8e3dcd527b8..44df66a98f2a 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -131,6 +131,9 @@ struct scan_control { | |||
131 | unsigned int file_taken; | 131 | unsigned int file_taken; |
132 | unsigned int taken; | 132 | unsigned int taken; |
133 | } nr; | 133 | } nr; |
134 | |||
135 | /* for recording the reclaimed slab by now */ | ||
136 | struct reclaim_state reclaim_state; | ||
134 | }; | 137 | }; |
135 | 138 | ||
136 | #ifdef ARCH_HAS_PREFETCH | 139 | #ifdef ARCH_HAS_PREFETCH |
@@ -238,6 +241,18 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker) | |||
238 | } | 241 | } |
239 | #endif /* CONFIG_MEMCG_KMEM */ | 242 | #endif /* CONFIG_MEMCG_KMEM */ |
240 | 243 | ||
244 | static void set_task_reclaim_state(struct task_struct *task, | ||
245 | struct reclaim_state *rs) | ||
246 | { | ||
247 | /* Check for an overwrite */ | ||
248 | WARN_ON_ONCE(rs && task->reclaim_state); | ||
249 | |||
250 | /* Check for the nulling of an already-nulled member */ | ||
251 | WARN_ON_ONCE(!rs && !task->reclaim_state); | ||
252 | |||
253 | task->reclaim_state = rs; | ||
254 | } | ||
255 | |||
241 | #ifdef CONFIG_MEMCG | 256 | #ifdef CONFIG_MEMCG |
242 | static bool global_reclaim(struct scan_control *sc) | 257 | static bool global_reclaim(struct scan_control *sc) |
243 | { | 258 | { |
@@ -3191,11 +3206,13 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order, | |||
3191 | if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask)) | 3206 | if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask)) |
3192 | return 1; | 3207 | return 1; |
3193 | 3208 | ||
3209 | set_task_reclaim_state(current, &sc.reclaim_state); | ||
3194 | trace_mm_vmscan_direct_reclaim_begin(order, sc.gfp_mask); | 3210 | trace_mm_vmscan_direct_reclaim_begin(order, sc.gfp_mask); |
3195 | 3211 | ||
3196 | nr_reclaimed = do_try_to_free_pages(zonelist, &sc); | 3212 | nr_reclaimed = do_try_to_free_pages(zonelist, &sc); |
3197 | 3213 | ||
3198 | trace_mm_vmscan_direct_reclaim_end(nr_reclaimed); | 3214 | trace_mm_vmscan_direct_reclaim_end(nr_reclaimed); |
3215 | set_task_reclaim_state(current, NULL); | ||
3199 | 3216 | ||
3200 | return nr_reclaimed; | 3217 | return nr_reclaimed; |
3201 | } | 3218 | } |
@@ -3218,6 +3235,7 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg, | |||
3218 | }; | 3235 | }; |
3219 | unsigned long lru_pages; | 3236 | unsigned long lru_pages; |
3220 | 3237 | ||
3238 | set_task_reclaim_state(current, &sc.reclaim_state); | ||
3221 | sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | | 3239 | sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | |
3222 | (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); | 3240 | (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); |
3223 | 3241 | ||
@@ -3235,7 +3253,9 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg, | |||
3235 | 3253 | ||
3236 | trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); | 3254 | trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); |
3237 | 3255 | ||
3256 | set_task_reclaim_state(current, NULL); | ||
3238 | *nr_scanned = sc.nr_scanned; | 3257 | *nr_scanned = sc.nr_scanned; |
3258 | |||
3239 | return sc.nr_reclaimed; | 3259 | return sc.nr_reclaimed; |
3240 | } | 3260 | } |
3241 | 3261 | ||
@@ -3262,6 +3282,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, | |||
3262 | .may_shrinkslab = 1, | 3282 | .may_shrinkslab = 1, |
3263 | }; | 3283 | }; |
3264 | 3284 | ||
3285 | set_task_reclaim_state(current, &sc.reclaim_state); | ||
3265 | /* | 3286 | /* |
3266 | * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't | 3287 | * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't |
3267 | * take care of from where we get pages. So the node where we start the | 3288 | * take care of from where we get pages. So the node where we start the |
@@ -3282,6 +3303,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, | |||
3282 | psi_memstall_leave(&pflags); | 3303 | psi_memstall_leave(&pflags); |
3283 | 3304 | ||
3284 | trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); | 3305 | trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); |
3306 | set_task_reclaim_state(current, NULL); | ||
3285 | 3307 | ||
3286 | return nr_reclaimed; | 3308 | return nr_reclaimed; |
3287 | } | 3309 | } |
@@ -3483,6 +3505,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx) | |||
3483 | .may_unmap = 1, | 3505 | .may_unmap = 1, |
3484 | }; | 3506 | }; |
3485 | 3507 | ||
3508 | set_task_reclaim_state(current, &sc.reclaim_state); | ||
3486 | psi_memstall_enter(&pflags); | 3509 | psi_memstall_enter(&pflags); |
3487 | __fs_reclaim_acquire(); | 3510 | __fs_reclaim_acquire(); |
3488 | 3511 | ||
@@ -3664,6 +3687,8 @@ out: | |||
3664 | snapshot_refaults(NULL, pgdat); | 3687 | snapshot_refaults(NULL, pgdat); |
3665 | __fs_reclaim_release(); | 3688 | __fs_reclaim_release(); |
3666 | psi_memstall_leave(&pflags); | 3689 | psi_memstall_leave(&pflags); |
3690 | set_task_reclaim_state(current, NULL); | ||
3691 | |||
3667 | /* | 3692 | /* |
3668 | * Return the order kswapd stopped reclaiming at as | 3693 | * Return the order kswapd stopped reclaiming at as |
3669 | * prepare_kswapd_sleep() takes it into account. If another caller | 3694 | * prepare_kswapd_sleep() takes it into account. If another caller |
@@ -3787,15 +3812,10 @@ static int kswapd(void *p) | |||
3787 | unsigned int classzone_idx = MAX_NR_ZONES - 1; | 3812 | unsigned int classzone_idx = MAX_NR_ZONES - 1; |
3788 | pg_data_t *pgdat = (pg_data_t*)p; | 3813 | pg_data_t *pgdat = (pg_data_t*)p; |
3789 | struct task_struct *tsk = current; | 3814 | struct task_struct *tsk = current; |
3790 | |||
3791 | struct reclaim_state reclaim_state = { | ||
3792 | .reclaimed_slab = 0, | ||
3793 | }; | ||
3794 | const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); | 3815 | const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); |
3795 | 3816 | ||
3796 | if (!cpumask_empty(cpumask)) | 3817 | if (!cpumask_empty(cpumask)) |
3797 | set_cpus_allowed_ptr(tsk, cpumask); | 3818 | set_cpus_allowed_ptr(tsk, cpumask); |
3798 | current->reclaim_state = &reclaim_state; | ||
3799 | 3819 | ||
3800 | /* | 3820 | /* |
3801 | * Tell the memory management that we're a "memory allocator", | 3821 | * Tell the memory management that we're a "memory allocator", |
@@ -3857,7 +3877,6 @@ kswapd_try_sleep: | |||
3857 | } | 3877 | } |
3858 | 3878 | ||
3859 | tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD); | 3879 | tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD); |
3860 | current->reclaim_state = NULL; | ||
3861 | 3880 | ||
3862 | return 0; | 3881 | return 0; |
3863 | } | 3882 | } |
@@ -3922,7 +3941,6 @@ void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order, | |||
3922 | */ | 3941 | */ |
3923 | unsigned long shrink_all_memory(unsigned long nr_to_reclaim) | 3942 | unsigned long shrink_all_memory(unsigned long nr_to_reclaim) |
3924 | { | 3943 | { |
3925 | struct reclaim_state reclaim_state; | ||
3926 | struct scan_control sc = { | 3944 | struct scan_control sc = { |
3927 | .nr_to_reclaim = nr_to_reclaim, | 3945 | .nr_to_reclaim = nr_to_reclaim, |
3928 | .gfp_mask = GFP_HIGHUSER_MOVABLE, | 3946 | .gfp_mask = GFP_HIGHUSER_MOVABLE, |
@@ -3934,18 +3952,16 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim) | |||
3934 | .hibernation_mode = 1, | 3952 | .hibernation_mode = 1, |
3935 | }; | 3953 | }; |
3936 | struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); | 3954 | struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); |
3937 | struct task_struct *p = current; | ||
3938 | unsigned long nr_reclaimed; | 3955 | unsigned long nr_reclaimed; |
3939 | unsigned int noreclaim_flag; | 3956 | unsigned int noreclaim_flag; |
3940 | 3957 | ||
3941 | fs_reclaim_acquire(sc.gfp_mask); | 3958 | fs_reclaim_acquire(sc.gfp_mask); |
3942 | noreclaim_flag = memalloc_noreclaim_save(); | 3959 | noreclaim_flag = memalloc_noreclaim_save(); |
3943 | reclaim_state.reclaimed_slab = 0; | 3960 | set_task_reclaim_state(current, &sc.reclaim_state); |
3944 | p->reclaim_state = &reclaim_state; | ||
3945 | 3961 | ||
3946 | nr_reclaimed = do_try_to_free_pages(zonelist, &sc); | 3962 | nr_reclaimed = do_try_to_free_pages(zonelist, &sc); |
3947 | 3963 | ||
3948 | p->reclaim_state = NULL; | 3964 | set_task_reclaim_state(current, NULL); |
3949 | memalloc_noreclaim_restore(noreclaim_flag); | 3965 | memalloc_noreclaim_restore(noreclaim_flag); |
3950 | fs_reclaim_release(sc.gfp_mask); | 3966 | fs_reclaim_release(sc.gfp_mask); |
3951 | 3967 | ||
@@ -4110,7 +4126,6 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in | |||
4110 | /* Minimum pages needed in order to stay on node */ | 4126 | /* Minimum pages needed in order to stay on node */ |
4111 | const unsigned long nr_pages = 1 << order; | 4127 | const unsigned long nr_pages = 1 << order; |
4112 | struct task_struct *p = current; | 4128 | struct task_struct *p = current; |
4113 | struct reclaim_state reclaim_state; | ||
4114 | unsigned int noreclaim_flag; | 4129 | unsigned int noreclaim_flag; |
4115 | struct scan_control sc = { | 4130 | struct scan_control sc = { |
4116 | .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX), | 4131 | .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX), |
@@ -4135,8 +4150,7 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in | |||
4135 | */ | 4150 | */ |
4136 | noreclaim_flag = memalloc_noreclaim_save(); | 4151 | noreclaim_flag = memalloc_noreclaim_save(); |
4137 | p->flags |= PF_SWAPWRITE; | 4152 | p->flags |= PF_SWAPWRITE; |
4138 | reclaim_state.reclaimed_slab = 0; | 4153 | set_task_reclaim_state(p, &sc.reclaim_state); |
4139 | p->reclaim_state = &reclaim_state; | ||
4140 | 4154 | ||
4141 | if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages) { | 4155 | if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages) { |
4142 | /* | 4156 | /* |
@@ -4148,7 +4162,7 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in | |||
4148 | } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0); | 4162 | } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0); |
4149 | } | 4163 | } |
4150 | 4164 | ||
4151 | p->reclaim_state = NULL; | 4165 | set_task_reclaim_state(p, NULL); |
4152 | current->flags &= ~PF_SWAPWRITE; | 4166 | current->flags &= ~PF_SWAPWRITE; |
4153 | memalloc_noreclaim_restore(noreclaim_flag); | 4167 | memalloc_noreclaim_restore(noreclaim_flag); |
4154 | fs_reclaim_release(sc.gfp_mask); | 4168 | fs_reclaim_release(sc.gfp_mask); |
diff --git a/mm/z3fold.c b/mm/z3fold.c index dfcd69d08c1e..6c72b18d8b9c 100644 --- a/mm/z3fold.c +++ b/mm/z3fold.c | |||
@@ -101,6 +101,7 @@ struct z3fold_buddy_slots { | |||
101 | * @refcount: reference count for the z3fold page | 101 | * @refcount: reference count for the z3fold page |
102 | * @work: work_struct for page layout optimization | 102 | * @work: work_struct for page layout optimization |
103 | * @slots: pointer to the structure holding buddy slots | 103 | * @slots: pointer to the structure holding buddy slots |
104 | * @pool: pointer to the containing pool | ||
104 | * @cpu: CPU which this page "belongs" to | 105 | * @cpu: CPU which this page "belongs" to |
105 | * @first_chunks: the size of the first buddy in chunks, 0 if free | 106 | * @first_chunks: the size of the first buddy in chunks, 0 if free |
106 | * @middle_chunks: the size of the middle buddy in chunks, 0 if free | 107 | * @middle_chunks: the size of the middle buddy in chunks, 0 if free |
@@ -114,6 +115,7 @@ struct z3fold_header { | |||
114 | struct kref refcount; | 115 | struct kref refcount; |
115 | struct work_struct work; | 116 | struct work_struct work; |
116 | struct z3fold_buddy_slots *slots; | 117 | struct z3fold_buddy_slots *slots; |
118 | struct z3fold_pool *pool; | ||
117 | short cpu; | 119 | short cpu; |
118 | unsigned short first_chunks; | 120 | unsigned short first_chunks; |
119 | unsigned short middle_chunks; | 121 | unsigned short middle_chunks; |
@@ -193,8 +195,10 @@ static void compact_page_work(struct work_struct *w); | |||
193 | static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool, | 195 | static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool, |
194 | gfp_t gfp) | 196 | gfp_t gfp) |
195 | { | 197 | { |
196 | struct z3fold_buddy_slots *slots = kmem_cache_alloc(pool->c_handle, | 198 | struct z3fold_buddy_slots *slots; |
197 | gfp); | 199 | |
200 | slots = kmem_cache_alloc(pool->c_handle, | ||
201 | (gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE))); | ||
198 | 202 | ||
199 | if (slots) { | 203 | if (slots) { |
200 | memset(slots->slot, 0, sizeof(slots->slot)); | 204 | memset(slots->slot, 0, sizeof(slots->slot)); |
@@ -320,6 +324,7 @@ static struct z3fold_header *init_z3fold_page(struct page *page, | |||
320 | zhdr->start_middle = 0; | 324 | zhdr->start_middle = 0; |
321 | zhdr->cpu = -1; | 325 | zhdr->cpu = -1; |
322 | zhdr->slots = slots; | 326 | zhdr->slots = slots; |
327 | zhdr->pool = pool; | ||
323 | INIT_LIST_HEAD(&zhdr->buddy); | 328 | INIT_LIST_HEAD(&zhdr->buddy); |
324 | INIT_WORK(&zhdr->work, compact_page_work); | 329 | INIT_WORK(&zhdr->work, compact_page_work); |
325 | return zhdr; | 330 | return zhdr; |
@@ -426,7 +431,7 @@ static enum buddy handle_to_buddy(unsigned long handle) | |||
426 | 431 | ||
427 | static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr) | 432 | static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr) |
428 | { | 433 | { |
429 | return slots_to_pool(zhdr->slots); | 434 | return zhdr->pool; |
430 | } | 435 | } |
431 | 436 | ||
432 | static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked) | 437 | static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked) |
@@ -850,7 +855,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp, | |||
850 | enum buddy bud; | 855 | enum buddy bud; |
851 | bool can_sleep = gfpflags_allow_blocking(gfp); | 856 | bool can_sleep = gfpflags_allow_blocking(gfp); |
852 | 857 | ||
853 | if (!size || (gfp & __GFP_HIGHMEM)) | 858 | if (!size) |
854 | return -EINVAL; | 859 | return -EINVAL; |
855 | 860 | ||
856 | if (size > PAGE_SIZE) | 861 | if (size > PAGE_SIZE) |
@@ -1345,24 +1350,29 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa | |||
1345 | zhdr = page_address(page); | 1350 | zhdr = page_address(page); |
1346 | pool = zhdr_to_pool(zhdr); | 1351 | pool = zhdr_to_pool(zhdr); |
1347 | 1352 | ||
1348 | if (!trylock_page(page)) | ||
1349 | return -EAGAIN; | ||
1350 | |||
1351 | if (!z3fold_page_trylock(zhdr)) { | 1353 | if (!z3fold_page_trylock(zhdr)) { |
1352 | unlock_page(page); | ||
1353 | return -EAGAIN; | 1354 | return -EAGAIN; |
1354 | } | 1355 | } |
1355 | if (zhdr->mapped_count != 0) { | 1356 | if (zhdr->mapped_count != 0) { |
1356 | z3fold_page_unlock(zhdr); | 1357 | z3fold_page_unlock(zhdr); |
1357 | unlock_page(page); | ||
1358 | return -EBUSY; | 1358 | return -EBUSY; |
1359 | } | 1359 | } |
1360 | if (work_pending(&zhdr->work)) { | ||
1361 | z3fold_page_unlock(zhdr); | ||
1362 | return -EAGAIN; | ||
1363 | } | ||
1360 | new_zhdr = page_address(newpage); | 1364 | new_zhdr = page_address(newpage); |
1361 | memcpy(new_zhdr, zhdr, PAGE_SIZE); | 1365 | memcpy(new_zhdr, zhdr, PAGE_SIZE); |
1362 | newpage->private = page->private; | 1366 | newpage->private = page->private; |
1363 | page->private = 0; | 1367 | page->private = 0; |
1364 | z3fold_page_unlock(zhdr); | 1368 | z3fold_page_unlock(zhdr); |
1365 | spin_lock_init(&new_zhdr->page_lock); | 1369 | spin_lock_init(&new_zhdr->page_lock); |
1370 | INIT_WORK(&new_zhdr->work, compact_page_work); | ||
1371 | /* | ||
1372 | * z3fold_page_isolate() ensures that new_zhdr->buddy is empty, | ||
1373 | * so we only have to reinitialize it. | ||
1374 | */ | ||
1375 | INIT_LIST_HEAD(&new_zhdr->buddy); | ||
1366 | new_mapping = page_mapping(page); | 1376 | new_mapping = page_mapping(page); |
1367 | __ClearPageMovable(page); | 1377 | __ClearPageMovable(page); |
1368 | ClearPagePrivate(page); | 1378 | ClearPagePrivate(page); |
@@ -1386,7 +1396,6 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa | |||
1386 | queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work); | 1396 | queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work); |
1387 | 1397 | ||
1388 | page_mapcount_reset(page); | 1398 | page_mapcount_reset(page); |
1389 | unlock_page(page); | ||
1390 | put_page(page); | 1399 | put_page(page); |
1391 | return 0; | 1400 | return 0; |
1392 | } | 1401 | } |
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index a6d436809bf5..93a7edfe0f05 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl | |||
@@ -6639,6 +6639,12 @@ sub process { | |||
6639 | "unknown module license " . $extracted_string . "\n" . $herecurr); | 6639 | "unknown module license " . $extracted_string . "\n" . $herecurr); |
6640 | } | 6640 | } |
6641 | } | 6641 | } |
6642 | |||
6643 | # check for sysctl duplicate constants | ||
6644 | if ($line =~ /\.extra[12]\s*=\s*&(zero|one|int_max)\b/) { | ||
6645 | WARN("DUPLICATED_SYSCTL_CONST", | ||
6646 | "duplicated sysctl range checking value '$1', consider using the shared one in include/linux/sysctl.h\n" . $herecurr); | ||
6647 | } | ||
6642 | } | 6648 | } |
6643 | 6649 | ||
6644 | # If we have no input at all, then there is nothing to report on | 6650 | # If we have no input at all, then there is nothing to report on |
diff --git a/scripts/gdb/linux/device.py b/scripts/gdb/linux/device.py new file mode 100644 index 000000000000..16376c5cfec6 --- /dev/null +++ b/scripts/gdb/linux/device.py | |||
@@ -0,0 +1,182 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
2 | # | ||
3 | # Copyright (c) NXP 2019 | ||
4 | |||
5 | import gdb | ||
6 | |||
7 | from linux.utils import CachedType | ||
8 | from linux.utils import container_of | ||
9 | from linux.lists import list_for_each_entry | ||
10 | |||
11 | |||
12 | device_private_type = CachedType('struct device_private') | ||
13 | device_type = CachedType('struct device') | ||
14 | |||
15 | subsys_private_type = CachedType('struct subsys_private') | ||
16 | kobject_type = CachedType('struct kobject') | ||
17 | kset_type = CachedType('struct kset') | ||
18 | |||
19 | bus_type = CachedType('struct bus_type') | ||
20 | class_type = CachedType('struct class') | ||
21 | |||
22 | |||
23 | def dev_name(dev): | ||
24 | dev_init_name = dev['init_name'] | ||
25 | if dev_init_name: | ||
26 | return dev_init_name.string() | ||
27 | return dev['kobj']['name'].string() | ||
28 | |||
29 | |||
30 | def kset_for_each_object(kset): | ||
31 | return list_for_each_entry(kset['list'], | ||
32 | kobject_type.get_type().pointer(), "entry") | ||
33 | |||
34 | |||
35 | def for_each_bus(): | ||
36 | for kobj in kset_for_each_object(gdb.parse_and_eval('bus_kset')): | ||
37 | subsys = container_of(kobj, kset_type.get_type().pointer(), 'kobj') | ||
38 | subsys_priv = container_of(subsys, subsys_private_type.get_type().pointer(), 'subsys') | ||
39 | yield subsys_priv['bus'] | ||
40 | |||
41 | |||
42 | def for_each_class(): | ||
43 | for kobj in kset_for_each_object(gdb.parse_and_eval('class_kset')): | ||
44 | subsys = container_of(kobj, kset_type.get_type().pointer(), 'kobj') | ||
45 | subsys_priv = container_of(subsys, subsys_private_type.get_type().pointer(), 'subsys') | ||
46 | yield subsys_priv['class'] | ||
47 | |||
48 | |||
49 | def get_bus_by_name(name): | ||
50 | for item in for_each_bus(): | ||
51 | if item['name'].string() == name: | ||
52 | return item | ||
53 | raise gdb.GdbError("Can't find bus type {!r}".format(name)) | ||
54 | |||
55 | |||
56 | def get_class_by_name(name): | ||
57 | for item in for_each_class(): | ||
58 | if item['name'].string() == name: | ||
59 | return item | ||
60 | raise gdb.GdbError("Can't find device class {!r}".format(name)) | ||
61 | |||
62 | |||
63 | klist_type = CachedType('struct klist') | ||
64 | klist_node_type = CachedType('struct klist_node') | ||
65 | |||
66 | |||
67 | def klist_for_each(klist): | ||
68 | return list_for_each_entry(klist['k_list'], | ||
69 | klist_node_type.get_type().pointer(), 'n_node') | ||
70 | |||
71 | |||
72 | def bus_for_each_device(bus): | ||
73 | for kn in klist_for_each(bus['p']['klist_devices']): | ||
74 | dp = container_of(kn, device_private_type.get_type().pointer(), 'knode_bus') | ||
75 | yield dp['device'] | ||
76 | |||
77 | |||
78 | def class_for_each_device(cls): | ||
79 | for kn in klist_for_each(cls['p']['klist_devices']): | ||
80 | dp = container_of(kn, device_private_type.get_type().pointer(), 'knode_class') | ||
81 | yield dp['device'] | ||
82 | |||
83 | |||
84 | def device_for_each_child(dev): | ||
85 | for kn in klist_for_each(dev['p']['klist_children']): | ||
86 | dp = container_of(kn, device_private_type.get_type().pointer(), 'knode_parent') | ||
87 | yield dp['device'] | ||
88 | |||
89 | |||
90 | def _show_device(dev, level=0, recursive=False): | ||
91 | gdb.write('{}dev {}:\t{}\n'.format('\t' * level, dev_name(dev), dev)) | ||
92 | if recursive: | ||
93 | for child in device_for_each_child(dev): | ||
94 | _show_device(child, level + 1, recursive) | ||
95 | |||
96 | |||
97 | class LxDeviceListBus(gdb.Command): | ||
98 | '''Print devices on a bus (or all buses if not specified)''' | ||
99 | |||
100 | def __init__(self): | ||
101 | super(LxDeviceListBus, self).__init__('lx-device-list-bus', gdb.COMMAND_DATA) | ||
102 | |||
103 | def invoke(self, arg, from_tty): | ||
104 | if not arg: | ||
105 | for bus in for_each_bus(): | ||
106 | gdb.write('bus {}:\t{}\n'.format(bus['name'].string(), bus)) | ||
107 | for dev in bus_for_each_device(bus): | ||
108 | _show_device(dev, level=1) | ||
109 | else: | ||
110 | bus = get_bus_by_name(arg) | ||
111 | if not bus: | ||
112 | raise gdb.GdbError("Can't find bus {!r}".format(arg)) | ||
113 | for dev in bus_for_each_device(bus): | ||
114 | _show_device(dev) | ||
115 | |||
116 | |||
117 | class LxDeviceListClass(gdb.Command): | ||
118 | '''Print devices in a class (or all classes if not specified)''' | ||
119 | |||
120 | def __init__(self): | ||
121 | super(LxDeviceListClass, self).__init__('lx-device-list-class', gdb.COMMAND_DATA) | ||
122 | |||
123 | def invoke(self, arg, from_tty): | ||
124 | if not arg: | ||
125 | for cls in for_each_class(): | ||
126 | gdb.write("class {}:\t{}\n".format(cls['name'].string(), cls)) | ||
127 | for dev in class_for_each_device(cls): | ||
128 | _show_device(dev, level=1) | ||
129 | else: | ||
130 | cls = get_class_by_name(arg) | ||
131 | for dev in class_for_each_device(cls): | ||
132 | _show_device(dev) | ||
133 | |||
134 | |||
135 | class LxDeviceListTree(gdb.Command): | ||
136 | '''Print a device and its children recursively''' | ||
137 | |||
138 | def __init__(self): | ||
139 | super(LxDeviceListTree, self).__init__('lx-device-list-tree', gdb.COMMAND_DATA) | ||
140 | |||
141 | def invoke(self, arg, from_tty): | ||
142 | if not arg: | ||
143 | raise gdb.GdbError('Please provide pointer to struct device') | ||
144 | dev = gdb.parse_and_eval(arg) | ||
145 | if dev.type != device_type.get_type().pointer(): | ||
146 | raise gdb.GdbError('Please provide pointer to struct device') | ||
147 | _show_device(dev, level=0, recursive=True) | ||
148 | |||
149 | |||
150 | class LxDeviceFindByBusName(gdb.Function): | ||
151 | '''Find struct device by bus and name (both strings)''' | ||
152 | |||
153 | def __init__(self): | ||
154 | super(LxDeviceFindByBusName, self).__init__('lx_device_find_by_bus_name') | ||
155 | |||
156 | def invoke(self, bus, name): | ||
157 | name = name.string() | ||
158 | bus = get_bus_by_name(bus.string()) | ||
159 | for dev in bus_for_each_device(bus): | ||
160 | if dev_name(dev) == name: | ||
161 | return dev | ||
162 | |||
163 | |||
164 | class LxDeviceFindByClassName(gdb.Function): | ||
165 | '''Find struct device by class and name (both strings)''' | ||
166 | |||
167 | def __init__(self): | ||
168 | super(LxDeviceFindByClassName, self).__init__('lx_device_find_by_class_name') | ||
169 | |||
170 | def invoke(self, cls, name): | ||
171 | name = name.string() | ||
172 | cls = get_class_by_name(cls.string()) | ||
173 | for dev in class_for_each_device(cls): | ||
174 | if dev_name(dev) == name: | ||
175 | return dev | ||
176 | |||
177 | |||
178 | LxDeviceListBus() | ||
179 | LxDeviceListClass() | ||
180 | LxDeviceListTree() | ||
181 | LxDeviceFindByBusName() | ||
182 | LxDeviceFindByClassName() | ||
diff --git a/scripts/gdb/linux/genpd.py b/scripts/gdb/linux/genpd.py new file mode 100644 index 000000000000..6ca93bd2949e --- /dev/null +++ b/scripts/gdb/linux/genpd.py | |||
@@ -0,0 +1,83 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
2 | # | ||
3 | # Copyright (c) NXP 2019 | ||
4 | |||
5 | import gdb | ||
6 | import sys | ||
7 | |||
8 | from linux.utils import CachedType | ||
9 | from linux.lists import list_for_each_entry | ||
10 | |||
11 | generic_pm_domain_type = CachedType('struct generic_pm_domain') | ||
12 | pm_domain_data_type = CachedType('struct pm_domain_data') | ||
13 | device_link_type = CachedType('struct device_link') | ||
14 | |||
15 | |||
16 | def kobject_get_path(kobj): | ||
17 | path = kobj['name'].string() | ||
18 | parent = kobj['parent'] | ||
19 | if parent: | ||
20 | path = kobject_get_path(parent) + '/' + path | ||
21 | return path | ||
22 | |||
23 | |||
24 | def rtpm_status_str(dev): | ||
25 | if dev['power']['runtime_error']: | ||
26 | return 'error' | ||
27 | if dev['power']['disable_depth']: | ||
28 | return 'unsupported' | ||
29 | _RPM_STATUS_LOOKUP = [ | ||
30 | "active", | ||
31 | "resuming", | ||
32 | "suspended", | ||
33 | "suspending" | ||
34 | ] | ||
35 | return _RPM_STATUS_LOOKUP[dev['power']['runtime_status']] | ||
36 | |||
37 | |||
38 | class LxGenPDSummary(gdb.Command): | ||
39 | '''Print genpd summary | ||
40 | |||
41 | Output is similar to /sys/kernel/debug/pm_genpd/pm_genpd_summary''' | ||
42 | |||
43 | def __init__(self): | ||
44 | super(LxGenPDSummary, self).__init__('lx-genpd-summary', gdb.COMMAND_DATA) | ||
45 | |||
46 | def summary_one(self, genpd): | ||
47 | if genpd['status'] == 0: | ||
48 | status_string = 'on' | ||
49 | else: | ||
50 | status_string = 'off-{}'.format(genpd['state_idx']) | ||
51 | |||
52 | slave_names = [] | ||
53 | for link in list_for_each_entry( | ||
54 | genpd['master_links'], | ||
55 | device_link_type.get_type().pointer(), | ||
56 | 'master_node'): | ||
57 | slave_names.append(link['slave']['name'].string()) | ||
58 | |||
59 | gdb.write('%-30s %-15s %s\n' % ( | ||
60 | genpd['name'].string(), | ||
61 | status_string, | ||
62 | ', '.join(slave_names))) | ||
63 | |||
64 | # Print devices in domain | ||
65 | for pm_data in list_for_each_entry(genpd['dev_list'], | ||
66 | pm_domain_data_type.get_type().pointer(), | ||
67 | 'list_node'): | ||
68 | dev = pm_data['dev'] | ||
69 | kobj_path = kobject_get_path(dev['kobj']) | ||
70 | gdb.write(' %-50s %s\n' % (kobj_path, rtpm_status_str(dev))) | ||
71 | |||
72 | def invoke(self, arg, from_tty): | ||
73 | gdb.write('domain status slaves\n') | ||
74 | gdb.write(' /device runtime status\n') | ||
75 | gdb.write('----------------------------------------------------------------------\n') | ||
76 | for genpd in list_for_each_entry( | ||
77 | gdb.parse_and_eval('&gpd_list'), | ||
78 | generic_pm_domain_type.get_type().pointer(), | ||
79 | 'gpd_list_node'): | ||
80 | self.summary_one(genpd) | ||
81 | |||
82 | |||
83 | LxGenPDSummary() | ||
diff --git a/scripts/gdb/vmlinux-gdb.py b/scripts/gdb/vmlinux-gdb.py index eff5a48ac026..4136dc2c59df 100644 --- a/scripts/gdb/vmlinux-gdb.py +++ b/scripts/gdb/vmlinux-gdb.py | |||
@@ -35,3 +35,5 @@ else: | |||
35 | import linux.constants | 35 | import linux.constants |
36 | import linux.timerlist | 36 | import linux.timerlist |
37 | import linux.clk | 37 | import linux.clk |
38 | import linux.genpd | ||
39 | import linux.device | ||
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl index c1c088ef1420..5ef59214c555 100755 --- a/scripts/get_maintainer.pl +++ b/scripts/get_maintainer.pl | |||
@@ -27,6 +27,7 @@ my $email_usename = 1; | |||
27 | my $email_maintainer = 1; | 27 | my $email_maintainer = 1; |
28 | my $email_reviewer = 1; | 28 | my $email_reviewer = 1; |
29 | my $email_list = 1; | 29 | my $email_list = 1; |
30 | my $email_moderated_list = 1; | ||
30 | my $email_subscriber_list = 0; | 31 | my $email_subscriber_list = 0; |
31 | my $email_git_penguin_chiefs = 0; | 32 | my $email_git_penguin_chiefs = 0; |
32 | my $email_git = 0; | 33 | my $email_git = 0; |
@@ -248,6 +249,7 @@ if (!GetOptions( | |||
248 | 'r!' => \$email_reviewer, | 249 | 'r!' => \$email_reviewer, |
249 | 'n!' => \$email_usename, | 250 | 'n!' => \$email_usename, |
250 | 'l!' => \$email_list, | 251 | 'l!' => \$email_list, |
252 | 'moderated!' => \$email_moderated_list, | ||
251 | 's!' => \$email_subscriber_list, | 253 | 's!' => \$email_subscriber_list, |
252 | 'multiline!' => \$output_multiline, | 254 | 'multiline!' => \$output_multiline, |
253 | 'roles!' => \$output_roles, | 255 | 'roles!' => \$output_roles, |
@@ -1023,7 +1025,8 @@ MAINTAINER field selection options: | |||
1023 | --r => include reviewer(s) if any | 1025 | --r => include reviewer(s) if any |
1024 | --n => include name 'Full Name <addr\@domain.tld>' | 1026 | --n => include name 'Full Name <addr\@domain.tld>' |
1025 | --l => include list(s) if any | 1027 | --l => include list(s) if any |
1026 | --s => include subscriber only list(s) if any | 1028 | --moderated => include moderated list(s) if any (default: true) |
1029 | --s => include subscriber only list(s) if any (default: false) | ||
1027 | --remove-duplicates => minimize duplicate email names/addresses | 1030 | --remove-duplicates => minimize duplicate email names/addresses |
1028 | --roles => show roles (status:subsystem, git-signer, list, etc...) | 1031 | --roles => show roles (status:subsystem, git-signer, list, etc...) |
1029 | --rolestats => show roles and statistics (commits/total_commits, %) | 1032 | --rolestats => show roles and statistics (commits/total_commits, %) |
@@ -1313,11 +1316,14 @@ sub add_categories { | |||
1313 | } else { | 1316 | } else { |
1314 | if ($email_list) { | 1317 | if ($email_list) { |
1315 | if (!$hash_list_to{lc($list_address)}) { | 1318 | if (!$hash_list_to{lc($list_address)}) { |
1316 | $hash_list_to{lc($list_address)} = 1; | ||
1317 | if ($list_additional =~ m/moderated/) { | 1319 | if ($list_additional =~ m/moderated/) { |
1318 | push(@list_to, [$list_address, | 1320 | if ($email_moderated_list) { |
1319 | "moderated list${list_role}"]); | 1321 | $hash_list_to{lc($list_address)} = 1; |
1322 | push(@list_to, [$list_address, | ||
1323 | "moderated list${list_role}"]); | ||
1324 | } | ||
1320 | } else { | 1325 | } else { |
1326 | $hash_list_to{lc($list_address)} = 1; | ||
1321 | push(@list_to, [$list_address, | 1327 | push(@list_to, [$list_address, |
1322 | "open list${list_role}"]); | 1328 | "open list${list_role}"]); |
1323 | } | 1329 | } |
diff --git a/sound/soc/qcom/qdsp6/q6asm.c b/sound/soc/qcom/qdsp6/q6asm.c index 4f85cb19a309..e8141a33a55e 100644 --- a/sound/soc/qcom/qdsp6/q6asm.c +++ b/sound/soc/qcom/qdsp6/q6asm.c | |||
@@ -1194,7 +1194,7 @@ EXPORT_SYMBOL_GPL(q6asm_open_read); | |||
1194 | * q6asm_write_async() - non blocking write | 1194 | * q6asm_write_async() - non blocking write |
1195 | * | 1195 | * |
1196 | * @ac: audio client pointer | 1196 | * @ac: audio client pointer |
1197 | * @len: lenght in bytes | 1197 | * @len: length in bytes |
1198 | * @msw_ts: timestamp msw | 1198 | * @msw_ts: timestamp msw |
1199 | * @lsw_ts: timestamp lsw | 1199 | * @lsw_ts: timestamp lsw |
1200 | * @wflags: flags associated with write | 1200 | * @wflags: flags associated with write |
diff --git a/tools/testing/selftests/proc/.gitignore b/tools/testing/selftests/proc/.gitignore index 444ad39d3700..66fab4c58ed4 100644 --- a/tools/testing/selftests/proc/.gitignore +++ b/tools/testing/selftests/proc/.gitignore | |||
@@ -12,4 +12,5 @@ | |||
12 | /read | 12 | /read |
13 | /self | 13 | /self |
14 | /setns-dcache | 14 | /setns-dcache |
15 | /setns-sysvipc | ||
15 | /thread-self | 16 | /thread-self |
diff --git a/tools/testing/selftests/proc/Makefile b/tools/testing/selftests/proc/Makefile index 9f09fcd09ea3..a8ed0f684829 100644 --- a/tools/testing/selftests/proc/Makefile +++ b/tools/testing/selftests/proc/Makefile | |||
@@ -17,6 +17,7 @@ TEST_GEN_PROGS += proc-uptime-002 | |||
17 | TEST_GEN_PROGS += read | 17 | TEST_GEN_PROGS += read |
18 | TEST_GEN_PROGS += self | 18 | TEST_GEN_PROGS += self |
19 | TEST_GEN_PROGS += setns-dcache | 19 | TEST_GEN_PROGS += setns-dcache |
20 | TEST_GEN_PROGS += setns-sysvipc | ||
20 | TEST_GEN_PROGS += thread-self | 21 | TEST_GEN_PROGS += thread-self |
21 | 22 | ||
22 | include ../lib.mk | 23 | include ../lib.mk |
diff --git a/tools/testing/selftests/proc/proc-pid-vm.c b/tools/testing/selftests/proc/proc-pid-vm.c index 853aa164a401..18a3bde8bc96 100644 --- a/tools/testing/selftests/proc/proc-pid-vm.c +++ b/tools/testing/selftests/proc/proc-pid-vm.c | |||
@@ -215,6 +215,11 @@ static const char str_vsyscall[] = | |||
215 | "ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall]\n"; | 215 | "ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall]\n"; |
216 | 216 | ||
217 | #ifdef __x86_64__ | 217 | #ifdef __x86_64__ |
218 | static void sigaction_SIGSEGV(int _, siginfo_t *__, void *___) | ||
219 | { | ||
220 | _exit(1); | ||
221 | } | ||
222 | |||
218 | /* | 223 | /* |
219 | * vsyscall page can't be unmapped, probe it with memory load. | 224 | * vsyscall page can't be unmapped, probe it with memory load. |
220 | */ | 225 | */ |
@@ -231,11 +236,19 @@ static void vsyscall(void) | |||
231 | if (pid == 0) { | 236 | if (pid == 0) { |
232 | struct rlimit rlim = {0, 0}; | 237 | struct rlimit rlim = {0, 0}; |
233 | (void)setrlimit(RLIMIT_CORE, &rlim); | 238 | (void)setrlimit(RLIMIT_CORE, &rlim); |
239 | |||
240 | /* Hide "segfault at ffffffffff600000" messages. */ | ||
241 | struct sigaction act; | ||
242 | memset(&act, 0, sizeof(struct sigaction)); | ||
243 | act.sa_flags = SA_SIGINFO; | ||
244 | act.sa_sigaction = sigaction_SIGSEGV; | ||
245 | (void)sigaction(SIGSEGV, &act, NULL); | ||
246 | |||
234 | *(volatile int *)0xffffffffff600000UL; | 247 | *(volatile int *)0xffffffffff600000UL; |
235 | exit(0); | 248 | exit(0); |
236 | } | 249 | } |
237 | wait(&wstatus); | 250 | waitpid(pid, &wstatus, 0); |
238 | if (WIFEXITED(wstatus)) { | 251 | if (WIFEXITED(wstatus) && WEXITSTATUS(wstatus) == 0) { |
239 | g_vsyscall = true; | 252 | g_vsyscall = true; |
240 | } | 253 | } |
241 | } | 254 | } |
diff --git a/tools/testing/selftests/proc/setns-sysvipc.c b/tools/testing/selftests/proc/setns-sysvipc.c new file mode 100644 index 000000000000..903890c5e587 --- /dev/null +++ b/tools/testing/selftests/proc/setns-sysvipc.c | |||
@@ -0,0 +1,133 @@ | |||
1 | /* | ||
2 | * Copyright © 2019 Alexey Dobriyan <adobriyan@gmail.com> | ||
3 | * | ||
4 | * Permission to use, copy, modify, and distribute this software for any | ||
5 | * purpose with or without fee is hereby granted, provided that the above | ||
6 | * copyright notice and this permission notice appear in all copies. | ||
7 | * | ||
8 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
9 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
10 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
11 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
12 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
13 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
14 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
15 | */ | ||
16 | /* | ||
17 | * Test that setns(CLONE_NEWIPC) points to new /proc/sysvipc content even | ||
18 | * if old one is in dcache. | ||
19 | */ | ||
20 | #undef NDEBUG | ||
21 | #include <assert.h> | ||
22 | #include <errno.h> | ||
23 | #include <stdio.h> | ||
24 | #include <sched.h> | ||
25 | #include <signal.h> | ||
26 | #include <stdlib.h> | ||
27 | #include <string.h> | ||
28 | #include <unistd.h> | ||
29 | #include <sys/types.h> | ||
30 | #include <sys/stat.h> | ||
31 | #include <fcntl.h> | ||
32 | #include <sys/ipc.h> | ||
33 | #include <sys/shm.h> | ||
34 | |||
35 | static pid_t pid = -1; | ||
36 | |||
37 | static void f(void) | ||
38 | { | ||
39 | if (pid > 0) { | ||
40 | kill(pid, SIGTERM); | ||
41 | } | ||
42 | } | ||
43 | |||
44 | int main(void) | ||
45 | { | ||
46 | int fd[2]; | ||
47 | char _ = 0; | ||
48 | int nsfd; | ||
49 | |||
50 | atexit(f); | ||
51 | |||
52 | /* Check for privileges and syscall availability straight away. */ | ||
53 | if (unshare(CLONE_NEWIPC) == -1) { | ||
54 | if (errno == ENOSYS || errno == EPERM) { | ||
55 | return 4; | ||
56 | } | ||
57 | return 1; | ||
58 | } | ||
59 | /* Distinguisher between two otherwise empty IPC namespaces. */ | ||
60 | if (shmget(IPC_PRIVATE, 1, IPC_CREAT) == -1) { | ||
61 | return 1; | ||
62 | } | ||
63 | |||
64 | if (pipe(fd) == -1) { | ||
65 | return 1; | ||
66 | } | ||
67 | |||
68 | pid = fork(); | ||
69 | if (pid == -1) { | ||
70 | return 1; | ||
71 | } | ||
72 | |||
73 | if (pid == 0) { | ||
74 | if (unshare(CLONE_NEWIPC) == -1) { | ||
75 | return 1; | ||
76 | } | ||
77 | |||
78 | if (write(fd[1], &_, 1) != 1) { | ||
79 | return 1; | ||
80 | } | ||
81 | |||
82 | pause(); | ||
83 | |||
84 | return 0; | ||
85 | } | ||
86 | |||
87 | if (read(fd[0], &_, 1) != 1) { | ||
88 | return 1; | ||
89 | } | ||
90 | |||
91 | { | ||
92 | char buf[64]; | ||
93 | snprintf(buf, sizeof(buf), "/proc/%u/ns/ipc", pid); | ||
94 | nsfd = open(buf, O_RDONLY); | ||
95 | if (nsfd == -1) { | ||
96 | return 1; | ||
97 | } | ||
98 | } | ||
99 | |||
100 | /* Reliably pin dentry into dcache. */ | ||
101 | (void)open("/proc/sysvipc/shm", O_RDONLY); | ||
102 | |||
103 | if (setns(nsfd, CLONE_NEWIPC) == -1) { | ||
104 | return 1; | ||
105 | } | ||
106 | |||
107 | kill(pid, SIGTERM); | ||
108 | pid = 0; | ||
109 | |||
110 | { | ||
111 | char buf[4096]; | ||
112 | ssize_t rv; | ||
113 | int fd; | ||
114 | |||
115 | fd = open("/proc/sysvipc/shm", O_RDONLY); | ||
116 | if (fd == -1) { | ||
117 | return 1; | ||
118 | } | ||
119 | |||
120 | #define S32 " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n" | ||
121 | #define S64 " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n" | ||
122 | rv = read(fd, buf, sizeof(buf)); | ||
123 | if (rv == strlen(S32)) { | ||
124 | assert(memcmp(buf, S32, strlen(S32)) == 0); | ||
125 | } else if (rv == strlen(S64)) { | ||
126 | assert(memcmp(buf, S64, strlen(S64)) == 0); | ||
127 | } else { | ||
128 | assert(0); | ||
129 | } | ||
130 | } | ||
131 | |||
132 | return 0; | ||
133 | } | ||
diff --git a/tools/testing/selftests/ptrace/.gitignore b/tools/testing/selftests/ptrace/.gitignore index b3e59d41fd82..cfcc49a7def7 100644 --- a/tools/testing/selftests/ptrace/.gitignore +++ b/tools/testing/selftests/ptrace/.gitignore | |||
@@ -1 +1,2 @@ | |||
1 | get_syscall_info | ||
1 | peeksiginfo | 2 | peeksiginfo |
diff --git a/tools/testing/selftests/ptrace/Makefile b/tools/testing/selftests/ptrace/Makefile index cb21c76a18ca..c0b7f89f0930 100644 --- a/tools/testing/selftests/ptrace/Makefile +++ b/tools/testing/selftests/ptrace/Makefile | |||
@@ -1,6 +1,6 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0-only | 1 | # SPDX-License-Identifier: GPL-2.0-only |
2 | CFLAGS += -iquote../../../../include/uapi -Wall | 2 | CFLAGS += -iquote../../../../include/uapi -Wall |
3 | 3 | ||
4 | TEST_GEN_PROGS := peeksiginfo | 4 | TEST_GEN_PROGS := get_syscall_info peeksiginfo |
5 | 5 | ||
6 | include ../lib.mk | 6 | include ../lib.mk |
diff --git a/tools/testing/selftests/ptrace/get_syscall_info.c b/tools/testing/selftests/ptrace/get_syscall_info.c new file mode 100644 index 000000000000..5bcd1c7b5be6 --- /dev/null +++ b/tools/testing/selftests/ptrace/get_syscall_info.c | |||
@@ -0,0 +1,271 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0+ | ||
2 | /* | ||
3 | * Copyright (c) 2018 Dmitry V. Levin <ldv@altlinux.org> | ||
4 | * All rights reserved. | ||
5 | * | ||
6 | * Check whether PTRACE_GET_SYSCALL_INFO semantics implemented in the kernel | ||
7 | * matches userspace expectations. | ||
8 | */ | ||
9 | |||
10 | #include "../kselftest_harness.h" | ||
11 | #include <err.h> | ||
12 | #include <signal.h> | ||
13 | #include <asm/unistd.h> | ||
14 | #include "linux/ptrace.h" | ||
15 | |||
16 | static int | ||
17 | kill_tracee(pid_t pid) | ||
18 | { | ||
19 | if (!pid) | ||
20 | return 0; | ||
21 | |||
22 | int saved_errno = errno; | ||
23 | |||
24 | int rc = kill(pid, SIGKILL); | ||
25 | |||
26 | errno = saved_errno; | ||
27 | return rc; | ||
28 | } | ||
29 | |||
30 | static long | ||
31 | sys_ptrace(int request, pid_t pid, unsigned long addr, unsigned long data) | ||
32 | { | ||
33 | return syscall(__NR_ptrace, request, pid, addr, data); | ||
34 | } | ||
35 | |||
36 | #define LOG_KILL_TRACEE(fmt, ...) \ | ||
37 | do { \ | ||
38 | kill_tracee(pid); \ | ||
39 | TH_LOG("wait #%d: " fmt, \ | ||
40 | ptrace_stop, ##__VA_ARGS__); \ | ||
41 | } while (0) | ||
42 | |||
43 | TEST(get_syscall_info) | ||
44 | { | ||
45 | static const unsigned long args[][7] = { | ||
46 | /* a sequence of architecture-agnostic syscalls */ | ||
47 | { | ||
48 | __NR_chdir, | ||
49 | (unsigned long) "", | ||
50 | 0xbad1fed1, | ||
51 | 0xbad2fed2, | ||
52 | 0xbad3fed3, | ||
53 | 0xbad4fed4, | ||
54 | 0xbad5fed5 | ||
55 | }, | ||
56 | { | ||
57 | __NR_gettid, | ||
58 | 0xcaf0bea0, | ||
59 | 0xcaf1bea1, | ||
60 | 0xcaf2bea2, | ||
61 | 0xcaf3bea3, | ||
62 | 0xcaf4bea4, | ||
63 | 0xcaf5bea5 | ||
64 | }, | ||
65 | { | ||
66 | __NR_exit_group, | ||
67 | 0, | ||
68 | 0xfac1c0d1, | ||
69 | 0xfac2c0d2, | ||
70 | 0xfac3c0d3, | ||
71 | 0xfac4c0d4, | ||
72 | 0xfac5c0d5 | ||
73 | } | ||
74 | }; | ||
75 | const unsigned long *exp_args; | ||
76 | |||
77 | pid_t pid = fork(); | ||
78 | |||
79 | ASSERT_LE(0, pid) { | ||
80 | TH_LOG("fork: %m"); | ||
81 | } | ||
82 | |||
83 | if (pid == 0) { | ||
84 | /* get the pid before PTRACE_TRACEME */ | ||
85 | pid = getpid(); | ||
86 | ASSERT_EQ(0, sys_ptrace(PTRACE_TRACEME, 0, 0, 0)) { | ||
87 | TH_LOG("PTRACE_TRACEME: %m"); | ||
88 | } | ||
89 | ASSERT_EQ(0, kill(pid, SIGSTOP)) { | ||
90 | /* cannot happen */ | ||
91 | TH_LOG("kill SIGSTOP: %m"); | ||
92 | } | ||
93 | for (unsigned int i = 0; i < ARRAY_SIZE(args); ++i) { | ||
94 | syscall(args[i][0], | ||
95 | args[i][1], args[i][2], args[i][3], | ||
96 | args[i][4], args[i][5], args[i][6]); | ||
97 | } | ||
98 | /* unreachable */ | ||
99 | _exit(1); | ||
100 | } | ||
101 | |||
102 | const struct { | ||
103 | unsigned int is_error; | ||
104 | int rval; | ||
105 | } *exp_param, exit_param[] = { | ||
106 | { 1, -ENOENT }, /* chdir */ | ||
107 | { 0, pid } /* gettid */ | ||
108 | }; | ||
109 | |||
110 | unsigned int ptrace_stop; | ||
111 | |||
112 | for (ptrace_stop = 0; ; ++ptrace_stop) { | ||
113 | struct ptrace_syscall_info info = { | ||
114 | .op = 0xff /* invalid PTRACE_SYSCALL_INFO_* op */ | ||
115 | }; | ||
116 | const size_t size = sizeof(info); | ||
117 | const int expected_none_size = | ||
118 | (void *) &info.entry - (void *) &info; | ||
119 | const int expected_entry_size = | ||
120 | (void *) &info.entry.args[6] - (void *) &info; | ||
121 | const int expected_exit_size = | ||
122 | (void *) (&info.exit.is_error + 1) - | ||
123 | (void *) &info; | ||
124 | int status; | ||
125 | long rc; | ||
126 | |||
127 | ASSERT_EQ(pid, wait(&status)) { | ||
128 | /* cannot happen */ | ||
129 | LOG_KILL_TRACEE("wait: %m"); | ||
130 | } | ||
131 | if (WIFEXITED(status)) { | ||
132 | pid = 0; /* the tracee is no more */ | ||
133 | ASSERT_EQ(0, WEXITSTATUS(status)); | ||
134 | break; | ||
135 | } | ||
136 | ASSERT_FALSE(WIFSIGNALED(status)) { | ||
137 | pid = 0; /* the tracee is no more */ | ||
138 | LOG_KILL_TRACEE("unexpected signal %u", | ||
139 | WTERMSIG(status)); | ||
140 | } | ||
141 | ASSERT_TRUE(WIFSTOPPED(status)) { | ||
142 | /* cannot happen */ | ||
143 | LOG_KILL_TRACEE("unexpected wait status %#x", status); | ||
144 | } | ||
145 | |||
146 | switch (WSTOPSIG(status)) { | ||
147 | case SIGSTOP: | ||
148 | ASSERT_EQ(0, ptrace_stop) { | ||
149 | LOG_KILL_TRACEE("unexpected signal stop"); | ||
150 | } | ||
151 | ASSERT_EQ(0, sys_ptrace(PTRACE_SETOPTIONS, pid, 0, | ||
152 | PTRACE_O_TRACESYSGOOD)) { | ||
153 | LOG_KILL_TRACEE("PTRACE_SETOPTIONS: %m"); | ||
154 | } | ||
155 | ASSERT_LT(0, (rc = sys_ptrace(PTRACE_GET_SYSCALL_INFO, | ||
156 | pid, size, | ||
157 | (unsigned long) &info))) { | ||
158 | LOG_KILL_TRACEE("PTRACE_GET_SYSCALL_INFO: %m"); | ||
159 | } | ||
160 | ASSERT_EQ(expected_none_size, rc) { | ||
161 | LOG_KILL_TRACEE("signal stop mismatch"); | ||
162 | } | ||
163 | ASSERT_EQ(PTRACE_SYSCALL_INFO_NONE, info.op) { | ||
164 | LOG_KILL_TRACEE("signal stop mismatch"); | ||
165 | } | ||
166 | ASSERT_TRUE(info.arch) { | ||
167 | LOG_KILL_TRACEE("signal stop mismatch"); | ||
168 | } | ||
169 | ASSERT_TRUE(info.instruction_pointer) { | ||
170 | LOG_KILL_TRACEE("signal stop mismatch"); | ||
171 | } | ||
172 | ASSERT_TRUE(info.stack_pointer) { | ||
173 | LOG_KILL_TRACEE("signal stop mismatch"); | ||
174 | } | ||
175 | break; | ||
176 | |||
177 | case SIGTRAP | 0x80: | ||
178 | ASSERT_LT(0, (rc = sys_ptrace(PTRACE_GET_SYSCALL_INFO, | ||
179 | pid, size, | ||
180 | (unsigned long) &info))) { | ||
181 | LOG_KILL_TRACEE("PTRACE_GET_SYSCALL_INFO: %m"); | ||
182 | } | ||
183 | switch (ptrace_stop) { | ||
184 | case 1: /* entering chdir */ | ||
185 | case 3: /* entering gettid */ | ||
186 | case 5: /* entering exit_group */ | ||
187 | exp_args = args[ptrace_stop / 2]; | ||
188 | ASSERT_EQ(expected_entry_size, rc) { | ||
189 | LOG_KILL_TRACEE("entry stop mismatch"); | ||
190 | } | ||
191 | ASSERT_EQ(PTRACE_SYSCALL_INFO_ENTRY, info.op) { | ||
192 | LOG_KILL_TRACEE("entry stop mismatch"); | ||
193 | } | ||
194 | ASSERT_TRUE(info.arch) { | ||
195 | LOG_KILL_TRACEE("entry stop mismatch"); | ||
196 | } | ||
197 | ASSERT_TRUE(info.instruction_pointer) { | ||
198 | LOG_KILL_TRACEE("entry stop mismatch"); | ||
199 | } | ||
200 | ASSERT_TRUE(info.stack_pointer) { | ||
201 | LOG_KILL_TRACEE("entry stop mismatch"); | ||
202 | } | ||
203 | ASSERT_EQ(exp_args[0], info.entry.nr) { | ||
204 | LOG_KILL_TRACEE("entry stop mismatch"); | ||
205 | } | ||
206 | ASSERT_EQ(exp_args[1], info.entry.args[0]) { | ||
207 | LOG_KILL_TRACEE("entry stop mismatch"); | ||
208 | } | ||
209 | ASSERT_EQ(exp_args[2], info.entry.args[1]) { | ||
210 | LOG_KILL_TRACEE("entry stop mismatch"); | ||
211 | } | ||
212 | ASSERT_EQ(exp_args[3], info.entry.args[2]) { | ||
213 | LOG_KILL_TRACEE("entry stop mismatch"); | ||
214 | } | ||
215 | ASSERT_EQ(exp_args[4], info.entry.args[3]) { | ||
216 | LOG_KILL_TRACEE("entry stop mismatch"); | ||
217 | } | ||
218 | ASSERT_EQ(exp_args[5], info.entry.args[4]) { | ||
219 | LOG_KILL_TRACEE("entry stop mismatch"); | ||
220 | } | ||
221 | ASSERT_EQ(exp_args[6], info.entry.args[5]) { | ||
222 | LOG_KILL_TRACEE("entry stop mismatch"); | ||
223 | } | ||
224 | break; | ||
225 | case 2: /* exiting chdir */ | ||
226 | case 4: /* exiting gettid */ | ||
227 | exp_param = &exit_param[ptrace_stop / 2 - 1]; | ||
228 | ASSERT_EQ(expected_exit_size, rc) { | ||
229 | LOG_KILL_TRACEE("exit stop mismatch"); | ||
230 | } | ||
231 | ASSERT_EQ(PTRACE_SYSCALL_INFO_EXIT, info.op) { | ||
232 | LOG_KILL_TRACEE("exit stop mismatch"); | ||
233 | } | ||
234 | ASSERT_TRUE(info.arch) { | ||
235 | LOG_KILL_TRACEE("exit stop mismatch"); | ||
236 | } | ||
237 | ASSERT_TRUE(info.instruction_pointer) { | ||
238 | LOG_KILL_TRACEE("exit stop mismatch"); | ||
239 | } | ||
240 | ASSERT_TRUE(info.stack_pointer) { | ||
241 | LOG_KILL_TRACEE("exit stop mismatch"); | ||
242 | } | ||
243 | ASSERT_EQ(exp_param->is_error, | ||
244 | info.exit.is_error) { | ||
245 | LOG_KILL_TRACEE("exit stop mismatch"); | ||
246 | } | ||
247 | ASSERT_EQ(exp_param->rval, info.exit.rval) { | ||
248 | LOG_KILL_TRACEE("exit stop mismatch"); | ||
249 | } | ||
250 | break; | ||
251 | default: | ||
252 | LOG_KILL_TRACEE("unexpected syscall stop"); | ||
253 | abort(); | ||
254 | } | ||
255 | break; | ||
256 | |||
257 | default: | ||
258 | LOG_KILL_TRACEE("unexpected stop signal %#x", | ||
259 | WSTOPSIG(status)); | ||
260 | abort(); | ||
261 | } | ||
262 | |||
263 | ASSERT_EQ(0, sys_ptrace(PTRACE_SYSCALL, pid, 0, 0)) { | ||
264 | LOG_KILL_TRACEE("PTRACE_SYSCALL: %m"); | ||
265 | } | ||
266 | } | ||
267 | |||
268 | ASSERT_EQ(ARRAY_SIZE(args) * 2, ptrace_stop); | ||
269 | } | ||
270 | |||
271 | TEST_HARNESS_MAIN | ||
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index dc66fe852768..6ef7f16c4cf5 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c | |||
@@ -1775,13 +1775,18 @@ void tracer_ptrace(struct __test_metadata *_metadata, pid_t tracee, | |||
1775 | unsigned long msg; | 1775 | unsigned long msg; |
1776 | static bool entry; | 1776 | static bool entry; |
1777 | 1777 | ||
1778 | /* Make sure we got an empty message. */ | 1778 | /* |
1779 | * The traditional way to tell PTRACE_SYSCALL entry/exit | ||
1780 | * is by counting. | ||
1781 | */ | ||
1782 | entry = !entry; | ||
1783 | |||
1784 | /* Make sure we got an appropriate message. */ | ||
1779 | ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg); | 1785 | ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg); |
1780 | EXPECT_EQ(0, ret); | 1786 | EXPECT_EQ(0, ret); |
1781 | EXPECT_EQ(0, msg); | 1787 | EXPECT_EQ(entry ? PTRACE_EVENTMSG_SYSCALL_ENTRY |
1788 | : PTRACE_EVENTMSG_SYSCALL_EXIT, msg); | ||
1782 | 1789 | ||
1783 | /* The only way to tell PTRACE_SYSCALL entry/exit is by counting. */ | ||
1784 | entry = !entry; | ||
1785 | if (!entry) | 1790 | if (!entry) |
1786 | return; | 1791 | return; |
1787 | 1792 | ||