author    Linus Torvalds <torvalds@linux-foundation.org>    2018-02-03 19:25:42 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2018-02-03 19:25:42 -0500
commit    617aebe6a97efa539cc4b8a52adccd89596e6be0 (patch)
tree      51c7753c940fd3727b8cc3e93553c57f89d1d9d2 /kernel/fork.c
parent    0771ad44a20bc512d1123bac728d3a89ea6febe6 (diff)
parent    e47e311843dece8073146f3606871280ee9beb87 (diff)
Merge tag 'usercopy-v4.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux
Pull hardened usercopy whitelisting from Kees Cook:
 "Currently, hardened usercopy performs dynamic bounds checking on slab
  cache objects. This is good, but still leaves a lot of kernel memory
  available to be copied to/from userspace in the face of bugs.

  To further restrict what memory is available for copying, this creates
  a way to whitelist specific areas of a given slab cache object for
  copying to/from userspace, allowing much finer granularity of access
  control. Slab caches that are never exposed to userspace can declare
  no whitelist for their objects, thereby keeping them unavailable to
  userspace via dynamic copy operations. (Note, an implicit form of
  whitelisting is the use of constant sizes in usercopy operations and
  get_user()/put_user(); these bypass all hardened usercopy checks since
  these sizes cannot change at runtime.)

  This new check is WARN-by-default, so any mistakes can be found over
  the next several releases without breaking anyone's system.

  The series has roughly the following sections:
  - remove %p and improve reporting with offset
  - prepare infrastructure and whitelist kmalloc
  - update VFS subsystem with whitelists
  - update SCSI subsystem with whitelists
  - update network subsystem with whitelists
  - update process memory with whitelists
  - update per-architecture thread_struct with whitelists
  - update KVM with whitelists and fix ioctl bug
  - mark all other allocations as not whitelisted
  - update lkdtm for more sensible test coverage"

* tag 'usercopy-v4.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux: (38 commits)
  lkdtm: Update usercopy tests for whitelisting
  usercopy: Restrict non-usercopy caches to size 0
  kvm: x86: fix KVM_XEN_HVM_CONFIG ioctl
  kvm: whitelist struct kvm_vcpu_arch
  arm: Implement thread_struct whitelist for hardened usercopy
  arm64: Implement thread_struct whitelist for hardened usercopy
  x86: Implement thread_struct whitelist for hardened usercopy
  fork: Provide usercopy whitelisting for task_struct
  fork: Define usercopy region in thread_stack slab caches
  fork: Define usercopy region in mm_struct slab caches
  net: Restrict unwhitelisted proto caches to size 0
  sctp: Copy struct sctp_sock.autoclose to userspace using put_user()
  sctp: Define usercopy region in SCTP proto slab cache
  caif: Define usercopy region in caif proto slab cache
  ip: Define usercopy region in IP proto slab cache
  net: Define usercopy region in struct proto slab cache
  scsi: Define usercopy region in scsi_sense_cache slab cache
  cifs: Define usercopy region in cifs_request slab cache
  vxfs: Define usercopy region in vxfs_inode slab cache
  ufs: Define usercopy region in ufs_inode_cache slab cache
  ...
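For context, a minimal sketch of the whitelisting pattern this series applies across subsystems. kmem_cache_create_usercopy(), offsetof(), and sizeof_field() are the interfaces used in the diff below; struct example_obj, its fields, and example_cache_init() are hypothetical names invented for illustration, not part of this merge.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>

/* Hypothetical object; only 'to_user' is ever copied to/from userspace. */
struct example_obj {
	spinlock_t lock;	/* kernel-internal, never exposed */
	char to_user[64];	/* the only field handed to copy_to_user() */
	void *private_data;	/* kernel-internal, never exposed */
};

static struct kmem_cache *example_cachep;

static int __init example_cache_init(void)
{
	/*
	 * Whitelist exactly the 'to_user' bytes: a usercopy that touches
	 * any other part of an object from this cache will now trip the
	 * (WARN-by-default) hardened usercopy check.
	 */
	example_cachep = kmem_cache_create_usercopy("example_obj",
			sizeof(struct example_obj), 0, 0,
			offsetof(struct example_obj, to_user),
			sizeof_field(struct example_obj, to_user),
			NULL);
	return example_cachep ? 0 : -ENOMEM;
}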
Diffstat (limited to 'kernel/fork.c')
-rw-r--r--    kernel/fork.c | 31 ++++++++++++++++++++++++++-----
1 file changed, 26 insertions(+), 5 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 5e6cf0dd031c..5c372c954f3b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -283,8 +283,9 @@ static void free_thread_stack(struct task_struct *tsk)
 
 void thread_stack_cache_init(void)
 {
-	thread_stack_cache = kmem_cache_create("thread_stack", THREAD_SIZE,
-					      THREAD_SIZE, 0, NULL);
+	thread_stack_cache = kmem_cache_create_usercopy("thread_stack",
+					THREAD_SIZE, THREAD_SIZE, 0, 0,
+					THREAD_SIZE, NULL);
 	BUG_ON(thread_stack_cache == NULL);
 }
 # endif
@@ -693,6 +694,21 @@ static void set_max_threads(unsigned int max_threads_suggested)
 int arch_task_struct_size __read_mostly;
 #endif
 
+static void task_struct_whitelist(unsigned long *offset, unsigned long *size)
+{
+	/* Fetch thread_struct whitelist for the architecture. */
+	arch_thread_struct_whitelist(offset, size);
+
+	/*
+	 * Handle zero-sized whitelist or empty thread_struct, otherwise
+	 * adjust offset to position of thread_struct in task_struct.
+	 */
+	if (unlikely(*size == 0))
+		*offset = 0;
+	else
+		*offset += offsetof(struct task_struct, thread);
+}
+
 void __init fork_init(void)
 {
 	int i;
@@ -701,11 +717,14 @@ void __init fork_init(void)
 #define ARCH_MIN_TASKALIGN	0
 #endif
 	int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN);
+	unsigned long useroffset, usersize;
 
 	/* create a slab on which task_structs can be allocated */
-	task_struct_cachep = kmem_cache_create("task_struct",
+	task_struct_whitelist(&useroffset, &usersize);
+	task_struct_cachep = kmem_cache_create_usercopy("task_struct",
 			arch_task_struct_size, align,
-			SLAB_PANIC|SLAB_ACCOUNT, NULL);
+			SLAB_PANIC|SLAB_ACCOUNT,
+			useroffset, usersize, NULL);
 #endif
 
 	/* do the arch specific task caches init */
@@ -2248,9 +2267,11 @@ void __init proc_caches_init(void)
 	 * maximum number of CPU's we can ever have. The cpumask_allocation
 	 * is at the end of the structure, exactly for that reason.
 	 */
-	mm_cachep = kmem_cache_create("mm_struct",
+	mm_cachep = kmem_cache_create_usercopy("mm_struct",
 			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
 			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
+			offsetof(struct mm_struct, saved_auxv),
+			sizeof_field(struct mm_struct, saved_auxv),
 			NULL);
 	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
 	mmap_init();
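Not part of the diff, but to make the semantics concrete: after this series, a cache created through plain kmem_cache_create() ends up with usersize == 0 and whitelists nothing, while the thread_stack cache above whitelists the full THREAD_SIZE. Conceptually, the per-cache check accepts a copy only if the copy window fits inside the whitelisted region. The sketch below is illustrative, userspace-compilable C, simplified from the slab allocators' __check_heap_object() logic; the function and parameter names are invented for this sketch.

#include <stdbool.h>

/*
 * 'offset' is the copy's start relative to the beginning of the slab
 * object, 'n' is the copy length, and [useroffset, useroffset + usersize)
 * is the cache's whitelisted region.
 */
static bool usercopy_window_ok(unsigned long offset, unsigned long n,
			       unsigned long useroffset,
			       unsigned long usersize)
{
	unsigned long inset;

	/* The copy must start at or after the whitelist's start... */
	if (offset < useroffset)
		return false;

	inset = offset - useroffset;

	/* ...and must end at or before the whitelist's end. */
	if (inset > usersize || n > usersize - inset)
		return false;

	return true;
}

Under this model a usersize == 0 cache passes only zero-length copies, which is why the "Restrict non-usercopy caches to size 0" and "Restrict unwhitelisted proto caches to size 0" commits in the shortlog act as deny-all defaults for caches that never declared a whitelist.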