author	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2018-10-11 06:28:23 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2018-10-11 06:28:23 -0400
commit	9f203e2f2f065cd74553e6474f0ae3675f39fb0f (patch)
tree	43dbe4103fe13a6305b5118f1905056f78788c3e
parent	a22dd3629e257e5db51ad12610d00bb2856b291d (diff)
parent	184d47f0fd365108bd06ab26cdb3450b716269fd (diff)
Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Ingo writes:
 "x86 fixes:

  An intel_rdt memory access fix and a VLA fix in pgd_alloc()."

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm: Avoid VLA in pgd_alloc()
  x86/intel_rdt: Fix out-of-bounds memory access in CBM tests
-rw-r--r--	arch/x86/kernel/cpu/intel_rdt.h	6
-rw-r--r--	arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c	20
-rw-r--r--	arch/x86/kernel/cpu/intel_rdt_rdtgroup.c	36
-rw-r--r--	arch/x86/mm/pgtable.c	10
4 files changed, 45 insertions(+), 27 deletions(-)
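
[Editor's note] The intel_rdt changes below all fix one recurring pattern: the
address of a 32-bit CBM value was cast to unsigned long * and handed to the
bitmap helpers. On 64-bit kernels an unsigned long is 8 bytes, so
bitmap_weight() and bitmap_intersects() read 4 bytes past the u32. A minimal
userspace sketch of the broken and fixed shapes (illustrative only, not kernel
code; __builtin_popcountl is a GCC/Clang builtin):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t cbm = 0xf0;

		/*
		 * Broken shape: a helper that reads sizeof(unsigned long)
		 * bytes through this pointer runs past the 4-byte u32.
		 */
		unsigned long *bad = (unsigned long *)&cbm;
		(void)bad;

		/*
		 * Fixed shape: widen by value into a full word, then take
		 * the address of the local copy.
		 */
		unsigned long tmp = cbm;

		printf("bits set: %d\n", __builtin_popcountl(tmp));
		return 0;
	}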
diff --git a/arch/x86/kernel/cpu/intel_rdt.h b/arch/x86/kernel/cpu/intel_rdt.h
index 285eb3ec4200..3736f6dc9545 100644
--- a/arch/x86/kernel/cpu/intel_rdt.h
+++ b/arch/x86/kernel/cpu/intel_rdt.h
@@ -529,14 +529,14 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 int rdtgroup_schemata_show(struct kernfs_open_file *of,
 			   struct seq_file *s, void *v);
 bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
-			   u32 _cbm, int closid, bool exclusive);
+			   unsigned long cbm, int closid, bool exclusive);
 unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d,
-				  u32 cbm);
+				  unsigned long cbm);
 enum rdtgrp_mode rdtgroup_mode_by_closid(int closid);
 int rdtgroup_tasks_assigned(struct rdtgroup *r);
 int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp);
 int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp);
-bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, u32 _cbm);
+bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm);
 bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d);
 int rdt_pseudo_lock_init(void);
 void rdt_pseudo_lock_release(void);
diff --git a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
index 40f3903ae5d9..f8c260d522ca 100644
--- a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
+++ b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
@@ -797,25 +797,27 @@ int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
 /**
  * rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked
  * @d: RDT domain
- * @_cbm: CBM to test
+ * @cbm: CBM to test
  *
- * @d represents a cache instance and @_cbm a capacity bitmask that is
- * considered for it. Determine if @_cbm overlaps with any existing
+ * @d represents a cache instance and @cbm a capacity bitmask that is
+ * considered for it. Determine if @cbm overlaps with any existing
  * pseudo-locked region on @d.
  *
- * Return: true if @_cbm overlaps with pseudo-locked region on @d, false
+ * @cbm is unsigned long, even if only 32 bits are used, to make the
+ * bitmap functions work correctly.
+ *
+ * Return: true if @cbm overlaps with pseudo-locked region on @d, false
  * otherwise.
  */
-bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, u32 _cbm)
+bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm)
 {
-	unsigned long *cbm = (unsigned long *)&_cbm;
-	unsigned long *cbm_b;
 	unsigned int cbm_len;
+	unsigned long cbm_b;
 
 	if (d->plr) {
 		cbm_len = d->plr->r->cache.cbm_len;
-		cbm_b = (unsigned long *)&d->plr->cbm;
-		if (bitmap_intersects(cbm, cbm_b, cbm_len))
+		cbm_b = d->plr->cbm;
+		if (bitmap_intersects(&cbm, &cbm_b, cbm_len))
 			return true;
 	}
 	return false;
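
[Editor's note] With @cbm passed by value and d->plr->cbm copied into cbm_b,
both operands live in full-word locals, so &cbm and &cbm_b are valid
single-word bitmaps. For cbm_len <= BITS_PER_LONG, bitmap_intersects()
reduces to a masked AND; a hedged sketch of that semantics (illustrative,
not the kernel implementation):

	/*
	 * Single-word equivalent of bitmap_intersects(&a, &b, nbits),
	 * assuming nbits <= BITS_PER_LONG.
	 */
	static int single_word_intersects(unsigned long a, unsigned long b,
					  unsigned int nbits)
	{
		unsigned long mask = nbits < 8 * sizeof(unsigned long) ?
				     (1UL << nbits) - 1 : ~0UL;

		return (a & b & mask) != 0;
	}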
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index 1b8e86a5d5e1..b140c68bc14b 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -975,33 +975,34 @@ static int rdtgroup_mode_show(struct kernfs_open_file *of,
  * is false then overlaps with any resource group or hardware entities
  * will be considered.
  *
+ * @cbm is unsigned long, even if only 32 bits are used, to make the
+ * bitmap functions work correctly.
+ *
  * Return: false if CBM does not overlap, true if it does.
  */
 bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
-			   u32 _cbm, int closid, bool exclusive)
+			   unsigned long cbm, int closid, bool exclusive)
 {
-	unsigned long *cbm = (unsigned long *)&_cbm;
-	unsigned long *ctrl_b;
 	enum rdtgrp_mode mode;
+	unsigned long ctrl_b;
 	u32 *ctrl;
 	int i;
 
 	/* Check for any overlap with regions used by hardware directly */
 	if (!exclusive) {
-		if (bitmap_intersects(cbm,
-				      (unsigned long *)&r->cache.shareable_bits,
-				      r->cache.cbm_len))
+		ctrl_b = r->cache.shareable_bits;
+		if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len))
 			return true;
 	}
 
 	/* Check for overlap with other resource groups */
 	ctrl = d->ctrl_val;
 	for (i = 0; i < closids_supported(); i++, ctrl++) {
-		ctrl_b = (unsigned long *)ctrl;
+		ctrl_b = *ctrl;
 		mode = rdtgroup_mode_by_closid(i);
 		if (closid_allocated(i) && i != closid &&
 		    mode != RDT_MODE_PSEUDO_LOCKSETUP) {
-			if (bitmap_intersects(cbm, ctrl_b, r->cache.cbm_len)) {
+			if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) {
 				if (exclusive) {
 					if (mode == RDT_MODE_EXCLUSIVE)
 						return true;
@@ -1138,15 +1139,18 @@ out:
  * computed by first dividing the total cache size by the CBM length to
  * determine how many bytes each bit in the bitmask represents. The result
  * is multiplied with the number of bits set in the bitmask.
+ *
+ * @cbm is unsigned long, even if only 32 bits are used, to make the
+ * bitmap functions work correctly.
  */
 unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
-				  struct rdt_domain *d, u32 cbm)
+				  struct rdt_domain *d, unsigned long cbm)
 {
 	struct cpu_cacheinfo *ci;
 	unsigned int size = 0;
 	int num_b, i;
 
-	num_b = bitmap_weight((unsigned long *)&cbm, r->cache.cbm_len);
+	num_b = bitmap_weight(&cbm, r->cache.cbm_len);
 	ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
 	for (i = 0; i < ci->num_leaves; i++) {
 		if (ci->info_list[i].level == r->cache_level) {
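
[Editor's note] The size computation described in the comment above is easy to
check with invented numbers; a hypothetical worked example (cache geometry
made up for illustration, __builtin_popcountl standing in for
bitmap_weight()):

	static unsigned int example_cbm_to_size(void)
	{
		unsigned int cache_size = 16 * 1024 * 1024; /* 16 MiB cache */
		unsigned int cbm_len = 16;		    /* CBM width in bits */
		unsigned long cbm = 0x000f;		    /* 4 contiguous bits set */

		/* 16 MiB / 16 bits = 1 MiB per bit; 4 bits set = 4 MiB. */
		return cache_size / cbm_len * __builtin_popcountl(cbm);
	}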
@@ -2353,6 +2357,7 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
 	u32 used_b = 0, unused_b = 0;
 	u32 closid = rdtgrp->closid;
 	struct rdt_resource *r;
+	unsigned long tmp_cbm;
 	enum rdtgrp_mode mode;
 	struct rdt_domain *d;
 	int i, ret;
@@ -2390,9 +2395,14 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
 		 * modify the CBM based on system availability.
 		 */
 		cbm_ensure_valid(&d->new_ctrl, r);
-		if (bitmap_weight((unsigned long *) &d->new_ctrl,
-				  r->cache.cbm_len) <
-		    r->cache.min_cbm_bits) {
+		/*
+		 * Assign the u32 CBM to an unsigned long to ensure
+		 * that bitmap_weight() does not access out-of-bound
+		 * memory.
+		 */
+		tmp_cbm = d->new_ctrl;
+		if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) <
+		    r->cache.min_cbm_bits) {
 			rdt_last_cmd_printf("no space on %s:%d\n",
 					    r->name, d->id);
 			return -ENOSPC;
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 089e78c4effd..59274e2c1ac4 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -115,6 +115,8 @@ static inline void pgd_list_del(pgd_t *pgd)
 
 #define UNSHARED_PTRS_PER_PGD \
 	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
+#define MAX_UNSHARED_PTRS_PER_PGD \
+	max_t(size_t, KERNEL_PGD_BOUNDARY, PTRS_PER_PGD)
 
 
 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
@@ -181,6 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
  * and initialize the kernel pmds here.
  */
 #define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD
+#define MAX_PREALLOCATED_PMDS	MAX_UNSHARED_PTRS_PER_PGD
 
 /*
  * We allocate separate PMDs for the kernel part of the user page-table
@@ -189,6 +192,7 @@ static void pgd_dtor(pgd_t *pgd)
  */
 #define PREALLOCATED_USER_PMDS	(static_cpu_has(X86_FEATURE_PTI) ? \
 					KERNEL_PGD_PTRS : 0)
+#define MAX_PREALLOCATED_USER_PMDS KERNEL_PGD_PTRS
 
 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
 {
@@ -210,7 +214,9 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
 
 /* No need to prepopulate any pagetable entries in non-PAE modes. */
 #define PREALLOCATED_PMDS	0
+#define MAX_PREALLOCATED_PMDS	0
 #define PREALLOCATED_USER_PMDS	0
+#define MAX_PREALLOCATED_USER_PMDS 0
 #endif	/* CONFIG_X86_PAE */
 
 static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
@@ -428,8 +434,8 @@ static inline void _pgd_free(pgd_t *pgd)
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	pgd_t *pgd;
-	pmd_t *u_pmds[PREALLOCATED_USER_PMDS];
-	pmd_t *pmds[PREALLOCATED_PMDS];
+	pmd_t *u_pmds[MAX_PREALLOCATED_USER_PMDS];
+	pmd_t *pmds[MAX_PREALLOCATED_PMDS];
 
 	pgd = _pgd_alloc();
 
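
[Editor's note] PREALLOCATED_PMDS and PREALLOCATED_USER_PMDS expand to runtime
conditionals, so using them as array sizes made u_pmds[] and pmds[]
variable-length arrays. The fix sizes the arrays with the new compile-time
MAX_* bounds while the runtime values still limit how many slots are used. A
simplified sketch of the pattern (illustrative names, not the kernel code):

	/* Stand-in for a runtime condition like static_cpu_has(X86_FEATURE_PTI). */
	static int pti_enabled(void) { return 1; }

	#define MAX_ITEMS	8			/* compile-time upper bound */
	#define NR_ITEMS	(pti_enabled() ? 8 : 0)	/* runtime count, <= MAX_ITEMS */

	static void init_items(void)
	{
		void *items[MAX_ITEMS];	/* fixed-size array: no VLA on the stack */
		int i;

		for (i = 0; i < NR_ITEMS; i++)	/* runtime value bounds the loop */
			items[i] = 0;
	}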