diff options
| author | Jithu Joseph <jithu.joseph@intel.com> | 2018-10-12 18:51:01 -0400 |
|---|---|---|
| committer | Thomas Gleixner <tglx@linutronix.de> | 2018-10-19 08:54:28 -0400 |
| commit | b61b8bba18fe2b63d38fdaf9b83de25e2d787dfe (patch) | |
| tree | 08cd45e0ebebb2d96b0073ce1d858d695c075a44 | |
| parent | 2a7adf6ce643fdeec051dc88e5250b08c83bbb67 (diff) | |
x86/intel_rdt: Prevent pseudo-locking from using stale pointers
When the last CPU in an rdt_domain goes offline, its rdt_domain struct gets
freed. The current pseudo-locking code is unaware of this scenario and ends up
dereferencing the freed structure in a few places.
Add checks to prevent pseudo-locking code from doing this.
While further work is needed to seamlessly restore resource groups (not
just pseudo-locking) to their configuration when the domain is brought back
online, the immediate issue of invalid pointers is addressed here.
Fixes: f4e80d67a5274 ("x86/intel_rdt: Resctrl files reflect pseudo-locked information")
Fixes: 443810fe61605 ("x86/intel_rdt: Create debugfs files for pseudo-locking testing")
Fixes: 746e08590b864 ("x86/intel_rdt: Create character device exposing pseudo-locked region")
Fixes: 33dc3e410a0d9 ("x86/intel_rdt: Make CPU information accessible for pseudo-locked regions")
Signed-off-by: Jithu Joseph <jithu.joseph@intel.com>
Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: fenghua.yu@intel.com
Cc: tony.luck@intel.com
Cc: gavin.hindman@intel.com
Cc: hpa@zytor.com
Link: https://lkml.kernel.org/r/231f742dbb7b00a31cc104416860e27dba6b072d.1539384145.git.reinette.chatre@intel.com
| -rw-r--r-- | arch/x86/kernel/cpu/intel_rdt.c | 7 |
| -rw-r--r-- | arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c | 12 |
| -rw-r--r-- | arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c | 10 |
| -rw-r--r-- | arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 38 |
4 files changed, 55 insertions, 12 deletions
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c index 1214f3f7ec6d..44272b7107ad 100644 --- a/arch/x86/kernel/cpu/intel_rdt.c +++ b/arch/x86/kernel/cpu/intel_rdt.c | |||
| @@ -608,6 +608,13 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r) | |||
| 608 | cancel_delayed_work(&d->cqm_limbo); | 608 | cancel_delayed_work(&d->cqm_limbo); |
| 609 | } | 609 | } |
| 610 | 610 | ||
| 611 | /* | ||
| 612 | * rdt_domain "d" is going to be freed below, so clear | ||
| 613 | * its pointer from pseudo_lock_region struct. | ||
| 614 | */ | ||
| 615 | if (d->plr) | ||
| 616 | d->plr->d = NULL; | ||
| 617 | |||
| 611 | kfree(d->ctrl_val); | 618 | kfree(d->ctrl_val); |
| 612 | kfree(d->mbps_val); | 619 | kfree(d->mbps_val); |
| 613 | bitmap_free(d->rmid_busy_llc); | 620 | bitmap_free(d->rmid_busy_llc); |
diff --git a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c index 0f53049719cd..27937458c231 100644 --- a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c +++ b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c | |||
| @@ -404,8 +404,16 @@ int rdtgroup_schemata_show(struct kernfs_open_file *of, | |||
| 404 | for_each_alloc_enabled_rdt_resource(r) | 404 | for_each_alloc_enabled_rdt_resource(r) |
| 405 | seq_printf(s, "%s:uninitialized\n", r->name); | 405 | seq_printf(s, "%s:uninitialized\n", r->name); |
| 406 | } else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { | 406 | } else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { |
| 407 | seq_printf(s, "%s:%d=%x\n", rdtgrp->plr->r->name, | 407 | if (!rdtgrp->plr->d) { |
| 408 | rdtgrp->plr->d->id, rdtgrp->plr->cbm); | 408 | rdt_last_cmd_clear(); |
| 409 | rdt_last_cmd_puts("Cache domain offline\n"); | ||
| 410 | ret = -ENODEV; | ||
| 411 | } else { | ||
| 412 | seq_printf(s, "%s:%d=%x\n", | ||
| 413 | rdtgrp->plr->r->name, | ||
| 414 | rdtgrp->plr->d->id, | ||
| 415 | rdtgrp->plr->cbm); | ||
| 416 | } | ||
| 409 | } else { | 417 | } else { |
| 410 | closid = rdtgrp->closid; | 418 | closid = rdtgrp->closid; |
| 411 | for_each_alloc_enabled_rdt_resource(r) { | 419 | for_each_alloc_enabled_rdt_resource(r) { |
diff --git a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c index 41aeb431e834..966ac0c20d67 100644 --- a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c +++ b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c | |||
| @@ -1174,6 +1174,11 @@ static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel) | |||
| 1174 | goto out; | 1174 | goto out; |
| 1175 | } | 1175 | } |
| 1176 | 1176 | ||
| 1177 | if (!plr->d) { | ||
| 1178 | ret = -ENODEV; | ||
| 1179 | goto out; | ||
| 1180 | } | ||
| 1181 | |||
| 1177 | plr->thread_done = 0; | 1182 | plr->thread_done = 0; |
| 1178 | cpu = cpumask_first(&plr->d->cpu_mask); | 1183 | cpu = cpumask_first(&plr->d->cpu_mask); |
| 1179 | if (!cpu_online(cpu)) { | 1184 | if (!cpu_online(cpu)) { |
| @@ -1494,6 +1499,11 @@ static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma) | |||
| 1494 | 1499 | ||
| 1495 | plr = rdtgrp->plr; | 1500 | plr = rdtgrp->plr; |
| 1496 | 1501 | ||
| 1502 | if (!plr->d) { | ||
| 1503 | mutex_unlock(&rdtgroup_mutex); | ||
| 1504 | return -ENODEV; | ||
| 1505 | } | ||
| 1506 | |||
| 1497 | /* | 1507 | /* |
| 1498 | * Task is required to run with affinity to the cpus associated | 1508 | * Task is required to run with affinity to the cpus associated |
| 1499 | * with the pseudo-locked region. If this is not the case the task | 1509 | * with the pseudo-locked region. If this is not the case the task |
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c index dbc7fc98b60a..f27b8115ffa2 100644 --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c +++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | |||
| @@ -268,17 +268,27 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of, | |||
| 268 | struct seq_file *s, void *v) | 268 | struct seq_file *s, void *v) |
| 269 | { | 269 | { |
| 270 | struct rdtgroup *rdtgrp; | 270 | struct rdtgroup *rdtgrp; |
| 271 | struct cpumask *mask; | ||
| 271 | int ret = 0; | 272 | int ret = 0; |
| 272 | 273 | ||
| 273 | rdtgrp = rdtgroup_kn_lock_live(of->kn); | 274 | rdtgrp = rdtgroup_kn_lock_live(of->kn); |
| 274 | 275 | ||
| 275 | if (rdtgrp) { | 276 | if (rdtgrp) { |
| 276 | if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) | 277 | if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { |
| 277 | seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n", | 278 | if (!rdtgrp->plr->d) { |
| 278 | cpumask_pr_args(&rdtgrp->plr->d->cpu_mask)); | 279 | rdt_last_cmd_clear(); |
| 279 | else | 280 | rdt_last_cmd_puts("Cache domain offline\n"); |
| 281 | ret = -ENODEV; | ||
| 282 | } else { | ||
| 283 | mask = &rdtgrp->plr->d->cpu_mask; | ||
| 284 | seq_printf(s, is_cpu_list(of) ? | ||
| 285 | "%*pbl\n" : "%*pb\n", | ||
| 286 | cpumask_pr_args(mask)); | ||
| 287 | } | ||
| 288 | } else { | ||
| 280 | seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n", | 289 | seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n", |
| 281 | cpumask_pr_args(&rdtgrp->cpu_mask)); | 290 | cpumask_pr_args(&rdtgrp->cpu_mask)); |
| 291 | } | ||
| 282 | } else { | 292 | } else { |
| 283 | ret = -ENOENT; | 293 | ret = -ENOENT; |
| 284 | } | 294 | } |
| @@ -1282,6 +1292,7 @@ static int rdtgroup_size_show(struct kernfs_open_file *of, | |||
| 1282 | struct rdt_resource *r; | 1292 | struct rdt_resource *r; |
| 1283 | struct rdt_domain *d; | 1293 | struct rdt_domain *d; |
| 1284 | unsigned int size; | 1294 | unsigned int size; |
| 1295 | int ret = 0; | ||
| 1285 | bool sep; | 1296 | bool sep; |
| 1286 | u32 ctrl; | 1297 | u32 ctrl; |
| 1287 | 1298 | ||
| @@ -1292,11 +1303,18 @@ static int rdtgroup_size_show(struct kernfs_open_file *of, | |||
| 1292 | } | 1303 | } |
| 1293 | 1304 | ||
| 1294 | if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { | 1305 | if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { |
| 1295 | seq_printf(s, "%*s:", max_name_width, rdtgrp->plr->r->name); | 1306 | if (!rdtgrp->plr->d) { |
| 1296 | size = rdtgroup_cbm_to_size(rdtgrp->plr->r, | 1307 | rdt_last_cmd_clear(); |
| 1297 | rdtgrp->plr->d, | 1308 | rdt_last_cmd_puts("Cache domain offline\n"); |
| 1298 | rdtgrp->plr->cbm); | 1309 | ret = -ENODEV; |
| 1299 | seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size); | 1310 | } else { |
| 1311 | seq_printf(s, "%*s:", max_name_width, | ||
| 1312 | rdtgrp->plr->r->name); | ||
| 1313 | size = rdtgroup_cbm_to_size(rdtgrp->plr->r, | ||
| 1314 | rdtgrp->plr->d, | ||
| 1315 | rdtgrp->plr->cbm); | ||
| 1316 | seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size); | ||
| 1317 | } | ||
| 1300 | goto out; | 1318 | goto out; |
| 1301 | } | 1319 | } |
| 1302 | 1320 | ||
| @@ -1326,7 +1344,7 @@ static int rdtgroup_size_show(struct kernfs_open_file *of, | |||
| 1326 | out: | 1344 | out: |
| 1327 | rdtgroup_kn_unlock(of->kn); | 1345 | rdtgroup_kn_unlock(of->kn); |
| 1328 | 1346 | ||
| 1329 | return 0; | 1347 | return ret; |
| 1330 | } | 1348 | } |
| 1331 | 1349 | ||
| 1332 | /* rdtgroup information files for one cache resource. */ | 1350 | /* rdtgroup information files for one cache resource. */ |
