diff options
author	Reinette Chatre <reinette.chatre@intel.com>	2018-07-01 01:03:03 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2018-07-03 02:38:39 -0400
commit	546d3c74277398a3d76d059bd2db47186bb47fc8 (patch)
tree	14ead7a2673f43d27663590f813ada8e5c595c9d
parent	ce730f1cc1255be152c879a2bc5f295d341d8036 (diff)
x86/intel_rdt: Fix cleanup of plr structure on error
When a resource group enters pseudo-locksetup mode a pseudo_lock_region is
associated with it. When the user writes to the resource group's schemata
file the CBM of the requested pseudo-locked region is entered into the
pseudo_lock_region struct. If any part of pseudo-lock region creation fails
the resource group will remain in pseudo-locksetup mode with the
pseudo_lock_region associated with it.
In case of failure during pseudo-lock region creation care needs to be
taken to ensure that the pseudo_lock_region struct associated with the
resource group is cleared from any pseudo-locking data - especially the
CBM. This is because the existence of a pseudo_lock_region struct with a
CBM is significant in other areas of the code, for example, the display of
bit_usage and initialization of a new resource group.
Fix the error path of pseudo-lock region creation to ensure that the
pseudo_lock_region struct is cleared at each error exit.
Fixes: 018961ae5579 ("x86/intel_rdt: Pseudo-lock region creation/removal core")
Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: fenghua.yu@intel.com
Cc: tony.luck@intel.com
Cc: vikas.shivappa@linux.intel.com
Cc: gavin.hindman@intel.com
Cc: jithu.joseph@intel.com
Cc: dave.hansen@intel.com
Cc: hpa@zytor.com
Link: https://lkml.kernel.org/r/49b4782f6d204d122cee3499e642b2772a98d2b4.1530421026.git.reinette.chatre@intel.com
 arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c | 22 +++++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
index 1860ec10302d..8fd79c281ee6 100644
--- a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
+++ b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
@@ -290,6 +290,7 @@ static void pseudo_lock_region_clear(struct pseudo_lock_region *plr)
 static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
 {
 	struct cpu_cacheinfo *ci;
+	int ret;
 	int i;
 
 	/* Pick the first cpu we find that is associated with the cache. */
@@ -298,7 +299,8 @@ static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
 	if (!cpu_online(plr->cpu)) {
 		rdt_last_cmd_printf("cpu %u associated with cache not online\n",
 				    plr->cpu);
-		return -ENODEV;
+		ret = -ENODEV;
+		goto out_region;
 	}
 
 	ci = get_cpu_cacheinfo(plr->cpu);
@@ -312,8 +314,11 @@ static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
 		}
 	}
 
+	ret = -1;
 	rdt_last_cmd_puts("unable to determine cache line size\n");
-	return -1;
+out_region:
+	pseudo_lock_region_clear(plr);
+	return ret;
 }
 
 /**
@@ -365,16 +370,23 @@ static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr)
 	 */
 	if (plr->size > KMALLOC_MAX_SIZE) {
 		rdt_last_cmd_puts("requested region exceeds maximum size\n");
-		return -E2BIG;
+		ret = -E2BIG;
+		goto out_region;
 	}
 
 	plr->kmem = kzalloc(plr->size, GFP_KERNEL);
 	if (!plr->kmem) {
 		rdt_last_cmd_puts("unable to allocate memory\n");
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out_region;
 	}
 
-	return 0;
+	ret = 0;
+	goto out;
+out_region:
+	pseudo_lock_region_clear(plr);
+out:
+	return ret;
 }
 
 /**