aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2017-11-13 22:05:19 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2017-11-13 22:05:19 -0500
commit3643b7e05b16a9fc4077ec56b655a1f8547d259c (patch)
treeeaa3a14a60082a374591ae6f7a44f29121d65c30
parentb18d62891aaff49d0ee8367d4b6bb9452469f807 (diff)
parent2244645ab194fe45ffcbaa08f235c8f0c7fb54fc (diff)
Merge branch 'x86-cache-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 cache resource updates from Thomas Gleixner: "This update provides updates to RDT: - A diagnostic framework for the Resource Director Technology (RDT) user interface (sysfs). The failure modes of the user interface are hard to diagnose from the error codes. An extra last command status file now provides sensible textual information about the failure, so it's simpler to use. - A few minor cleanups and updates in the RDT code" * 'x86-cache-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: x86/intel_rdt: Fix a silent failure when writing zero value schemata x86/intel_rdt: Fix potential deadlock during resctrl mount x86/intel_rdt: Fix potential deadlock during resctrl unmount x86/intel_rdt: Initialize bitmask of shareable resource if CDP enabled x86/intel_rdt: Remove redundant assignment x86/intel_rdt/cqm: Make integer rmid_limbo_count static x86/intel_rdt: Add documentation for "info/last_cmd_status" x86/intel_rdt: Add diagnostics when making directories x86/intel_rdt: Add diagnostics when writing the cpus file x86/intel_rdt: Add diagnostics when writing the tasks file x86/intel_rdt: Add diagnostics when writing the schemata file x86/intel_rdt: Add framework for better RDT UI diagnostics
-rw-r--r--Documentation/x86/intel_rdt_ui.txt11
-rw-r--r--arch/x86/kernel/cpu/intel_rdt.c1
-rw-r--r--arch/x86/kernel/cpu/intel_rdt.h7
-rw-r--r--arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c50
-rw-r--r--arch/x86/kernel/cpu/intel_rdt_monitor.c2
-rw-r--r--arch/x86/kernel/cpu/intel_rdt_rdtgroup.c131
6 files changed, 170 insertions, 32 deletions
diff --git a/Documentation/x86/intel_rdt_ui.txt b/Documentation/x86/intel_rdt_ui.txt
index 4d8848e4e224..6851854cf69d 100644
--- a/Documentation/x86/intel_rdt_ui.txt
+++ b/Documentation/x86/intel_rdt_ui.txt
@@ -87,6 +87,17 @@ with the following files:
87 bytes) at which a previously used LLC_occupancy 87 bytes) at which a previously used LLC_occupancy
88 counter can be considered for re-use. 88 counter can be considered for re-use.
89 89
90Finally, in the top level of the "info" directory there is a file
91named "last_cmd_status". This is reset with every "command" issued
92via the file system (making new directories or writing to any of the
93control files). If the command was successful, it will read as "ok".
94If the command failed, it will provide more information than can be
95conveyed in the error returns from file operations. E.g.
96
97 # echo L3:0=f7 > schemata
98 bash: echo: write error: Invalid argument
99 # cat info/last_cmd_status
100 mask f7 has non-consecutive 1-bits
90 101
91Resource alloc and monitor groups 102Resource alloc and monitor groups
92--------------------------------- 103---------------------------------
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index cd5fc61ba450..88dcf8479013 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -267,6 +267,7 @@ static void rdt_get_cdp_l3_config(int type)
267 r->num_closid = r_l3->num_closid / 2; 267 r->num_closid = r_l3->num_closid / 2;
268 r->cache.cbm_len = r_l3->cache.cbm_len; 268 r->cache.cbm_len = r_l3->cache.cbm_len;
269 r->default_ctrl = r_l3->default_ctrl; 269 r->default_ctrl = r_l3->default_ctrl;
270 r->cache.shareable_bits = r_l3->cache.shareable_bits;
270 r->data_width = (r->cache.cbm_len + 3) / 4; 271 r->data_width = (r->cache.cbm_len + 3) / 4;
271 r->alloc_capable = true; 272 r->alloc_capable = true;
272 /* 273 /*
diff --git a/arch/x86/kernel/cpu/intel_rdt.h b/arch/x86/kernel/cpu/intel_rdt.h
index a43a72d8e88e..3397244984f5 100644
--- a/arch/x86/kernel/cpu/intel_rdt.h
+++ b/arch/x86/kernel/cpu/intel_rdt.h
@@ -127,12 +127,15 @@ struct rdtgroup {
127#define RFTYPE_BASE BIT(1) 127#define RFTYPE_BASE BIT(1)
128#define RF_CTRLSHIFT 4 128#define RF_CTRLSHIFT 4
129#define RF_MONSHIFT 5 129#define RF_MONSHIFT 5
130#define RF_TOPSHIFT 6
130#define RFTYPE_CTRL BIT(RF_CTRLSHIFT) 131#define RFTYPE_CTRL BIT(RF_CTRLSHIFT)
131#define RFTYPE_MON BIT(RF_MONSHIFT) 132#define RFTYPE_MON BIT(RF_MONSHIFT)
133#define RFTYPE_TOP BIT(RF_TOPSHIFT)
132#define RFTYPE_RES_CACHE BIT(8) 134#define RFTYPE_RES_CACHE BIT(8)
133#define RFTYPE_RES_MB BIT(9) 135#define RFTYPE_RES_MB BIT(9)
134#define RF_CTRL_INFO (RFTYPE_INFO | RFTYPE_CTRL) 136#define RF_CTRL_INFO (RFTYPE_INFO | RFTYPE_CTRL)
135#define RF_MON_INFO (RFTYPE_INFO | RFTYPE_MON) 137#define RF_MON_INFO (RFTYPE_INFO | RFTYPE_MON)
138#define RF_TOP_INFO (RFTYPE_INFO | RFTYPE_TOP)
136#define RF_CTRL_BASE (RFTYPE_BASE | RFTYPE_CTRL) 139#define RF_CTRL_BASE (RFTYPE_BASE | RFTYPE_CTRL)
137 140
138/* List of all resource groups */ 141/* List of all resource groups */
@@ -409,6 +412,10 @@ union cpuid_0x10_x_edx {
409 unsigned int full; 412 unsigned int full;
410}; 413};
411 414
415void rdt_last_cmd_clear(void);
416void rdt_last_cmd_puts(const char *s);
417void rdt_last_cmd_printf(const char *fmt, ...);
418
412void rdt_ctrl_update(void *arg); 419void rdt_ctrl_update(void *arg);
413struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn); 420struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn);
414void rdtgroup_kn_unlock(struct kernfs_node *kn); 421void rdtgroup_kn_unlock(struct kernfs_node *kn);
diff --git a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
index f6ea94f8954a..23e1d5c249c6 100644
--- a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
+++ b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
@@ -42,15 +42,22 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
42 /* 42 /*
43 * Only linear delay values is supported for current Intel SKUs. 43 * Only linear delay values is supported for current Intel SKUs.
44 */ 44 */
45 if (!r->membw.delay_linear) 45 if (!r->membw.delay_linear) {
46 rdt_last_cmd_puts("No support for non-linear MB domains\n");
46 return false; 47 return false;
48 }
47 49
48 ret = kstrtoul(buf, 10, &bw); 50 ret = kstrtoul(buf, 10, &bw);
49 if (ret) 51 if (ret) {
52 rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf);
50 return false; 53 return false;
54 }
51 55
52 if (bw < r->membw.min_bw || bw > r->default_ctrl) 56 if (bw < r->membw.min_bw || bw > r->default_ctrl) {
57 rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw,
58 r->membw.min_bw, r->default_ctrl);
53 return false; 59 return false;
60 }
54 61
55 *data = roundup(bw, (unsigned long)r->membw.bw_gran); 62 *data = roundup(bw, (unsigned long)r->membw.bw_gran);
56 return true; 63 return true;
@@ -60,8 +67,10 @@ int parse_bw(char *buf, struct rdt_resource *r, struct rdt_domain *d)
60{ 67{
61 unsigned long data; 68 unsigned long data;
62 69
63 if (d->have_new_ctrl) 70 if (d->have_new_ctrl) {
71 rdt_last_cmd_printf("duplicate domain %d\n", d->id);
64 return -EINVAL; 72 return -EINVAL;
73 }
65 74
66 if (!bw_validate(buf, &data, r)) 75 if (!bw_validate(buf, &data, r))
67 return -EINVAL; 76 return -EINVAL;
@@ -84,20 +93,29 @@ static bool cbm_validate(char *buf, unsigned long *data, struct rdt_resource *r)
84 int ret; 93 int ret;
85 94
86 ret = kstrtoul(buf, 16, &val); 95 ret = kstrtoul(buf, 16, &val);
87 if (ret) 96 if (ret) {
97 rdt_last_cmd_printf("non-hex character in mask %s\n", buf);
88 return false; 98 return false;
99 }
89 100
90 if (val == 0 || val > r->default_ctrl) 101 if (val == 0 || val > r->default_ctrl) {
102 rdt_last_cmd_puts("mask out of range\n");
91 return false; 103 return false;
104 }
92 105
93 first_bit = find_first_bit(&val, cbm_len); 106 first_bit = find_first_bit(&val, cbm_len);
94 zero_bit = find_next_zero_bit(&val, cbm_len, first_bit); 107 zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);
95 108
96 if (find_next_bit(&val, cbm_len, zero_bit) < cbm_len) 109 if (find_next_bit(&val, cbm_len, zero_bit) < cbm_len) {
110 rdt_last_cmd_printf("mask %lx has non-consecutive 1-bits\n", val);
97 return false; 111 return false;
112 }
98 113
99 if ((zero_bit - first_bit) < r->cache.min_cbm_bits) 114 if ((zero_bit - first_bit) < r->cache.min_cbm_bits) {
115 rdt_last_cmd_printf("Need at least %d bits in mask\n",
116 r->cache.min_cbm_bits);
100 return false; 117 return false;
118 }
101 119
102 *data = val; 120 *data = val;
103 return true; 121 return true;
@@ -111,8 +129,10 @@ int parse_cbm(char *buf, struct rdt_resource *r, struct rdt_domain *d)
111{ 129{
112 unsigned long data; 130 unsigned long data;
113 131
114 if (d->have_new_ctrl) 132 if (d->have_new_ctrl) {
133 rdt_last_cmd_printf("duplicate domain %d\n", d->id);
115 return -EINVAL; 134 return -EINVAL;
135 }
116 136
117 if(!cbm_validate(buf, &data, r)) 137 if(!cbm_validate(buf, &data, r))
118 return -EINVAL; 138 return -EINVAL;
@@ -139,8 +159,10 @@ next:
139 return 0; 159 return 0;
140 dom = strsep(&line, ";"); 160 dom = strsep(&line, ";");
141 id = strsep(&dom, "="); 161 id = strsep(&dom, "=");
142 if (!dom || kstrtoul(id, 10, &dom_id)) 162 if (!dom || kstrtoul(id, 10, &dom_id)) {
163 rdt_last_cmd_puts("Missing '=' or non-numeric domain\n");
143 return -EINVAL; 164 return -EINVAL;
165 }
144 dom = strim(dom); 166 dom = strim(dom);
145 list_for_each_entry(d, &r->domains, list) { 167 list_for_each_entry(d, &r->domains, list) {
146 if (d->id == dom_id) { 168 if (d->id == dom_id) {
@@ -196,6 +218,7 @@ static int rdtgroup_parse_resource(char *resname, char *tok, int closid)
196 if (!strcmp(resname, r->name) && closid < r->num_closid) 218 if (!strcmp(resname, r->name) && closid < r->num_closid)
197 return parse_line(tok, r); 219 return parse_line(tok, r);
198 } 220 }
221 rdt_last_cmd_printf("unknown/unsupported resource name '%s'\n", resname);
199 return -EINVAL; 222 return -EINVAL;
200} 223}
201 224
@@ -218,6 +241,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
218 rdtgroup_kn_unlock(of->kn); 241 rdtgroup_kn_unlock(of->kn);
219 return -ENOENT; 242 return -ENOENT;
220 } 243 }
244 rdt_last_cmd_clear();
221 245
222 closid = rdtgrp->closid; 246 closid = rdtgrp->closid;
223 247
@@ -229,6 +253,12 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
229 while ((tok = strsep(&buf, "\n")) != NULL) { 253 while ((tok = strsep(&buf, "\n")) != NULL) {
230 resname = strim(strsep(&tok, ":")); 254 resname = strim(strsep(&tok, ":"));
231 if (!tok) { 255 if (!tok) {
256 rdt_last_cmd_puts("Missing ':'\n");
257 ret = -EINVAL;
258 goto out;
259 }
260 if (tok[0] == '\0') {
261 rdt_last_cmd_printf("Missing '%s' value\n", resname);
232 ret = -EINVAL; 262 ret = -EINVAL;
233 goto out; 263 goto out;
234 } 264 }
diff --git a/arch/x86/kernel/cpu/intel_rdt_monitor.c b/arch/x86/kernel/cpu/intel_rdt_monitor.c
index 30827510094b..681450eee428 100644
--- a/arch/x86/kernel/cpu/intel_rdt_monitor.c
+++ b/arch/x86/kernel/cpu/intel_rdt_monitor.c
@@ -51,7 +51,7 @@ static LIST_HEAD(rmid_free_lru);
51 * may have a occupancy value > intel_cqm_threshold. User can change 51 * may have a occupancy value > intel_cqm_threshold. User can change
52 * the threshold occupancy value. 52 * the threshold occupancy value.
53 */ 53 */
54unsigned int rmid_limbo_count; 54static unsigned int rmid_limbo_count;
55 55
56/** 56/**
57 * @rmid_entry - The entry in the limbo and free lists. 57 * @rmid_entry - The entry in the limbo and free lists.
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index a869d4a073c5..64c5ff97ee0d 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -24,6 +24,7 @@
24#include <linux/fs.h> 24#include <linux/fs.h>
25#include <linux/sysfs.h> 25#include <linux/sysfs.h>
26#include <linux/kernfs.h> 26#include <linux/kernfs.h>
27#include <linux/seq_buf.h>
27#include <linux/seq_file.h> 28#include <linux/seq_file.h>
28#include <linux/sched/signal.h> 29#include <linux/sched/signal.h>
29#include <linux/sched/task.h> 30#include <linux/sched/task.h>
@@ -51,6 +52,31 @@ static struct kernfs_node *kn_mongrp;
51/* Kernel fs node for "mon_data" directory under root */ 52/* Kernel fs node for "mon_data" directory under root */
52static struct kernfs_node *kn_mondata; 53static struct kernfs_node *kn_mondata;
53 54
55static struct seq_buf last_cmd_status;
56static char last_cmd_status_buf[512];
57
58void rdt_last_cmd_clear(void)
59{
60 lockdep_assert_held(&rdtgroup_mutex);
61 seq_buf_clear(&last_cmd_status);
62}
63
64void rdt_last_cmd_puts(const char *s)
65{
66 lockdep_assert_held(&rdtgroup_mutex);
67 seq_buf_puts(&last_cmd_status, s);
68}
69
70void rdt_last_cmd_printf(const char *fmt, ...)
71{
72 va_list ap;
73
74 va_start(ap, fmt);
75 lockdep_assert_held(&rdtgroup_mutex);
76 seq_buf_vprintf(&last_cmd_status, fmt, ap);
77 va_end(ap);
78}
79
54/* 80/*
55 * Trivial allocator for CLOSIDs. Since h/w only supports a small number, 81 * Trivial allocator for CLOSIDs. Since h/w only supports a small number,
56 * we can keep a bitmap of free CLOSIDs in a single integer. 82 * we can keep a bitmap of free CLOSIDs in a single integer.
@@ -238,8 +264,10 @@ static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
238 264
239 /* Check whether cpus belong to parent ctrl group */ 265 /* Check whether cpus belong to parent ctrl group */
240 cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask); 266 cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
241 if (cpumask_weight(tmpmask)) 267 if (cpumask_weight(tmpmask)) {
268 rdt_last_cmd_puts("can only add CPUs to mongroup that belong to parent\n");
242 return -EINVAL; 269 return -EINVAL;
270 }
243 271
244 /* Check whether cpus are dropped from this group */ 272 /* Check whether cpus are dropped from this group */
245 cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask); 273 cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
@@ -291,8 +319,10 @@ static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
291 cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask); 319 cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
292 if (cpumask_weight(tmpmask)) { 320 if (cpumask_weight(tmpmask)) {
293 /* Can't drop from default group */ 321 /* Can't drop from default group */
294 if (rdtgrp == &rdtgroup_default) 322 if (rdtgrp == &rdtgroup_default) {
323 rdt_last_cmd_puts("Can't drop CPUs from default group\n");
295 return -EINVAL; 324 return -EINVAL;
325 }
296 326
297 /* Give any dropped cpus to rdtgroup_default */ 327 /* Give any dropped cpus to rdtgroup_default */
298 cpumask_or(&rdtgroup_default.cpu_mask, 328 cpumask_or(&rdtgroup_default.cpu_mask,
@@ -357,8 +387,10 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
357 } 387 }
358 388
359 rdtgrp = rdtgroup_kn_lock_live(of->kn); 389 rdtgrp = rdtgroup_kn_lock_live(of->kn);
390 rdt_last_cmd_clear();
360 if (!rdtgrp) { 391 if (!rdtgrp) {
361 ret = -ENOENT; 392 ret = -ENOENT;
393 rdt_last_cmd_puts("directory was removed\n");
362 goto unlock; 394 goto unlock;
363 } 395 }
364 396
@@ -367,13 +399,16 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
367 else 399 else
368 ret = cpumask_parse(buf, newmask); 400 ret = cpumask_parse(buf, newmask);
369 401
370 if (ret) 402 if (ret) {
403 rdt_last_cmd_puts("bad cpu list/mask\n");
371 goto unlock; 404 goto unlock;
405 }
372 406
373 /* check that user didn't specify any offline cpus */ 407 /* check that user didn't specify any offline cpus */
374 cpumask_andnot(tmpmask, newmask, cpu_online_mask); 408 cpumask_andnot(tmpmask, newmask, cpu_online_mask);
375 if (cpumask_weight(tmpmask)) { 409 if (cpumask_weight(tmpmask)) {
376 ret = -EINVAL; 410 ret = -EINVAL;
411 rdt_last_cmd_puts("can only assign online cpus\n");
377 goto unlock; 412 goto unlock;
378 } 413 }
379 414
@@ -452,6 +487,7 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
452 */ 487 */
453 atomic_dec(&rdtgrp->waitcount); 488 atomic_dec(&rdtgrp->waitcount);
454 kfree(callback); 489 kfree(callback);
490 rdt_last_cmd_puts("task exited\n");
455 } else { 491 } else {
456 /* 492 /*
457 * For ctrl_mon groups move both closid and rmid. 493 * For ctrl_mon groups move both closid and rmid.
@@ -462,10 +498,12 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
462 tsk->closid = rdtgrp->closid; 498 tsk->closid = rdtgrp->closid;
463 tsk->rmid = rdtgrp->mon.rmid; 499 tsk->rmid = rdtgrp->mon.rmid;
464 } else if (rdtgrp->type == RDTMON_GROUP) { 500 } else if (rdtgrp->type == RDTMON_GROUP) {
465 if (rdtgrp->mon.parent->closid == tsk->closid) 501 if (rdtgrp->mon.parent->closid == tsk->closid) {
466 tsk->rmid = rdtgrp->mon.rmid; 502 tsk->rmid = rdtgrp->mon.rmid;
467 else 503 } else {
504 rdt_last_cmd_puts("Can't move task to different control group\n");
468 ret = -EINVAL; 505 ret = -EINVAL;
506 }
469 } 507 }
470 } 508 }
471 return ret; 509 return ret;
@@ -484,8 +522,10 @@ static int rdtgroup_task_write_permission(struct task_struct *task,
484 */ 522 */
485 if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) && 523 if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
486 !uid_eq(cred->euid, tcred->uid) && 524 !uid_eq(cred->euid, tcred->uid) &&
487 !uid_eq(cred->euid, tcred->suid)) 525 !uid_eq(cred->euid, tcred->suid)) {
526 rdt_last_cmd_printf("No permission to move task %d\n", task->pid);
488 ret = -EPERM; 527 ret = -EPERM;
528 }
489 529
490 put_cred(tcred); 530 put_cred(tcred);
491 return ret; 531 return ret;
@@ -502,6 +542,7 @@ static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp,
502 tsk = find_task_by_vpid(pid); 542 tsk = find_task_by_vpid(pid);
503 if (!tsk) { 543 if (!tsk) {
504 rcu_read_unlock(); 544 rcu_read_unlock();
545 rdt_last_cmd_printf("No task %d\n", pid);
505 return -ESRCH; 546 return -ESRCH;
506 } 547 }
507 } else { 548 } else {
@@ -529,6 +570,7 @@ static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
529 if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0) 570 if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
530 return -EINVAL; 571 return -EINVAL;
531 rdtgrp = rdtgroup_kn_lock_live(of->kn); 572 rdtgrp = rdtgroup_kn_lock_live(of->kn);
573 rdt_last_cmd_clear();
532 574
533 if (rdtgrp) 575 if (rdtgrp)
534 ret = rdtgroup_move_task(pid, rdtgrp, of); 576 ret = rdtgroup_move_task(pid, rdtgrp, of);
@@ -569,6 +611,21 @@ static int rdtgroup_tasks_show(struct kernfs_open_file *of,
569 return ret; 611 return ret;
570} 612}
571 613
614static int rdt_last_cmd_status_show(struct kernfs_open_file *of,
615 struct seq_file *seq, void *v)
616{
617 int len;
618
619 mutex_lock(&rdtgroup_mutex);
620 len = seq_buf_used(&last_cmd_status);
621 if (len)
622 seq_printf(seq, "%.*s", len, last_cmd_status_buf);
623 else
624 seq_puts(seq, "ok\n");
625 mutex_unlock(&rdtgroup_mutex);
626 return 0;
627}
628
572static int rdt_num_closids_show(struct kernfs_open_file *of, 629static int rdt_num_closids_show(struct kernfs_open_file *of,
573 struct seq_file *seq, void *v) 630 struct seq_file *seq, void *v)
574{ 631{
@@ -686,6 +743,13 @@ static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
686/* rdtgroup information files for one cache resource. */ 743/* rdtgroup information files for one cache resource. */
687static struct rftype res_common_files[] = { 744static struct rftype res_common_files[] = {
688 { 745 {
746 .name = "last_cmd_status",
747 .mode = 0444,
748 .kf_ops = &rdtgroup_kf_single_ops,
749 .seq_show = rdt_last_cmd_status_show,
750 .fflags = RF_TOP_INFO,
751 },
752 {
689 .name = "num_closids", 753 .name = "num_closids",
690 .mode = 0444, 754 .mode = 0444,
691 .kf_ops = &rdtgroup_kf_single_ops, 755 .kf_ops = &rdtgroup_kf_single_ops,
@@ -855,6 +919,10 @@ static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
855 return PTR_ERR(kn_info); 919 return PTR_ERR(kn_info);
856 kernfs_get(kn_info); 920 kernfs_get(kn_info);
857 921
922 ret = rdtgroup_add_files(kn_info, RF_TOP_INFO);
923 if (ret)
924 goto out_destroy;
925
858 for_each_alloc_enabled_rdt_resource(r) { 926 for_each_alloc_enabled_rdt_resource(r) {
859 fflags = r->fflags | RF_CTRL_INFO; 927 fflags = r->fflags | RF_CTRL_INFO;
860 ret = rdtgroup_mkdir_info_resdir(r, r->name, fflags); 928 ret = rdtgroup_mkdir_info_resdir(r, r->name, fflags);
@@ -1081,6 +1149,7 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type,
1081 struct dentry *dentry; 1149 struct dentry *dentry;
1082 int ret; 1150 int ret;
1083 1151
1152 cpus_read_lock();
1084 mutex_lock(&rdtgroup_mutex); 1153 mutex_lock(&rdtgroup_mutex);
1085 /* 1154 /*
1086 * resctrl file system can only be mounted once. 1155 * resctrl file system can only be mounted once.
@@ -1130,12 +1199,12 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type,
1130 goto out_mondata; 1199 goto out_mondata;
1131 1200
1132 if (rdt_alloc_capable) 1201 if (rdt_alloc_capable)
1133 static_branch_enable(&rdt_alloc_enable_key); 1202 static_branch_enable_cpuslocked(&rdt_alloc_enable_key);
1134 if (rdt_mon_capable) 1203 if (rdt_mon_capable)
1135 static_branch_enable(&rdt_mon_enable_key); 1204 static_branch_enable_cpuslocked(&rdt_mon_enable_key);
1136 1205
1137 if (rdt_alloc_capable || rdt_mon_capable) 1206 if (rdt_alloc_capable || rdt_mon_capable)
1138 static_branch_enable(&rdt_enable_key); 1207 static_branch_enable_cpuslocked(&rdt_enable_key);
1139 1208
1140 if (is_mbm_enabled()) { 1209 if (is_mbm_enabled()) {
1141 r = &rdt_resources_all[RDT_RESOURCE_L3]; 1210 r = &rdt_resources_all[RDT_RESOURCE_L3];
@@ -1156,7 +1225,9 @@ out_info:
1156out_cdp: 1225out_cdp:
1157 cdp_disable(); 1226 cdp_disable();
1158out: 1227out:
1228 rdt_last_cmd_clear();
1159 mutex_unlock(&rdtgroup_mutex); 1229 mutex_unlock(&rdtgroup_mutex);
1230 cpus_read_unlock();
1160 1231
1161 return dentry; 1232 return dentry;
1162} 1233}
@@ -1295,9 +1366,7 @@ static void rmdir_all_sub(void)
1295 kfree(rdtgrp); 1366 kfree(rdtgrp);
1296 } 1367 }
1297 /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */ 1368 /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
1298 get_online_cpus();
1299 update_closid_rmid(cpu_online_mask, &rdtgroup_default); 1369 update_closid_rmid(cpu_online_mask, &rdtgroup_default);
1300 put_online_cpus();
1301 1370
1302 kernfs_remove(kn_info); 1371 kernfs_remove(kn_info);
1303 kernfs_remove(kn_mongrp); 1372 kernfs_remove(kn_mongrp);
@@ -1308,6 +1377,7 @@ static void rdt_kill_sb(struct super_block *sb)
1308{ 1377{
1309 struct rdt_resource *r; 1378 struct rdt_resource *r;
1310 1379
1380 cpus_read_lock();
1311 mutex_lock(&rdtgroup_mutex); 1381 mutex_lock(&rdtgroup_mutex);
1312 1382
1313 /*Put everything back to default values. */ 1383 /*Put everything back to default values. */
@@ -1315,11 +1385,12 @@ static void rdt_kill_sb(struct super_block *sb)
1315 reset_all_ctrls(r); 1385 reset_all_ctrls(r);
1316 cdp_disable(); 1386 cdp_disable();
1317 rmdir_all_sub(); 1387 rmdir_all_sub();
1318 static_branch_disable(&rdt_alloc_enable_key); 1388 static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
1319 static_branch_disable(&rdt_mon_enable_key); 1389 static_branch_disable_cpuslocked(&rdt_mon_enable_key);
1320 static_branch_disable(&rdt_enable_key); 1390 static_branch_disable_cpuslocked(&rdt_enable_key);
1321 kernfs_kill_sb(sb); 1391 kernfs_kill_sb(sb);
1322 mutex_unlock(&rdtgroup_mutex); 1392 mutex_unlock(&rdtgroup_mutex);
1393 cpus_read_unlock();
1323} 1394}
1324 1395
1325static struct file_system_type rdt_fs_type = { 1396static struct file_system_type rdt_fs_type = {
@@ -1524,8 +1595,10 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
1524 int ret; 1595 int ret;
1525 1596
1526 prdtgrp = rdtgroup_kn_lock_live(prgrp_kn); 1597 prdtgrp = rdtgroup_kn_lock_live(prgrp_kn);
1598 rdt_last_cmd_clear();
1527 if (!prdtgrp) { 1599 if (!prdtgrp) {
1528 ret = -ENODEV; 1600 ret = -ENODEV;
1601 rdt_last_cmd_puts("directory was removed\n");
1529 goto out_unlock; 1602 goto out_unlock;
1530 } 1603 }
1531 1604
@@ -1533,6 +1606,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
1533 rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL); 1606 rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL);
1534 if (!rdtgrp) { 1607 if (!rdtgrp) {
1535 ret = -ENOSPC; 1608 ret = -ENOSPC;
1609 rdt_last_cmd_puts("kernel out of memory\n");
1536 goto out_unlock; 1610 goto out_unlock;
1537 } 1611 }
1538 *r = rdtgrp; 1612 *r = rdtgrp;
@@ -1544,6 +1618,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
1544 kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp); 1618 kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp);
1545 if (IS_ERR(kn)) { 1619 if (IS_ERR(kn)) {
1546 ret = PTR_ERR(kn); 1620 ret = PTR_ERR(kn);
1621 rdt_last_cmd_puts("kernfs create error\n");
1547 goto out_free_rgrp; 1622 goto out_free_rgrp;
1548 } 1623 }
1549 rdtgrp->kn = kn; 1624 rdtgrp->kn = kn;
@@ -1557,24 +1632,31 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
1557 kernfs_get(kn); 1632 kernfs_get(kn);
1558 1633
1559 ret = rdtgroup_kn_set_ugid(kn); 1634 ret = rdtgroup_kn_set_ugid(kn);
1560 if (ret) 1635 if (ret) {
1636 rdt_last_cmd_puts("kernfs perm error\n");
1561 goto out_destroy; 1637 goto out_destroy;
1638 }
1562 1639
1563 files = RFTYPE_BASE | RFTYPE_CTRL;
1564 files = RFTYPE_BASE | BIT(RF_CTRLSHIFT + rtype); 1640 files = RFTYPE_BASE | BIT(RF_CTRLSHIFT + rtype);
1565 ret = rdtgroup_add_files(kn, files); 1641 ret = rdtgroup_add_files(kn, files);
1566 if (ret) 1642 if (ret) {
1643 rdt_last_cmd_puts("kernfs fill error\n");
1567 goto out_destroy; 1644 goto out_destroy;
1645 }
1568 1646
1569 if (rdt_mon_capable) { 1647 if (rdt_mon_capable) {
1570 ret = alloc_rmid(); 1648 ret = alloc_rmid();
1571 if (ret < 0) 1649 if (ret < 0) {
1650 rdt_last_cmd_puts("out of RMIDs\n");
1572 goto out_destroy; 1651 goto out_destroy;
1652 }
1573 rdtgrp->mon.rmid = ret; 1653 rdtgrp->mon.rmid = ret;
1574 1654
1575 ret = mkdir_mondata_all(kn, rdtgrp, &rdtgrp->mon.mon_data_kn); 1655 ret = mkdir_mondata_all(kn, rdtgrp, &rdtgrp->mon.mon_data_kn);
1576 if (ret) 1656 if (ret) {
1657 rdt_last_cmd_puts("kernfs subdir error\n");
1577 goto out_idfree; 1658 goto out_idfree;
1659 }
1578 } 1660 }
1579 kernfs_activate(kn); 1661 kernfs_activate(kn);
1580 1662
@@ -1652,8 +1734,10 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
1652 1734
1653 kn = rdtgrp->kn; 1735 kn = rdtgrp->kn;
1654 ret = closid_alloc(); 1736 ret = closid_alloc();
1655 if (ret < 0) 1737 if (ret < 0) {
1738 rdt_last_cmd_puts("out of CLOSIDs\n");
1656 goto out_common_fail; 1739 goto out_common_fail;
1740 }
1657 closid = ret; 1741 closid = ret;
1658 1742
1659 rdtgrp->closid = closid; 1743 rdtgrp->closid = closid;
@@ -1665,8 +1749,10 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
1665 * of tasks and cpus to monitor. 1749 * of tasks and cpus to monitor.
1666 */ 1750 */
1667 ret = mongroup_create_dir(kn, NULL, "mon_groups", NULL); 1751 ret = mongroup_create_dir(kn, NULL, "mon_groups", NULL);
1668 if (ret) 1752 if (ret) {
1753 rdt_last_cmd_puts("kernfs subdir error\n");
1669 goto out_id_free; 1754 goto out_id_free;
1755 }
1670 } 1756 }
1671 1757
1672 goto out_unlock; 1758 goto out_unlock;
@@ -1902,6 +1988,9 @@ int __init rdtgroup_init(void)
1902{ 1988{
1903 int ret = 0; 1989 int ret = 0;
1904 1990
1991 seq_buf_init(&last_cmd_status, last_cmd_status_buf,
1992 sizeof(last_cmd_status_buf));
1993
1905 ret = rdtgroup_setup_root(); 1994 ret = rdtgroup_setup_root();
1906 if (ret) 1995 if (ret)
1907 return ret; 1996 return ret;