author		Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>	2019-07-03 13:04:00 -0400
committer	Michael Ellerman <mpe@ellerman.id.au>	2019-07-04 08:27:09 -0400
commit		d62c8deeb6e69cd7815c21171a218301822e4a06 (patch)
tree		214d7345031437c3ce1cec952dbabcea91ac9527
parent		5a1ea4774ddc2c6bc3ba1415880091eccf1a901e (diff)
powerpc/pseries: Provide vcpu dispatch statistics
For Shared Processor LPARs, the POWER Hypervisor maintains a relatively
static mapping of the LPAR processors (vcpus) to physical processor
chips (representing the "home" node) and tries to always dispatch vcpus
on their associated physical processor chip. However, under certain
scenarios, vcpus may be dispatched on a different processor chip (away
from its home node). The actual physical processor number on which a
certain vcpu is dispatched is available to the guest in the
'processor_id' field of each DTL entry.

The guest can discover the home node of each vcpu through the
H_HOME_NODE_ASSOCIATIVITY(flags=1) hcall. The guest can also discover
the associativity of physical processors, as represented in the DTL
entry, through the H_HOME_NODE_ASSOCIATIVITY(flags=2) hcall. These can
then be compared to determine if the vcpu was dispatched on its home
node or not. If the vcpu was not dispatched on the home node, it is
possible to determine if the vcpu was dispatched in a different chip,
socket or drawer.

Introduce a procfs file /proc/powerpc/vcpudispatch_stats that can be
used to obtain these statistics. Writing '1' to this file enables
collecting the statistics, while writing '0' disables the statistics.
The statistics themselves are available by reading the procfs file. By
default, the DTL log for each vcpu is processed 50 times a second so as
not to miss any entries. This processing frequency can be changed
through /proc/powerpc/vcpudispatch_stats_freq.

Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
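As an illustration, a minimal userspace consumer of this interface might look like the sketch below. The procfs paths and the '0'/'1' and frequency semantics come from this patch; the program itself, including the 100 Hz processing frequency and the 10-second sampling window, is purely hypothetical:

    /* Hypothetical consumer of the procfs interface added by this patch. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        char buf[4096];
        ssize_t n;
        int fd;

        /* Optional: process each DTL buffer 100 times a second (1..HZ). */
        fd = open("/proc/powerpc/vcpudispatch_stats_freq", O_WRONLY);
        if (fd >= 0) {
            write(fd, "100", 3);
            close(fd);
        }

        /* Writing '1' starts collection of dispatch statistics. */
        fd = open("/proc/powerpc/vcpudispatch_stats", O_WRONLY);
        if (fd < 0)
            return 1;
        write(fd, "1", 1);
        close(fd);

        sleep(10);      /* let some vcpu dispatches accumulate */

        /* Reading the file yields one line of counters per online cpu. */
        fd = open("/proc/powerpc/vcpudispatch_stats", O_RDONLY);
        if (fd >= 0) {
            while ((n = read(fd, buf, sizeof(buf) - 1)) > 0) {
                buf[n] = '\0';
                fputs(buf, stdout);
            }
            close(fd);
        }

        /* Writing '0' stops collection and releases the DTL buffers. */
        fd = open("/proc/powerpc/vcpudispatch_stats", O_WRONLY);
        if (fd >= 0) {
            write(fd, "0", 1);
            close(fd);
        }
        return 0;
    }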
-rw-r--r--	arch/powerpc/include/asm/topology.h	6
-rw-r--r--	arch/powerpc/mm/numa.c	16
-rw-r--r--	arch/powerpc/platforms/pseries/lpar.c	525
3 files changed, 545 insertions, 2 deletions
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index f85e2b01c3df..2f7e1ea5089e 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -35,6 +35,7 @@ static inline int pcibus_to_node(struct pci_bus *bus)
 				 cpu_all_mask :				\
 				 cpumask_of_node(pcibus_to_node(bus)))
 
+extern int cpu_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc);
 extern int __node_distance(int, int);
 #define node_distance(a, b) __node_distance(a, b)
 
@@ -84,6 +85,11 @@ static inline int numa_update_cpu_topology(bool cpus_locked)
 
 static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node) {}
 
+static inline int cpu_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
+{
+	return 0;
+}
+
 #endif /* CONFIG_NUMA */
 
 #if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 50fadc99897b..26f479e6c8ed 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -167,6 +167,22 @@ static void unmap_cpu_from_node(unsigned long cpu)
 }
 #endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */
 
+int cpu_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
+{
+	int dist = 0;
+
+	int i, index;
+
+	for (i = 0; i < distance_ref_points_depth; i++) {
+		index = be32_to_cpu(distance_ref_points[i]);
+		if (cpu1_assoc[index] == cpu2_assoc[index])
+			break;
+		dist++;
+	}
+
+	return dist;
+}
+
 /* must hold reference to node during call */
 static const __be32 *of_get_associativity(struct device_node *dev)
 {
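cpu_distance() walks the ibm,associativity-reference-points domains from the most significant level down and counts how many levels differ before the first match, so 0 means the two associativity arrays already agree at the top reference point. The lpar.c changes below map a result of 0/1/2 onto same/different/far chip (and, against the vcpu's home node, onto home/remote/far NUMA node). Here is a standalone sketch of the same loop; the reference points and associativity values are made up for the example, and byte-order handling is elided:

    /* Standalone illustration of the cpu_distance() logic added above. */
    #include <stdio.h>

    /* Hypothetical ibm,associativity-reference-points: indices into the
     * associativity arrays, most significant domain first. */
    static const int distance_ref_points[] = { 2, 4 };
    static const int distance_ref_points_depth = 2;

    static int cpu_distance(const unsigned int *cpu1_assoc,
                            const unsigned int *cpu2_assoc)
    {
        int i, index, dist = 0;

        for (i = 0; i < distance_ref_points_depth; i++) {
            index = distance_ref_points[i];
            if (cpu1_assoc[index] == cpu2_assoc[index])
                break;          /* matched at this level: stop counting */
            dist++;             /* differs here, try a coarser level */
        }
        return dist;
    }

    int main(void)
    {
        /* Made-up associativity domains; index 2 and 4 are the ones
         * named in distance_ref_points above. */
        unsigned int cpuA[] = { 5, 0, 7, 0, 1 };
        unsigned int cpuB[] = { 5, 0, 7, 0, 1 };   /* matches at index 2 */
        unsigned int cpuC[] = { 5, 0, 8, 0, 1 };   /* matches only at index 4 */
        unsigned int cpuD[] = { 5, 0, 9, 0, 2 };   /* matches at neither */

        printf("A-B: %d\n", cpu_distance(cpuA, cpuB));   /* prints 0 */
        printf("A-C: %d\n", cpu_distance(cpuA, cpuC));   /* prints 1 */
        printf("A-D: %d\n", cpu_distance(cpuA, cpuD));   /* prints 2 */
        return 0;
    }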
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 7869121ab431..ec5a7893f71b 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -30,6 +30,10 @@
 #include <linux/jump_label.h>
 #include <linux/delay.h>
 #include <linux/stop_machine.h>
+#include <linux/spinlock.h>
+#include <linux/cpuhotplug.h>
+#include <linux/workqueue.h>
+#include <linux/proc_fs.h>
 #include <asm/processor.h>
 #include <asm/mmu.h>
 #include <asm/page.h>
@@ -65,6 +69,12 @@ EXPORT_SYMBOL(plpar_hcall);
 EXPORT_SYMBOL(plpar_hcall9);
 EXPORT_SYMBOL(plpar_hcall_norets);
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+static u8 dtl_mask = DTL_LOG_PREEMPT;
+#else
+static u8 dtl_mask;
+#endif
+
 void alloc_dtl_buffers(void)
 {
 	int cpu;
@@ -73,11 +83,15 @@ void alloc_dtl_buffers(void)
 
 	for_each_possible_cpu(cpu) {
 		pp = paca_ptrs[cpu];
+		if (pp->dispatch_log)
+			continue;
 		dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL);
 		if (!dtl) {
 			pr_warn("Failed to allocate dispatch trace log for cpu %d\n",
 				cpu);
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 			pr_warn("Stolen time statistics will be unreliable\n");
+#endif
 			break;
 		}
 
@@ -97,7 +111,7 @@ void register_dtl_buffer(int cpu)
 
 	pp = paca_ptrs[cpu];
 	dtl = pp->dispatch_log;
-	if (dtl) {
+	if (dtl && dtl_mask) {
 		pp->dtl_ridx = 0;
 		pp->dtl_curr = dtl;
 		lppaca_of(cpu).dtl_idx = 0;
@@ -109,12 +123,519 @@ void register_dtl_buffer(int cpu)
 		pr_err("WARNING: DTL registration of cpu %d (hw %d) failed with %ld\n",
 		       cpu, hwcpu, ret);
 
-		lppaca_of(cpu).dtl_enable_mask = DTL_LOG_PREEMPT;
+		lppaca_of(cpu).dtl_enable_mask = dtl_mask;
 	}
 }
 
 #ifdef CONFIG_PPC_SPLPAR
+struct dtl_worker {
+	struct delayed_work work;
+	int cpu;
+};
+
+struct vcpu_dispatch_data {
+	int last_disp_cpu;
+
+	int total_disp;
+
+	int same_cpu_disp;
+	int same_chip_disp;
+	int diff_chip_disp;
+	int far_chip_disp;
+
+	int numa_home_disp;
+	int numa_remote_disp;
+	int numa_far_disp;
+};
+
+/*
+ * This represents the number of cpus in the hypervisor. Since there is no
+ * architected way to discover the number of processors in the host, we
+ * provision for dealing with NR_CPUS. This is currently 2048 by default, and
+ * is sufficient for our purposes. This will need to be tweaked if
+ * CONFIG_NR_CPUS is changed.
+ */
+#define NR_CPUS_H	NR_CPUS
+
 DEFINE_RWLOCK(dtl_access_lock);
+static DEFINE_PER_CPU(struct vcpu_dispatch_data, vcpu_disp_data);
+static DEFINE_PER_CPU(u64, dtl_entry_ridx);
+static DEFINE_PER_CPU(struct dtl_worker, dtl_workers);
+static enum cpuhp_state dtl_worker_state;
+static DEFINE_MUTEX(dtl_enable_mutex);
+static int vcpudispatch_stats_on __read_mostly;
+static int vcpudispatch_stats_freq = 50;
+static __be32 *vcpu_associativity, *pcpu_associativity;
+
+
+static void free_dtl_buffers(void)
+{
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+	int cpu;
+	struct paca_struct *pp;
+
+	for_each_possible_cpu(cpu) {
+		pp = paca_ptrs[cpu];
+		if (!pp->dispatch_log)
+			continue;
+		kmem_cache_free(dtl_cache, pp->dispatch_log);
+		pp->dtl_ridx = 0;
+		pp->dispatch_log = 0;
+		pp->dispatch_log_end = 0;
+		pp->dtl_curr = 0;
+	}
+#endif
+}
+
+static int init_cpu_associativity(void)
+{
+	vcpu_associativity = kcalloc(num_possible_cpus() / threads_per_core,
+			VPHN_ASSOC_BUFSIZE * sizeof(__be32), GFP_KERNEL);
+	pcpu_associativity = kcalloc(NR_CPUS_H / threads_per_core,
+			VPHN_ASSOC_BUFSIZE * sizeof(__be32), GFP_KERNEL);
+
+	if (!vcpu_associativity || !pcpu_associativity) {
+		pr_err("error allocating memory for associativity information\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void destroy_cpu_associativity(void)
+{
+	kfree(vcpu_associativity);
+	kfree(pcpu_associativity);
+	vcpu_associativity = pcpu_associativity = 0;
+}
+
+static __be32 *__get_cpu_associativity(int cpu, __be32 *cpu_assoc, int flag)
+{
+	__be32 *assoc;
+	int rc = 0;
+
+	assoc = &cpu_assoc[(int)(cpu / threads_per_core) * VPHN_ASSOC_BUFSIZE];
+	if (!assoc[0]) {
+		rc = hcall_vphn(cpu, flag, &assoc[0]);
+		if (rc)
+			return NULL;
+	}
+
+	return assoc;
+}
+
+static __be32 *get_pcpu_associativity(int cpu)
+{
+	return __get_cpu_associativity(cpu, pcpu_associativity, VPHN_FLAG_PCPU);
+}
+
+static __be32 *get_vcpu_associativity(int cpu)
+{
+	return __get_cpu_associativity(cpu, vcpu_associativity, VPHN_FLAG_VCPU);
+}
+
+static int cpu_relative_dispatch_distance(int last_disp_cpu, int cur_disp_cpu)
+{
+	__be32 *last_disp_cpu_assoc, *cur_disp_cpu_assoc;
+
+	if (last_disp_cpu >= NR_CPUS_H || cur_disp_cpu >= NR_CPUS_H)
+		return -EINVAL;
+
+	last_disp_cpu_assoc = get_pcpu_associativity(last_disp_cpu);
+	cur_disp_cpu_assoc = get_pcpu_associativity(cur_disp_cpu);
+
+	if (!last_disp_cpu_assoc || !cur_disp_cpu_assoc)
+		return -EIO;
+
+	return cpu_distance(last_disp_cpu_assoc, cur_disp_cpu_assoc);
+}
+
+static int cpu_home_node_dispatch_distance(int disp_cpu)
+{
+	__be32 *disp_cpu_assoc, *vcpu_assoc;
+	int vcpu_id = smp_processor_id();
+
+	if (disp_cpu >= NR_CPUS_H) {
+		pr_debug_ratelimited("vcpu dispatch cpu %d > %d\n",
+						disp_cpu, NR_CPUS_H);
+		return -EINVAL;
+	}
+
+	disp_cpu_assoc = get_pcpu_associativity(disp_cpu);
+	vcpu_assoc = get_vcpu_associativity(vcpu_id);
+
+	if (!disp_cpu_assoc || !vcpu_assoc)
+		return -EIO;
+
+	return cpu_distance(disp_cpu_assoc, vcpu_assoc);
+}
+
+static void update_vcpu_disp_stat(int disp_cpu)
+{
+	struct vcpu_dispatch_data *disp;
+	int distance;
+
+	disp = this_cpu_ptr(&vcpu_disp_data);
+	if (disp->last_disp_cpu == -1) {
+		disp->last_disp_cpu = disp_cpu;
+		return;
+	}
+
+	disp->total_disp++;
+
+	if (disp->last_disp_cpu == disp_cpu ||
+			(cpu_first_thread_sibling(disp->last_disp_cpu) ==
+					cpu_first_thread_sibling(disp_cpu)))
+		disp->same_cpu_disp++;
+	else {
+		distance = cpu_relative_dispatch_distance(disp->last_disp_cpu,
+								disp_cpu);
+		if (distance < 0)
+			pr_debug_ratelimited("vcpudispatch_stats: cpu %d: error determining associativity\n",
+					smp_processor_id());
+		else {
+			switch (distance) {
+			case 0:
+				disp->same_chip_disp++;
+				break;
+			case 1:
+				disp->diff_chip_disp++;
+				break;
+			case 2:
+				disp->far_chip_disp++;
+				break;
+			default:
+				pr_debug_ratelimited("vcpudispatch_stats: cpu %d (%d -> %d): unexpected relative dispatch distance %d\n",
+						 smp_processor_id(),
+						 disp->last_disp_cpu,
+						 disp_cpu,
+						 distance);
+			}
+		}
+	}
+
+	distance = cpu_home_node_dispatch_distance(disp_cpu);
+	if (distance < 0)
+		pr_debug_ratelimited("vcpudispatch_stats: cpu %d: error determining associativity\n",
+				smp_processor_id());
+	else {
+		switch (distance) {
+		case 0:
+			disp->numa_home_disp++;
+			break;
+		case 1:
+			disp->numa_remote_disp++;
+			break;
+		case 2:
+			disp->numa_far_disp++;
+			break;
+		default:
+			pr_debug_ratelimited("vcpudispatch_stats: cpu %d on %d: unexpected numa dispatch distance %d\n",
+						 smp_processor_id(),
+						 disp_cpu,
+						 distance);
+		}
+	}
+
+	disp->last_disp_cpu = disp_cpu;
+}
+
+static void process_dtl_buffer(struct work_struct *work)
+{
+	struct dtl_entry dtle;
+	u64 i = __this_cpu_read(dtl_entry_ridx);
+	struct dtl_entry *dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
+	struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
+	struct lppaca *vpa = local_paca->lppaca_ptr;
+	struct dtl_worker *d = container_of(work, struct dtl_worker, work.work);
+
+	if (!local_paca->dispatch_log)
+		return;
+
+	/* if we have been migrated away, we cancel ourself */
+	if (d->cpu != smp_processor_id()) {
+		pr_debug("vcpudispatch_stats: cpu %d worker migrated -- canceling worker\n",
+					smp_processor_id());
+		return;
+	}
+
+	if (i == be64_to_cpu(vpa->dtl_idx))
+		goto out;
+
+	while (i < be64_to_cpu(vpa->dtl_idx)) {
+		dtle = *dtl;
+		barrier();
+		if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
+			/* buffer has overflowed */
+			pr_debug_ratelimited("vcpudispatch_stats: cpu %d lost %lld DTL samples\n",
+				d->cpu,
+				be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG - i);
+			i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
+			dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
+			continue;
+		}
+		update_vcpu_disp_stat(be16_to_cpu(dtle.processor_id));
+		++i;
+		++dtl;
+		if (dtl == dtl_end)
+			dtl = local_paca->dispatch_log;
+	}
+
+	__this_cpu_write(dtl_entry_ridx, i);
+
+out:
+	schedule_delayed_work_on(d->cpu, to_delayed_work(work),
+					HZ / vcpudispatch_stats_freq);
+}
+
+static int dtl_worker_online(unsigned int cpu)
+{
+	struct dtl_worker *d = &per_cpu(dtl_workers, cpu);
+
+	memset(d, 0, sizeof(*d));
+	INIT_DELAYED_WORK(&d->work, process_dtl_buffer);
+	d->cpu = cpu;
+
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+	per_cpu(dtl_entry_ridx, cpu) = 0;
+	register_dtl_buffer(cpu);
+#else
+	per_cpu(dtl_entry_ridx, cpu) = be64_to_cpu(lppaca_of(cpu).dtl_idx);
+#endif
+
+	schedule_delayed_work_on(cpu, &d->work, HZ / vcpudispatch_stats_freq);
+	return 0;
+}
+
+static int dtl_worker_offline(unsigned int cpu)
+{
+	struct dtl_worker *d = &per_cpu(dtl_workers, cpu);
+
+	cancel_delayed_work_sync(&d->work);
+
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+	unregister_dtl(get_hard_smp_processor_id(cpu));
+#endif
+
+	return 0;
+}
+
+static void set_global_dtl_mask(u8 mask)
+{
+	int cpu;
+
+	dtl_mask = mask;
+	for_each_present_cpu(cpu)
+		lppaca_of(cpu).dtl_enable_mask = dtl_mask;
+}
+
+static void reset_global_dtl_mask(void)
+{
+	int cpu;
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+	dtl_mask = DTL_LOG_PREEMPT;
+#else
+	dtl_mask = 0;
+#endif
+	for_each_present_cpu(cpu)
+		lppaca_of(cpu).dtl_enable_mask = dtl_mask;
+}
+
+static int dtl_worker_enable(void)
+{
+	int rc = 0, state;
+
+	if (!write_trylock(&dtl_access_lock)) {
+		rc = -EBUSY;
+		goto out;
+	}
+
+	set_global_dtl_mask(DTL_LOG_ALL);
+
+	/* Setup dtl buffers and register those */
+	alloc_dtl_buffers();
+
+	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/dtl:online",
+					dtl_worker_online, dtl_worker_offline);
+	if (state < 0) {
+		pr_err("vcpudispatch_stats: unable to setup workqueue for DTL processing\n");
+		free_dtl_buffers();
+		reset_global_dtl_mask();
+		write_unlock(&dtl_access_lock);
+		rc = -EINVAL;
+		goto out;
+	}
+	dtl_worker_state = state;
+
+out:
+	return rc;
+}
+
+static void dtl_worker_disable(void)
+{
+	cpuhp_remove_state(dtl_worker_state);
+	free_dtl_buffers();
+	reset_global_dtl_mask();
+	write_unlock(&dtl_access_lock);
+}
+
+static ssize_t vcpudispatch_stats_write(struct file *file, const char __user *p,
+		size_t count, loff_t *ppos)
+{
+	struct vcpu_dispatch_data *disp;
+	int rc, cmd, cpu;
+	char buf[16];
+
+	if (count > 15)
+		return -EINVAL;
+
+	if (copy_from_user(buf, p, count))
+		return -EFAULT;
+
+	buf[count] = 0;
+	rc = kstrtoint(buf, 0, &cmd);
+	if (rc || cmd < 0 || cmd > 1) {
+		pr_err("vcpudispatch_stats: please use 0 to disable or 1 to enable dispatch statistics\n");
+		return rc ? rc : -EINVAL;
+	}
+
+	mutex_lock(&dtl_enable_mutex);
+
+	if ((cmd == 0 && !vcpudispatch_stats_on) ||
+			(cmd == 1 && vcpudispatch_stats_on))
+		goto out;
+
+	if (cmd) {
+		rc = init_cpu_associativity();
+		if (rc)
+			goto out;
+
+		for_each_possible_cpu(cpu) {
+			disp = per_cpu_ptr(&vcpu_disp_data, cpu);
+			memset(disp, 0, sizeof(*disp));
+			disp->last_disp_cpu = -1;
+		}
+
+		rc = dtl_worker_enable();
+		if (rc) {
+			destroy_cpu_associativity();
+			goto out;
+		}
+	} else {
+		dtl_worker_disable();
+		destroy_cpu_associativity();
+	}
+
+	vcpudispatch_stats_on = cmd;
+
+out:
+	mutex_unlock(&dtl_enable_mutex);
+	if (rc)
+		return rc;
+	return count;
+}
+
+static int vcpudispatch_stats_display(struct seq_file *p, void *v)
+{
+	int cpu;
+	struct vcpu_dispatch_data *disp;
+
+	if (!vcpudispatch_stats_on) {
+		seq_puts(p, "off\n");
+		return 0;
+	}
+
+	for_each_online_cpu(cpu) {
+		disp = per_cpu_ptr(&vcpu_disp_data, cpu);
+		seq_printf(p, "cpu%d", cpu);
+		seq_put_decimal_ull(p, " ", disp->total_disp);
+		seq_put_decimal_ull(p, " ", disp->same_cpu_disp);
+		seq_put_decimal_ull(p, " ", disp->same_chip_disp);
+		seq_put_decimal_ull(p, " ", disp->diff_chip_disp);
+		seq_put_decimal_ull(p, " ", disp->far_chip_disp);
+		seq_put_decimal_ull(p, " ", disp->numa_home_disp);
+		seq_put_decimal_ull(p, " ", disp->numa_remote_disp);
+		seq_put_decimal_ull(p, " ", disp->numa_far_disp);
+		seq_puts(p, "\n");
+	}
+
+	return 0;
+}
+
+static int vcpudispatch_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, vcpudispatch_stats_display, NULL);
+}
+
+static const struct file_operations vcpudispatch_stats_proc_ops = {
+	.open		= vcpudispatch_stats_open,
+	.read		= seq_read,
+	.write		= vcpudispatch_stats_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static ssize_t vcpudispatch_stats_freq_write(struct file *file,
+		const char __user *p, size_t count, loff_t *ppos)
+{
+	int rc, freq;
+	char buf[16];
+
+	if (count > 15)
+		return -EINVAL;
+
+	if (copy_from_user(buf, p, count))
+		return -EFAULT;
+
+	buf[count] = 0;
+	rc = kstrtoint(buf, 0, &freq);
+	if (rc || freq < 1 || freq > HZ) {
+		pr_err("vcpudispatch_stats_freq: please specify a frequency between 1 and %d\n",
+			HZ);
+		return rc ? rc : -EINVAL;
+	}
+
+	vcpudispatch_stats_freq = freq;
+
+	return count;
+}
+
+static int vcpudispatch_stats_freq_display(struct seq_file *p, void *v)
+{
+	seq_printf(p, "%d\n", vcpudispatch_stats_freq);
+	return 0;
+}
+
+static int vcpudispatch_stats_freq_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, vcpudispatch_stats_freq_display, NULL);
+}
+
+static const struct file_operations vcpudispatch_stats_freq_proc_ops = {
+	.open		= vcpudispatch_stats_freq_open,
+	.read		= seq_read,
+	.write		= vcpudispatch_stats_freq_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int __init vcpudispatch_stats_procfs_init(void)
+{
+	if (!lppaca_shared_proc(get_lppaca()))
+		return 0;
+
+	if (!proc_create("powerpc/vcpudispatch_stats", 0600, NULL,
+					&vcpudispatch_stats_proc_ops))
+		pr_err("vcpudispatch_stats: error creating procfs file\n");
+	else if (!proc_create("powerpc/vcpudispatch_stats_freq", 0600, NULL,
+					&vcpudispatch_stats_freq_proc_ops))
+		pr_err("vcpudispatch_stats_freq: error creating procfs file\n");
+
+	return 0;
+}
+
+machine_device_initcall(pseries, vcpudispatch_stats_procfs_init);
 #endif /* CONFIG_PPC_SPLPAR */
 
 void vpa_init(int cpu)
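For reference, each line produced by vcpudispatch_stats_display() above carries the cpu followed by eight counters: total dispatches; dispatches on the same cpu/core; on the same, a different, and a far chip; and on the home, a remote, and a far NUMA node. A sketch of a userspace reader follows; the struct, helper function and sample line are illustrative only, not part of the patch:

    /* Hypothetical reader of /proc/powerpc/vcpudispatch_stats, matching
     * the field order emitted by vcpudispatch_stats_display() above. */
    #include <stdio.h>

    struct vcpu_stats {
        int cpu;
        unsigned long long total_disp;
        unsigned long long same_cpu_disp;     /* same core (thread siblings) */
        unsigned long long same_chip_disp;
        unsigned long long diff_chip_disp;
        unsigned long long far_chip_disp;
        unsigned long long numa_home_disp;    /* dispatched on home node */
        unsigned long long numa_remote_disp;
        unsigned long long numa_far_disp;
    };

    static int parse_stats_line(const char *line, struct vcpu_stats *s)
    {
        /* e.g. "cpu0 6839 4126 2683 30 0 6821 18 0" (made-up values);
         * the file reads "off" when collection is disabled. */
        return sscanf(line, "cpu%d %llu %llu %llu %llu %llu %llu %llu %llu",
                      &s->cpu, &s->total_disp, &s->same_cpu_disp,
                      &s->same_chip_disp, &s->diff_chip_disp,
                      &s->far_chip_disp, &s->numa_home_disp,
                      &s->numa_remote_disp, &s->numa_far_disp) == 9 ? 0 : -1;
    }

    int main(void)
    {
        struct vcpu_stats s;
        char line[256];
        FILE *f = fopen("/proc/powerpc/vcpudispatch_stats", "r");

        if (!f)
            return 1;
        while (fgets(line, sizeof(line), f)) {
            if (parse_stats_line(line, &s) == 0)
                printf("cpu%d: %llu dispatches, %llu on home node\n",
                       s.cpu, s.total_disp, s.numa_home_disp);
        }
        fclose(f);
        return 0;
    }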