author		Deepak Nibade <dnibade@nvidia.com>	2017-05-24 08:07:04 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-06-02 09:53:35 -0400
commit		6090a8a7ee347f92d806f104d3a0082208f5df64 (patch)
tree		74b0d7057ea1b112d7de41f1bbce5e212f1525de /drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
parent		be7f22db8bc5bff131432a4f6d127ecc8ce5096d (diff)
gpu: nvgpu: move debugfs code to linux module
Since all debugfs code is Linux-specific, remove it from common code and
move it to the Linux module.

The debugfs code is now divided into the following module-specific files:
common/linux/debug.c
common/linux/debug_cde.c
common/linux/debug_ce.c
common/linux/debug_fifo.c
common/linux/debug_gr.c
common/linux/debug_mm.c
common/linux/debug_allocator.c
common/linux/debug_kmem.c
common/linux/debug_pmu.c
common/linux/debug_sched.c

Add corresponding header files for the above modules, and compile all of
the above files only if CONFIG_DEBUG_FS is set.

Further details of the changes made:
- Move and rename gk20a/debug_gk20a.c to common/linux/debug.c
- Move and rename gk20a/debug_gk20a.h to include/nvgpu/debug.h
- Remove gm20b/debug_gm20b.c and gm20b/debug_gm20b.h and call
  gk20a_init_debug_ops() directly from gm20b_init_hal()
- Update all debug APIs to receive struct gk20a as a parameter instead of
  a struct device pointer
- Update API gk20a_dmabuf_get_state() to receive a struct gk20a pointer
  instead of a struct device
- Include <nvgpu/debug.h> explicitly in all files where debug operations
  are used
- Remove the "gk20a/platform_gk20a.h" include from HAL files which no
  longer need it
- Add new API gk20a_debug_deinit() to deinitialize debugfs and call it
  from gk20a_remove()
- Move API gk20a_debug_dump_all_channel_status_ramfc() to
  gk20a/fifo_gk20a.c

Jira NVGPU-62

Change-Id: I076975d3d7f669bdbe9212fa33d98529377feeb6
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/1488902
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Bharat Nihalani <bnihalani@nvidia.com>
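The central interface change described above, debug entry points taking struct gk20a * instead of struct device *, can be illustrated with a pair of prototypes. This is a minimal sketch: the old prototype is the one removed from pmu_gk20a.c in the diff below, while nvgpu_pmu_debugfs_init() is a hypothetical name standing in for whatever declaration the moved Linux-specific debug headers actually use.

struct device;
struct gk20a;

/* Old convention (see the removed gk20a_pmu_debugfs_init() in the diff
 * below): debug code took a Linux struct device and recovered the gk20a
 * pointer via platform data, which pulled platform_gk20a.h and debugfs
 * headers into common code. */
int gk20a_pmu_debugfs_init(struct device *dev);

/* New convention described in the commit message (hypothetical prototype,
 * not taken from this diff): callers pass the gk20a pointer directly, so
 * common code never needs struct device or <linux/debugfs.h>. */
int nvgpu_pmu_debugfs_init(struct gk20a *g);

Keeping struct device out of the debug signatures is what allows the debugfs implementations to live under common/linux/ and be compiled only when CONFIG_DEBUG_FS is set.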
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/pmu_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/pmu_gk20a.c	487
1 file changed, 9 insertions(+), 478 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index a9e03943..552d5d73 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -35,12 +35,6 @@
 #include "nvgpu_gpuid_t19x.h"
 #endif
 
-#ifdef CONFIG_DEBUG_FS
-#include <linux/debugfs.h>
-#include <linux/uaccess.h>
-#include "platform_gk20a.h"
-#endif
-
 #define GK20A_PMU_UCODE_IMAGE "gpmu_ucode.bin"
 
 #define PMU_MEM_SCRUBBING_TIMEOUT_MAX 1000
@@ -49,7 +43,7 @@
 #define gk20a_dbg_pmu(fmt, arg...) \
 	gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)
 
-static int gk20a_pmu_get_pg_stats(struct gk20a *g,
+int gk20a_pmu_get_pg_stats(struct gk20a *g,
 		u32 pg_engine_id,
 		struct pmu_pg_stats_data *pg_stat_data);
 static void ap_callback_init_and_enable_ctrl(
@@ -281,7 +275,7 @@ static void set_pmu_cmdline_args_falctracesize_v1(
 	pmu->args_v1.falc_trace_size = size;
 }
 
-static bool find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos)
+bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos)
 {
 	u32 i = 0, j = strlen(strings);
 	for (; i < j; i++) {
@@ -326,7 +320,7 @@ static void printtrace(struct pmu_gk20a *pmu)
 		count = scnprintf(buf, 0x40, "Index %x: ", trace1[(i / 4)]);
 		l = 0;
 		m = 0;
-		while (find_hex_in_string((trace+i+20+m), g, &k)) {
+		while (nvgpu_find_hex_in_string((trace+i+20+m), g, &k)) {
 			if (k >= 40)
 				break;
 			strncpy(part_str, (trace+i+20+m), k);
@@ -4141,7 +4135,7 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
 		nvgpu_err(g, "ZBC save timeout");
 }
 
-static int pmu_perfmon_start_sampling(struct pmu_gk20a *pmu)
+int nvgpu_pmu_perfmon_start_sampling(struct pmu_gk20a *pmu)
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
 	struct pmu_v *pv = &g->ops.pmu_ver;
@@ -4185,7 +4179,7 @@ static int pmu_perfmon_start_sampling(struct pmu_gk20a *pmu)
 	return 0;
 }
 
-static int pmu_perfmon_stop_sampling(struct pmu_gk20a *pmu)
+int nvgpu_pmu_perfmon_stop_sampling(struct pmu_gk20a *pmu)
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
 	struct pmu_cmd cmd;
@@ -4231,7 +4225,7 @@ static int pmu_handle_perfmon_event(struct pmu_gk20a *pmu,
 
 	/* restart sampling */
 	if (pmu->perfmon_sampling_enabled)
-		return pmu_perfmon_start_sampling(pmu);
+		return nvgpu_pmu_perfmon_start_sampling(pmu);
 	return 0;
 }
 
@@ -5173,9 +5167,9 @@ int gk20a_pmu_perfmon_enable(struct gk20a *g, bool enable)
 	gk20a_dbg_fn("");
 
 	if (enable)
-		err = pmu_perfmon_start_sampling(pmu);
+		err = nvgpu_pmu_perfmon_start_sampling(pmu);
 	else
-		err = pmu_perfmon_stop_sampling(pmu);
+		err = nvgpu_pmu_perfmon_stop_sampling(pmu);
 
 	return err;
 }
@@ -5293,7 +5287,7 @@ void gk20a_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
 	pg_stat_data->avg_exit_latency_us = stats.pg_avg_exit_time_us;
 }
 
-static int gk20a_pmu_get_pg_stats(struct gk20a *g,
+int gk20a_pmu_get_pg_stats(struct gk20a *g,
 		u32 pg_engine_id,
 		struct pmu_pg_stats_data *pg_stat_data)
 {
@@ -5463,466 +5457,3 @@ int gk20a_aelpg_init_and_enable(struct gk20a *g, u8 ctrl_id)
 	status = gk20a_pmu_ap_send_command(g, &ap_cmd, true);
 	return status;
 }
-
-#ifdef CONFIG_DEBUG_FS
-static int lpwr_debug_show(struct seq_file *s, void *data)
-{
-	struct gk20a *g = s->private;
-
-	if (g->ops.pmu.pmu_pg_engines_feature_list &&
-		g->ops.pmu.pmu_pg_engines_feature_list(g,
-			PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
-			PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
-		seq_printf(s, "PSTATE: %u\n"
-			"RPPG Enabled: %u\n"
-			"RPPG ref count: %u\n"
-			"RPPG state: %u\n"
-			"MSCG Enabled: %u\n"
-			"MSCG pstate state: %u\n"
-			"MSCG transition state: %u\n",
-			g->ops.clk_arb.get_current_pstate(g),
-			g->elpg_enabled, g->pmu.elpg_refcnt,
-			g->pmu.elpg_stat, g->mscg_enabled,
-			g->pmu.mscg_stat, g->pmu.mscg_transition_state);
-
-	} else
-		seq_printf(s, "ELPG Enabled: %u\n"
-			"ELPG ref count: %u\n"
-			"ELPG state: %u\n",
-			g->elpg_enabled, g->pmu.elpg_refcnt,
-			g->pmu.elpg_stat);
-
-	return 0;
-
-}
-
-static int lpwr_debug_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, lpwr_debug_show, inode->i_private);
-}
-
-static const struct file_operations lpwr_debug_fops = {
-	.open = lpwr_debug_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
-static int mscg_stat_show(struct seq_file *s, void *data)
-{
-	struct gk20a *g = s->private;
-	u64 total_ingating, total_ungating, residency, divisor, dividend;
-	struct pmu_pg_stats_data pg_stat_data = { 0 };
-	int err;
-
-	/* Don't unnecessarily power on the device */
-	if (g->power_on) {
-		err = gk20a_busy(g);
-		if (err)
-			return err;
-
-		gk20a_pmu_get_pg_stats(g,
-			PMU_PG_ELPG_ENGINE_ID_MS, &pg_stat_data);
-		gk20a_idle(g);
-	}
-	total_ingating = g->pg_ingating_time_us +
-			(u64)pg_stat_data.ingating_time;
-	total_ungating = g->pg_ungating_time_us +
-			(u64)pg_stat_data.ungating_time;
-
-	divisor = total_ingating + total_ungating;
-
-	/* We compute the residency on a scale of 1000 */
-	dividend = total_ingating * 1000;
-
-	if (divisor)
-		residency = div64_u64(dividend, divisor);
-	else
-		residency = 0;
-
-	seq_printf(s,
-		"Time in MSCG: %llu us\n"
-		"Time out of MSCG: %llu us\n"
-		"MSCG residency ratio: %llu\n"
-		"MSCG Entry Count: %u\n"
-		"MSCG Avg Entry latency %u\n"
-		"MSCG Avg Exit latency %u\n",
-		total_ingating, total_ungating,
-		residency, pg_stat_data.gating_cnt,
-		pg_stat_data.avg_entry_latency_us,
-		pg_stat_data.avg_exit_latency_us);
-	return 0;
-
-}
-
-static int mscg_stat_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, mscg_stat_show, inode->i_private);
-}
-
-static const struct file_operations mscg_stat_fops = {
-	.open = mscg_stat_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
-static int mscg_transitions_show(struct seq_file *s, void *data)
-{
-	struct gk20a *g = s->private;
-	struct pmu_pg_stats_data pg_stat_data = { 0 };
-	u32 total_gating_cnt;
-	int err;
-
-	if (g->power_on) {
-		err = gk20a_busy(g);
-		if (err)
-			return err;
-
-		gk20a_pmu_get_pg_stats(g,
-			PMU_PG_ELPG_ENGINE_ID_MS, &pg_stat_data);
-		gk20a_idle(g);
-	}
-	total_gating_cnt = g->pg_gating_cnt + pg_stat_data.gating_cnt;
-
-	seq_printf(s, "%u\n", total_gating_cnt);
-	return 0;
-
-}
-
-static int mscg_transitions_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, mscg_transitions_show, inode->i_private);
-}
-
-static const struct file_operations mscg_transitions_fops = {
-	.open = mscg_transitions_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
-static int elpg_stat_show(struct seq_file *s, void *data)
-{
-	struct gk20a *g = s->private;
-	struct pmu_pg_stats_data pg_stat_data = { 0 };
-	u64 total_ingating, total_ungating, residency, divisor, dividend;
-	int err;
-
-	/* Don't unnecessarily power on the device */
-	if (g->power_on) {
-		err = gk20a_busy(g);
-		if (err)
-			return err;
-
-		gk20a_pmu_get_pg_stats(g,
-			PMU_PG_ELPG_ENGINE_ID_GRAPHICS, &pg_stat_data);
-		gk20a_idle(g);
-	}
-	total_ingating = g->pg_ingating_time_us +
-			(u64)pg_stat_data.ingating_time;
-	total_ungating = g->pg_ungating_time_us +
-			(u64)pg_stat_data.ungating_time;
-	divisor = total_ingating + total_ungating;
-
-	/* We compute the residency on a scale of 1000 */
-	dividend = total_ingating * 1000;
-
-	if (divisor)
-		residency = div64_u64(dividend, divisor);
-	else
-		residency = 0;
-
-	seq_printf(s,
-		"Time in ELPG: %llu us\n"
-		"Time out of ELPG: %llu us\n"
-		"ELPG residency ratio: %llu\n"
-		"ELPG Entry Count: %u\n"
-		"ELPG Avg Entry latency %u us\n"
-		"ELPG Avg Exit latency %u us\n",
-		total_ingating, total_ungating,
-		residency, pg_stat_data.gating_cnt,
-		pg_stat_data.avg_entry_latency_us,
-		pg_stat_data.avg_exit_latency_us);
-	return 0;
-
-}
-
-static int elpg_stat_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, elpg_stat_show, inode->i_private);
-}
-
-static const struct file_operations elpg_stat_fops = {
-	.open = elpg_stat_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
-static int elpg_transitions_show(struct seq_file *s, void *data)
-{
-	struct gk20a *g = s->private;
-	struct pmu_pg_stats_data pg_stat_data = { 0 };
-	u32 total_gating_cnt;
-	int err;
-
-	if (g->power_on) {
-		err = gk20a_busy(g);
-		if (err)
-			return err;
-
-		gk20a_pmu_get_pg_stats(g,
-			PMU_PG_ELPG_ENGINE_ID_GRAPHICS, &pg_stat_data);
-		gk20a_idle(g);
-	}
-	total_gating_cnt = g->pg_gating_cnt + pg_stat_data.gating_cnt;
-
-	seq_printf(s, "%u\n", total_gating_cnt);
-	return 0;
-
-}
-
-static int elpg_transitions_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, elpg_transitions_show, inode->i_private);
-}
-
-static const struct file_operations elpg_transitions_fops = {
-	.open = elpg_transitions_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
-static int falc_trace_show(struct seq_file *s, void *data)
-{
-	struct gk20a *g = s->private;
-	struct pmu_gk20a *pmu = &g->pmu;
-	u32 i = 0, j = 0, k, l, m;
-	char part_str[40];
-	void *tracebuffer;
-	char *trace;
-	u32 *trace1;
-
-	/* allocate system memory to copy pmu trace buffer */
-	tracebuffer = nvgpu_kzalloc(g, GK20A_PMU_TRACE_BUFSIZE);
-	if (tracebuffer == NULL)
-		return -ENOMEM;
-
-	/* read pmu traces into system memory buffer */
-	nvgpu_mem_rd_n(g, &pmu->trace_buf,
-		0, tracebuffer, GK20A_PMU_TRACE_BUFSIZE);
-
-	trace = (char *)tracebuffer;
-	trace1 = (u32 *)tracebuffer;
-
-	for (i = 0; i < GK20A_PMU_TRACE_BUFSIZE; i += 0x40) {
-		for (j = 0; j < 0x40; j++)
-			if (trace1[(i / 4) + j])
-				break;
-		if (j == 0x40)
-			break;
-		seq_printf(s, "Index %x: ", trace1[(i / 4)]);
-		l = 0;
-		m = 0;
-		while (find_hex_in_string((trace+i+20+m), g, &k)) {
-			if (k >= 40)
-				break;
-			strncpy(part_str, (trace+i+20+m), k);
-			part_str[k] = 0;
-			seq_printf(s, "%s0x%x", part_str,
-				trace1[(i / 4) + 1 + l]);
-			l++;
-			m += k + 2;
-		}
-		seq_printf(s, "%s", (trace+i+20+m));
-	}
-
-	nvgpu_kfree(g, tracebuffer);
-	return 0;
-}
-
-static int falc_trace_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, falc_trace_show, inode->i_private);
-}
-
-static const struct file_operations falc_trace_fops = {
-	.open = falc_trace_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
-static int perfmon_events_enable_show(struct seq_file *s, void *data)
-{
-	struct gk20a *g = s->private;
-
-	seq_printf(s, "%u\n", g->pmu.perfmon_sampling_enabled ? 1 : 0);
-	return 0;
-
-}
-
-static int perfmon_events_enable_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, perfmon_events_enable_show, inode->i_private);
-}
-
-static ssize_t perfmon_events_enable_write(struct file *file,
-	const char __user *userbuf, size_t count, loff_t *ppos)
-{
-	struct seq_file *s = file->private_data;
-	struct gk20a *g = s->private;
-	unsigned long val = 0;
-	char buf[40];
-	int buf_size;
-	int err;
-
-	memset(buf, 0, sizeof(buf));
-	buf_size = min(count, (sizeof(buf)-1));
-
-	if (copy_from_user(buf, userbuf, buf_size))
-		return -EFAULT;
-
-	if (kstrtoul(buf, 10, &val) < 0)
-		return -EINVAL;
-
-	/* Don't turn on gk20a unnecessarily */
-	if (g->power_on) {
-		err = gk20a_busy(g);
-		if (err)
-			return err;
-
-		if (val && !g->pmu.perfmon_sampling_enabled) {
-			g->pmu.perfmon_sampling_enabled = true;
-			pmu_perfmon_start_sampling(&(g->pmu));
-		} else if (!val && g->pmu.perfmon_sampling_enabled) {
-			g->pmu.perfmon_sampling_enabled = false;
-			pmu_perfmon_stop_sampling(&(g->pmu));
-		}
-		gk20a_idle(g);
-	} else {
-		g->pmu.perfmon_sampling_enabled = val ? true : false;
-	}
-
-	return count;
-}
-
-static const struct file_operations perfmon_events_enable_fops = {
-	.open = perfmon_events_enable_open,
-	.read = seq_read,
-	.write = perfmon_events_enable_write,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
-static int perfmon_events_count_show(struct seq_file *s, void *data)
-{
-	struct gk20a *g = s->private;
-
-	seq_printf(s, "%lu\n", g->pmu.perfmon_events_cnt);
-	return 0;
-
-}
-
-static int perfmon_events_count_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, perfmon_events_count_show, inode->i_private);
-}
-
-static const struct file_operations perfmon_events_count_fops = {
-	.open = perfmon_events_count_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
-static int security_show(struct seq_file *s, void *data)
-{
-	struct gk20a *g = s->private;
-
-	seq_printf(s, "%d\n", g->pmu.pmu_mode);
-	return 0;
-
-}
-
-static int security_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, security_show, inode->i_private);
-}
-
-static const struct file_operations security_fops = {
-	.open = security_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
-int gk20a_pmu_debugfs_init(struct device *dev)
-{
-	struct dentry *d;
-	struct gk20a_platform *platform = dev_get_drvdata(dev);
-	struct gk20a *g = get_gk20a(dev);
-
-	d = debugfs_create_file(
-		"lpwr_debug", S_IRUGO|S_IWUSR, platform->debugfs, g,
-						&lpwr_debug_fops);
-	if (!d)
-		goto err_out;
-
-	d = debugfs_create_file(
-		"mscg_residency", S_IRUGO|S_IWUSR, platform->debugfs, g,
-						&mscg_stat_fops);
-	if (!d)
-		goto err_out;
-
-	d = debugfs_create_file(
-		"mscg_transitions", S_IRUGO, platform->debugfs, g,
-						&mscg_transitions_fops);
-	if (!d)
-		goto err_out;
-
-	d = debugfs_create_file(
-		"elpg_residency", S_IRUGO|S_IWUSR, platform->debugfs, g,
-						&elpg_stat_fops);
-	if (!d)
-		goto err_out;
-
-	d = debugfs_create_file(
-		"elpg_transitions", S_IRUGO, platform->debugfs, g,
-						&elpg_transitions_fops);
-	if (!d)
-		goto err_out;
-
-	d = debugfs_create_file(
-		"falc_trace", S_IRUGO, platform->debugfs, g,
-						&falc_trace_fops);
-	if (!d)
-		goto err_out;
-
-	d = debugfs_create_file(
-		"perfmon_events_enable", S_IRUGO, platform->debugfs, g,
-						&perfmon_events_enable_fops);
-	if (!d)
-		goto err_out;
-
-	d = debugfs_create_file(
-		"perfmon_events_count", S_IRUGO, platform->debugfs, g,
-						&perfmon_events_count_fops);
-	if (!d)
-		goto err_out;
-
-	d = debugfs_create_file(
-		"pmu_security", S_IRUGO, platform->debugfs, g,
-						&security_fops);
-	if (!d)
-		goto err_out;
-	return 0;
-err_out:
-	pr_err("%s: Failed to make debugfs node\n", __func__);
-	debugfs_remove_recursive(platform->debugfs);
-	return -ENOMEM;
-}
-
-#endif