author		Linus Torvalds <torvalds@linux-foundation.org>	2015-04-16 14:53:32 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-04-16 14:53:32 -0400
commit		d19d5efd8c8840aa4f38a6dfbfe500d8cc27de46 (patch)
tree		2e2f4f57de790c7de2ccd6d1afbec8695b2c7a46 /arch/powerpc/perf
parent		34c9a0ffc75ad25b6a60f61e27c4a4b1189b8085 (diff)
parent		2fe0753d49402aee325cc39c476b46fd51a8afec (diff)
Merge tag 'powerpc-4.1-1' of git://git.kernel.org/pub/scm/linux/kernel/git/mpe/linux
Pull powerpc updates from Michael Ellerman:

 - Numerous minor fixes, cleanups etc.
 - More EEH work from Gavin to remove its dependency on device_nodes.
 - Memory hotplug implemented entirely in the kernel from Nathan Fontenot.
 - Removal of redundant CONFIG_PPC_OF by Kevin Hao.
 - Rewrite of VPHN parsing logic & tests from Greg Kurz.
 - A fix from Nish Aravamudan to reduce memory usage by clamping nodes_possible_map.
 - Support for pstore on powernv from Hari Bathini.
 - Removal of old powerpc specific byte swap routines by David Gibson.
 - Fix from Vasant Hegde to prevent the flash driver telling you it was flashing your firmware when it wasn't.
 - Patch from Ben Herrenschmidt to add an OPAL heartbeat driver.
 - Fix for an oops causing get/put_cpu_var() imbalance in perf by Jan Stancek.
 - Some fixes for migration from Tyrel Datwyler.
 - A new syscall to switch the cpu endian by Michael Ellerman.
 - Large series from Wei Yang to implement SRIOV, reviewed and acked by Bjorn.
 - A fix for the OPAL sensor driver from Cédric Le Goater.
 - Fixes to get STRICT_MM_TYPECHECKS building again by Michael Ellerman.
 - Large series from Daniel Axtens to make our PCI hooks per PHB rather than per machine.
 - Small patch from Sam Bobroff to explicitly abort non-suspended transactions on syscalls, plus a test to exercise it.
 - Numerous reworks and fixes for the 24x7 PMU from Sukadev Bhattiprolu.
 - Small patch to enable the hard lockup detector from Anton Blanchard.
 - Fix from Dave Olson for missing L2 cache information on some CPUs.
 - Some fixes from Michael Ellerman to get Cell machines booting again.
 - Freescale updates from Scott: Highlights include BMan device tree nodes, an MSI erratum workaround, a couple minor performance improvements, config updates, and misc fixes/cleanup.

* tag 'powerpc-4.1-1' of git://git.kernel.org/pub/scm/linux/kernel/git/mpe/linux: (196 commits)
  powerpc/powermac: Fix build error seen with powermac smp builds
  powerpc/pseries: Fix compile of memory hotplug without CONFIG_MEMORY_HOTREMOVE
  powerpc: Remove PPC32 code from pseries specific find_and_init_phbs()
  powerpc/cell: Fix iommu breakage caused by controller_ops change
  powerpc/eeh: Fix crash in eeh_add_device_early() on Cell
  powerpc/perf: Cap 64bit userspace backtraces to PERF_MAX_STACK_DEPTH
  powerpc/perf/hv-24x7: Fail 24x7 initcall if create_events_from_catalog() fails
  powerpc/pseries: Correct memory hotplug locking
  powerpc: Fix missing L2 cache size in /sys/devices/system/cpu
  powerpc: Add ppc64 hard lockup detector support
  oprofile: Disable oprofile NMI timer on ppc64
  powerpc/perf/hv-24x7: Add missing put_cpu_var()
  powerpc/perf/hv-24x7: Break up single_24x7_request
  powerpc/perf/hv-24x7: Define update_event_count()
  powerpc/perf/hv-24x7: Whitespace cleanup
  powerpc/perf/hv-24x7: Define add_event_to_24x7_request()
  powerpc/perf/hv-24x7: Rename hv_24x7_event_update
  powerpc/perf/hv-24x7: Move debug prints to separate function
  powerpc/perf/hv-24x7: Drop event_24x7_request()
  powerpc/perf/hv-24x7: Use pr_devel() to log message
  ...

Conflicts:
	tools/testing/selftests/powerpc/Makefile
	tools/testing/selftests/powerpc/tm/Makefile
Diffstat (limited to 'arch/powerpc/perf')
-rw-r--r--	arch/powerpc/perf/callchain.c	|   2
-rw-r--r--	arch/powerpc/perf/core-book3s.c	|   4
-rw-r--r--	arch/powerpc/perf/hv-24x7.c	| 251
-rw-r--r--	arch/powerpc/perf/hv-24x7.h	|   8
4 files changed, 172 insertions, 93 deletions
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index 2396dda282cd..ead55351b254 100644
--- a/arch/powerpc/perf/callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -243,7 +243,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
 	sp = regs->gpr[1];
 	perf_callchain_store(entry, next_ip);
 
-	for (;;) {
+	while (entry->nr < PERF_MAX_STACK_DEPTH) {
 		fp = (unsigned long __user *) sp;
 		if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
 			return;
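
The hunk above (from "powerpc/perf: Cap 64bit userspace backtraces to PERF_MAX_STACK_DEPTH") matters because the generic store helper silently drops entries once the callchain is full, so an unbounded for (;;) over a corrupt or cyclic user stack could keep iterating without ever terminating. A paraphrased sketch of that helper's behaviour in this kernel generation (not copied from the tree, name changed to mark it as illustrative):

#include <linux/perf_event.h>

/* Paraphrased sketch of perf_callchain_store() semantics circa v4.0:
 * once entry->nr reaches PERF_MAX_STACK_DEPTH, further stores are
 * silently ignored, so the walker itself must also check the bound
 * if the loop is to make progress toward termination.
 */
static inline void sketch_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)	/* drop silently once full */
		entry->ip[entry->nr++] = ip;
}
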
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 7fd60dcb2cb0..12b638425bb9 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1837,8 +1837,10 @@ static int power_pmu_event_init(struct perf_event *event)
 		cpuhw->bhrb_filter = ppmu->bhrb_filter_map(
 					event->attr.branch_sample_type);
 
-		if(cpuhw->bhrb_filter == -1)
+		if (cpuhw->bhrb_filter == -1) {
+			put_cpu_var(cpu_hw_events);
 			return -EOPNOTSUPP;
+		}
 	}
 
 	put_cpu_var(cpu_hw_events);
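
For context on the core-book3s.c hunk (the get/put_cpu_var() imbalance fix mentioned in the pull request): get_cpu_var() disables preemption until the matching put_cpu_var(), so an early return taken in between leaves preemption disabled and shows up later as an oops or scheduling-while-atomic splat. A minimal, self-contained sketch of the balanced pattern — the struct and function names here are hypothetical, only the pairing rule is the point:

#include <linux/percpu.h>
#include <linux/errno.h>

/* Hypothetical per-CPU state, standing in for cpu_hw_events. */
struct demo_cpu_state {
	int bhrb_filter;
};
static DEFINE_PER_CPU(struct demo_cpu_state, demo_cpu_state);

static int demo_event_init(void)
{
	/* get_cpu_var() disables preemption and returns this CPU's copy. */
	struct demo_cpu_state *st = &get_cpu_var(demo_cpu_state);

	if (st->bhrb_filter == -1) {
		/* Every early exit must drop the reference it took. */
		put_cpu_var(demo_cpu_state);
		return -EOPNOTSUPP;
	}

	put_cpu_var(demo_cpu_state);	/* normal path */
	return 0;
}
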
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index abeb9ec0d117..ec2eb20631d1 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -142,6 +142,15 @@ static struct attribute_group event_long_desc_group = {
 
 static struct kmem_cache *hv_page_cache;
 
+/*
+ * request_buffer and result_buffer are not required to be 4k aligned,
+ * but are not allowed to cross any 4k boundary. Aligning them to 4k is
+ * the simplest way to ensure that.
+ */
+#define H24x7_DATA_BUFFER_SIZE	4096
+DEFINE_PER_CPU(char, hv_24x7_reqb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096);
+DEFINE_PER_CPU(char, hv_24x7_resb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096);
+
 static char *event_name(struct hv_24x7_event_data *ev, int *len)
 {
 	*len = be16_to_cpu(ev->event_name_len) - 2;
@@ -152,6 +161,7 @@ static char *event_desc(struct hv_24x7_event_data *ev, int *len)
 {
 	unsigned nl = be16_to_cpu(ev->event_name_len);
 	__be16 *desc_len = (__be16 *)(ev->remainder + nl - 2);
+
 	*len = be16_to_cpu(*desc_len) - 2;
 	return (char *)ev->remainder + nl;
 }
@@ -162,6 +172,7 @@ static char *event_long_desc(struct hv_24x7_event_data *ev, int *len)
 	__be16 *desc_len_ = (__be16 *)(ev->remainder + nl - 2);
 	unsigned desc_len = be16_to_cpu(*desc_len_);
 	__be16 *long_desc_len = (__be16 *)(ev->remainder + nl + desc_len - 2);
+
 	*len = be16_to_cpu(*long_desc_len) - 2;
 	return (char *)ev->remainder + nl + desc_len;
 }
@@ -239,14 +250,12 @@ static unsigned long h_get_24x7_catalog_page_(unsigned long phys_4096,
 					 unsigned long index)
 {
 	pr_devel("h_get_24x7_catalog_page(0x%lx, %lu, %lu)",
-			phys_4096,
-			version,
-			index);
+			phys_4096, version, index);
+
 	WARN_ON(!IS_ALIGNED(phys_4096, 4096));
+
 	return plpar_hcall_norets(H_GET_24X7_CATALOG_PAGE,
-			phys_4096,
-			version,
-			index);
+			phys_4096, version, index);
 }
 
 static unsigned long h_get_24x7_catalog_page(char page[],
@@ -300,6 +309,7 @@ static ssize_t device_show_string(struct device *dev,
 	struct dev_ext_attribute *d;
 
 	d = container_of(attr, struct dev_ext_attribute, attr);
+
 	return sprintf(buf, "%s\n", (char *)d->var);
 }
 
@@ -314,6 +324,7 @@ static struct attribute *device_str_attr_create_(char *name, char *str)
 	attr->attr.attr.name = name;
 	attr->attr.attr.mode = 0444;
 	attr->attr.show = device_show_string;
+
 	return &attr->attr.attr;
 }
 
@@ -387,7 +398,6 @@ static struct attribute *event_to_attr(unsigned ix,
 	a_ev_name = kasprintf(GFP_KERNEL, "%.*s%s__%d",
 				(int)event_name_len, ev_name, ev_suffix, nonce);
 
-
 	if (!a_ev_name)
 		goto out_val;
 
@@ -637,7 +647,7 @@ static ssize_t catalog_event_len_validate(struct hv_24x7_event_data *event,
 
 #define MAX_4K (SIZE_MAX / 4096)
 
-static void create_events_from_catalog(struct attribute ***events_,
+static int create_events_from_catalog(struct attribute ***events_,
 				struct attribute ***event_descs_,
 				struct attribute ***event_long_descs_)
 {
@@ -655,19 +665,25 @@ static void create_events_from_catalog(struct attribute ***events_,
 	void *event_data, *end;
 	struct hv_24x7_event_data *event;
 	struct rb_root ev_uniq = RB_ROOT;
+	int ret = 0;
 
-	if (!page)
+	if (!page) {
+		ret = -ENOMEM;
 		goto e_out;
+	}
 
 	hret = h_get_24x7_catalog_page(page, 0, 0);
-	if (hret)
+	if (hret) {
+		ret = -EIO;
 		goto e_free;
+	}
 
 	catalog_version_num = be64_to_cpu(page_0->version);
 	catalog_page_len = be32_to_cpu(page_0->length);
 
 	if (MAX_4K < catalog_page_len) {
 		pr_err("invalid page count: %zu\n", catalog_page_len);
+		ret = -EIO;
 		goto e_free;
 	}
 
@@ -686,6 +702,7 @@ static void create_events_from_catalog(struct attribute ***events_,
 	    || (MAX_4K - event_data_offs < event_data_len)) {
 		pr_err("invalid event data offs %zu and/or len %zu\n",
 				event_data_offs, event_data_len);
+		ret = -EIO;
 		goto e_free;
 	}
 
@@ -694,12 +711,14 @@ static void create_events_from_catalog(struct attribute ***events_,
 				event_data_offs,
 				event_data_offs + event_data_len,
 				catalog_page_len);
+		ret = -EIO;
 		goto e_free;
 	}
 
 	if (SIZE_MAX / MAX_EVENTS_PER_EVENT_DATA - 1 < event_entry_count) {
 		pr_err("event_entry_count %zu is invalid\n",
 				event_entry_count);
+		ret = -EIO;
 		goto e_free;
 	}
 
@@ -712,6 +731,7 @@ static void create_events_from_catalog(struct attribute ***events_,
 	event_data = vmalloc(event_data_bytes);
 	if (!event_data) {
 		pr_err("could not allocate event data\n");
+		ret = -ENOMEM;
 		goto e_free;
 	}
 
@@ -731,6 +751,7 @@ static void create_events_from_catalog(struct attribute ***events_,
 		if (hret) {
 			pr_err("failed to get event data in page %zu\n",
 					i + event_data_offs);
+			ret = -EIO;
 			goto e_event_data;
 		}
 	}
@@ -778,18 +799,24 @@ static void create_events_from_catalog(struct attribute ***events_,
 			event_idx_last, event_entry_count, junk_events);
 
 	events = kmalloc_array(attr_max + 1, sizeof(*events), GFP_KERNEL);
-	if (!events)
+	if (!events) {
+		ret = -ENOMEM;
 		goto e_event_data;
+	}
 
 	event_descs = kmalloc_array(event_idx + 1, sizeof(*event_descs),
 				GFP_KERNEL);
-	if (!event_descs)
+	if (!event_descs) {
+		ret = -ENOMEM;
 		goto e_event_attrs;
+	}
 
 	event_long_descs = kmalloc_array(event_idx + 1,
 			sizeof(*event_long_descs), GFP_KERNEL);
-	if (!event_long_descs)
+	if (!event_long_descs) {
+		ret = -ENOMEM;
 		goto e_event_descs;
+	}
 
 	/* Iterate over the catalog filling in the attribute vector */
 	for (junk_events = 0, event_attr_ct = 0, desc_ct = 0, long_desc_ct = 0,
@@ -843,7 +870,7 @@ static void create_events_from_catalog(struct attribute ***events_,
 	*events_ = events;
 	*event_descs_ = event_descs;
 	*event_long_descs_ = event_long_descs;
-	return;
+	return 0;
 
 e_event_descs:
 	kfree(event_descs);
@@ -857,6 +884,7 @@ e_out:
 	*events_ = NULL;
 	*event_descs_ = NULL;
 	*event_long_descs_ = NULL;
+	return ret;
 }
 
 static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
@@ -872,6 +900,7 @@ static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
 	uint64_t catalog_version_num = 0;
 	void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);
 	struct hv_24x7_catalog_page_0 *page_0 = page;
+
 	if (!page)
 		return -ENOMEM;
 
@@ -976,31 +1005,104 @@ static const struct attribute_group *attr_groups[] = {
 	NULL,
 };
 
-DEFINE_PER_CPU(char, hv_24x7_reqb[4096]) __aligned(4096);
-DEFINE_PER_CPU(char, hv_24x7_resb[4096]) __aligned(4096);
+static void log_24x7_hcall(struct hv_24x7_request_buffer *request_buffer,
+			struct hv_24x7_data_result_buffer *result_buffer,
+			unsigned long ret)
+{
+	struct hv_24x7_request *req;
+
+	req = &request_buffer->requests[0];
+	pr_notice_ratelimited("hcall failed: [%d %#x %#x %d] => "
+			"ret 0x%lx (%ld) detail=0x%x failing ix=%x\n",
+			req->performance_domain, req->data_offset,
+			req->starting_ix, req->starting_lpar_ix, ret, ret,
+			result_buffer->detailed_rc,
+			result_buffer->failing_request_ix);
+}
+
+/*
+ * Start the process for a new H_GET_24x7_DATA hcall.
+ */
+static void init_24x7_request(struct hv_24x7_request_buffer *request_buffer,
+			struct hv_24x7_data_result_buffer *result_buffer)
+{
+
+	memset(request_buffer, 0, 4096);
+	memset(result_buffer, 0, 4096);
+
+	request_buffer->interface_version = HV_24X7_IF_VERSION_CURRENT;
+	/* memset above set request_buffer->num_requests to 0 */
+}
 
-static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix,
-					 u16 lpar, u64 *res,
-					 bool success_expected)
+/*
+ * Commit (i.e perform) the H_GET_24x7_DATA hcall using the data collected
+ * by 'init_24x7_request()' and 'add_event_to_24x7_request()'.
+ */
+static int make_24x7_request(struct hv_24x7_request_buffer *request_buffer,
+			struct hv_24x7_data_result_buffer *result_buffer)
 {
 	unsigned long ret;
 
 	/*
-	 * request_buffer and result_buffer are not required to be 4k aligned,
-	 * but are not allowed to cross any 4k boundary. Aligning them to 4k is
-	 * the simplest way to ensure that.
+	 * NOTE: Due to variable number of array elements in request and
+	 * result buffer(s), sizeof() is not reliable. Use the actual
+	 * allocated buffer size, H24x7_DATA_BUFFER_SIZE.
 	 */
-	struct reqb {
-		struct hv_24x7_request_buffer buf;
-		struct hv_24x7_request req;
-	} __packed *request_buffer;
-
-	struct {
-		struct hv_24x7_data_result_buffer buf;
-		struct hv_24x7_result res;
-		struct hv_24x7_result_element elem;
-		__be64 result;
-	} __packed *result_buffer;
+	ret = plpar_hcall_norets(H_GET_24X7_DATA,
+			virt_to_phys(request_buffer), H24x7_DATA_BUFFER_SIZE,
+			virt_to_phys(result_buffer), H24x7_DATA_BUFFER_SIZE);
+
+	if (ret)
+		log_24x7_hcall(request_buffer, result_buffer, ret);
+
+	return ret;
+}
+
+/*
+ * Add the given @event to the next slot in the 24x7 request_buffer.
+ *
+ * Note that H_GET_24X7_DATA hcall allows reading several counters'
+ * values in a single HCALL. We expect the caller to add events to the
+ * request buffer one by one, make the HCALL and process the results.
+ */
+static int add_event_to_24x7_request(struct perf_event *event,
+			struct hv_24x7_request_buffer *request_buffer)
+{
+	u16 idx;
+	int i;
+	struct hv_24x7_request *req;
+
+	if (request_buffer->num_requests > 254) {
+		pr_devel("Too many requests for 24x7 HCALL %d\n",
+				request_buffer->num_requests);
+		return -EINVAL;
+	}
+
+	if (is_physical_domain(event_get_domain(event)))
+		idx = event_get_core(event);
+	else
+		idx = event_get_vcpu(event);
+
+	i = request_buffer->num_requests++;
+	req = &request_buffer->requests[i];
+
+	req->performance_domain = event_get_domain(event);
+	req->data_size = cpu_to_be16(8);
+	req->data_offset = cpu_to_be32(event_get_offset(event));
+	req->starting_lpar_ix = cpu_to_be16(event_get_lpar(event)),
+	req->max_num_lpars = cpu_to_be16(1);
+	req->starting_ix = cpu_to_be16(idx);
+	req->max_ix = cpu_to_be16(1);
+
+	return 0;
+}
+
+static unsigned long single_24x7_request(struct perf_event *event, u64 *count)
+{
+	unsigned long ret;
+	struct hv_24x7_request_buffer *request_buffer;
+	struct hv_24x7_data_result_buffer *result_buffer;
+	struct hv_24x7_result *resb;
 
 	BUILD_BUG_ON(sizeof(*request_buffer) > 4096);
 	BUILD_BUG_ON(sizeof(*result_buffer) > 4096);
@@ -1008,63 +1110,28 @@ static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix,
 	request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
 	result_buffer = (void *)get_cpu_var(hv_24x7_resb);
 
-	memset(request_buffer, 0, 4096);
-	memset(result_buffer, 0, 4096);
-
-	*request_buffer = (struct reqb) {
-		.buf = {
-			.interface_version = HV_24X7_IF_VERSION_CURRENT,
-			.num_requests = 1,
-		},
-		.req = {
-			.performance_domain = domain,
-			.data_size = cpu_to_be16(8),
-			.data_offset = cpu_to_be32(offset),
-			.starting_lpar_ix = cpu_to_be16(lpar),
-			.max_num_lpars = cpu_to_be16(1),
-			.starting_ix = cpu_to_be16(ix),
-			.max_ix = cpu_to_be16(1),
-		}
-	};
+	init_24x7_request(request_buffer, result_buffer);
 
-	ret = plpar_hcall_norets(H_GET_24X7_DATA,
-			virt_to_phys(request_buffer), sizeof(*request_buffer),
-			virt_to_phys(result_buffer), sizeof(*result_buffer));
+	ret = add_event_to_24x7_request(event, request_buffer);
+	if (ret)
+		goto out;
 
+	ret = make_24x7_request(request_buffer, result_buffer);
 	if (ret) {
-		if (success_expected)
-			pr_err_ratelimited("hcall failed: %d %#x %#x %d => "
-				"0x%lx (%ld) detail=0x%x failing ix=%x\n",
-				domain, offset, ix, lpar, ret, ret,
-				result_buffer->buf.detailed_rc,
-				result_buffer->buf.failing_request_ix);
+		log_24x7_hcall(request_buffer, result_buffer, ret);
 		goto out;
 	}
 
-	*res = be64_to_cpu(result_buffer->result);
+	/* process result from hcall */
+	resb = &result_buffer->results[0];
+	*count = be64_to_cpu(resb->elements[0].element_data[0]);
 
 out:
+	put_cpu_var(hv_24x7_reqb);
+	put_cpu_var(hv_24x7_resb);
 	return ret;
 }
 
-static unsigned long event_24x7_request(struct perf_event *event, u64 *res,
-		bool success_expected)
-{
-	u16 idx;
-	unsigned domain = event_get_domain(event);
-
-	if (is_physical_domain(domain))
-		idx = event_get_core(event);
-	else
-		idx = event_get_vcpu(event);
-
-	return single_24x7_request(event_get_domain(event),
-			event_get_offset(event),
-			idx,
-			event_get_lpar(event),
-			res,
-			success_expected);
-}
 
 static int h_24x7_event_init(struct perf_event *event)
 {
@@ -1133,7 +1200,7 @@ static int h_24x7_event_init(struct perf_event *event)
 	}
 
 	/* see if the event complains */
-	if (event_24x7_request(event, &ct, false)) {
+	if (single_24x7_request(event, &ct)) {
 		pr_devel("test hcall failed\n");
 		return -EIO;
 	}
@@ -1145,7 +1212,7 @@ static u64 h_24x7_get_value(struct perf_event *event)
 {
 	unsigned long ret;
 	u64 ct;
-	ret = event_24x7_request(event, &ct, true);
+	ret = single_24x7_request(event, &ct);
 	if (ret)
 		/* We checked this in event init, shouldn't fail here... */
 		return 0;
@@ -1153,15 +1220,22 @@ static u64 h_24x7_get_value(struct perf_event *event)
 	return ct;
 }
 
-static void h_24x7_event_update(struct perf_event *event)
+static void update_event_count(struct perf_event *event, u64 now)
 {
 	s64 prev;
-	u64 now;
-	now = h_24x7_get_value(event);
+
 	prev = local64_xchg(&event->hw.prev_count, now);
 	local64_add(now - prev, &event->count);
 }
 
+static void h_24x7_event_read(struct perf_event *event)
+{
+	u64 now;
+
+	now = h_24x7_get_value(event);
+	update_event_count(event, now);
+}
+
 static void h_24x7_event_start(struct perf_event *event, int flags)
 {
 	if (flags & PERF_EF_RELOAD)
@@ -1170,7 +1244,7 @@ static void h_24x7_event_start(struct perf_event *event, int flags)
 
 static void h_24x7_event_stop(struct perf_event *event, int flags)
 {
-	h_24x7_event_update(event);
+	h_24x7_event_read(event);
 }
 
 static int h_24x7_event_add(struct perf_event *event, int flags)
@@ -1191,7 +1265,7 @@ static struct pmu h_24x7_pmu = {
 	.del = h_24x7_event_stop,
 	.start = h_24x7_event_start,
 	.stop = h_24x7_event_stop,
-	.read = h_24x7_event_update,
+	.read = h_24x7_event_read,
 };
 
 static int hv_24x7_init(void)
@@ -1219,10 +1293,13 @@ static int hv_24x7_init(void)
 	/* sampling not supported */
 	h_24x7_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
 
-	create_events_from_catalog(&event_group.attrs,
+	r = create_events_from_catalog(&event_group.attrs,
 					&event_desc_group.attrs,
 					&event_long_desc_group.attrs);
 
+	if (r)
+		return r;
+
 	r = perf_pmu_register(&h_24x7_pmu, h_24x7_pmu.name, -1);
 	if (r)
 		return r;
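
Taken together, the hv-24x7.c hunks replace the old on-stack request/result wrapper structs with the per-CPU 4k-aligned buffers and split the hcall into init/add/commit steps. A condensed sketch of the flow that the new single_24x7_request() follows (error logging trimmed; the wrapper name here is illustrative, the helpers and per-CPU variables are the ones added above):

/* Illustrative wrapper mirroring single_24x7_request(): read one counter. */
static unsigned long demo_read_24x7_counter(struct perf_event *event, u64 *count)
{
	struct hv_24x7_request_buffer *request_buffer;
	struct hv_24x7_data_result_buffer *result_buffer;
	unsigned long ret;

	/* Per-CPU, 4k-aligned scratch pages; get_cpu_var() pins us to this CPU. */
	request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
	result_buffer = (void *)get_cpu_var(hv_24x7_resb);

	init_24x7_request(request_buffer, result_buffer);	 /* zero + interface version */
	ret = add_event_to_24x7_request(event, request_buffer); /* fill requests[0] */
	if (!ret)
		ret = make_24x7_request(request_buffer, result_buffer); /* H_GET_24X7_DATA */
	if (!ret)	/* first element of the first result holds the counter value */
		*count = be64_to_cpu(result_buffer->results[0].elements[0].element_data[0]);

	put_cpu_var(hv_24x7_reqb);
	put_cpu_var(hv_24x7_resb);
	return ret;
}
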
diff --git a/arch/powerpc/perf/hv-24x7.h b/arch/powerpc/perf/hv-24x7.h
index 69cd4e690f58..0f9fa21a29f2 100644
--- a/arch/powerpc/perf/hv-24x7.h
+++ b/arch/powerpc/perf/hv-24x7.h
@@ -50,7 +50,7 @@ struct hv_24x7_request_buffer {
 	__u8 interface_version;
 	__u8 num_requests;
 	__u8 reserved[0xE];
-	struct hv_24x7_request requests[];
+	struct hv_24x7_request requests[1];
 } __packed;
 
 struct hv_24x7_result_element {
@@ -66,7 +66,7 @@ struct hv_24x7_result_element {
 	__be32 lpar_cfg_instance_id;
 
 	/* size = @result_element_data_size of cointaining result. */
-	__u8 element_data[];
+	__u64 element_data[1];
 } __packed;
 
 struct hv_24x7_result {
@@ -87,7 +87,7 @@ struct hv_24x7_result {
 	/* WARNING: only valid for first result element due to variable sizes
 	 * of result elements */
 	/* struct hv_24x7_result_element[@num_elements_returned] */
-	struct hv_24x7_result_element elements[];
+	struct hv_24x7_result_element elements[1];
 } __packed;
 
 struct hv_24x7_data_result_buffer {
@@ -103,7 +103,7 @@ struct hv_24x7_data_result_buffer {
 	__u8 reserved2[0x8];
 	/* WARNING: only valid for the first result due to variable sizes of
 	 * results */
-	struct hv_24x7_result results[]; /* [@num_results] */
+	struct hv_24x7_result results[1]; /* [@num_results] */
 } __packed;
 
 #endif
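
The hv-24x7.h hunks size each trailing array with one embedded element instead of a flexible array member. That is what lets hv-24x7.c index requests[0], results[0] and elements[0] directly, and it also gives the BUILD_BUG_ON() size checks something to measure, since sizeof() now accounts for one element. A small illustrative fragment (assumption: the same per-CPU buffers and the H24x7_DATA_BUFFER_SIZE constant defined in hv-24x7.c above; the function name is hypothetical):

static void demo_buffer_layout_check(void)
{
	struct hv_24x7_request_buffer *request_buffer;

	request_buffer = (void *)get_cpu_var(hv_24x7_reqb);

	/* With "requests[1]" (rather than "requests[]"), sizeof(*request_buffer)
	 * covers the fixed header plus one embedded request, so this check
	 * verifies that a header and at least one request fit in one 4k buffer.
	 */
	BUILD_BUG_ON(sizeof(*request_buffer) > H24x7_DATA_BUFFER_SIZE);

	/* The first slot is directly addressable without a wrapper struct. */
	request_buffer->requests[0].max_ix = cpu_to_be16(1);

	put_cpu_var(hv_24x7_reqb);
}
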