path: root/arch/sparc/kernel/perf_event.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-02 15:57:42 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-02 15:57:42 -0400
commit	a20acf99f75e49271381d65db097c9763060a1e8 (patch)
tree	3cf661125e86b7625171b96b885bf5395f62e684	/arch/sparc/kernel/perf_event.c
parent	437589a74b6a590d175f86cf9f7b2efcee7765e7 (diff)
parent	42a4172b6ebb4a419085c6caee7c135e51cae5ea (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next
Pull sparc updates from David Miller:
 "Largely this is simply adding support for the Niagara 4 cpu. Major
  areas are perf events (chip now supports 4 counters and can monitor
  any event on each counter), crypto (opcodes are available for sha1,
  sha256, sha512, md5, crc32c, AES, DES, CAMELLIA, and Kasumi, although
  the last is unsupported since we lack a generic crypto layer Kasumi
  implementation), and an optimized memcpy.

  Finally some cleanups by Peter Senna Tschudin."

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next: (47 commits)
  sparc64: Fix trailing whitespace in NG4 memcpy.
  sparc64: Fix comment type in NG4 copy from user.
  sparc64: Add SPARC-T4 optimized memcpy.
  drivers/sbus/char: removes unnecessary semicolon
  arch/sparc/kernel/pci_sun4v.c: removes unnecessary semicolon
  sparc64: Fix function argument comment in camellia_sparc64_key_expand asm.
  sparc64: Fix IV handling bug in des_sparc64_cbc_decrypt
  sparc64: Add auto-loading mechanism to crypto-opcode drivers.
  sparc64: Add missing pr_fmt define to crypto opcode drivers.
  sparc64: Adjust crypto priorities.
  sparc64: Use cpu_pgsz_mask for linear kernel mapping config.
  sparc64: Probe cpu page size support more portably.
  sparc64: Support 2GB and 16GB page sizes for kernel linear mappings.
  sparc64: Fix bugs in unrolled 256-bit loops.
  sparc64: Avoid code duplication in crypto assembler.
  sparc64: Unroll CTR crypt loops in AES driver.
  sparc64: Unroll ECB decryption loops in AES driver.
  sparc64: Unroll ECB encryption loops in AES driver.
  sparc64: Add ctr mode support to AES driver.
  sparc64: Move AES driver over to a methods based implementation.
  ...
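As background for the perf changes pulled in below: chips up to SPARC-T3 pack their two 32-bit counters into a single 64-bit PIC register behind one shared control register, while SPARC-T4 gives each of its four counters its own PIC and PCR. The following is a minimal standalone sketch of the two read paths, condensed from the patch's sparc_default_read_pmc()/sparc_vt_read_pmc() in the diff below; the pic_read() helper is a hypothetical stand-in for the kernel's pcr_ops->read_pic() accessor, used only to keep the sketch self-contained.

#include <stdint.h>

/* Hypothetical stand-in for pcr_ops->read_pic(reg): assumed to return
 * the raw 64-bit contents of PIC register 'reg'.
 */
extern uint64_t pic_read(int reg);

/* Pre-T4 (shared PIC): both 32-bit counters live in PIC register 0. */
static uint32_t legacy_read_counter(int idx)
{
	uint64_t val = pic_read(0);

	if (idx == 1)		/* upper counter: take the high half */
		val >>= 32;
	return (uint32_t)val;	/* low 32 bits either way */
}

/* SPARC-T4: one PIC register per counter, only 32 bits implemented. */
static uint32_t t4_read_counter(int idx)
{
	return (uint32_t)pic_read(idx);
}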
Diffstat (limited to 'arch/sparc/kernel/perf_event.c')
-rw-r--r--	arch/sparc/kernel/perf_event.c	516
1 files changed, 414 insertions, 102 deletions
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 5713957dcb8a..e48651dace1b 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -25,36 +25,48 @@
 #include <linux/atomic.h>
 #include <asm/nmi.h>
 #include <asm/pcr.h>
-#include <asm/perfctr.h>
 #include <asm/cacheflush.h>

 #include "kernel.h"
 #include "kstack.h"

-/* Sparc64 chips have two performance counters, 32-bits each, with
- * overflow interrupts generated on transition from 0xffffffff to 0.
- * The counters are accessed in one go using a 64-bit register.
+/* Two classes of sparc64 chips currently exist. All of which have
+ * 32-bit counters which can generate overflow interrupts on the
+ * transition from 0xffffffff to 0.
  *
- * Both counters are controlled using a single control register. The
- * only way to stop all sampling is to clear all of the context (user,
- * supervisor, hypervisor) sampling enable bits. But these bits apply
- * to both counters, thus the two counters can't be enabled/disabled
- * individually.
+ * All chips upto and including SPARC-T3 have two performance
+ * counters. The two 32-bit counters are accessed in one go using a
+ * single 64-bit register.
  *
- * The control register has two event fields, one for each of the two
- * counters. It's thus nearly impossible to have one counter going
- * while keeping the other one stopped. Therefore it is possible to
- * get overflow interrupts for counters not currently "in use" and
- * that condition must be checked in the overflow interrupt handler.
+ * On these older chips both counters are controlled using a single
+ * control register. The only way to stop all sampling is to clear
+ * all of the context (user, supervisor, hypervisor) sampling enable
+ * bits. But these bits apply to both counters, thus the two counters
+ * can't be enabled/disabled individually.
+ *
+ * Furthermore, the control register on these older chips have two
+ * event fields, one for each of the two counters. It's thus nearly
+ * impossible to have one counter going while keeping the other one
+ * stopped. Therefore it is possible to get overflow interrupts for
+ * counters not currently "in use" and that condition must be checked
+ * in the overflow interrupt handler.
  *
  * So we use a hack, in that we program inactive counters with the
  * "sw_count0" and "sw_count1" events. These count how many times
  * the instruction "sethi %hi(0xfc000), %g0" is executed. It's an
  * unusual way to encode a NOP and therefore will not trigger in
  * normal code.
+ *
+ * Starting with SPARC-T4 we have one control register per counter.
+ * And the counters are stored in individual registers. The registers
+ * for the counters are 64-bit but only a 32-bit counter is
+ * implemented. The event selections on SPARC-T4 lack any
+ * restrictions, therefore we can elide all of the complicated
+ * conflict resolution code we have for SPARC-T3 and earlier chips.
  */

-#define MAX_HWEVENTS 2
+#define MAX_HWEVENTS 4
+#define MAX_PCRS 4
 #define MAX_PERIOD ((1UL << 32) - 1)

 #define PIC_UPPER_INDEX 0
@@ -90,8 +102,8 @@ struct cpu_hw_events {
 	 */
 	int current_idx[MAX_HWEVENTS];

-	/* Software copy of %pcr register on this cpu. */
-	u64 pcr;
+	/* Software copy of %pcr register(s) on this cpu. */
+	u64 pcr[MAX_HWEVENTS];

 	/* Enabled/disable state. */
 	int enabled;
@@ -103,6 +115,8 @@ DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
 /* An event map describes the characteristics of a performance
  * counter event. In particular it gives the encoding as well as
  * a mask telling which counters the event can be measured on.
+ *
+ * The mask is unused on SPARC-T4 and later.
  */
 struct perf_event_map {
 	u16 encoding;
@@ -142,15 +156,53 @@ struct sparc_pmu {
 	const struct perf_event_map *(*event_map)(int);
 	const cache_map_t *cache_map;
 	int max_events;
+	u32 (*read_pmc)(int);
+	void (*write_pmc)(int, u64);
 	int upper_shift;
 	int lower_shift;
 	int event_mask;
+	int user_bit;
+	int priv_bit;
 	int hv_bit;
 	int irq_bit;
 	int upper_nop;
 	int lower_nop;
+	unsigned int flags;
+#define SPARC_PMU_ALL_EXCLUDES_SAME 0x00000001
+#define SPARC_PMU_HAS_CONFLICTS 0x00000002
+	int max_hw_events;
+	int num_pcrs;
+	int num_pic_regs;
 };

+static u32 sparc_default_read_pmc(int idx)
+{
+	u64 val;
+
+	val = pcr_ops->read_pic(0);
+	if (idx == PIC_UPPER_INDEX)
+		val >>= 32;
+
+	return val & 0xffffffff;
+}
+
+static void sparc_default_write_pmc(int idx, u64 val)
+{
+	u64 shift, mask, pic;
+
+	shift = 0;
+	if (idx == PIC_UPPER_INDEX)
+		shift = 32;
+
+	mask = ((u64) 0xffffffff) << shift;
+	val <<= shift;
+
+	pic = pcr_ops->read_pic(0);
+	pic &= ~mask;
+	pic |= val;
+	pcr_ops->write_pic(0, pic);
+}
+
 static const struct perf_event_map ultra3_perfmon_event_map[] = {
 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
@@ -268,11 +320,20 @@ static const struct sparc_pmu ultra3_pmu = {
 	.event_map = ultra3_event_map,
 	.cache_map = &ultra3_cache_map,
 	.max_events = ARRAY_SIZE(ultra3_perfmon_event_map),
+	.read_pmc = sparc_default_read_pmc,
+	.write_pmc = sparc_default_write_pmc,
 	.upper_shift = 11,
 	.lower_shift = 4,
 	.event_mask = 0x3f,
+	.user_bit = PCR_UTRACE,
+	.priv_bit = PCR_STRACE,
 	.upper_nop = 0x1c,
 	.lower_nop = 0x14,
+	.flags = (SPARC_PMU_ALL_EXCLUDES_SAME |
+		  SPARC_PMU_HAS_CONFLICTS),
+	.max_hw_events = 2,
+	.num_pcrs = 1,
+	.num_pic_regs = 1,
 };

 /* Niagara1 is very limited. The upper PIC is hard-locked to count
@@ -397,11 +458,20 @@ static const struct sparc_pmu niagara1_pmu = {
 	.event_map = niagara1_event_map,
 	.cache_map = &niagara1_cache_map,
 	.max_events = ARRAY_SIZE(niagara1_perfmon_event_map),
+	.read_pmc = sparc_default_read_pmc,
+	.write_pmc = sparc_default_write_pmc,
 	.upper_shift = 0,
 	.lower_shift = 4,
 	.event_mask = 0x7,
+	.user_bit = PCR_UTRACE,
+	.priv_bit = PCR_STRACE,
 	.upper_nop = 0x0,
 	.lower_nop = 0x0,
+	.flags = (SPARC_PMU_ALL_EXCLUDES_SAME |
+		  SPARC_PMU_HAS_CONFLICTS),
+	.max_hw_events = 2,
+	.num_pcrs = 1,
+	.num_pic_regs = 1,
 };

 static const struct perf_event_map niagara2_perfmon_event_map[] = {
@@ -523,13 +593,203 @@ static const struct sparc_pmu niagara2_pmu = {
 	.event_map = niagara2_event_map,
 	.cache_map = &niagara2_cache_map,
 	.max_events = ARRAY_SIZE(niagara2_perfmon_event_map),
+	.read_pmc = sparc_default_read_pmc,
+	.write_pmc = sparc_default_write_pmc,
 	.upper_shift = 19,
 	.lower_shift = 6,
 	.event_mask = 0xfff,
-	.hv_bit = 0x8,
+	.user_bit = PCR_UTRACE,
+	.priv_bit = PCR_STRACE,
+	.hv_bit = PCR_N2_HTRACE,
 	.irq_bit = 0x30,
 	.upper_nop = 0x220,
 	.lower_nop = 0x220,
+	.flags = (SPARC_PMU_ALL_EXCLUDES_SAME |
+		  SPARC_PMU_HAS_CONFLICTS),
+	.max_hw_events = 2,
+	.num_pcrs = 1,
+	.num_pic_regs = 1,
+};
+
+static const struct perf_event_map niagara4_perfmon_event_map[] = {
+	[PERF_COUNT_HW_CPU_CYCLES] = { (26 << 6) },
+	[PERF_COUNT_HW_INSTRUCTIONS] = { (3 << 6) | 0x3f },
+	[PERF_COUNT_HW_CACHE_REFERENCES] = { (3 << 6) | 0x04 },
+	[PERF_COUNT_HW_CACHE_MISSES] = { (16 << 6) | 0x07 },
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { (4 << 6) | 0x01 },
+	[PERF_COUNT_HW_BRANCH_MISSES] = { (25 << 6) | 0x0f },
+};
+
+static const struct perf_event_map *niagara4_event_map(int event_id)
+{
+	return &niagara4_perfmon_event_map[event_id];
+}
+
+static const cache_map_t niagara4_cache_map = {
+[C(L1D)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { (3 << 6) | 0x04 },
+		[C(RESULT_MISS)] = { (16 << 6) | 0x07 },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = { (3 << 6) | 0x08 },
+		[C(RESULT_MISS)] = { (16 << 6) | 0x07 },
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(L1I)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { (3 << 6) | 0x3f },
+		[C(RESULT_MISS)] = { (11 << 6) | 0x03 },
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
+		[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(LL)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { (3 << 6) | 0x04 },
+		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = { (3 << 6) | 0x08 },
+		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(DTLB)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { (17 << 6) | 0x3f },
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(ITLB)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { (6 << 6) | 0x3f },
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(BPU)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(NODE)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+};
+
+static u32 sparc_vt_read_pmc(int idx)
+{
+	u64 val = pcr_ops->read_pic(idx);
+
+	return val & 0xffffffff;
+}
+
+static void sparc_vt_write_pmc(int idx, u64 val)
+{
+	u64 pcr;
+
+	/* There seems to be an internal latch on the overflow event
+	 * on SPARC-T4 that prevents it from triggering unless you
+	 * update the PIC exactly as we do here. The requirement
+	 * seems to be that you have to turn off event counting in the
+	 * PCR around the PIC update.
+	 *
+	 * For example, after the following sequence:
+	 *
+	 * 1) set PIC to -1
+	 * 2) enable event counting and overflow reporting in PCR
+	 * 3) overflow triggers, softint 15 handler invoked
+	 * 4) clear OV bit in PCR
+	 * 5) write PIC to -1
+	 *
+	 * a subsequent overflow event will not trigger. This
+	 * sequence works on SPARC-T3 and previous chips.
+	 */
+	pcr = pcr_ops->read_pcr(idx);
+	pcr_ops->write_pcr(idx, PCR_N4_PICNPT);
+
+	pcr_ops->write_pic(idx, val & 0xffffffff);
+
+	pcr_ops->write_pcr(idx, pcr);
+}
+
+static const struct sparc_pmu niagara4_pmu = {
+	.event_map = niagara4_event_map,
+	.cache_map = &niagara4_cache_map,
+	.max_events = ARRAY_SIZE(niagara4_perfmon_event_map),
+	.read_pmc = sparc_vt_read_pmc,
+	.write_pmc = sparc_vt_write_pmc,
+	.upper_shift = 5,
+	.lower_shift = 5,
+	.event_mask = 0x7ff,
+	.user_bit = PCR_N4_UTRACE,
+	.priv_bit = PCR_N4_STRACE,
+
+	/* We explicitly don't support hypervisor tracing. The T4
+	 * generates the overflow event for precise events via a trap
+	 * which will not be generated (ie. it's completely lost) if
+	 * we happen to be in the hypervisor when the event triggers.
+	 * Essentially, the overflow event reporting is completely
+	 * unusable when you have hypervisor mode tracing enabled.
+	 */
+	.hv_bit = 0,
+
+	.irq_bit = PCR_N4_TOE,
+	.upper_nop = 0,
+	.lower_nop = 0,
+	.flags = 0,
+	.max_hw_events = 4,
+	.num_pcrs = 4,
+	.num_pic_regs = 4,
 };

 static const struct sparc_pmu *sparc_pmu __read_mostly;
@@ -558,55 +818,35 @@ static u64 nop_for_index(int idx)
 static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
 {
 	u64 val, mask = mask_for_index(idx);
+	int pcr_index = 0;

-	val = cpuc->pcr;
+	if (sparc_pmu->num_pcrs > 1)
+		pcr_index = idx;
+
+	val = cpuc->pcr[pcr_index];
 	val &= ~mask;
 	val |= hwc->config;
-	cpuc->pcr = val;
+	cpuc->pcr[pcr_index] = val;

-	pcr_ops->write(cpuc->pcr);
+	pcr_ops->write_pcr(pcr_index, cpuc->pcr[pcr_index]);
 }

 static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
 {
 	u64 mask = mask_for_index(idx);
 	u64 nop = nop_for_index(idx);
+	int pcr_index = 0;
 	u64 val;

-	val = cpuc->pcr;
+	if (sparc_pmu->num_pcrs > 1)
+		pcr_index = idx;
+
+	val = cpuc->pcr[pcr_index];
 	val &= ~mask;
 	val |= nop;
-	cpuc->pcr = val;
+	cpuc->pcr[pcr_index] = val;

-	pcr_ops->write(cpuc->pcr);
-}
-
-static u32 read_pmc(int idx)
-{
-	u64 val;
-
-	read_pic(val);
-	if (idx == PIC_UPPER_INDEX)
-		val >>= 32;
-
-	return val & 0xffffffff;
-}
-
-static void write_pmc(int idx, u64 val)
-{
-	u64 shift, mask, pic;
-
-	shift = 0;
-	if (idx == PIC_UPPER_INDEX)
-		shift = 32;
-
-	mask = ((u64) 0xffffffff) << shift;
-	val <<= shift;
-
-	read_pic(pic);
-	pic &= ~mask;
-	pic |= val;
-	write_pic(pic);
+	pcr_ops->write_pcr(pcr_index, cpuc->pcr[pcr_index]);
 }

 static u64 sparc_perf_event_update(struct perf_event *event,
@@ -618,7 +858,7 @@ static u64 sparc_perf_event_update(struct perf_event *event,

 again:
 	prev_raw_count = local64_read(&hwc->prev_count);
-	new_raw_count = read_pmc(idx);
+	new_raw_count = sparc_pmu->read_pmc(idx);

 	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
 			    new_raw_count) != prev_raw_count)
@@ -658,25 +898,17 @@ static int sparc_perf_event_set_period(struct perf_event *event,

 	local64_set(&hwc->prev_count, (u64)-left);

-	write_pmc(idx, (u64)(-left) & 0xffffffff);
+	sparc_pmu->write_pmc(idx, (u64)(-left) & 0xffffffff);

 	perf_event_update_userpage(event);

 	return ret;
 }

-/* If performance event entries have been added, move existing
- * events around (if necessary) and then assign new entries to
- * counters.
- */
-static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
+static void read_in_all_counters(struct cpu_hw_events *cpuc)
 {
 	int i;

-	if (!cpuc->n_added)
-		goto out;
-
-	/* Read in the counters which are moving. */
 	for (i = 0; i < cpuc->n_events; i++) {
 		struct perf_event *cp = cpuc->event[i];

@@ -687,6 +919,20 @@ static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
 			cpuc->current_idx[i] = PIC_NO_INDEX;
 		}
 	}
+}
+
+/* On this PMU all PICs are programmed using a single PCR. Calculate
+ * the combined control register value.
+ *
+ * For such chips we require that all of the events have the same
+ * configuration, so just fetch the settings from the first entry.
+ */
+static void calculate_single_pcr(struct cpu_hw_events *cpuc)
+{
+	int i;
+
+	if (!cpuc->n_added)
+		goto out;

 	/* Assign to counters all unassigned events. */
 	for (i = 0; i < cpuc->n_events; i++) {
@@ -702,20 +948,71 @@ static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
 		cpuc->current_idx[i] = idx;

 		enc = perf_event_get_enc(cpuc->events[i]);
-		pcr &= ~mask_for_index(idx);
+		cpuc->pcr[0] &= ~mask_for_index(idx);
 		if (hwc->state & PERF_HES_STOPPED)
-			pcr |= nop_for_index(idx);
+			cpuc->pcr[0] |= nop_for_index(idx);
 		else
-			pcr |= event_encoding(enc, idx);
+			cpuc->pcr[0] |= event_encoding(enc, idx);
 	}
 out:
-	return pcr;
+	cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
+}
+
+/* On this PMU each PIC has it's own PCR control register. */
+static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
+{
+	int i;
+
+	if (!cpuc->n_added)
+		goto out;
+
+	for (i = 0; i < cpuc->n_events; i++) {
+		struct perf_event *cp = cpuc->event[i];
+		struct hw_perf_event *hwc = &cp->hw;
+		int idx = hwc->idx;
+		u64 enc;
+
+		if (cpuc->current_idx[i] != PIC_NO_INDEX)
+			continue;
+
+		sparc_perf_event_set_period(cp, hwc, idx);
+		cpuc->current_idx[i] = idx;
+
+		enc = perf_event_get_enc(cpuc->events[i]);
+		cpuc->pcr[idx] &= ~mask_for_index(idx);
+		if (hwc->state & PERF_HES_STOPPED)
+			cpuc->pcr[idx] |= nop_for_index(idx);
+		else
+			cpuc->pcr[idx] |= event_encoding(enc, idx);
+	}
+out:
+	for (i = 0; i < cpuc->n_events; i++) {
+		struct perf_event *cp = cpuc->event[i];
+		int idx = cp->hw.idx;
+
+		cpuc->pcr[idx] |= cp->hw.config_base;
+	}
+}
+
+/* If performance event entries have been added, move existing events
+ * around (if necessary) and then assign new entries to counters.
+ */
+static void update_pcrs_for_enable(struct cpu_hw_events *cpuc)
+{
+	if (cpuc->n_added)
+		read_in_all_counters(cpuc);
+
+	if (sparc_pmu->num_pcrs == 1) {
+		calculate_single_pcr(cpuc);
+	} else {
+		calculate_multiple_pcrs(cpuc);
+	}
 }

 static void sparc_pmu_enable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	u64 pcr;
+	int i;

 	if (cpuc->enabled)
 		return;
@@ -723,26 +1020,17 @@ static void sparc_pmu_enable(struct pmu *pmu)
 	cpuc->enabled = 1;
 	barrier();

-	pcr = cpuc->pcr;
-	if (!cpuc->n_events) {
-		pcr = 0;
-	} else {
-		pcr = maybe_change_configuration(cpuc, pcr);
-
-		/* We require that all of the events have the same
-		 * configuration, so just fetch the settings from the
-		 * first entry.
-		 */
-		cpuc->pcr = pcr | cpuc->event[0]->hw.config_base;
-	}
+	if (cpuc->n_events)
+		update_pcrs_for_enable(cpuc);

-	pcr_ops->write(cpuc->pcr);
+	for (i = 0; i < sparc_pmu->num_pcrs; i++)
+		pcr_ops->write_pcr(i, cpuc->pcr[i]);
 }

 static void sparc_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	u64 val;
+	int i;

 	if (!cpuc->enabled)
 		return;
@@ -750,12 +1038,14 @@ static void sparc_pmu_disable(struct pmu *pmu)
 	cpuc->enabled = 0;
 	cpuc->n_added = 0;

-	val = cpuc->pcr;
-	val &= ~(PCR_UTRACE | PCR_STRACE |
-		 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
-	cpuc->pcr = val;
+	for (i = 0; i < sparc_pmu->num_pcrs; i++) {
+		u64 val = cpuc->pcr[i];

-	pcr_ops->write(cpuc->pcr);
+		val &= ~(sparc_pmu->user_bit | sparc_pmu->priv_bit |
+			 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
+		cpuc->pcr[i] = val;
+		pcr_ops->write_pcr(i, cpuc->pcr[i]);
+	}
 }

 static int active_event_index(struct cpu_hw_events *cpuc,
@@ -854,9 +1144,11 @@ static DEFINE_MUTEX(pmc_grab_mutex);
 static void perf_stop_nmi_watchdog(void *unused)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	int i;

 	stop_nmi_watchdog(NULL);
-	cpuc->pcr = pcr_ops->read();
+	for (i = 0; i < sparc_pmu->num_pcrs; i++)
+		cpuc->pcr[i] = pcr_ops->read_pcr(i);
 }

 void perf_event_grab_pmc(void)
@@ -942,9 +1234,17 @@ static int sparc_check_constraints(struct perf_event **evts,
 	if (!n_ev)
 		return 0;

-	if (n_ev > MAX_HWEVENTS)
+	if (n_ev > sparc_pmu->max_hw_events)
 		return -1;

+	if (!(sparc_pmu->flags & SPARC_PMU_HAS_CONFLICTS)) {
+		int i;
+
+		for (i = 0; i < n_ev; i++)
+			evts[i]->hw.idx = i;
+		return 0;
+	}
+
 	msk0 = perf_event_get_msk(events[0]);
 	if (n_ev == 1) {
 		if (msk0 & PIC_LOWER)
@@ -1000,6 +1300,9 @@ static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
 	struct perf_event *event;
 	int i, n, first;

+	if (!(sparc_pmu->flags & SPARC_PMU_ALL_EXCLUDES_SAME))
+		return 0;
+
 	n = n_prev + n_new;
 	if (n <= 1)
 		return 0;
@@ -1059,7 +1362,7 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
 	perf_pmu_disable(event->pmu);

 	n0 = cpuc->n_events;
-	if (n0 >= MAX_HWEVENTS)
+	if (n0 >= sparc_pmu->max_hw_events)
 		goto out;

 	cpuc->event[n0] = event;
@@ -1146,16 +1449,16 @@ static int sparc_pmu_event_init(struct perf_event *event)
 	/* We save the enable bits in the config_base. */
 	hwc->config_base = sparc_pmu->irq_bit;
 	if (!attr->exclude_user)
-		hwc->config_base |= PCR_UTRACE;
+		hwc->config_base |= sparc_pmu->user_bit;
 	if (!attr->exclude_kernel)
-		hwc->config_base |= PCR_STRACE;
+		hwc->config_base |= sparc_pmu->priv_bit;
 	if (!attr->exclude_hv)
 		hwc->config_base |= sparc_pmu->hv_bit;

 	n = 0;
 	if (event->group_leader != event) {
 		n = collect_events(event->group_leader,
-				   MAX_HWEVENTS - 1,
+				   sparc_pmu->max_hw_events - 1,
 				   evts, events, current_idx_dmy);
 		if (n < 0)
 			return -EINVAL;
@@ -1254,8 +1557,7 @@ static struct pmu pmu = {
 void perf_event_print_debug(void)
 {
 	unsigned long flags;
-	u64 pcr, pic;
-	int cpu;
+	int cpu, i;

 	if (!sparc_pmu)
 		return;
@@ -1264,12 +1566,13 @@ void perf_event_print_debug(void)

 	cpu = smp_processor_id();

-	pcr = pcr_ops->read();
-	read_pic(pic);
-
 	pr_info("\n");
-	pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n",
-		cpu, pcr, pic);
+	for (i = 0; i < sparc_pmu->num_pcrs; i++)
+		pr_info("CPU#%d: PCR%d[%016llx]\n",
+			cpu, i, pcr_ops->read_pcr(i));
+	for (i = 0; i < sparc_pmu->num_pic_regs; i++)
+		pr_info("CPU#%d: PIC%d[%016llx]\n",
+			cpu, i, pcr_ops->read_pic(i));

 	local_irq_restore(flags);
 }
@@ -1305,8 +1608,9 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
 	 * Do this before we peek at the counters to determine
 	 * overflow so we don't lose any events.
 	 */
-	if (sparc_pmu->irq_bit)
-		pcr_ops->write(cpuc->pcr);
+	if (sparc_pmu->irq_bit &&
+	    sparc_pmu->num_pcrs == 1)
+		pcr_ops->write_pcr(0, cpuc->pcr[0]);

 	for (i = 0; i < cpuc->n_events; i++) {
 		struct perf_event *event = cpuc->event[i];
@@ -1314,6 +1618,10 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
 		struct hw_perf_event *hwc;
 		u64 val;

+		if (sparc_pmu->irq_bit &&
+		    sparc_pmu->num_pcrs > 1)
+			pcr_ops->write_pcr(idx, cpuc->pcr[idx]);
+
 		hwc = &event->hw;
 		val = sparc_perf_event_update(event, hwc, idx);
 		if (val & (1ULL << 31))
@@ -1352,6 +1660,10 @@ static bool __init supported_pmu(void)
 		sparc_pmu = &niagara2_pmu;
 		return true;
 	}
+	if (!strcmp(sparc_pmu_type, "niagara4")) {
+		sparc_pmu = &niagara4_pmu;
+		return true;
+	}
 	return false;
 }

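One detail of the driver worth spelling out, since both the old write_pmc() and the new sparc_pmu->write_pmc() hook rely on it: the counters are 32-bit and only the wrap from 0xffffffff to 0 raises the overflow interrupt, so sparc_perf_event_set_period() above preloads the PIC with the two's complement of the remaining period and the interrupt fires after exactly 'left' events. Below is a small standalone sketch of that arithmetic; the concrete numbers are illustrative only, not taken from the patch.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t left = 100000;	/* events wanted before the next sample */

	/* Same expression the driver uses: preload the counter with
	 * -left truncated to 32 bits, i.e. 2^32 - left.
	 */
	uint32_t pic = (uint64_t)(-left) & 0xffffffff;

	/* Counting up from 'pic', the 32-bit counter wraps to 0 (and
	 * raises the overflow interrupt) after exactly 'left' events.
	 */
	printf("PIC preload: 0x%08x\n", pic);
	printf("events until overflow: %u\n", (uint32_t)(0x100000000ULL - pic));
	return 0;
}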
"hl opt">) { case HPI_OBJ_OSTREAM: hm.u.d.u.stream.object_type = HPI_OBJ_OSTREAM; u32TOINDEXES(h_stream, &adapter, &hm.u.d.u.stream.stream_index); break; case HPI_OBJ_ISTREAM: hm.u.d.u.stream.object_type = HPI_OBJ_ISTREAM; u32TOINDEXES(h_stream, &adapter, &hm.u.d.u.stream.stream_index); break; default: return HPI_ERROR_INVALID_STREAM; } if (adapter != hm.adapter_index) return HPI_ERROR_NO_INTERADAPTER_GROUPS; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_outstream_group_get_map(const struct hpi_hsubsys *ph_subsys, u32 h_outstream, u32 *poutstream_map, u32 *pinstream_map) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM, HPI_OSTREAM_GROUP_GETMAP); u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index); hpi_send_recv(&hm, &hr); if (poutstream_map) *poutstream_map = hr.u.d.u.group_info.outstream_group_map; if (pinstream_map) *pinstream_map = hr.u.d.u.group_info.instream_group_map; return hr.error; } u16 hpi_outstream_group_reset(const struct hpi_hsubsys *ph_subsys, u32 h_outstream) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM, HPI_OSTREAM_GROUP_RESET); u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index); hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_instream_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index, u16 instream_index, u32 *ph_instream) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_OPEN); hm.adapter_index = adapter_index; hm.obj_index = instream_index; hpi_send_recv(&hm, &hr); if (hr.error == 0) *ph_instream = hpi_indexes_to_handle(HPI_OBJ_ISTREAM, adapter_index, instream_index); else *ph_instream = 0; return hr.error; } u16 hpi_instream_close(const struct hpi_hsubsys *ph_subsys, u32 h_instream) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_HOSTBUFFER_FREE); u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index); hpi_send_recv(&hm, &hr); hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_GROUP_RESET); u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index); hpi_send_recv(&hm, &hr); hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_CLOSE); u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index); hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_instream_query_format(const struct hpi_hsubsys *ph_subsys, u32 h_instream, const struct hpi_format *p_format) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_QUERY_FORMAT); u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index); hpi_format_to_msg(&hm.u.d.u.data.format, p_format); hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_instream_set_format(const struct hpi_hsubsys *ph_subsys, u32 h_instream, const struct hpi_format *p_format) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_SET_FORMAT); u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index); hpi_format_to_msg(&hm.u.d.u.data.format, p_format); hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_instream_read_buf(const struct hpi_hsubsys *ph_subsys, u32 h_instream, u8 *pb_data, u32 bytes_to_read) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_READ); u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index); hm.u.d.u.data.data_size = bytes_to_read; hm.u.d.u.data.pb_data 
= pb_data; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_instream_start(const struct hpi_hsubsys *ph_subsys, u32 h_instream) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_START); u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index); hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_instream_wait_start(const struct hpi_hsubsys *ph_subsys, u32 h_instream) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_WAIT_START); u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index); hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_instream_stop(const struct hpi_hsubsys *ph_subsys, u32 h_instream) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_STOP); u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index); hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_instream_reset(const struct hpi_hsubsys *ph_subsys, u32 h_instream) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_RESET); u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index); hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_instream_get_info_ex(const struct hpi_hsubsys *ph_subsys, u32 h_instream, u16 *pw_state, u32 *pbuffer_size, u32 *pdata_recorded, u32 *psamples_recorded, u32 *pauxiliary_data_recorded) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_GET_INFO); u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index); hpi_send_recv(&hm, &hr); if (pw_state) *pw_state = hr.u.d.u.stream_info.state; if (pbuffer_size) *pbuffer_size = hr.u.d.u.stream_info.buffer_size; if (pdata_recorded) *pdata_recorded = hr.u.d.u.stream_info.data_available; if (psamples_recorded) *psamples_recorded = hr.u.d.u.stream_info.samples_transferred; if (pauxiliary_data_recorded) *pauxiliary_data_recorded = hr.u.d.u.stream_info.auxiliary_data_available; return hr.error; } u16 hpi_instream_ancillary_reset(const struct hpi_hsubsys *ph_subsys, u32 h_instream, u16 bytes_per_frame, u16 mode, u16 alignment, u16 idle_bit) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_ANC_RESET); u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index); hm.u.d.u.data.format.attributes = bytes_per_frame; hm.u.d.u.data.format.format = (mode << 8) | (alignment & 0xff); hm.u.d.u.data.format.channels = idle_bit; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_instream_ancillary_get_info(const struct hpi_hsubsys *ph_subsys, u32 h_instream, u32 *pframe_space) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_ANC_GET_INFO); u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index); hpi_send_recv(&hm, &hr); if (pframe_space) *pframe_space = (hr.u.d.u.stream_info.buffer_size - hr.u.d.u.stream_info.data_available) / sizeof(struct hpi_anc_frame); return hr.error; } u16 hpi_instream_ancillary_write(const struct hpi_hsubsys *ph_subsys, u32 h_instream, const struct hpi_anc_frame *p_anc_frame_buffer, u32 anc_frame_buffer_size_in_bytes, u32 number_of_ancillary_frames_to_write) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_ANC_WRITE); u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index); hm.u.d.u.data.pb_data = (u8 *)p_anc_frame_buffer; 
hm.u.d.u.data.data_size = number_of_ancillary_frames_to_write * sizeof(struct hpi_anc_frame); if (hm.u.d.u.data.data_size <= anc_frame_buffer_size_in_bytes) hpi_send_recv(&hm, &hr); else hr.error = HPI_ERROR_INVALID_DATA_TRANSFER; return hr.error; } u16 hpi_instream_host_buffer_allocate(const struct hpi_hsubsys *ph_subsys, u32 h_instream, u32 size_in_bytes) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_HOSTBUFFER_ALLOC); u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index); hm.u.d.u.data.data_size = size_in_bytes; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_instream_host_buffer_get_info(const struct hpi_hsubsys *ph_subsys, u32 h_instream, u8 **pp_buffer, struct hpi_hostbuffer_status **pp_status) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_HOSTBUFFER_GET_INFO); u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index); hpi_send_recv(&hm, &hr); if (hr.error == 0) { if (pp_buffer) *pp_buffer = hr.u.d.u.hostbuffer_info.p_buffer; if (pp_status) *pp_status = hr.u.d.u.hostbuffer_info.p_status; } return hr.error; } u16 hpi_instream_host_buffer_free(const struct hpi_hsubsys *ph_subsys, u32 h_instream) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_HOSTBUFFER_FREE); u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index); hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_instream_group_add(const struct hpi_hsubsys *ph_subsys, u32 h_instream, u32 h_stream) { struct hpi_message hm; struct hpi_response hr; u16 adapter; char c_obj_type; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_GROUP_ADD); hr.error = 0; u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index); c_obj_type = hpi_handle_object(h_stream); switch (c_obj_type) { case HPI_OBJ_OSTREAM: hm.u.d.u.stream.object_type = HPI_OBJ_OSTREAM; u32TOINDEXES(h_stream, &adapter, &hm.u.d.u.stream.stream_index); break; case HPI_OBJ_ISTREAM: hm.u.d.u.stream.object_type = HPI_OBJ_ISTREAM; u32TOINDEXES(h_stream, &adapter, &hm.u.d.u.stream.stream_index); break; default: return HPI_ERROR_INVALID_STREAM; } if (adapter != hm.adapter_index) return HPI_ERROR_NO_INTERADAPTER_GROUPS; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_instream_group_get_map(const struct hpi_hsubsys *ph_subsys, u32 h_instream, u32 *poutstream_map, u32 *pinstream_map) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_HOSTBUFFER_FREE); u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index); hpi_send_recv(&hm, &hr); if (poutstream_map) *poutstream_map = hr.u.d.u.group_info.outstream_group_map; if (pinstream_map) *pinstream_map = hr.u.d.u.group_info.instream_group_map; return hr.error; } u16 hpi_instream_group_reset(const struct hpi_hsubsys *ph_subsys, u32 h_instream) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_GROUP_RESET); u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index); hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_mixer_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index, u32 *ph_mixer) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN); hm.adapter_index = adapter_index; hpi_send_recv(&hm, &hr); if (hr.error == 0) *ph_mixer = hpi_indexes_to_handle(HPI_OBJ_MIXER, adapter_index, 0); else *ph_mixer = 0; return hr.error; } 
u16 hpi_mixer_close(const struct hpi_hsubsys *ph_subsys, u32 h_mixer) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER, HPI_MIXER_CLOSE); u32TOINDEX(h_mixer, &hm.adapter_index); hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_mixer_get_control(const struct hpi_hsubsys *ph_subsys, u32 h_mixer, u16 src_node_type, u16 src_node_type_index, u16 dst_node_type, u16 dst_node_type_index, u16 control_type, u32 *ph_control) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER, HPI_MIXER_GET_CONTROL); u32TOINDEX(h_mixer, &hm.adapter_index); hm.u.m.node_type1 = src_node_type; hm.u.m.node_index1 = src_node_type_index; hm.u.m.node_type2 = dst_node_type; hm.u.m.node_index2 = dst_node_type_index; hm.u.m.control_type = control_type; hpi_send_recv(&hm, &hr); if (hr.error == 0) *ph_control = hpi_indexes_to_handle(HPI_OBJ_CONTROL, hm.adapter_index, hr.u.m.control_index); else *ph_control = 0; return hr.error; } u16 hpi_mixer_get_control_by_index(const struct hpi_hsubsys *ph_subsys, u32 h_mixer, u16 control_index, u16 *pw_src_node_type, u16 *pw_src_node_index, u16 *pw_dst_node_type, u16 *pw_dst_node_index, u16 *pw_control_type, u32 *ph_control) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER, HPI_MIXER_GET_CONTROL_BY_INDEX); u32TOINDEX(h_mixer, &hm.adapter_index); hm.u.m.control_index = control_index; hpi_send_recv(&hm, &hr); if (pw_src_node_type) { *pw_src_node_type = hr.u.m.src_node_type + HPI_SOURCENODE_NONE; *pw_src_node_index = hr.u.m.src_node_index; *pw_dst_node_type = hr.u.m.dst_node_type + HPI_DESTNODE_NONE; *pw_dst_node_index = hr.u.m.dst_node_index; } if (pw_control_type) *pw_control_type = hr.u.m.control_index; if (ph_control) { if (hr.error == 0) *ph_control = hpi_indexes_to_handle(HPI_OBJ_CONTROL, hm.adapter_index, control_index); else *ph_control = 0; } return hr.error; } u16 hpi_mixer_store(const struct hpi_hsubsys *ph_subsys, u32 h_mixer, enum HPI_MIXER_STORE_COMMAND command, u16 index) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER, HPI_MIXER_STORE); u32TOINDEX(h_mixer, &hm.adapter_index); hm.u.mx.store.command = command; hm.u.mx.store.index = index; hpi_send_recv(&hm, &hr); return hr.error; } static u16 hpi_control_param_set(const struct hpi_hsubsys *ph_subsys, const u32 h_control, const u16 attrib, const u32 param1, const u32 param2) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL, HPI_CONTROL_SET_STATE); u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index); hm.u.c.attribute = attrib; hm.u.c.param1 = param1; hm.u.c.param2 = param2; hpi_send_recv(&hm, &hr); return hr.error; } static u16 hpi_control_log_set2(u32 h_control, u16 attrib, short sv0, short sv1) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL, HPI_CONTROL_SET_STATE); u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index); hm.u.c.attribute = attrib; hm.u.c.an_log_value[0] = sv0; hm.u.c.an_log_value[1] = sv1; hpi_send_recv(&hm, &hr); return hr.error; } static u16 hpi_control_param_get(const struct hpi_hsubsys *ph_subsys, const u32 h_control, const u16 attrib, u32 param1, u32 param2, u32 *pparam1, u32 *pparam2) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL, HPI_CONTROL_GET_STATE); u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index); hm.u.c.attribute = attrib; 
hm.u.c.param1 = param1; hm.u.c.param2 = param2; hpi_send_recv(&hm, &hr); *pparam1 = hr.u.c.param1; if (pparam2) *pparam2 = hr.u.c.param2; return hr.error; } #define hpi_control_param1_get(s, h, a, p1) \ hpi_control_param_get(s, h, a, 0, 0, p1, NULL) #define hpi_control_param2_get(s, h, a, p1, p2) \ hpi_control_param_get(s, h, a, 0, 0, p1, p2) static u16 hpi_control_log_get2(const struct hpi_hsubsys *ph_subsys, u32 h_control, u16 attrib, short *sv0, short *sv1) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL, HPI_CONTROL_GET_STATE); u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index); hm.u.c.attribute = attrib; hpi_send_recv(&hm, &hr); *sv0 = hr.u.c.an_log_value[0]; if (sv1) *sv1 = hr.u.c.an_log_value[1]; return hr.error; } static u16 hpi_control_query(const struct hpi_hsubsys *ph_subsys, const u32 h_control, const u16 attrib, const u32 index, const u32 param, u32 *psetting) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL, HPI_CONTROL_GET_INFO); u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index); hm.u.c.attribute = attrib; hm.u.c.param1 = index; hm.u.c.param2 = param; hpi_send_recv(&hm, &hr); *psetting = hr.u.c.param1; return hr.error; } static u16 hpi_control_get_string(const u32 h_control, const u16 attribute, char *psz_string, const u32 string_length) { unsigned int sub_string_index = 0, j = 0; char c = 0; unsigned int n = 0; u16 hE = 0; if ((string_length < 1) || (string_length > 256)) return HPI_ERROR_INVALID_CONTROL_VALUE; for (sub_string_index = 0; sub_string_index < string_length; sub_string_index += 8) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL, HPI_CONTROL_GET_STATE); u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index); hm.u.c.attribute = attribute; hm.u.c.param1 = sub_string_index; hm.u.c.param2 = 0; hpi_send_recv(&hm, &hr); if (sub_string_index == 0 && (hr.u.cu.chars8.remaining_chars + 8) > string_length) return HPI_ERROR_INVALID_CONTROL_VALUE; if (hr.error) { hE = hr.error; break; } for (j = 0; j < 8; j++) { c = hr.u.cu.chars8.sz_data[j]; psz_string[sub_string_index + j] = c; n++; if (n >= string_length) { psz_string[string_length - 1] = 0; hE = HPI_ERROR_INVALID_CONTROL_VALUE; break; } if (c == 0) break; } if ((hr.u.cu.chars8.remaining_chars == 0) && ((sub_string_index + j) < string_length) && (c != 0)) { c = 0; psz_string[sub_string_index + j] = c; } if (c == 0) break; } return hE; } u16 HPI_AESEBU__receiver_query_format(const struct hpi_hsubsys *ph_subsys, const u32 h_aes_rx, const u32 index, u16 *pw_format) { u32 qr; u16 err; err = hpi_control_query(ph_subsys, h_aes_rx, HPI_AESEBURX_FORMAT, index, 0, &qr); *pw_format = (u16)qr; return err; } u16 HPI_AESEBU__receiver_set_format(const struct hpi_hsubsys *ph_subsys, u32 h_control, u16 format) { return hpi_control_param_set(ph_subsys, h_control, HPI_AESEBURX_FORMAT, format, 0); } u16 HPI_AESEBU__receiver_get_format(const struct hpi_hsubsys *ph_subsys, u32 h_control, u16 *pw_format) { u16 err; u32 param; err = hpi_control_param1_get(ph_subsys, h_control, HPI_AESEBURX_FORMAT, &param); if (!err && pw_format) *pw_format = (u16)param; return err; } u16 HPI_AESEBU__receiver_get_sample_rate(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 *psample_rate) { return hpi_control_param1_get(ph_subsys, h_control, HPI_AESEBURX_SAMPLERATE, psample_rate); } u16 HPI_AESEBU__receiver_get_user_data(const struct hpi_hsubsys *ph_subsys, u32 h_control, u16 index, 
u16 *pw_data) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL, HPI_CONTROL_GET_STATE); u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index); hm.u.c.attribute = HPI_AESEBURX_USERDATA; hm.u.c.param1 = index; hpi_send_recv(&hm, &hr); if (pw_data) *pw_data = (u16)hr.u.c.param2; return hr.error; } u16 HPI_AESEBU__receiver_get_channel_status(const struct hpi_hsubsys *ph_subsys, u32 h_control, u16 index, u16 *pw_data) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL, HPI_CONTROL_GET_STATE); u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index); hm.u.c.attribute = HPI_AESEBURX_CHANNELSTATUS; hm.u.c.param1 = index; hpi_send_recv(&hm, &hr); if (pw_data) *pw_data = (u16)hr.u.c.param2; return hr.error; } u16 HPI_AESEBU__receiver_get_error_status(const struct hpi_hsubsys *ph_subsys, u32 h_control, u16 *pw_error_data) { u32 error_data = 0; u16 error = 0; error = hpi_control_param1_get(ph_subsys, h_control, HPI_AESEBURX_ERRORSTATUS, &error_data); if (pw_error_data) *pw_error_data = (u16)error_data; return error; } u16 HPI_AESEBU__transmitter_set_sample_rate(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 sample_rate) { return hpi_control_param_set(ph_subsys, h_control, HPI_AESEBUTX_SAMPLERATE, sample_rate, 0); } u16 HPI_AESEBU__transmitter_set_user_data(const struct hpi_hsubsys *ph_subsys, u32 h_control, u16 index, u16 data) { return hpi_control_param_set(ph_subsys, h_control, HPI_AESEBUTX_USERDATA, index, data); } u16 HPI_AESEBU__transmitter_set_channel_status(const struct hpi_hsubsys *ph_subsys, u32 h_control, u16 index, u16 data) { return hpi_control_param_set(ph_subsys, h_control, HPI_AESEBUTX_CHANNELSTATUS, index, data); } u16 HPI_AESEBU__transmitter_get_channel_status(const struct hpi_hsubsys *ph_subsys, u32 h_control, u16 index, u16 *pw_data) { return HPI_ERROR_INVALID_OPERATION; } u16 HPI_AESEBU__transmitter_query_format(const struct hpi_hsubsys *ph_subsys, const u32 h_aes_tx, const u32 index, u16 *pw_format) { u32 qr; u16 err; err = hpi_control_query(ph_subsys, h_aes_tx, HPI_AESEBUTX_FORMAT, index, 0, &qr); *pw_format = (u16)qr; return err; } u16 HPI_AESEBU__transmitter_set_format(const struct hpi_hsubsys *ph_subsys, u32 h_control, u16 output_format) { return hpi_control_param_set(ph_subsys, h_control, HPI_AESEBUTX_FORMAT, output_format, 0); } u16 HPI_AESEBU__transmitter_get_format(const struct hpi_hsubsys *ph_subsys, u32 h_control, u16 *pw_output_format) { u16 err; u32 param; err = hpi_control_param1_get(ph_subsys, h_control, HPI_AESEBUTX_FORMAT, &param); if (!err && pw_output_format) *pw_output_format = (u16)param; return err; } u16 hpi_bitstream_set_clock_edge(const struct hpi_hsubsys *ph_subsys, u32 h_control, u16 edge_type) { return hpi_control_param_set(ph_subsys, h_control, HPI_BITSTREAM_CLOCK_EDGE, edge_type, 0); } u16 hpi_bitstream_set_data_polarity(const struct hpi_hsubsys *ph_subsys, u32 h_control, u16 polarity) { return hpi_control_param_set(ph_subsys, h_control, HPI_BITSTREAM_DATA_POLARITY, polarity, 0); } u16 hpi_bitstream_get_activity(const struct hpi_hsubsys *ph_subsys, u32 h_control, u16 *pw_clk_activity, u16 *pw_data_activity) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL, HPI_CONTROL_GET_STATE); u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index); hm.u.c.attribute = HPI_BITSTREAM_ACTIVITY; hpi_send_recv(&hm, &hr); if (pw_clk_activity) *pw_clk_activity = (u16)hr.u.c.param1; if (pw_data_activity) 
*pw_data_activity = (u16)hr.u.c.param2; return hr.error; } u16 hpi_channel_mode_query_mode(const struct hpi_hsubsys *ph_subsys, const u32 h_mode, const u32 index, u16 *pw_mode) { u32 qr; u16 err; err = hpi_control_query(ph_subsys, h_mode, HPI_CHANNEL_MODE_MODE, index, 0, &qr); *pw_mode = (u16)qr; return err; } u16 hpi_channel_mode_set(const struct hpi_hsubsys *ph_subsys, u32 h_control, u16 mode) { return hpi_control_param_set(ph_subsys, h_control, HPI_CHANNEL_MODE_MODE, mode, 0); } u16 hpi_channel_mode_get(const struct hpi_hsubsys *ph_subsys, u32 h_control, u16 *mode) { u32 mode32 = 0; u16 error = hpi_control_param1_get(ph_subsys, h_control, HPI_CHANNEL_MODE_MODE, &mode32); if (mode) *mode = (u16)mode32; return error; } u16 hpi_cobranet_hmi_write(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 hmi_address, u32 byte_count, u8 *pb_data) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROLEX, HPI_CONTROL_SET_STATE); u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index); hm.u.cx.u.cobranet_data.byte_count = byte_count; hm.u.cx.u.cobranet_data.hmi_address = hmi_address; if (byte_count <= 8) { memcpy(hm.u.cx.u.cobranet_data.data, pb_data, byte_count); hm.u.cx.attribute = HPI_COBRANET_SET; } else { hm.u.cx.u.cobranet_bigdata.pb_data = pb_data; hm.u.cx.attribute = HPI_COBRANET_SET_DATA; } hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_cobranet_hmi_read(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 hmi_address, u32 max_byte_count, u32 *pbyte_count, u8 *pb_data) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROLEX, HPI_CONTROL_GET_STATE); u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index); hm.u.cx.u.cobranet_data.byte_count = max_byte_count; hm.u.cx.u.cobranet_data.hmi_address = hmi_address; if (max_byte_count <= 8) { hm.u.cx.attribute = HPI_COBRANET_GET; } else { hm.u.cx.u.cobranet_bigdata.pb_data = pb_data; hm.u.cx.attribute = HPI_COBRANET_GET_DATA; } hpi_send_recv(&hm, &hr); if (!hr.error && pb_data) { *pbyte_count = hr.u.cx.u.cobranet_data.byte_count; if (*pbyte_count < max_byte_count) max_byte_count = *pbyte_count; if (hm.u.cx.attribute == HPI_COBRANET_GET) { memcpy(pb_data, hr.u.cx.u.cobranet_data.data, max_byte_count); } else { } } return hr.error; } u16 hpi_cobranet_hmi_get_status(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 *pstatus, u32 *preadable_size, u32 *pwriteable_size) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROLEX, HPI_CONTROL_GET_STATE); u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index); hm.u.cx.attribute = HPI_COBRANET_GET_STATUS; hpi_send_recv(&hm, &hr); if (!hr.error) { if (pstatus) *pstatus = hr.u.cx.u.cobranet_status.status; if (preadable_size) *preadable_size = hr.u.cx.u.cobranet_status.readable_size; if (pwriteable_size) *pwriteable_size = hr.u.cx.u.cobranet_status.writeable_size; } return hr.error; } u16 hpi_cobranet_getI_paddress(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 *pi_paddress) { u32 byte_count; u32 iP; u16 error; error = hpi_cobranet_hmi_read(ph_subsys, h_control, HPI_COBRANET_HMI_cobra_ip_mon_currentIP, 4, &byte_count, (u8 *)&iP); *pi_paddress = ((iP & 0xff000000) >> 8) | ((iP & 0x00ff0000) << 8) | ((iP & 0x0000ff00) >> 8) | ((iP & 0x000000ff) << 8); if (error) *pi_paddress = 0; return error; } u16 hpi_cobranet_setI_paddress(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 i_paddress) { u32 iP; u16 error; iP = ((i_paddress & 0xff000000) >> 
8) | ((i_paddress & 0x00ff0000) << 8) | ((i_paddress & 0x0000ff00) >> 8) | ((i_paddress & 0x000000ff) << 8); error = hpi_cobranet_hmi_write(ph_subsys, h_control, HPI_COBRANET_HMI_cobra_ip_mon_currentIP, 4, (u8 *)&iP); return error; } u16 hpi_cobranet_get_staticI_paddress(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 *pi_paddress) { u32 byte_count; u32 iP; u16 error; error = hpi_cobranet_hmi_read(ph_subsys, h_control, HPI_COBRANET_HMI_cobra_ip_mon_staticIP, 4, &byte_count, (u8 *)&iP); *pi_paddress = ((iP & 0xff000000) >> 8) | ((iP & 0x00ff0000) << 8) | ((iP & 0x0000ff00) >> 8) | ((iP & 0x000000ff) << 8); if (error) *pi_paddress = 0; return error; } u16 hpi_cobranet_set_staticI_paddress(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 i_paddress) { u32 iP; u16 error; iP = ((i_paddress & 0xff000000) >> 8) | ((i_paddress & 0x00ff0000) << 8) | ((i_paddress & 0x0000ff00) >> 8) | ((i_paddress & 0x000000ff) << 8); error = hpi_cobranet_hmi_write(ph_subsys, h_control, HPI_COBRANET_HMI_cobra_ip_mon_staticIP, 4, (u8 *)&iP); return error; } u16 hpi_cobranet_getMA_caddress(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 *pmAC_MS_bs, u32 *pmAC_LS_bs) { u32 byte_count; u16 error; u32 mAC; error = hpi_cobranet_hmi_read(ph_subsys, h_control, HPI_COBRANET_HMI_cobra_if_phy_address, 4, &byte_count, (u8 *)&mAC); *pmAC_MS_bs = ((mAC & 0xff000000) >> 8) | ((mAC & 0x00ff0000) << 8) | ((mAC & 0x0000ff00) >> 8) | ((mAC & 0x000000ff) << 8); error += hpi_cobranet_hmi_read(ph_subsys, h_control, HPI_COBRANET_HMI_cobra_if_phy_address + 1, 4, &byte_count, (u8 *)&mAC); *pmAC_LS_bs = ((mAC & 0xff000000) >> 8) | ((mAC & 0x00ff0000) << 8) | ((mAC & 0x0000ff00) >> 8) | ((mAC & 0x000000ff) << 8); if (error) { *pmAC_MS_bs = 0; *pmAC_LS_bs = 0; } return error; } u16 hpi_compander_set_enable(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 enable) { return hpi_control_param_set(ph_subsys, h_control, HPI_GENERIC_ENABLE, enable, 0); } u16 hpi_compander_get_enable(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 *enable) { return hpi_control_param1_get(ph_subsys, h_control, HPI_GENERIC_ENABLE, enable); } u16 hpi_compander_set_makeup_gain(const struct hpi_hsubsys *ph_subsys, u32 h_control, short makeup_gain0_01dB) { return hpi_control_log_set2(h_control, HPI_COMPANDER_MAKEUPGAIN, makeup_gain0_01dB, 0); } u16 hpi_compander_get_makeup_gain(const struct hpi_hsubsys *ph_subsys, u32 h_control, short *makeup_gain0_01dB) { return hpi_control_log_get2(ph_subsys, h_control, HPI_COMPANDER_MAKEUPGAIN, makeup_gain0_01dB, NULL); } u16 hpi_compander_set_attack_time_constant(const struct hpi_hsubsys *ph_subsys, u32 h_control, unsigned int index, u32 attack) { return hpi_control_param_set(ph_subsys, h_control, HPI_COMPANDER_ATTACK, attack, index); } u16 hpi_compander_get_attack_time_constant(const struct hpi_hsubsys *ph_subsys, u32 h_control, unsigned int index, u32 *attack) { return hpi_control_param_get(ph_subsys, h_control, HPI_COMPANDER_ATTACK, 0, index, attack, NULL); } u16 hpi_compander_set_decay_time_constant(const struct hpi_hsubsys *ph_subsys, u32 h_control, unsigned int index, u32 decay) { return hpi_control_param_set(ph_subsys, h_control, HPI_COMPANDER_DECAY, decay, index); } u16 hpi_compander_get_decay_time_constant(const struct hpi_hsubsys *ph_subsys, u32 h_control, unsigned int index, u32 *decay) { return hpi_control_param_get(ph_subsys, h_control, HPI_COMPANDER_DECAY, 0, index, decay, NULL); } u16 hpi_compander_set_threshold(const struct hpi_hsubsys *ph_subsys, u32 h_control, unsigned int index, 
		short threshold0_01dB)
{
	struct hpi_message hm;
	struct hpi_response hr;
	hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
		HPI_CONTROL_SET_STATE);
	u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
	hm.u.c.attribute = HPI_COMPANDER_THRESHOLD;
	hm.u.c.param2 = index;
	hm.u.c.an_log_value[0] = threshold0_01dB;

	hpi_send_recv(&hm, &hr);

	return hr.error;
}

u16 hpi_compander_get_threshold(const struct hpi_hsubsys *ph_subsys,
	u32 h_control, unsigned int index, short *threshold0_01dB)
{
	struct hpi_message hm;
	struct hpi_response hr;
	hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
		HPI_CONTROL_GET_STATE);
	u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
	hm.u.c.attribute = HPI_COMPANDER_THRESHOLD;
	hm.u.c.param2 = index;

	hpi_send_recv(&hm, &hr);

	*threshold0_01dB = hr.u.c.an_log_value[0];

	return hr.error;
}

u16 hpi_compander_set_ratio(const struct hpi_hsubsys *ph_subsys,
	u32 h_control, u32 index, u32 ratio100)
{
	return hpi_control_param_set(ph_subsys, h_control,
		HPI_COMPANDER_RATIO, ratio100, index);
}

u16 hpi_compander_get_ratio(const struct hpi_hsubsys *ph_subsys,
	u32 h_control, u32 index, u32 *ratio100)
{
	return hpi_control_param_get(ph_subsys, h_control,
		HPI_COMPANDER_RATIO, 0, index, ratio100, NULL);
}

u16 hpi_level_query_range(const struct hpi_hsubsys *ph_subsys, u32 h_control,
	short *min_gain_01dB, short *max_gain_01dB, short *step_gain_01dB)
{
	struct hpi_message hm;
	struct hpi_response hr;
	hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
		HPI_CONTROL_GET_STATE);
	u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
	hm.u.c.attribute = HPI_LEVEL_RANGE;

	hpi_send_recv(&hm, &hr);

	/* On error, report a zeroed range back to the caller. */
	if (hr.error) {
		hr.u.c.an_log_value[0] = 0;
		hr.u.c.an_log_value[1] = 0;
		hr.u.c.param1 = 0;
	}
	if (min_gain_01dB)
		*min_gain_01dB = hr.u.c.an_log_value[0];
	if (max_gain_01dB)
		*max_gain_01dB = hr.u.c.an_log_value[1];
	if (step_gain_01dB)
		*step_gain_01dB = (short)hr.u.c.param1;

	return hr.error;
}

u16 hpi_level_set_gain(const struct hpi_hsubsys *ph_subsys, u32 h_control,
	short an_gain0_01dB[HPI_MAX_CHANNELS])
{
	return hpi_control_log_set2(h_control, HPI_LEVEL_GAIN,
		an_gain0_01dB[0], an_gain0_01dB[1]);
}

u16 hpi_level_get_gain(const struct hpi_hsubsys *ph_subsys, u32 h_control,
	short an_gain0_01dB[HPI_MAX_CHANNELS])
{
	return hpi_control_log_get2(ph_subsys, h_control, HPI_LEVEL_GAIN,
		&an_gain0_01dB[0], &an_gain0_01dB[1]);
}

u16 hpi_meter_query_channels(const struct hpi_hsubsys *ph_subsys,
	const u32 h_meter, u32 *p_channels)
{
	return hpi_control_query(ph_subsys, h_meter, HPI_METER_NUM_CHANNELS,
		0, 0, p_channels);
}

u16 hpi_meter_get_peak(const struct hpi_hsubsys *ph_subsys, u32 h_control,
	short an_peakdB[HPI_MAX_CHANNELS])
{
	short i = 0;
	struct hpi_message hm;
	struct hpi_response hr;
	hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
		HPI_CONTROL_GET_STATE);
	u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
	hm.u.c.attribute = HPI_METER_PEAK;

	hpi_send_recv(&hm, &hr);

	if (!hr.error)
		memcpy(an_peakdB, hr.u.c.an_log_value,
			sizeof(short) * HPI_MAX_CHANNELS);
	else
		/* On error, report the minimum level on every channel. */
		for (i = 0; i < HPI_MAX_CHANNELS; i++)
			an_peakdB[i] = HPI_METER_MINIMUM;

	return hr.error;
}

u16 hpi_meter_get_rms(const struct hpi_hsubsys *ph_subsys, u32 h_control,
	short an_rmsdB[HPI_MAX_CHANNELS])
{
	short i = 0;
	struct hpi_message hm;
	struct hpi_response hr;
	hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
		HPI_CONTROL_GET_STATE);
	u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
	hm.u.c.attribute = HPI_METER_RMS;

	hpi_send_recv(&hm, &hr);

	if (!hr.error)
		memcpy(an_rmsdB, hr.u.c.an_log_value,
sizeof(short) * HPI_MAX_CHANNELS); else for (i = 0; i < HPI_MAX_CHANNELS; i++) an_rmsdB[i] = HPI_METER_MINIMUM; return hr.error; } u16 hpi_meter_set_rms_ballistics(const struct hpi_hsubsys *ph_subsys, u32 h_control, u16 attack, u16 decay) { return hpi_control_param_set(ph_subsys, h_control, HPI_METER_RMS_BALLISTICS, attack, decay); } u16 hpi_meter_get_rms_ballistics(const struct hpi_hsubsys *ph_subsys, u32 h_control, u16 *pn_attack, u16 *pn_decay) { u32 attack; u32 decay; u16 error; error = hpi_control_param2_get(ph_subsys, h_control, HPI_METER_RMS_BALLISTICS, &attack, &decay); if (pn_attack) *pn_attack = (unsigned short)attack; if (pn_decay) *pn_decay = (unsigned short)decay; return error; } u16 hpi_meter_set_peak_ballistics(const struct hpi_hsubsys *ph_subsys, u32 h_control, u16 attack, u16 decay) { return hpi_control_param_set(ph_subsys, h_control, HPI_METER_PEAK_BALLISTICS, attack, decay); } u16 hpi_meter_get_peak_ballistics(const struct hpi_hsubsys *ph_subsys, u32 h_control, u16 *pn_attack, u16 *pn_decay) { u32 attack; u32 decay; u16 error; error = hpi_control_param2_get(ph_subsys, h_control, HPI_METER_PEAK_BALLISTICS, &attack, &decay); if (pn_attack) *pn_attack = (short)attack; if (pn_decay) *pn_decay = (short)decay; return error; } u16 hpi_microphone_set_phantom_power(const struct hpi_hsubsys *ph_subsys, u32 h_control, u16 on_off) { return hpi_control_param_set(ph_subsys, h_control, HPI_MICROPHONE_PHANTOM_POWER, (u32)on_off, 0); } u16 hpi_microphone_get_phantom_power(const struct hpi_hsubsys *ph_subsys, u32 h_control, u16 *pw_on_off) { u16 error = 0; u32 on_off = 0; error = hpi_control_param1_get(ph_subsys, h_control, HPI_MICROPHONE_PHANTOM_POWER, &on_off); if (pw_on_off) *pw_on_off = (u16)on_off; return error; } u16 hpi_multiplexer_set_source(const struct hpi_hsubsys *ph_subsys, u32 h_control, u16 source_node_type, u16 source_node_index) { return hpi_control_param_set(ph_subsys, h_control, HPI_MULTIPLEXER_SOURCE, source_node_type, source_node_index); } u16 hpi_multiplexer_get_source(const struct hpi_hsubsys *ph_subsys, u32 h_control, u16 *source_node_type, u16 *source_node_index) { u32 node, index; u16 error = hpi_control_param2_get(ph_subsys, h_control, HPI_MULTIPLEXER_SOURCE, &node, &index); if (source_node_type) *source_node_type = (u16)node; if (source_node_index) *source_node_index = (u16)index; return error; } u16 hpi_multiplexer_query_source(const struct hpi_hsubsys *ph_subsys, u32 h_control, u16 index, u16 *source_node_type, u16 *source_node_index) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL, HPI_CONTROL_GET_STATE); u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index); hm.u.c.attribute = HPI_MULTIPLEXER_QUERYSOURCE; hm.u.c.param1 = index; hpi_send_recv(&hm, &hr); if (source_node_type) *source_node_type = (u16)hr.u.c.param1; if (source_node_index) *source_node_index = (u16)hr.u.c.param2; return hr.error; } u16 hpi_parametricEQ__get_info(const struct hpi_hsubsys *ph_subsys, u32 h_control, u16 *pw_number_of_bands, u16 *pw_on_off) { u32 oB = 0; u32 oO = 0; u16 error = 0; error = hpi_control_param2_get(ph_subsys, h_control, HPI_EQUALIZER_NUM_FILTERS, &oO, &oB); if (pw_number_of_bands) *pw_number_of_bands = (u16)oB; if (pw_on_off) *pw_on_off = (u16)oO; return error; } u16 hpi_parametricEQ__set_state(const struct hpi_hsubsys *ph_subsys, u32 h_control, u16 on_off) { return hpi_control_param_set(ph_subsys, h_control, HPI_EQUALIZER_NUM_FILTERS, on_off, 0); } u16 hpi_parametricEQ__get_band(const struct hpi_hsubsys 
*ph_subsys, u32 h_control, u16 index, u16 *pn_type, u32 *pfrequency_hz, short *pnQ100, short *pn_gain0_01dB) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL, HPI_CONTROL_GET_STATE); u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index); hm.u.c.attribute = HPI_EQUALIZER_FILTER; hm.u.c.param2 = index; hpi_send_recv(&hm, &hr); if (pfrequency_hz) *pfrequency_hz = hr.u.c.param1; if (pn_type) *pn_type = (u16)(hr.u.c.param2 >> 16); if (pnQ100) *pnQ100 = hr.u.c.an_log_value[1]; if (pn_gain0_01dB) *pn_gain0_01dB = hr.u.c.an_log_value[0]; return hr.error; } u16 hpi_parametricEQ__set_band(const struct hpi_hsubsys *ph_subsys, u32 h_control, u16 index, u16 type, u32 frequency_hz, short q100, short gain0_01dB) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL, HPI_CONTROL_SET_STATE); u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index); hm.u.c.param1 = frequency_hz; hm.u.c.param2 = (index & 0xFFFFL) + ((u32)type << 16); hm.u.c.an_log_value[0] = gain0_01dB; hm.u.c.an_log_value[1] = q100; hm.u.c.attribute = HPI_EQUALIZER_FILTER; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_parametricEQ__get_coeffs(const struct hpi_hsubsys *ph_subsys, u32 h_control, u16 index, short coeffs[5] ) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL, HPI_CONTROL_GET_STATE); u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index); hm.u.c.attribute = HPI_EQUALIZER_COEFFICIENTS; hm.u.c.param2 = index; hpi_send_recv(&hm, &hr); coeffs[0] = (short)hr.u.c.an_log_value[0]; coeffs[1] = (short)hr.u.c.an_log_value[1]; coeffs[2] = (short)hr.u.c.param1; coeffs[3] = (short)(hr.u.c.param1 >> 16); coeffs[4] = (short)hr.u.c.param2; return hr.error; } u16 hpi_sample_clock_query_source(const struct hpi_hsubsys *ph_subsys, const u32 h_clock, const u32 index, u16 *pw_source) { u32 qr; u16 err; err = hpi_control_query(ph_subsys, h_clock, HPI_SAMPLECLOCK_SOURCE, index, 0, &qr); *pw_source = (u16)qr; return err; } u16 hpi_sample_clock_set_source(const struct hpi_hsubsys *ph_subsys, u32 h_control, u16 source) { return hpi_control_param_set(ph_subsys, h_control, HPI_SAMPLECLOCK_SOURCE, source, 0); } u16 hpi_sample_clock_get_source(const struct hpi_hsubsys *ph_subsys, u32 h_control, u16 *pw_source) { u16 error = 0; u32 source = 0; error = hpi_control_param1_get(ph_subsys, h_control, HPI_SAMPLECLOCK_SOURCE, &source); if (!error) if (pw_source) *pw_source = (u16)source; return error; } u16 hpi_sample_clock_query_source_index(const struct hpi_hsubsys *ph_subsys, const u32 h_clock, const u32 index, const u32 source, u16 *pw_source_index) { u32 qr; u16 err; err = hpi_control_query(ph_subsys, h_clock, HPI_SAMPLECLOCK_SOURCE_INDEX, index, source, &qr); *pw_source_index = (u16)qr; return err; } u16 hpi_sample_clock_set_source_index(const struct hpi_hsubsys *ph_subsys, u32 h_control, u16 source_index) { return hpi_control_param_set(ph_subsys, h_control, HPI_SAMPLECLOCK_SOURCE_INDEX, source_index, 0); } u16 hpi_sample_clock_get_source_index(const struct hpi_hsubsys *ph_subsys, u32 h_control, u16 *pw_source_index) { u16 error = 0; u32 source_index = 0; error = hpi_control_param1_get(ph_subsys, h_control, HPI_SAMPLECLOCK_SOURCE_INDEX, &source_index); if (!error) if (pw_source_index) *pw_source_index = (u16)source_index; return error; } u16 hpi_sample_clock_query_local_rate(const struct hpi_hsubsys *ph_subsys, const u32 h_clock, const u32 index, u32 *prate) { u16 err; err = 
hpi_control_query(ph_subsys, h_clock, HPI_SAMPLECLOCK_LOCAL_SAMPLERATE, index, 0, prate); return err; } u16 hpi_sample_clock_set_local_rate(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 sample_rate) { return hpi_control_param_set(ph_subsys, h_control, HPI_SAMPLECLOCK_LOCAL_SAMPLERATE, sample_rate, 0); } u16 hpi_sample_clock_get_local_rate(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 *psample_rate) { u16 error = 0; u32 sample_rate = 0; error = hpi_control_param1_get(ph_subsys, h_control, HPI_SAMPLECLOCK_LOCAL_SAMPLERATE, &sample_rate); if (!error) if (psample_rate) *psample_rate = sample_rate; return error; } u16 hpi_sample_clock_get_sample_rate(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 *psample_rate) { u16 error = 0; u32 sample_rate = 0; error = hpi_control_param1_get(ph_subsys, h_control, HPI_SAMPLECLOCK_SAMPLERATE, &sample_rate); if (!error) if (psample_rate) *psample_rate = sample_rate; return error; } u16 hpi_sample_clock_set_auto(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 enable) { return hpi_control_param_set(ph_subsys, h_control, HPI_SAMPLECLOCK_AUTO, enable, 0); } u16 hpi_sample_clock_get_auto(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 *penable) { return hpi_control_param1_get(ph_subsys, h_control, HPI_SAMPLECLOCK_AUTO, penable); } u16 hpi_sample_clock_set_local_rate_lock(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 lock) { return hpi_control_param_set(ph_subsys, h_control, HPI_SAMPLECLOCK_LOCAL_LOCK, lock, 0); } u16 hpi_sample_clock_get_local_rate_lock(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 *plock) { return hpi_control_param1_get(ph_subsys, h_control, HPI_SAMPLECLOCK_LOCAL_LOCK, plock); } u16 hpi_tone_detector_get_frequency(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 index, u32 *frequency) { return hpi_control_param_get(ph_subsys, h_control, HPI_TONEDETECTOR_FREQUENCY, index, 0, frequency, NULL); } u16 hpi_tone_detector_get_state(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 *state) { return hpi_control_param1_get(ph_subsys, h_control, HPI_TONEDETECTOR_STATE, state); } u16 hpi_tone_detector_set_enable(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 enable) { return hpi_control_param_set(ph_subsys, h_control, HPI_GENERIC_ENABLE, (u32)enable, 0); } u16 hpi_tone_detector_get_enable(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 *enable) { return hpi_control_param1_get(ph_subsys, h_control, HPI_GENERIC_ENABLE, enable); } u16 hpi_tone_detector_set_event_enable(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 event_enable) { return hpi_control_param_set(ph_subsys, h_control, HPI_GENERIC_EVENT_ENABLE, (u32)event_enable, 0); } u16 hpi_tone_detector_get_event_enable(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 *event_enable) { return hpi_control_param1_get(ph_subsys, h_control, HPI_GENERIC_EVENT_ENABLE, event_enable); } u16 hpi_tone_detector_set_threshold(const struct hpi_hsubsys *ph_subsys, u32 h_control, int threshold) { return hpi_control_param_set(ph_subsys, h_control, HPI_TONEDETECTOR_THRESHOLD, (u32)threshold, 0); } u16 hpi_tone_detector_get_threshold(const struct hpi_hsubsys *ph_subsys, u32 h_control, int *threshold) { return hpi_control_param1_get(ph_subsys, h_control, HPI_TONEDETECTOR_THRESHOLD, (u32 *)threshold); } u16 hpi_silence_detector_get_state(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 *state) { return hpi_control_param1_get(ph_subsys, h_control, HPI_SILENCEDETECTOR_STATE, state); } u16 
hpi_silence_detector_set_enable(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 enable) { return hpi_control_param_set(ph_subsys, h_control, HPI_GENERIC_ENABLE, (u32)enable, 0); } u16 hpi_silence_detector_get_enable(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 *enable) { return hpi_control_param1_get(ph_subsys, h_control, HPI_GENERIC_ENABLE, enable); } u16 hpi_silence_detector_set_event_enable(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 event_enable) { return hpi_control_param_set(ph_subsys, h_control, HPI_GENERIC_EVENT_ENABLE, event_enable, 0); } u16 hpi_silence_detector_get_event_enable(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 *event_enable) { return hpi_control_param1_get(ph_subsys, h_control, HPI_GENERIC_EVENT_ENABLE, event_enable); } u16 hpi_silence_detector_set_delay(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 delay) { return hpi_control_param_set(ph_subsys, h_control, HPI_SILENCEDETECTOR_DELAY, delay, 0); } u16 hpi_silence_detector_get_delay(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 *delay) { return hpi_control_param1_get(ph_subsys, h_control, HPI_SILENCEDETECTOR_DELAY, delay); } u16 hpi_silence_detector_set_threshold(const struct hpi_hsubsys *ph_subsys, u32 h_control, int threshold) { return hpi_control_param_set(ph_subsys, h_control, HPI_SILENCEDETECTOR_THRESHOLD, threshold, 0); } u16 hpi_silence_detector_get_threshold(const struct hpi_hsubsys *ph_subsys, u32 h_control, int *threshold) { return hpi_control_param1_get(ph_subsys, h_control, HPI_SILENCEDETECTOR_THRESHOLD, (u32 *)threshold); } u16 hpi_tuner_query_band(const struct hpi_hsubsys *ph_subsys, const u32 h_tuner, const u32 index, u16 *pw_band) { u32 qr; u16 err; err = hpi_control_query(ph_subsys, h_tuner, HPI_TUNER_BAND, index, 0, &qr); *pw_band = (u16)qr; return err; } u16 hpi_tuner_set_band(const struct hpi_hsubsys *ph_subsys, u32 h_control, u16 band) { return hpi_control_param_set(ph_subsys, h_control, HPI_TUNER_BAND, band, 0); } u16 hpi_tuner_get_band(const struct hpi_hsubsys *ph_subsys, u32 h_control, u16 *pw_band) { u32 band = 0; u16 error = 0; error = hpi_control_param1_get(ph_subsys, h_control, HPI_TUNER_BAND, &band); if (pw_band) *pw_band = (u16)band; return error; } u16 hpi_tuner_query_frequency(const struct hpi_hsubsys *ph_subsys, const u32 h_tuner, const u32 index, const u16 band, u32 *pfreq) { return hpi_control_query(ph_subsys, h_tuner, HPI_TUNER_FREQ, index, band, pfreq); } u16 hpi_tuner_set_frequency(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 freq_ink_hz) { return hpi_control_param_set(ph_subsys, h_control, HPI_TUNER_FREQ, freq_ink_hz, 0); } u16 hpi_tuner_get_frequency(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 *pw_freq_ink_hz) { return hpi_control_param1_get(ph_subsys, h_control, HPI_TUNER_FREQ, pw_freq_ink_hz); } u16 hpi_tuner_query_gain(const struct hpi_hsubsys *ph_subsys, const u32 h_tuner, const u32 index, u16 *pw_gain) { u32 qr; u16 err; err = hpi_control_query(ph_subsys, h_tuner, HPI_TUNER_BAND, index, 0, &qr); *pw_gain = (u16)qr; return err; } u16 hpi_tuner_set_gain(const struct hpi_hsubsys *ph_subsys, u32 h_control, short gain) { return hpi_control_param_set(ph_subsys, h_control, HPI_TUNER_GAIN, gain, 0); } u16 hpi_tuner_get_gain(const struct hpi_hsubsys *ph_subsys, u32 h_control, short *pn_gain) { u32 gain = 0; u16 error = 0; error = hpi_control_param1_get(ph_subsys, h_control, HPI_TUNER_GAIN, &gain); if (pn_gain) *pn_gain = (u16)gain; return error; } u16 hpi_tuner_getRF_level(const struct hpi_hsubsys 
*ph_subsys, u32 h_control, short *pw_level) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL, HPI_CONTROL_GET_STATE); u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index); hm.u.c.attribute = HPI_TUNER_LEVEL; hm.u.c.param1 = HPI_TUNER_LEVEL_AVERAGE; hpi_send_recv(&hm, &hr); if (pw_level) *pw_level = (short)hr.u.c.param1; return hr.error; } u16 hpi_tuner_get_rawRF_level(const struct hpi_hsubsys *ph_subsys, u32 h_control, short *pw_level) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL, HPI_CONTROL_GET_STATE); u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index); hm.u.c.attribute = HPI_TUNER_LEVEL; hm.u.c.param1 = HPI_TUNER_LEVEL_RAW; hpi_send_recv(&hm, &hr); if (pw_level) *pw_level = (short)hr.u.c.param1; return hr.error; } u16 hpi_tuner_query_deemphasis(const struct hpi_hsubsys *ph_subsys, const u32 h_tuner, const u32 index, const u16 band, u32 *pdeemphasis) { return hpi_control_query(ph_subsys, h_tuner, HPI_TUNER_DEEMPHASIS, index, band, pdeemphasis); } u16 hpi_tuner_set_deemphasis(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 deemphasis) { return hpi_control_param_set(ph_subsys, h_control, HPI_TUNER_DEEMPHASIS, deemphasis, 0); } u16 hpi_tuner_get_deemphasis(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 *pdeemphasis) { return hpi_control_param1_get(ph_subsys, h_control, HPI_TUNER_DEEMPHASIS, pdeemphasis); } u16 hpi_tuner_query_program(const struct hpi_hsubsys *ph_subsys, const u32 h_tuner, u32 *pbitmap_program) { return hpi_control_query(ph_subsys, h_tuner, HPI_TUNER_PROGRAM, 0, 0, pbitmap_program); } u16 hpi_tuner_set_program(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 program) { return hpi_control_param_set(ph_subsys, h_control, HPI_TUNER_PROGRAM, program, 0); } u16 hpi_tuner_get_program(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 *pprogram) { return hpi_control_param1_get(ph_subsys, h_control, HPI_TUNER_PROGRAM, pprogram); } u16 hpi_tuner_get_hd_radio_dsp_version(const struct hpi_hsubsys *ph_subsys, u32 h_control, char *psz_dsp_version, const u32 string_size) { return hpi_control_get_string(h_control, HPI_TUNER_HDRADIO_DSP_VERSION, psz_dsp_version, string_size); } u16 hpi_tuner_get_hd_radio_sdk_version(const struct hpi_hsubsys *ph_subsys, u32 h_control, char *psz_sdk_version, const u32 string_size) { return hpi_control_get_string(h_control, HPI_TUNER_HDRADIO_SDK_VERSION, psz_sdk_version, string_size); } u16 hpi_tuner_get_status(const struct hpi_hsubsys *ph_subsys, u32 h_control, u16 *pw_status_mask, u16 *pw_status) { u32 status = 0; u16 error = 0; error = hpi_control_param1_get(ph_subsys, h_control, HPI_TUNER_STATUS, &status); if (pw_status) { if (!error) { *pw_status_mask = (u16)(status >> 16); *pw_status = (u16)(status & 0xFFFF); } else { *pw_status_mask = 0; *pw_status = 0; } } return error; } u16 hpi_tuner_set_mode(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 mode, u32 value) { return hpi_control_param_set(ph_subsys, h_control, HPI_TUNER_MODE, mode, value); } u16 hpi_tuner_get_mode(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 mode, u32 *pn_value) { return hpi_control_param_get(ph_subsys, h_control, HPI_TUNER_MODE, mode, 0, pn_value, NULL); } u16 hpi_tuner_get_hd_radio_signal_quality(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 *pquality) { return hpi_control_param1_get(ph_subsys, h_control, HPI_TUNER_HDRADIO_SIGNAL_QUALITY, pquality); } u16 hpi_tuner_get_hd_radio_signal_blend(const struct 
hpi_hsubsys *ph_subsys, u32 h_control, u32 *pblend) { return hpi_control_param1_get(ph_subsys, h_control, HPI_TUNER_HDRADIO_BLEND, pblend); } u16 hpi_tuner_set_hd_radio_signal_blend(const struct hpi_hsubsys *ph_subsys, u32 h_control, const u32 blend) { return hpi_control_param_set(ph_subsys, h_control, HPI_TUNER_HDRADIO_BLEND, blend, 0); } u16 hpi_tuner_getRDS(const struct hpi_hsubsys *ph_subsys, u32 h_control, char *p_data) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL, HPI_CONTROL_GET_STATE); u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index); hm.u.c.attribute = HPI_TUNER_RDS; hpi_send_recv(&hm, &hr); if (p_data) { *(u32 *)&p_data[0] = hr.u.cu.tuner.rds.data[0]; *(u32 *)&p_data[4] = hr.u.cu.tuner.rds.data[1]; *(u32 *)&p_data[8] = hr.u.cu.tuner.rds.bLER; } return hr.error; } u16 HPI_PAD__get_channel_name(const struct hpi_hsubsys *ph_subsys, u32 h_control, char *psz_string, const u32 data_length) { return hpi_control_get_string(h_control, HPI_PAD_CHANNEL_NAME, psz_string, data_length); } u16 HPI_PAD__get_artist(const struct hpi_hsubsys *ph_subsys, u32 h_control, char *psz_string, const u32 data_length) { return hpi_control_get_string(h_control, HPI_PAD_ARTIST, psz_string, data_length); } u16 HPI_PAD__get_title(const struct hpi_hsubsys *ph_subsys, u32 h_control, char *psz_string, const u32 data_length) { return hpi_control_get_string(h_control, HPI_PAD_TITLE, psz_string, data_length); } u16 HPI_PAD__get_comment(const struct hpi_hsubsys *ph_subsys, u32 h_control, char *psz_string, const u32 data_length) { return hpi_control_get_string(h_control, HPI_PAD_COMMENT, psz_string, data_length); } u16 HPI_PAD__get_program_type(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 *ppTY) { return hpi_control_param1_get(ph_subsys, h_control, HPI_PAD_PROGRAM_TYPE, ppTY); } u16 HPI_PAD__get_rdsPI(const struct hpi_hsubsys *ph_subsys, u32 h_control, u32 *ppI) { return hpi_control_param1_get(ph_subsys, h_control, HPI_PAD_PROGRAM_ID, ppI); } u16 hpi_volume_query_channels(const struct hpi_hsubsys *ph_subsys, const u32 h_volume, u32 *p_channels) { return hpi_control_query(ph_subsys, h_volume, HPI_VOLUME_NUM_CHANNELS, 0, 0, p_channels); } u16 hpi_volume_set_gain(const struct hpi_hsubsys *ph_subsys, u32 h_control, short an_log_gain[HPI_MAX_CHANNELS] ) { return hpi_control_log_set2(h_control, HPI_VOLUME_GAIN, an_log_gain[0], an_log_gain[1]); } u16 hpi_volume_get_gain(const struct hpi_hsubsys *ph_subsys, u32 h_control, short an_log_gain[HPI_MAX_CHANNELS] ) { return hpi_control_log_get2(ph_subsys, h_control, HPI_VOLUME_GAIN, &an_log_gain[0], &an_log_gain[1]); } u16 hpi_volume_query_range(const struct hpi_hsubsys *ph_subsys, u32 h_control, short *min_gain_01dB, short *max_gain_01dB, short *step_gain_01dB) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL, HPI_CONTROL_GET_STATE); u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index); hm.u.c.attribute = HPI_VOLUME_RANGE; hpi_send_recv(&hm, &hr); if (hr.error) { hr.u.c.an_log_value[0] = 0; hr.u.c.an_log_value[1] = 0; hr.u.c.param1 = 0; } if (min_gain_01dB) *min_gain_01dB = hr.u.c.an_log_value[0]; if (max_gain_01dB) *max_gain_01dB = hr.u.c.an_log_value[1]; if (step_gain_01dB) *step_gain_01dB = (short)hr.u.c.param1; return hr.error; } u16 hpi_volume_auto_fade_profile(const struct hpi_hsubsys *ph_subsys, u32 h_control, short an_stop_gain0_01dB[HPI_MAX_CHANNELS], u32 duration_ms, u16 profile) { struct hpi_message hm; struct hpi_response hr; 
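	/*
	 * Auto-fade: the per-channel target gains travel in an_log_value[],
	 * the fade duration (in ms) in param1 and the fade profile selector
	 * in param2 of a single SET_STATE message, as built below.
	 */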
	hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
		HPI_CONTROL_SET_STATE);
	u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
	memcpy(hm.u.c.an_log_value, an_stop_gain0_01dB,
		sizeof(short) * HPI_MAX_CHANNELS);
	hm.u.c.attribute = HPI_VOLUME_AUTOFADE;
	hm.u.c.param1 = duration_ms;
	hm.u.c.param2 = profile;

	hpi_send_recv(&hm, &hr);

	return hr.error;
}

u16 hpi_volume_auto_fade(const struct hpi_hsubsys *ph_subsys, u32 h_control,
	short an_stop_gain0_01dB[HPI_MAX_CHANNELS], u32 duration_ms)
{
	return hpi_volume_auto_fade_profile(ph_subsys, h_control,
		an_stop_gain0_01dB, duration_ms, HPI_VOLUME_AUTOFADE_LOG);
}

u16 hpi_vox_set_threshold(const struct hpi_hsubsys *ph_subsys, u32 h_control,
	short an_gain0_01dB)
{
	struct hpi_message hm;
	struct hpi_response hr;
	hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
		HPI_CONTROL_SET_STATE);
	u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
	hm.u.c.attribute = HPI_VOX_THRESHOLD;
	hm.u.c.an_log_value[0] = an_gain0_01dB;

	hpi_send_recv(&hm, &hr);

	return hr.error;
}

u16 hpi_vox_get_threshold(const struct hpi_hsubsys *ph_subsys, u32 h_control,
	short *an_gain0_01dB)
{
	struct hpi_message hm;
	struct hpi_response hr;
	hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
		HPI_CONTROL_GET_STATE);
	u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
	hm.u.c.attribute = HPI_VOX_THRESHOLD;

	hpi_send_recv(&hm, &hr);

	*an_gain0_01dB = hr.u.c.an_log_value[0];

	return hr.error;
}

static size_t strv_packet_size = MIN_STRV_PACKET_SIZE;

/* Size in bytes of a single item of each entity type, indexed by the
 * entity's header.type field.
 */
static size_t entity_type_to_size[LAST_ENTITY_TYPE] = {
	0,
	sizeof(struct hpi_entity),
	sizeof(void *),
	sizeof(int),
	sizeof(float),
	sizeof(double),
	sizeof(char),
	sizeof(char),
	4 * sizeof(char),
	16 * sizeof(char),
	6 * sizeof(char),
};

static inline size_t hpi_entity_size(struct hpi_entity *entity_ptr)
{
	return entity_ptr->header.size;
}

static inline size_t hpi_entity_header_size(struct hpi_entity *entity_ptr)
{
	return sizeof(entity_ptr->header);
}

static inline size_t hpi_entity_value_size(struct hpi_entity *entity_ptr)
{
	return hpi_entity_size(entity_ptr) -
		hpi_entity_header_size(entity_ptr);
}

static inline size_t hpi_entity_item_count(struct hpi_entity *entity_ptr)
{
	return hpi_entity_value_size(entity_ptr) /
		entity_type_to_size[entity_ptr->header.type];
}

static inline struct hpi_entity *hpi_entity_ptr_to_next(
	struct hpi_entity *entity_ptr)
{
	return (void *)(((u8 *)entity_ptr) + hpi_entity_size(entity_ptr));
}

static inline u16 hpi_entity_check_type(const enum e_entity_type t)
{
	if (t >= 0 && t < STR_TYPE_FIELD_MAX)
		return 0;
	return HPI_ERROR_ENTITY_TYPE_INVALID;
}

static inline u16 hpi_entity_check_role(const enum e_entity_role r)
{
	if (r >= 0 && r < STR_ROLE_FIELD_MAX)
		return 0;
	return HPI_ERROR_ENTITY_ROLE_INVALID;
}

/* Step to the entity following 'entity', never walking past guard_p
 * (the first byte after the containing entity).  With recursive_flag set,
 * a sequence entity is entered instead of skipped over.
 */
static u16 hpi_entity_get_next(struct hpi_entity *entity, int recursive_flag,
	void *guard_p, struct hpi_entity **next)
{
	HPI_DEBUG_ASSERT(entity != NULL);
	HPI_DEBUG_ASSERT(next != NULL);
	HPI_DEBUG_ASSERT(hpi_entity_size(entity) != 0);

	if (guard_p <= (void *)entity) {
		*next = NULL;
		return 0;
	}

	if (recursive_flag && entity->header.type == entity_type_sequence)
		*next = (struct hpi_entity *)entity->value;
	else
		*next = (struct hpi_entity *)hpi_entity_ptr_to_next(entity);

	if (guard_p <= (void *)*next) {
		*next = NULL;
		return 0;
	}

	HPI_DEBUG_ASSERT(guard_p >= (void *)hpi_entity_ptr_to_next(*next));
	return 0;
}

u16 hpi_entity_find_next(struct hpi_entity *container_entity,
	enum e_entity_type type, enum e_entity_role role, int recursive_flag,
	struct hpi_entity **current_match)
{
	struct hpi_entity *tmp = NULL;
	void *guard_p = NULL;

	HPI_DEBUG_ASSERT(container_entity != NULL);

	guard_p = hpi_entity_ptr_to_next(container_entity);

	if (*current_match != NULL)
		hpi_entity_get_next(*current_match, recursive_flag, guard_p,
			&tmp);
	else
		hpi_entity_get_next(container_entity, 1, guard_p, &tmp);

	while (tmp) {
		u16 err;

		HPI_DEBUG_ASSERT((void *)tmp >= (void *)container_entity);

		if ((!type || tmp->header.type == type) && (!role
				|| tmp->header.role == role)) {
			*current_match = tmp;
			return 0;
		}

		err = hpi_entity_get_next(tmp, recursive_flag, guard_p,
			current_match);
		if (err)
			return err;

		tmp = *current_match;
	}
	*current_match = NULL;
	return 0;
}

void hpi_entity_free(struct hpi_entity *entity)
{
	kfree(entity);
}

static u16 hpi_entity_alloc_and_copy(struct hpi_entity *src,
	struct hpi_entity **dst)
{
	size_t buf_size;
	HPI_DEBUG_ASSERT(dst != NULL);
	HPI_DEBUG_ASSERT(src != NULL);

	buf_size = hpi_entity_size(src);
	*dst = kmalloc(buf_size, GFP_KERNEL);
	if (*dst == NULL)
		return HPI_ERROR_MEMORY_ALLOC;
	memcpy(*dst, src, buf_size);
	return 0;
}

u16 hpi_universal_info(const struct hpi_hsubsys *ph_subsys, u32 hC,
	struct hpi_entity **info)
{
	struct hpi_msg_strv hm;
	struct hpi_res_strv *phr;
	u16 hpi_err;
	int remaining_attempts = 2;
	size_t resp_packet_size = 1024;

	*info = NULL;

	while (remaining_attempts--) {
		phr = kmalloc(resp_packet_size, GFP_KERNEL);
		HPI_DEBUG_ASSERT(phr != NULL);

		hpi_init_message_responseV1(&hm.h, (u16)sizeof(hm), &phr->h,
			(u16)resp_packet_size, HPI_OBJ_CONTROL,
			HPI_CONTROL_GET_INFO);
		u32TOINDEXES(hC, &hm.h.adapter_index, &hm.h.obj_index);

		hm.strv.header.size = sizeof(hm.strv);
		phr->strv.header.size = resp_packet_size - sizeof(phr->h);

		hpi_send_recv((struct hpi_message *)&hm.h,
			(struct hpi_response *)&phr->h);
		if (phr->h.error == HPI_ERROR_RESPONSE_BUFFER_TOO_SMALL) {
			/* The adapter reports the size it needs in
			 * specific_error; retry once with that size.
			 */
			HPI_DEBUG_ASSERT(phr->h.specific_error >
				MIN_STRV_PACKET_SIZE
				&& phr->h.specific_error < 1500);
			resp_packet_size = phr->h.specific_error;
		} else {
			remaining_attempts = 0;
			if (!phr->h.error)
				hpi_entity_alloc_and_copy(&phr->strv, info);
		}

		hpi_err = phr->h.error;
		kfree(phr);
	}

	return hpi_err;
}

u16 hpi_universal_get(const struct hpi_hsubsys *ph_subsys, u32 hC,
	struct hpi_entity **value)
{
	struct hpi_msg_strv hm;
	struct hpi_res_strv *phr;
	u16 hpi_err;
	int remaining_attempts = 2;

	*value = NULL;

	while (remaining_attempts--) {
		phr = kmalloc(strv_packet_size, GFP_KERNEL);
		if (!phr)
			return HPI_ERROR_MEMORY_ALLOC;

		hpi_init_message_responseV1(&hm.h, (u16)sizeof(hm), &phr->h,
			(u16)strv_packet_size, HPI_OBJ_CONTROL,
			HPI_CONTROL_GET_STATE);
		u32TOINDEXES(hC, &hm.h.adapter_index, &hm.h.obj_index);

		hm.strv.header.size = sizeof(hm.strv);
		phr->strv.header.size = strv_packet_size - sizeof(phr->h);

		hpi_send_recv((struct hpi_message *)&hm.h,
			(struct hpi_response *)&phr->h);
		if (phr->h.error == HPI_ERROR_RESPONSE_BUFFER_TOO_SMALL) {
			/* Remember the required size in the file-wide
			 * strv_packet_size so later calls start big enough.
			 */
			HPI_DEBUG_ASSERT(phr->h.specific_error >
				MIN_STRV_PACKET_SIZE
				&& phr->h.specific_error < 1000);
			strv_packet_size = phr->h.specific_error;
		} else {
			remaining_attempts = 0;
			if (!phr->h.error)
				hpi_entity_alloc_and_copy(&phr->strv, value);
		}

		hpi_err = phr->h.error;
		kfree(phr);
	}

	return hpi_err;
}

u16 hpi_universal_set(const struct hpi_hsubsys *ph_subsys, u32 hC,
	struct hpi_entity *value)
{
	struct hpi_msg_strv *phm;
	struct hpi_res_strv hr;

	phm = kmalloc(sizeof(phm->h) + value->header.size, GFP_KERNEL);
	HPI_DEBUG_ASSERT(phm != NULL);

	hpi_init_message_responseV1(&phm->h,
		sizeof(phm->h) + value->header.size, &hr.h, sizeof(hr),
		HPI_OBJ_CONTROL, HPI_CONTROL_SET_STATE);
	u32TOINDEXES(hC, &phm->h.adapter_index, &phm->h.obj_index);

	hr.strv.header.size = sizeof(hr.strv);
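	/*
	 * Copy the caller's packed entity into the variable-length message
	 * body allocated above (message header plus value->header.size
	 * bytes) before handing it to the adapter.
	 */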
memcpy(&phm->strv, value, value->header.size); hpi_send_recv((struct hpi_message *)&phm->h, (struct hpi_response *)&hr.h); return hr.h.error; } u16 hpi_entity_alloc_and_pack(const enum e_entity_type type, const size_t item_count, const enum e_entity_role role, void *value, struct hpi_entity **entity) { size_t bytes_to_copy, total_size; u16 hE = 0; *entity = NULL; hE = hpi_entity_check_type(type); if (hE) return hE; HPI_DEBUG_ASSERT(role > entity_role_null && type < LAST_ENTITY_TYPE); bytes_to_copy = entity_type_to_size[type] * item_count; total_size = hpi_entity_header_size(*entity) + bytes_to_copy; HPI_DEBUG_ASSERT(total_size >= hpi_entity_header_size(*entity) && total_size < STR_SIZE_FIELD_MAX); *entity = kmalloc(total_size, GFP_KERNEL); if (*entity == NULL) return HPI_ERROR_MEMORY_ALLOC; memcpy((*entity)->value, value, bytes_to_copy); (*entity)->header.size = hpi_entity_header_size(*entity) + bytes_to_copy; (*entity)->header.type = type; (*entity)->header.role = role; return 0; } u16 hpi_entity_copy_value_from(struct hpi_entity *entity, enum e_entity_type type, size_t item_count, void *value_dst_p) { size_t bytes_to_copy; if (entity->header.type != type) return HPI_ERROR_ENTITY_TYPE_MISMATCH; if (hpi_entity_item_count(entity) != item_count) return HPI_ERROR_ENTITY_ITEM_COUNT; bytes_to_copy = entity_type_to_size[type] * item_count; memcpy(value_dst_p, entity->value, bytes_to_copy); return 0; } u16 hpi_entity_unpack(struct hpi_entity *entity, enum e_entity_type *type, size_t *item_count, enum e_entity_role *role, void **value) { u16 err = 0; HPI_DEBUG_ASSERT(entity != NULL); if (type) *type = entity->header.type; if (role) *role = entity->header.role; if (value) *value = entity->value; if (item_count != NULL) { if (entity->header.type == entity_type_sequence) { void *guard_p = hpi_entity_ptr_to_next(entity); struct hpi_entity *next = NULL; void *contents = entity->value; *item_count = 0; while (contents < guard_p) { (*item_count)++; err = hpi_entity_get_next(contents, 0, guard_p, &next); if (next == NULL || err) break; contents = next; } } else { *item_count = hpi_entity_item_count(entity); } } return err; } u16 hpi_gpio_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index, u32 *ph_gpio, u16 *pw_number_input_bits, u16 *pw_number_output_bits) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_GPIO, HPI_GPIO_OPEN); hm.adapter_index = adapter_index; hpi_send_recv(&hm, &hr); if (hr.error == 0) { *ph_gpio = hpi_indexes_to_handle(HPI_OBJ_GPIO, adapter_index, 0); if (pw_number_input_bits) *pw_number_input_bits = hr.u.l.number_input_bits; if (pw_number_output_bits) *pw_number_output_bits = hr.u.l.number_output_bits; } else *ph_gpio = 0; return hr.error; } u16 hpi_gpio_read_bit(const struct hpi_hsubsys *ph_subsys, u32 h_gpio, u16 bit_index, u16 *pw_bit_data) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_GPIO, HPI_GPIO_READ_BIT); u32TOINDEX(h_gpio, &hm.adapter_index); hm.u.l.bit_index = bit_index; hpi_send_recv(&hm, &hr); *pw_bit_data = hr.u.l.bit_data[0]; return hr.error; } u16 hpi_gpio_read_all_bits(const struct hpi_hsubsys *ph_subsys, u32 h_gpio, u16 aw_all_bit_data[4] ) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_GPIO, HPI_GPIO_READ_ALL); u32TOINDEX(h_gpio, &hm.adapter_index); hpi_send_recv(&hm, &hr); if (aw_all_bit_data) { aw_all_bit_data[0] = hr.u.l.bit_data[0]; aw_all_bit_data[1] = hr.u.l.bit_data[1]; aw_all_bit_data[2] = hr.u.l.bit_data[2]; 
aw_all_bit_data[3] = hr.u.l.bit_data[3]; } return hr.error; } u16 hpi_gpio_write_bit(const struct hpi_hsubsys *ph_subsys, u32 h_gpio, u16 bit_index, u16 bit_data) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_GPIO, HPI_GPIO_WRITE_BIT); u32TOINDEX(h_gpio, &hm.adapter_index); hm.u.l.bit_index = bit_index; hm.u.l.bit_data = bit_data; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_gpio_write_status(const struct hpi_hsubsys *ph_subsys, u32 h_gpio, u16 aw_all_bit_data[4] ) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_GPIO, HPI_GPIO_WRITE_STATUS); u32TOINDEX(h_gpio, &hm.adapter_index); hpi_send_recv(&hm, &hr); if (aw_all_bit_data) { aw_all_bit_data[0] = hr.u.l.bit_data[0]; aw_all_bit_data[1] = hr.u.l.bit_data[1]; aw_all_bit_data[2] = hr.u.l.bit_data[2]; aw_all_bit_data[3] = hr.u.l.bit_data[3]; } return hr.error; } u16 hpi_async_event_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index, u32 *ph_async) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ASYNCEVENT, HPI_ASYNCEVENT_OPEN); hm.adapter_index = adapter_index; hpi_send_recv(&hm, &hr); if (hr.error == 0) *ph_async = hpi_indexes_to_handle(HPI_OBJ_ASYNCEVENT, adapter_index, 0); else *ph_async = 0; return hr.error; } u16 hpi_async_event_close(const struct hpi_hsubsys *ph_subsys, u32 h_async) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ASYNCEVENT, HPI_ASYNCEVENT_OPEN); u32TOINDEX(h_async, &hm.adapter_index); hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_async_event_wait(const struct hpi_hsubsys *ph_subsys, u32 h_async, u16 maximum_events, struct hpi_async_event *p_events, u16 *pw_number_returned) { return 0; } u16 hpi_async_event_get_count(const struct hpi_hsubsys *ph_subsys, u32 h_async, u16 *pw_count) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ASYNCEVENT, HPI_ASYNCEVENT_GETCOUNT); u32TOINDEX(h_async, &hm.adapter_index); hpi_send_recv(&hm, &hr); if (hr.error == 0) if (pw_count) *pw_count = hr.u.as.u.count.count; return hr.error; } u16 hpi_async_event_get(const struct hpi_hsubsys *ph_subsys, u32 h_async, u16 maximum_events, struct hpi_async_event *p_events, u16 *pw_number_returned) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ASYNCEVENT, HPI_ASYNCEVENT_GET); u32TOINDEX(h_async, &hm.adapter_index); hpi_send_recv(&hm, &hr); if (!hr.error) { memcpy(p_events, &hr.u.as.u.event, sizeof(struct hpi_async_event)); *pw_number_returned = 1; } return hr.error; } u16 hpi_nv_memory_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index, u32 *ph_nv_memory, u16 *pw_size_in_bytes) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_NVMEMORY, HPI_NVMEMORY_OPEN); hm.adapter_index = adapter_index; hpi_send_recv(&hm, &hr); if (hr.error == 0) { *ph_nv_memory = hpi_indexes_to_handle(HPI_OBJ_NVMEMORY, adapter_index, 0); if (pw_size_in_bytes) *pw_size_in_bytes = hr.u.n.size_in_bytes; } else *ph_nv_memory = 0; return hr.error; } u16 hpi_nv_memory_read_byte(const struct hpi_hsubsys *ph_subsys, u32 h_nv_memory, u16 index, u16 *pw_data) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_NVMEMORY, HPI_NVMEMORY_READ_BYTE); u32TOINDEX(h_nv_memory, &hm.adapter_index); hm.u.n.address = index; hpi_send_recv(&hm, &hr); *pw_data = hr.u.n.data; return hr.error; } u16 hpi_nv_memory_write_byte(const 
struct hpi_hsubsys *ph_subsys, u32 h_nv_memory, u16 index, u16 data) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_NVMEMORY, HPI_NVMEMORY_WRITE_BYTE); u32TOINDEX(h_nv_memory, &hm.adapter_index); hm.u.n.address = index; hm.u.n.data = data; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_profile_open_all(const struct hpi_hsubsys *ph_subsys, u16 adapter_index, u16 profile_index, u32 *ph_profile, u16 *pw_max_profiles) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_PROFILE, HPI_PROFILE_OPEN_ALL); hm.adapter_index = adapter_index; hm.obj_index = profile_index; hpi_send_recv(&hm, &hr); *pw_max_profiles = hr.u.p.u.o.max_profiles; if (hr.error == 0) *ph_profile = hpi_indexes_to_handle(HPI_OBJ_PROFILE, adapter_index, profile_index); else *ph_profile = 0; return hr.error; } u16 hpi_profile_get(const struct hpi_hsubsys *ph_subsys, u32 h_profile, u16 bin_index, u16 *pw_seconds, u32 *pmicro_seconds, u32 *pcall_count, u32 *pmax_micro_seconds, u32 *pmin_micro_seconds) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_PROFILE, HPI_PROFILE_GET); u32TOINDEXES(h_profile, &hm.adapter_index, &hm.obj_index); hm.u.p.bin_index = bin_index; hpi_send_recv(&hm, &hr); if (pw_seconds) *pw_seconds = hr.u.p.u.t.seconds; if (pmicro_seconds) *pmicro_seconds = hr.u.p.u.t.micro_seconds; if (pcall_count) *pcall_count = hr.u.p.u.t.call_count; if (pmax_micro_seconds) *pmax_micro_seconds = hr.u.p.u.t.max_micro_seconds; if (pmin_micro_seconds) *pmin_micro_seconds = hr.u.p.u.t.min_micro_seconds; return hr.error; } u16 hpi_profile_get_utilization(const struct hpi_hsubsys *ph_subsys, u32 h_profile, u32 *putilization) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_PROFILE, HPI_PROFILE_GET_UTILIZATION); u32TOINDEXES(h_profile, &hm.adapter_index, &hm.obj_index); hpi_send_recv(&hm, &hr); if (hr.error) { if (putilization) *putilization = 0; } else { if (putilization) *putilization = hr.u.p.u.t.call_count; } return hr.error; } u16 hpi_profile_get_name(const struct hpi_hsubsys *ph_subsys, u32 h_profile, u16 bin_index, char *sz_name, u16 name_length) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_PROFILE, HPI_PROFILE_GET_NAME); u32TOINDEXES(h_profile, &hm.adapter_index, &hm.obj_index); hm.u.p.bin_index = bin_index; hpi_send_recv(&hm, &hr); if (hr.error) { if (sz_name) strcpy(sz_name, "??"); } else { if (sz_name) memcpy(sz_name, (char *)hr.u.p.u.n.sz_name, name_length); } return hr.error; } u16 hpi_profile_start_all(const struct hpi_hsubsys *ph_subsys, u32 h_profile) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_PROFILE, HPI_PROFILE_START_ALL); u32TOINDEXES(h_profile, &hm.adapter_index, &hm.obj_index); hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_profile_stop_all(const struct hpi_hsubsys *ph_subsys, u32 h_profile) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_PROFILE, HPI_PROFILE_STOP_ALL);