author		Mahantesh Kumbar <mkumbar@nvidia.com>	2017-05-09 06:19:43 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-06-09 14:13:54 -0400
commit		40ca7cc573430ca4e21fdec4a44394c09d615846 (patch)
tree		e4ee884dd8863d9928b34c7b0bf7468f2903c6b1 /drivers/gpu/nvgpu/gk20a
parent		821d1cab904d055264bc5d62b0c0d5187417ff13 (diff)
gpu: nvgpu: reorganize PMU IPC
- Moved PMU IPC related code to the
  drivers/gpu/nvgpu/common/pmu/pmu_ipc.c file.
- Below is the list of what was moved (a sketch of the
  resulting common/chip split follows the list):
  - seq
  - mutex
  - queue
  - cmd/msg post & process
  - event handling
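
After this change the chip-specific register accessors stay in gk20a/pmu_gk20a.c
(renamed with a gk20a_ prefix) and the relocated common code reaches them only
through the new gpu_ops hooks added in gk20a.h below. common/pmu/pmu_ipc.c
itself is outside this diff, so the following is only a minimal sketch of the
assumed common-side wrappers:

/*
 * Sketch only: the real bodies live in common/pmu/pmu_ipc.c, which is
 * not part of this diff. The wrappers are assumed to dispatch through
 * the per-chip hooks registered in gk20a_init_pmu_ops().
 */
int nvgpu_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token)
{
	struct gk20a *g = gk20a_from_pmu(pmu);

	return g->ops.pmu.pmu_mutex_acquire(pmu, id, token);
}

int nvgpu_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token)
{
	struct gk20a *g = gk20a_from_pmu(pmu);

	return g->ops.pmu.pmu_mutex_release(pmu, id, token);
}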
NVGPU-56
Change-Id: Ic380faa27de4e5574d5b22500125e86027fd4b5d
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: http://git-master/r/1478167
GVS: Gerrit_Virtual_Submit
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/fifo_gk20a.c	 20
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gk20a.h		  8
-rw-r--r--	drivers/gpu/nvgpu/gk20a/pmu_gk20a.c	994
-rw-r--r--	drivers/gpu/nvgpu/gk20a/pmu_gk20a.h	 27
4 files changed, 109 insertions, 940 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 4b0667c5..00b26cf4 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -2629,12 +2629,12 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
 	for (i = 0; i < g->fifo.max_runlists; i++)
 		nvgpu_mutex_acquire(&f->runlist_info[i].mutex);
 
-	mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
 	ret = __locked_fifo_preempt(g, hw_chid, false);
 
 	if (!mutex_ret)
-		pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
 	for (i = 0; i < g->fifo.max_runlists; i++)
 		nvgpu_mutex_release(&f->runlist_info[i].mutex);
@@ -2656,12 +2656,12 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 	for (i = 0; i < g->fifo.max_runlists; i++)
 		nvgpu_mutex_acquire(&f->runlist_info[i].mutex);
 
-	mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
 	ret = __locked_fifo_preempt(g, tsgid, true);
 
 	if (!mutex_ret)
-		pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
 	for (i = 0; i < g->fifo.max_runlists; i++)
 		nvgpu_mutex_release(&f->runlist_info[i].mutex);
@@ -2718,12 +2718,12 @@ void gk20a_fifo_set_runlist_state(struct gk20a *g, u32 runlists_mask,
 		}
 	}
 
-	mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
 	gk20a_fifo_sched_disable_rw(g, runlists_mask, runlist_state);
 
 	if (!mutex_ret)
-		pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
 	if (!is_runlist_info_mutex_locked) {
 		gk20a_dbg_info("release runlist_info mutex");
@@ -2792,7 +2792,7 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g,
 	    fifo_engine_status_engine_busy_v() && !wait_for_idle)
 		return -EBUSY;
 
-	mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
 	gk20a_fifo_set_runlist_state(g, fifo_sched_disable_runlist_m(
 		eng_info->runlist_id), RUNLIST_DISABLED,
@@ -2832,7 +2832,7 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g,
 
 clean_up:
 	if (!mutex_ret)
-		pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
 	if (err) {
 		gk20a_dbg_fn("failed");
@@ -3300,13 +3300,13 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 hw_chid,
 
 	nvgpu_mutex_acquire(&runlist->mutex);
 
-	mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
 	ret = gk20a_fifo_update_runlist_locked(g, runlist_id, hw_chid, add,
 			wait_for_finish);
 
 	if (!mutex_ret)
-		pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
 	nvgpu_mutex_release(&runlist->mutex);
 	return ret;
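
Every call site above follows the same idiom: a zero return from
nvgpu_pmu_mutex_acquire() means the PMU HW mutex was actually taken and must
be released with the same token, while a non-zero return means the critical
section proceeds under the SW runlist mutexes alone. A minimal sketch of the
pattern:

	u32 token;
	int mutex_ret;

	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);

	/* ... critical section touching the runlists ... */

	/* release only if the HW mutex was really acquired */
	if (!mutex_ret)
		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);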
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index b4884af1..269f0d68 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -737,6 +737,14 @@ struct gpu_ops {
 		u32 (*pmu_get_queue_head_size)(void);
 		u32 (*pmu_get_queue_tail_size)(void);
 		u32 (*pmu_get_queue_tail)(u32 i);
+		int (*pmu_queue_head)(struct nvgpu_pmu *pmu,
+			struct pmu_queue *queue, u32 *head, bool set);
+		int (*pmu_queue_tail)(struct nvgpu_pmu *pmu,
+			struct pmu_queue *queue, u32 *tail, bool set);
+		int (*pmu_mutex_acquire)(struct nvgpu_pmu *pmu,
+			u32 id, u32 *token);
+		int (*pmu_mutex_release)(struct nvgpu_pmu *pmu,
+			u32 id, u32 *token);
 		int (*init_wpr_region)(struct gk20a *g);
 		int (*load_lsfalcon_ucode)(struct gk20a *g, u32 falconidmask);
 		void (*write_dmatrfbase)(struct gk20a *g, u32 addr);
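
These hooks let the relocated queue code touch the pwr_pmu_* registers without
including gk20a headers. The bool argument selects between reading and writing
the pointer, matching the QUEUE_GET / QUEUE_SET call sites removed from
pmu_gk20a.c below; a small usage sketch:

	u32 head;

	/* read the current head pointer of a PMU queue */
	g->ops.pmu.pmu_queue_head(pmu, queue, &head, QUEUE_GET);

	/* rewind: point the head back at the start of the queue */
	head = queue->offset;
	g->ops.pmu.pmu_queue_head(pmu, queue, &head, QUEUE_SET);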
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 4e416f67..deab46c8 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -2598,171 +2598,7 @@ int pmu_bootstrap(struct nvgpu_pmu *pmu)
 	return 0;
 }
 
-void pmu_seq_init(struct nvgpu_pmu *pmu)
-{
-	u32 i;
-
-	memset(pmu->seq, 0,
-		sizeof(struct pmu_sequence) * PMU_MAX_NUM_SEQUENCES);
-	memset(pmu->pmu_seq_tbl, 0,
-		sizeof(pmu->pmu_seq_tbl));
-
-	for (i = 0; i < PMU_MAX_NUM_SEQUENCES; i++)
-		pmu->seq[i].id = i;
-}
-
-static int pmu_seq_acquire(struct nvgpu_pmu *pmu,
-			struct pmu_sequence **pseq)
-{
-	struct gk20a *g = gk20a_from_pmu(pmu);
-	struct pmu_sequence *seq;
-	u32 index;
-
-	nvgpu_mutex_acquire(&pmu->pmu_seq_lock);
-	index = find_first_zero_bit(pmu->pmu_seq_tbl,
-				sizeof(pmu->pmu_seq_tbl));
-	if (index >= sizeof(pmu->pmu_seq_tbl)) {
-		nvgpu_err(g, "no free sequence available");
-		nvgpu_mutex_release(&pmu->pmu_seq_lock);
-		return -EAGAIN;
-	}
-	set_bit(index, pmu->pmu_seq_tbl);
-	nvgpu_mutex_release(&pmu->pmu_seq_lock);
-
-	seq = &pmu->seq[index];
-	seq->state = PMU_SEQ_STATE_PENDING;
-
-	*pseq = seq;
-	return 0;
-}
-
-static void pmu_seq_release(struct nvgpu_pmu *pmu,
-			struct pmu_sequence *seq)
-{
-	struct gk20a *g = gk20a_from_pmu(pmu);
-	seq->state = PMU_SEQ_STATE_FREE;
-	seq->desc = PMU_INVALID_SEQ_DESC;
-	seq->callback = NULL;
-	seq->cb_params = NULL;
-	seq->msg = NULL;
-	seq->out_payload = NULL;
-	g->ops.pmu_ver.pmu_allocation_set_dmem_size(pmu,
-		g->ops.pmu_ver.get_pmu_seq_in_a_ptr(seq), 0);
-	g->ops.pmu_ver.pmu_allocation_set_dmem_size(pmu,
-		g->ops.pmu_ver.get_pmu_seq_out_a_ptr(seq), 0);
-
-	clear_bit(seq->id, pmu->pmu_seq_tbl);
-}
-
-static int pmu_queue_init(struct nvgpu_pmu *pmu,
-		u32 id, union pmu_init_msg_pmu *init)
-{
-	struct gk20a *g = gk20a_from_pmu(pmu);
-	struct pmu_queue *queue = &pmu->queue[id];
-	int err;
-
-	err = nvgpu_mutex_init(&queue->mutex);
-	if (err)
-		return err;
-
-	queue->id = id;
-	g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params(queue, id, init);
-	queue->mutex_id = id;
-
-	gk20a_dbg_pmu("queue %d: index %d, offset 0x%08x, size 0x%08x",
-		id, queue->index, queue->offset, queue->size);
-
-	return 0;
-}
-
-static int pmu_queue_head(struct nvgpu_pmu *pmu, struct pmu_queue *queue,
-			u32 *head, bool set)
-{
-	struct gk20a *g = gk20a_from_pmu(pmu);
-	u32 queue_head_size = 0;
-
-	if (g->ops.pmu.pmu_get_queue_head_size)
-		queue_head_size = g->ops.pmu.pmu_get_queue_head_size();
-
-	BUG_ON(!head || !queue_head_size);
-
-	if (PMU_IS_COMMAND_QUEUE(queue->id)) {
-
-		if (queue->index >= queue_head_size)
-			return -EINVAL;
-
-		if (!set)
-			*head = pwr_pmu_queue_head_address_v(
-				gk20a_readl(g,
-					g->ops.pmu.pmu_get_queue_head(queue->index)));
-		else
-			gk20a_writel(g,
-				g->ops.pmu.pmu_get_queue_head(queue->index),
-				pwr_pmu_queue_head_address_f(*head));
-	} else {
-		if (!set)
-			*head = pwr_pmu_msgq_head_val_v(
-				gk20a_readl(g, pwr_pmu_msgq_head_r()));
-		else
-			gk20a_writel(g,
-				pwr_pmu_msgq_head_r(),
-				pwr_pmu_msgq_head_val_f(*head));
-	}
-
-	return 0;
-}
-
-static int pmu_queue_tail(struct nvgpu_pmu *pmu, struct pmu_queue *queue,
-			u32 *tail, bool set)
-{
-	struct gk20a *g = gk20a_from_pmu(pmu);
-	u32 queue_tail_size = 0;
-
-	if (g->ops.pmu.pmu_get_queue_tail_size)
-		queue_tail_size = g->ops.pmu.pmu_get_queue_tail_size();
-
-	BUG_ON(!tail || !queue_tail_size);
-
-	if (PMU_IS_COMMAND_QUEUE(queue->id)) {
-
-		if (queue->index >= queue_tail_size)
-			return -EINVAL;
-
-		if (!set)
-			*tail = pwr_pmu_queue_tail_address_v(
-				gk20a_readl(g,
-					g->ops.pmu.pmu_get_queue_tail(queue->index)));
-		else
-			gk20a_writel(g,
-				g->ops.pmu.pmu_get_queue_tail(queue->index),
-				pwr_pmu_queue_tail_address_f(*tail));
-
-	} else {
-		if (!set)
-			*tail = pwr_pmu_msgq_tail_val_v(
-				gk20a_readl(g, pwr_pmu_msgq_tail_r()));
-		else
-			gk20a_writel(g,
-				pwr_pmu_msgq_tail_r(),
-				pwr_pmu_msgq_tail_val_f(*tail));
-	}
-
-	return 0;
-}
-
-static inline void pmu_queue_read(struct nvgpu_pmu *pmu,
-			u32 offset, u8 *dst, u32 size)
-{
-	pmu_copy_from_dmem(pmu, offset, dst, size, 0);
-}
-
-static inline void pmu_queue_write(struct nvgpu_pmu *pmu,
-			u32 offset, u8 *src, u32 size)
-{
-	pmu_copy_to_dmem(pmu, offset, src, size, 0);
-}
-
-int pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token)
+int gk20a_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token)
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
 	struct pmu_mutex *mutex;
@@ -2831,7 +2667,7 @@ int pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token)
 	return -EBUSY;
 }
 
-int pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token)
+int gk20a_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token)
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
 	struct pmu_mutex *mutex;
@@ -2872,234 +2708,78 @@ int pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token)
 	return 0;
 }
 
-static int pmu_queue_lock(struct nvgpu_pmu *pmu,
-			struct pmu_queue *queue)
-{
-	int err;
-
-	if (PMU_IS_MESSAGE_QUEUE(queue->id))
-		return 0;
-
-	if (PMU_IS_SW_COMMAND_QUEUE(queue->id)) {
-		nvgpu_mutex_acquire(&queue->mutex);
-		return 0;
-	}
-
-	err = pmu_mutex_acquire(pmu, queue->mutex_id, &queue->mutex_lock);
-	return err;
-}
-
-static int pmu_queue_unlock(struct nvgpu_pmu *pmu,
-			struct pmu_queue *queue)
-{
-	int err;
-
-	if (PMU_IS_MESSAGE_QUEUE(queue->id))
-		return 0;
-
-	if (PMU_IS_SW_COMMAND_QUEUE(queue->id)) {
-		nvgpu_mutex_release(&queue->mutex);
-		return 0;
-	}
-
-	err = pmu_mutex_release(pmu, queue->mutex_id, &queue->mutex_lock);
-	return err;
-}
-
-/* called by pmu_read_message, no lock */
-static bool pmu_queue_is_empty(struct nvgpu_pmu *pmu,
-			struct pmu_queue *queue)
-{
-	u32 head, tail;
-
-	pmu_queue_head(pmu, queue, &head, QUEUE_GET);
-	if (queue->opened && queue->oflag == OFLAG_READ)
-		tail = queue->position;
-	else
-		pmu_queue_tail(pmu, queue, &tail, QUEUE_GET);
-
-	return head == tail;
-}
-
-static bool pmu_queue_has_room(struct nvgpu_pmu *pmu,
-			struct pmu_queue *queue, u32 size, bool *need_rewind)
-{
-	u32 head, tail;
-	bool rewind = false;
-	unsigned int free;
-
-	size = ALIGN(size, QUEUE_ALIGNMENT);
-
-	pmu_queue_head(pmu, queue, &head, QUEUE_GET);
-	pmu_queue_tail(pmu, queue, &tail, QUEUE_GET);
-	if (head >= tail) {
-		free = queue->offset + queue->size - head;
-		free -= PMU_CMD_HDR_SIZE;
-
-		if (size > free) {
-			rewind = true;
-			head = queue->offset;
-		}
-	}
-
-	if (head < tail)
-		free = tail - head - 1;
-
-	if (need_rewind)
-		*need_rewind = rewind;
-
-	return size <= free;
-}
-
-static int pmu_queue_push(struct nvgpu_pmu *pmu,
-			struct pmu_queue *queue, void *data, u32 size)
-{
-
-	gk20a_dbg_fn("");
-
-	if (!queue->opened && queue->oflag == OFLAG_WRITE){
-		nvgpu_err(gk20a_from_pmu(pmu), "queue not opened for write");
-		return -EINVAL;
-	}
-
-	pmu_queue_write(pmu, queue->position, data, size);
-	queue->position += ALIGN(size, QUEUE_ALIGNMENT);
-	return 0;
-}
-
-static int pmu_queue_pop(struct nvgpu_pmu *pmu,
-			struct pmu_queue *queue, void *data, u32 size,
-			u32 *bytes_read)
-{
-	u32 head, tail, used;
-
-	*bytes_read = 0;
-
-	if (!queue->opened && queue->oflag == OFLAG_READ){
-		nvgpu_err(gk20a_from_pmu(pmu), "queue not opened for read");
-		return -EINVAL;
-	}
-
-	pmu_queue_head(pmu, queue, &head, QUEUE_GET);
-	tail = queue->position;
-
-	if (head == tail)
-		return 0;
-
-	if (head > tail)
-		used = head - tail;
-	else
-		used = queue->offset + queue->size - tail;
-
-	if (size > used) {
-		nvgpu_warn(gk20a_from_pmu(pmu),
-			"queue size smaller than request read");
-		size = used;
-	}
-
-	pmu_queue_read(pmu, tail, data, size);
-	queue->position += ALIGN(size, QUEUE_ALIGNMENT);
-	*bytes_read = size;
-	return 0;
-}
-
-static void pmu_queue_rewind(struct nvgpu_pmu *pmu,
-			struct pmu_queue *queue)
-{
-	struct pmu_cmd cmd;
-
-	gk20a_dbg_fn("");
-
-	if (!queue->opened) {
-		nvgpu_err(gk20a_from_pmu(pmu), "queue not opened");
-		return;
-	}
-
-	if (queue->oflag == OFLAG_WRITE) {
-		cmd.hdr.unit_id = PMU_UNIT_REWIND;
-		cmd.hdr.size = PMU_CMD_HDR_SIZE;
-		pmu_queue_push(pmu, queue, &cmd, cmd.hdr.size);
-		gk20a_dbg_pmu("queue %d rewinded", queue->id);
-	}
-
-	queue->position = queue->offset;
-	return;
-}
-
-/* open for read and lock the queue */
-static int pmu_queue_open_read(struct nvgpu_pmu *pmu,
-			struct pmu_queue *queue)
-{
-	int err;
-
-	err = pmu_queue_lock(pmu, queue);
-	if (err)
-		return err;
-
-	if (queue->opened)
-		BUG();
-
-	pmu_queue_tail(pmu, queue, &queue->position, QUEUE_GET);
-	queue->oflag = OFLAG_READ;
-	queue->opened = true;
-
-	return 0;
-}
-
-/* open for write and lock the queue
-   make sure there's enough free space for the write */
-static int pmu_queue_open_write(struct nvgpu_pmu *pmu,
-			struct pmu_queue *queue, u32 size)
-{
-	bool rewind = false;
-	int err;
-
-	err = pmu_queue_lock(pmu, queue);
-	if (err)
-		return err;
-
-	if (queue->opened)
-		BUG();
-
-	if (!pmu_queue_has_room(pmu, queue, size, &rewind)) {
-		gk20a_dbg_pmu("queue full: queue-id %d: index %d",
-			queue->id, queue->index);
-		pmu_queue_unlock(pmu, queue);
-		return -EAGAIN;
-	}
-
-	pmu_queue_head(pmu, queue, &queue->position, QUEUE_GET);
-	queue->oflag = OFLAG_WRITE;
-	queue->opened = true;
-
-	if (rewind)
-		pmu_queue_rewind(pmu, queue);
-
-	return 0;
-}
-
-/* close and unlock the queue */
-static int pmu_queue_close(struct nvgpu_pmu *pmu,
-			struct pmu_queue *queue, bool commit)
-{
-	if (!queue->opened)
-		return 0;
-
-	if (commit) {
-		if (queue->oflag == OFLAG_READ) {
-			pmu_queue_tail(pmu, queue,
-				&queue->position, QUEUE_SET);
-		}
-		else {
-			pmu_queue_head(pmu, queue,
-				&queue->position, QUEUE_SET);
-		}
-	}
-
-	queue->opened = false;
-
-	pmu_queue_unlock(pmu, queue);
-
-	return 0;
-}
+int gk20a_pmu_queue_head(struct nvgpu_pmu *pmu, struct pmu_queue *queue,
+			u32 *head, bool set)
+{
+	struct gk20a *g = gk20a_from_pmu(pmu);
+	u32 queue_head_size = 0;
+
+	if (g->ops.pmu.pmu_get_queue_head_size)
+		queue_head_size = g->ops.pmu.pmu_get_queue_head_size();
+
+	BUG_ON(!head || !queue_head_size);
+
+	if (PMU_IS_COMMAND_QUEUE(queue->id)) {
+
+		if (queue->index >= queue_head_size)
+			return -EINVAL;
+
+		if (!set)
+			*head = pwr_pmu_queue_head_address_v(
+				gk20a_readl(g,
+					g->ops.pmu.pmu_get_queue_head(queue->index)));
+		else
+			gk20a_writel(g,
+				g->ops.pmu.pmu_get_queue_head(queue->index),
+				pwr_pmu_queue_head_address_f(*head));
+	} else {
+		if (!set)
+			*head = pwr_pmu_msgq_head_val_v(
+				gk20a_readl(g, pwr_pmu_msgq_head_r()));
+		else
+			gk20a_writel(g,
+				pwr_pmu_msgq_head_r(),
+				pwr_pmu_msgq_head_val_f(*head));
+	}
+
+	return 0;
+}
+
+int gk20a_pmu_queue_tail(struct nvgpu_pmu *pmu, struct pmu_queue *queue,
+			u32 *tail, bool set)
+{
+	struct gk20a *g = gk20a_from_pmu(pmu);
+	u32 queue_tail_size = 0;
+
+	if (g->ops.pmu.pmu_get_queue_tail_size)
+		queue_tail_size = g->ops.pmu.pmu_get_queue_tail_size();
+
+	BUG_ON(!tail || !queue_tail_size);
+
+	if (PMU_IS_COMMAND_QUEUE(queue->id)) {
+
+		if (queue->index >= queue_tail_size)
+			return -EINVAL;
+
+		if (!set)
+			*tail = pwr_pmu_queue_tail_address_v(
+				gk20a_readl(g,
+					g->ops.pmu.pmu_get_queue_tail(queue->index)));
+		else
+			gk20a_writel(g,
+				g->ops.pmu.pmu_get_queue_tail(queue->index),
+				pwr_pmu_queue_tail_address_f(*tail));
+
+	} else {
+		if (!set)
+			*tail = pwr_pmu_msgq_tail_val_v(
+				gk20a_readl(g, pwr_pmu_msgq_tail_r()));
+		else
+			gk20a_writel(g,
+				pwr_pmu_msgq_tail_r(),
+				pwr_pmu_msgq_tail_val_f(*tail));
+	}
+
+	return 0;
+}
 
@@ -3193,7 +2873,7 @@ static int gk20a_init_pmu_setup_sw(struct gk20a *g)
 		pmu->mutex[i].id = i;
 		pmu->mutex[i].index = i;
 	}
-	pmu_seq_init(pmu);
+	nvgpu_pmu_seq_init(pmu);
 
 	gk20a_dbg_fn("skip init");
 	goto skip_init;
@@ -3226,7 +2906,7 @@ static int gk20a_init_pmu_setup_sw(struct gk20a *g)
 		goto err_free_mutex;
 	}
 
-	pmu_seq_init(pmu);
+	nvgpu_pmu_seq_init(pmu);
 
 	err = nvgpu_dma_alloc_map_sys(vm, GK20A_PMU_SEQ_BUF_SIZE,
 			&pmu->seq_buf);
@@ -3572,6 +3252,10 @@ void gk20a_init_pmu_ops(struct gpu_ops *gops)
 	gops->pmu.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v;
 	gops->pmu.pmu_get_queue_tail = pwr_pmu_queue_tail_r;
 	gops->pmu.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v;
+	gops->pmu.pmu_queue_head = gk20a_pmu_queue_head;
+	gops->pmu.pmu_queue_tail = gk20a_pmu_queue_tail;
+	gops->pmu.pmu_mutex_acquire = gk20a_pmu_mutex_acquire;
+	gops->pmu.pmu_mutex_release = gk20a_pmu_mutex_release;
 	gops->pmu.pmu_setup_elpg = NULL;
 	gops->pmu.init_wpr_region = NULL;
 	gops->pmu.load_lsfalcon_ucode = NULL;
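
With the hooks registered, the queue locking that moved to
common/pmu/pmu_ipc.c no longer calls the gk20a functions directly. A sketch of
the assumed common-side call (the moved pmu_queue_lock() is not part of this
diff):

	/* HW command queues are still protected by the PMU HW mutex,
	 * now reached through the per-chip hook */
	err = g->ops.pmu.pmu_mutex_acquire(pmu, queue->mutex_id,
			&queue->mutex_lock);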
@@ -3829,7 +3513,7 @@ static u8 get_perfmon_id(struct nvgpu_pmu *pmu)
 	return unit_id;
 }
 
-static int pmu_init_perfmon(struct nvgpu_pmu *pmu)
+int nvgpu_pmu_init_perfmon(struct nvgpu_pmu *pmu)
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
 	struct pmu_v *pv = &g->ops.pmu_ver;
@@ -3929,7 +3613,7 @@ static int pmu_init_perfmon(struct nvgpu_pmu *pmu)
 	return 0;
 }
 
-static int pmu_process_init_msg(struct nvgpu_pmu *pmu,
+int nvgpu_pmu_process_init_msg(struct nvgpu_pmu *pmu,
 		struct pmu_msg *msg)
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
@@ -3983,7 +3667,7 @@ static int pmu_process_init_msg(struct nvgpu_pmu *pmu,
 	}
 
 	for (i = 0; i < PMU_QUEUE_COUNT; i++)
-		pmu_queue_init(pmu, i, init);
+		nvgpu_pmu_queue_init(pmu, i, init);
 
 	if (!nvgpu_alloc_initialized(&pmu->dmem)) {
 		/* Align start and end addresses */
@@ -4007,169 +3691,6 @@ static int pmu_process_init_msg(struct nvgpu_pmu *pmu,
 	return 0;
 }
 
-static bool pmu_read_message(struct nvgpu_pmu *pmu, struct pmu_queue *queue,
-			struct pmu_msg *msg, int *status)
-{
-	struct gk20a *g = gk20a_from_pmu(pmu);
-	u32 read_size, bytes_read;
-	int err;
-
-	*status = 0;
-
-	if (pmu_queue_is_empty(pmu, queue))
-		return false;
-
-	err = pmu_queue_open_read(pmu, queue);
-	if (err) {
-		nvgpu_err(g, "fail to open queue %d for read", queue->id);
-		*status = err;
-		return false;
-	}
-
-	err = pmu_queue_pop(pmu, queue, &msg->hdr,
-			PMU_MSG_HDR_SIZE, &bytes_read);
-	if (err || bytes_read != PMU_MSG_HDR_SIZE) {
-		nvgpu_err(g, "fail to read msg from queue %d", queue->id);
-		*status = err | -EINVAL;
-		goto clean_up;
-	}
-
-	if (msg->hdr.unit_id == PMU_UNIT_REWIND) {
-		pmu_queue_rewind(pmu, queue);
-		/* read again after rewind */
-		err = pmu_queue_pop(pmu, queue, &msg->hdr,
-				PMU_MSG_HDR_SIZE, &bytes_read);
-		if (err || bytes_read != PMU_MSG_HDR_SIZE) {
-			nvgpu_err(g,
-				"fail to read msg from queue %d", queue->id);
-			*status = err | -EINVAL;
-			goto clean_up;
-		}
-	}
-
-	if (!PMU_UNIT_ID_IS_VALID(msg->hdr.unit_id)) {
-		nvgpu_err(g, "read invalid unit_id %d from queue %d",
-			msg->hdr.unit_id, queue->id);
-		*status = -EINVAL;
-		goto clean_up;
-	}
-
-	if (msg->hdr.size > PMU_MSG_HDR_SIZE) {
-		read_size = msg->hdr.size - PMU_MSG_HDR_SIZE;
-		err = pmu_queue_pop(pmu, queue, &msg->msg,
-			read_size, &bytes_read);
-		if (err || bytes_read != read_size) {
-			nvgpu_err(g,
-				"fail to read msg from queue %d", queue->id);
-			*status = err;
-			goto clean_up;
-		}
-	}
-
-	err = pmu_queue_close(pmu, queue, true);
-	if (err) {
-		nvgpu_err(g, "fail to close queue %d", queue->id);
-		*status = err;
-		return false;
-	}
-
-	return true;
-
-clean_up:
-	err = pmu_queue_close(pmu, queue, false);
-	if (err)
-		nvgpu_err(g, "fail to close queue %d", queue->id);
-	return false;
-}
-
-static int pmu_response_handle(struct nvgpu_pmu *pmu,
-			struct pmu_msg *msg)
-{
-	struct gk20a *g = gk20a_from_pmu(pmu);
-	struct pmu_sequence *seq;
-	struct pmu_v *pv = &g->ops.pmu_ver;
-	int ret = 0;
-
-	gk20a_dbg_fn("");
-
-	seq = &pmu->seq[msg->hdr.seq_id];
-	if (seq->state != PMU_SEQ_STATE_USED &&
-	    seq->state != PMU_SEQ_STATE_CANCELLED) {
-		nvgpu_err(g, "msg for an unknown sequence %d", seq->id);
-		return -EINVAL;
-	}
-
-	if (msg->hdr.unit_id == PMU_UNIT_RC &&
-	    msg->msg.rc.msg_type == PMU_RC_MSG_TYPE_UNHANDLED_CMD) {
-		nvgpu_err(g, "unhandled cmd: seq %d", seq->id);
-	}
-	else if (seq->state != PMU_SEQ_STATE_CANCELLED) {
-		if (seq->msg) {
-			if (seq->msg->hdr.size >= msg->hdr.size) {
-				memcpy(seq->msg, msg, msg->hdr.size);
-			} else {
-				nvgpu_err(g, "sequence %d msg buffer too small",
-					seq->id);
-			}
-		}
-		if (pv->pmu_allocation_get_dmem_size(pmu,
-			pv->get_pmu_seq_out_a_ptr(seq)) != 0) {
-			pmu_copy_from_dmem(pmu,
-				pv->pmu_allocation_get_dmem_offset(pmu,
-					pv->get_pmu_seq_out_a_ptr(seq)),
-				seq->out_payload,
-				pv->pmu_allocation_get_dmem_size(pmu,
-					pv->get_pmu_seq_out_a_ptr(seq)), 0);
-		}
-	} else
-		seq->callback = NULL;
-	if (pv->pmu_allocation_get_dmem_size(pmu,
-		pv->get_pmu_seq_in_a_ptr(seq)) != 0)
-		nvgpu_free(&pmu->dmem,
-			pv->pmu_allocation_get_dmem_offset(pmu,
-				pv->get_pmu_seq_in_a_ptr(seq)));
-	if (pv->pmu_allocation_get_dmem_size(pmu,
-		pv->get_pmu_seq_out_a_ptr(seq)) != 0)
-		nvgpu_free(&pmu->dmem,
-			pv->pmu_allocation_get_dmem_offset(pmu,
-				pv->get_pmu_seq_out_a_ptr(seq)));
-
-	if (seq->out_mem != NULL) {
-		memset(pv->pmu_allocation_get_fb_addr(pmu,
-			pv->get_pmu_seq_out_a_ptr(seq)), 0x0,
-			pv->pmu_allocation_get_fb_size(pmu,
-				pv->get_pmu_seq_out_a_ptr(seq)));
-
-		gk20a_pmu_surface_free(g, seq->out_mem);
-		if (seq->out_mem != seq->in_mem)
-			nvgpu_kfree(g, seq->out_mem);
-		else
-			seq->out_mem = NULL;
-	}
-
-	if (seq->in_mem != NULL) {
-		memset(pv->pmu_allocation_get_fb_addr(pmu,
-			pv->get_pmu_seq_in_a_ptr(seq)), 0x0,
-			pv->pmu_allocation_get_fb_size(pmu,
-				pv->get_pmu_seq_in_a_ptr(seq)));
-
-		gk20a_pmu_surface_free(g, seq->in_mem);
-		nvgpu_kfree(g, seq->in_mem);
-		seq->in_mem = NULL;
-	}
-
-	if (seq->callback)
-		seq->callback(g, msg, seq->cb_params, seq->desc, ret);
-
-	pmu_seq_release(pmu, seq);
-
-	/* TBD: notify client waiting for available dmem */
-
-	gk20a_dbg_fn("done");
-
-	return 0;
-}
-
 static void pmu_handle_zbc_msg(struct gk20a *g, struct pmu_msg *msg,
 			void *param, u32 handle, u32 status)
 {
@@ -4266,7 +3787,7 @@ int nvgpu_pmu_perfmon_stop_sampling(struct nvgpu_pmu *pmu)
 	return 0;
 }
 
-static int pmu_handle_perfmon_event(struct nvgpu_pmu *pmu,
+int nvgpu_pmu_handle_perfmon_event(struct nvgpu_pmu *pmu,
 		struct pmu_perfmon_msg *msg)
 {
 	gk20a_dbg_fn("");
@@ -4298,8 +3819,7 @@ static int pmu_handle_perfmon_event(struct nvgpu_pmu *pmu,
 	return 0;
 }
 
-
-static int pmu_handle_therm_event(struct nvgpu_pmu *pmu,
+int nvgpu_pmu_handle_therm_event(struct nvgpu_pmu *pmu,
 		struct nv_pmu_therm_msg *msg)
 {
 	gk20a_dbg_fn("");
@@ -4323,99 +3843,6 @@ static int pmu_handle_therm_event(struct nvgpu_pmu *pmu,
 	return 0;
 }
 
-static int pmu_handle_event(struct nvgpu_pmu *pmu, struct pmu_msg *msg)
-{
-	int err = 0;
-	struct gk20a *g = gk20a_from_pmu(pmu);
-
-	gk20a_dbg_fn("");
-	switch (msg->hdr.unit_id) {
-	case PMU_UNIT_PERFMON:
-	case PMU_UNIT_PERFMON_T18X:
-		err = pmu_handle_perfmon_event(pmu, &msg->msg.perfmon);
-		break;
-	case PMU_UNIT_PERF:
-		if (g->ops.perf.handle_pmu_perf_event != NULL) {
-			err = g->ops.perf.handle_pmu_perf_event(g,
-				(void *)&msg->msg.perf);
-		} else {
-			WARN_ON(1);
-		}
-		break;
-	case PMU_UNIT_THERM:
-		err = pmu_handle_therm_event(pmu, &msg->msg.therm);
-		break;
-	default:
-		break;
-	}
-
-	return err;
-}
-
-static int pmu_process_message(struct nvgpu_pmu *pmu)
-{
-	struct pmu_msg msg;
-	int status;
-	struct gk20a *g = gk20a_from_pmu(pmu);
-
-	if (unlikely(!pmu->pmu_ready)) {
-		pmu_process_init_msg(pmu, &msg);
-		if (g->ops.pmu.init_wpr_region != NULL)
-			g->ops.pmu.init_wpr_region(g);
-		pmu_init_perfmon(pmu);
-
-		return 0;
-	}
-
-	while (pmu_read_message(pmu,
-		&pmu->queue[PMU_MESSAGE_QUEUE], &msg, &status)) {
-
-		gk20a_dbg_pmu("read msg hdr: "
-			"unit_id = 0x%08x, size = 0x%08x, "
-			"ctrl_flags = 0x%08x, seq_id = 0x%08x",
-			msg.hdr.unit_id, msg.hdr.size,
-			msg.hdr.ctrl_flags, msg.hdr.seq_id);
-
-		msg.hdr.ctrl_flags &= ~PMU_CMD_FLAGS_PMU_MASK;
-
-		if (msg.hdr.ctrl_flags == PMU_CMD_FLAGS_EVENT) {
-			pmu_handle_event(pmu, &msg);
-		} else {
-			pmu_response_handle(pmu, &msg);
-		}
-	}
-
-	return 0;
-}
-
-int pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms,
-		u32 *var, u32 val)
-{
-	struct gk20a *g = gk20a_from_pmu(pmu);
-	struct nvgpu_timeout timeout;
-	unsigned long delay = GR_IDLE_CHECK_DEFAULT;
-	u32 servicedpmuint;
-
-	servicedpmuint = pwr_falcon_irqstat_halt_true_f() |
-		pwr_falcon_irqstat_exterr_true_f() |
-		pwr_falcon_irqstat_swgen0_true_f();
-
-	nvgpu_timeout_init(g, &timeout, (int)timeout_ms, NVGPU_TIMER_CPU_TIMER);
-
-	do {
-		if (*var == val)
-			return 0;
-
-		if (gk20a_readl(g, pwr_falcon_irqstat_r()) & servicedpmuint)
-			gk20a_pmu_isr(g);
-
-		nvgpu_usleep_range(delay, delay * 2);
-		delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
-	} while (!nvgpu_timeout_expired(&timeout));
-
-	return -ETIMEDOUT;
-}
-
 static void pmu_dump_elpg_stats(struct nvgpu_pmu *pmu)
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
@@ -4613,6 +4040,21 @@ void pmu_dump_falcon_stats(struct nvgpu_pmu *pmu)
 	printtrace(pmu);
 }
 
+bool gk20a_pmu_is_interrupted(struct nvgpu_pmu *pmu)
+{
+	struct gk20a *g = gk20a_from_pmu(pmu);
+	u32 servicedpmuint;
+
+	servicedpmuint = pwr_falcon_irqstat_halt_true_f() |
+		pwr_falcon_irqstat_exterr_true_f() |
+		pwr_falcon_irqstat_swgen0_true_f();
+
+	if (gk20a_readl(g, pwr_falcon_irqstat_r()) & servicedpmuint)
+		return true;
+
+	return false;
+}
+
 void gk20a_pmu_isr(struct gk20a *g)
 {
 	struct nvgpu_pmu *pmu = &g->pmu;
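
gk20a_pmu_is_interrupted() factors the irqstat poll out of the removed
pmu_wait_message_cond() above, so the wait loop can live in common code
without touching pwr_falcon_* registers. A sketch of the assumed common-side
replacement (name assumed), rebuilt from the removed body:

int nvgpu_pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms,
		u32 *var, u32 val)
{
	struct gk20a *g = gk20a_from_pmu(pmu);
	struct nvgpu_timeout timeout;
	unsigned long delay = GR_IDLE_CHECK_DEFAULT;

	nvgpu_timeout_init(g, &timeout, (int)timeout_ms, NVGPU_TIMER_CPU_TIMER);

	do {
		if (*var == val)
			return 0;

		/* service pending PMU interrupts while polling */
		if (gk20a_pmu_is_interrupted(pmu))
			gk20a_pmu_isr(g);

		nvgpu_usleep_range(delay, delay * 2);
		delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
	} while (!nvgpu_timeout_expired(&timeout));

	return -ETIMEDOUT;
}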
@@ -4661,7 +4103,7 @@ void gk20a_pmu_isr(struct gk20a *g)
 			~pwr_falcon_exterrstat_valid_m());
 	}
 	if (intr & pwr_falcon_irqstat_swgen0_true_f()) {
-		pmu_process_message(pmu);
+		nvgpu_pmu_process_message(pmu);
 		recheck = true;
 	}
 
@@ -4669,7 +4111,7 @@ void gk20a_pmu_isr(struct gk20a *g)
 
 	if (recheck) {
 		queue = &pmu->queue[PMU_MESSAGE_QUEUE];
-		if (!pmu_queue_is_empty(pmu, queue))
+		if (!nvgpu_pmu_queue_is_empty(pmu, queue))
 			gk20a_writel(g, pwr_falcon_irqsset_r(),
 				pwr_falcon_irqsset_swgen0_set_f());
 	}
@@ -4677,114 +4119,6 @@ void gk20a_pmu_isr(struct gk20a *g)
 	nvgpu_mutex_release(&pmu->isr_mutex);
 }
 
-static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
-			struct pmu_msg *msg, struct pmu_payload *payload,
-			u32 queue_id)
-{
-	struct gk20a *g = gk20a_from_pmu(pmu);
-	struct pmu_queue *queue;
-	u32 in_size, out_size;
-
-	if (!PMU_IS_SW_COMMAND_QUEUE(queue_id))
-		goto invalid_cmd;
-
-	queue = &pmu->queue[queue_id];
-	if (cmd->hdr.size < PMU_CMD_HDR_SIZE)
-		goto invalid_cmd;
-
-	if (cmd->hdr.size > (queue->size >> 1))
-		goto invalid_cmd;
-
-	if (msg != NULL && msg->hdr.size < PMU_MSG_HDR_SIZE)
-		goto invalid_cmd;
-
-	if (!PMU_UNIT_ID_IS_VALID(cmd->hdr.unit_id))
-		goto invalid_cmd;
-
-	if (payload == NULL)
-		return true;
-
-	if (payload->in.buf == NULL && payload->out.buf == NULL)
-		goto invalid_cmd;
-
-	if ((payload->in.buf != NULL && payload->in.size == 0) ||
-	    (payload->out.buf != NULL && payload->out.size == 0))
-		goto invalid_cmd;
-
-	in_size = PMU_CMD_HDR_SIZE;
-	if (payload->in.buf) {
-		in_size += payload->in.offset;
-		in_size += g->ops.pmu_ver.get_pmu_allocation_struct_size(pmu);
-	}
-
-	out_size = PMU_CMD_HDR_SIZE;
-	if (payload->out.buf) {
-		out_size += payload->out.offset;
-		out_size += g->ops.pmu_ver.get_pmu_allocation_struct_size(pmu);
-	}
-
-	if (in_size > cmd->hdr.size || out_size > cmd->hdr.size)
-		goto invalid_cmd;
-
-
-	if ((payload->in.offset != 0 && payload->in.buf == NULL) ||
-	    (payload->out.offset != 0 && payload->out.buf == NULL))
-		goto invalid_cmd;
-
-	return true;
-
-invalid_cmd:
-	nvgpu_err(g, "invalid pmu cmd :"
-		"queue_id=%d,\n"
-		"cmd_size=%d, cmd_unit_id=%d, msg=%p, msg_size=%d,\n"
-		"payload in=%p, in_size=%d, in_offset=%d,\n"
-		"payload out=%p, out_size=%d, out_offset=%d",
-		queue_id, cmd->hdr.size, cmd->hdr.unit_id,
-		msg, msg?msg->hdr.unit_id:~0,
-		&payload->in, payload->in.size, payload->in.offset,
-		&payload->out, payload->out.size, payload->out.offset);
-
-	return false;
-}
-
-static int pmu_write_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
-			u32 queue_id, unsigned long timeout_ms)
-{
-	struct gk20a *g = gk20a_from_pmu(pmu);
-	struct pmu_queue *queue;
-	struct nvgpu_timeout timeout;
-	int err;
-
-	gk20a_dbg_fn("");
-
-	queue = &pmu->queue[queue_id];
-	nvgpu_timeout_init(g, &timeout, (int)timeout_ms, NVGPU_TIMER_CPU_TIMER);
-
-	do {
-		err = pmu_queue_open_write(pmu, queue, cmd->hdr.size);
-		if (err == -EAGAIN && !nvgpu_timeout_expired(&timeout))
-			nvgpu_usleep_range(1000, 2000);
-		else
-			break;
-	} while (1);
-
-	if (err)
-		goto clean_up;
-
-	pmu_queue_push(pmu, queue, cmd, cmd->hdr.size);
-
-
-	err = pmu_queue_close(pmu, queue, true);
-
-clean_up:
-	if (err)
-		nvgpu_err(g, "fail to write cmd to queue %d", queue_id);
-	else
-		gk20a_dbg_fn("done");
-
-	return err;
-}
-
 void gk20a_pmu_surface_describe(struct gk20a *g, struct nvgpu_mem *mem,
 		struct flcn_mem_desc_v0 *fb)
 {
@@ -4832,170 +4166,6 @@ void gk20a_pmu_surface_free(struct gk20a *g, struct nvgpu_mem *mem)
 	memset(mem, 0, sizeof(struct nvgpu_mem));
 }
 
-int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
-		struct pmu_msg *msg, struct pmu_payload *payload,
-		u32 queue_id, pmu_callback callback, void* cb_param,
-		u32 *seq_desc, unsigned long timeout)
-{
-	struct nvgpu_pmu *pmu = &g->pmu;
-	struct pmu_v *pv = &g->ops.pmu_ver;
-	struct pmu_sequence *seq;
-	void *in = NULL, *out = NULL;
-	int err;
-
-	gk20a_dbg_fn("");
-
-	if ((!cmd) || (!seq_desc) || (!pmu->pmu_ready)) {
-		if (!cmd)
-			nvgpu_warn(g, "%s(): PMU cmd buffer is NULL", __func__);
-		else if (!seq_desc)
-			nvgpu_warn(g, "%s(): Seq descriptor is NULL", __func__);
-		else
-			nvgpu_warn(g, "%s(): PMU is not ready", __func__);
-
-		WARN_ON(1);
-		return -EINVAL;
-	}
-
-	if (!pmu_validate_cmd(pmu, cmd, msg, payload, queue_id))
-		return -EINVAL;
-
-	err = pmu_seq_acquire(pmu, &seq);
-	if (err)
-		return err;
-
-	cmd->hdr.seq_id = seq->id;
-
-	cmd->hdr.ctrl_flags = 0;
-	cmd->hdr.ctrl_flags |= PMU_CMD_FLAGS_STATUS;
-	cmd->hdr.ctrl_flags |= PMU_CMD_FLAGS_INTR;
-
-	seq->callback = callback;
-	seq->cb_params = cb_param;
-	seq->msg = msg;
-	seq->out_payload = NULL;
-	seq->desc = pmu->next_seq_desc++;
-
-	if (payload)
-		seq->out_payload = payload->out.buf;
-
-	*seq_desc = seq->desc;
-
-	if (payload && payload->in.offset != 0) {
-		pv->set_pmu_allocation_ptr(pmu, &in,
-			((u8 *)&cmd->cmd + payload->in.offset));
-
-		if (payload->in.buf != payload->out.buf)
-			pv->pmu_allocation_set_dmem_size(pmu, in,
-				(u16)payload->in.size);
-		else
-			pv->pmu_allocation_set_dmem_size(pmu, in,
-				(u16)max(payload->in.size, payload->out.size));
-
-		*(pv->pmu_allocation_get_dmem_offset_addr(pmu, in)) =
-			nvgpu_alloc(&pmu->dmem,
-				pv->pmu_allocation_get_dmem_size(pmu, in));
-		if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu, in)))
-			goto clean_up;
-
-		if (payload->in.fb_size != 0x0) {
-			seq->in_mem = nvgpu_kzalloc(g,
-				sizeof(struct nvgpu_mem));
-			if (!seq->in_mem) {
-				err = -ENOMEM;
-				goto clean_up;
-			}
-
-			gk20a_pmu_vidmem_surface_alloc(g, seq->in_mem,
-				payload->in.fb_size);
-			gk20a_pmu_surface_describe(g, seq->in_mem,
-				(struct flcn_mem_desc_v0 *)
-				pv->pmu_allocation_get_fb_addr(pmu, in));
-
-			nvgpu_mem_wr_n(g, seq->in_mem, 0,
-				payload->in.buf, payload->in.fb_size);
-
-		} else {
-			pmu_copy_to_dmem(pmu,
-				(pv->pmu_allocation_get_dmem_offset(pmu, in)),
-				payload->in.buf, payload->in.size, 0);
-		}
-		pv->pmu_allocation_set_dmem_size(pmu,
-			pv->get_pmu_seq_in_a_ptr(seq),
-			pv->pmu_allocation_get_dmem_size(pmu, in));
-		pv->pmu_allocation_set_dmem_offset(pmu,
-			pv->get_pmu_seq_in_a_ptr(seq),
-			pv->pmu_allocation_get_dmem_offset(pmu, in));
-	}
-
-	if (payload && payload->out.offset != 0) {
-		pv->set_pmu_allocation_ptr(pmu, &out,
-			((u8 *)&cmd->cmd + payload->out.offset));
-		pv->pmu_allocation_set_dmem_size(pmu, out,
-			(u16)payload->out.size);
-
-		if (payload->in.buf != payload->out.buf) {
-			*(pv->pmu_allocation_get_dmem_offset_addr(pmu, out)) =
-				nvgpu_alloc(&pmu->dmem,
-					pv->pmu_allocation_get_dmem_size(pmu, out));
-			if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu,
-				out)))
-				goto clean_up;
-
-			if (payload->out.fb_size != 0x0) {
-				seq->out_mem = nvgpu_kzalloc(g,
-					sizeof(struct nvgpu_mem));
-				if (!seq->out_mem) {
-					err = -ENOMEM;
-					goto clean_up;
-				}
-				gk20a_pmu_vidmem_surface_alloc(g, seq->out_mem,
-					payload->out.fb_size);
-				gk20a_pmu_surface_describe(g, seq->out_mem,
-					(struct flcn_mem_desc_v0 *)
-					pv->pmu_allocation_get_fb_addr(pmu,
-						out));
-			}
-		} else {
-			BUG_ON(in == NULL);
-			seq->out_mem = seq->in_mem;
-			pv->pmu_allocation_set_dmem_offset(pmu, out,
-				pv->pmu_allocation_get_dmem_offset(pmu, in));
-		}
-		pv->pmu_allocation_set_dmem_size(pmu,
-			pv->get_pmu_seq_out_a_ptr(seq),
-			pv->pmu_allocation_get_dmem_size(pmu, out));
-		pv->pmu_allocation_set_dmem_offset(pmu,
-			pv->get_pmu_seq_out_a_ptr(seq),
-			pv->pmu_allocation_get_dmem_offset(pmu, out));
-
-	}
-
-
-
-	seq->state = PMU_SEQ_STATE_USED;
-
-	err = pmu_write_cmd(pmu, cmd, queue_id, timeout);
-	if (err)
-		seq->state = PMU_SEQ_STATE_PENDING;
-
-	gk20a_dbg_fn("done");
-
-	return 0;
-
-clean_up:
-	gk20a_dbg_fn("fail");
-	if (in)
-		nvgpu_free(&pmu->dmem,
-			pv->pmu_allocation_get_dmem_offset(pmu, in));
-	if (out)
-		nvgpu_free(&pmu->dmem,
-			pv->pmu_allocation_get_dmem_offset(pmu, out));
-
-	pmu_seq_release(pmu, seq);
-	return err;
-}
-
 int gk20a_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg)
 {
 	u32 status = 0;
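
gk20a_pmu_cmd_post() and its helpers (pmu_validate_cmd(), pmu_write_cmd(), the
sequence allocator) move to the common layer along with struct pmu_payload,
which is dropped from pmu_gk20a.h below. The common entry point is assumed to
keep the same signature under an nvgpu_ prefix (name assumed; the common file
is outside this diff), so a call site would now read:

	/* hypothetical caller after the move */
	err = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload,
			PMU_COMMAND_QUEUE_HPQ, callback, pmu, &seq_desc, ~0);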
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
index 24bc5822..a53329b4 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
@@ -57,15 +57,6 @@ struct nvgpu_firmware;
 #define PMU_PGENG_GR_BUFFER_IDX_ZBC	(1)
 #define PMU_PGENG_GR_BUFFER_IDX_FECS	(2)
 
-struct pmu_payload {
-	struct {
-		void *buf;
-		u32 offset;
-		u32 size;
-		u32 fb_size;
-	} in, out;
-};
-
 struct pmu_surface {
 	struct nvgpu_mem vidmem_desc;
 	struct nvgpu_mem sysmem_desc;
@@ -119,14 +110,9 @@ struct pmu_pg_stats_data
 int gk20a_init_pmu_support(struct gk20a *g);
 int gk20a_init_pmu_bind_fecs(struct gk20a *g);
 
+bool gk20a_pmu_is_interrupted(struct nvgpu_pmu *pmu);
 void gk20a_pmu_isr(struct gk20a *g);
 
-/* send a cmd to pmu */
-int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd, struct pmu_msg *msg,
-		struct pmu_payload *payload, u32 queue_id,
-		pmu_callback callback, void* cb_param,
-		u32 *seq_desc, unsigned long timeout);
-
 int gk20a_pmu_enable_elpg(struct gk20a *g);
 int gk20a_pmu_disable_elpg(struct gk20a *g);
 int gk20a_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg);
@@ -138,8 +124,14 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries);
 
 int gk20a_pmu_perfmon_enable(struct gk20a *g, bool enable);
 
-int pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token);
-int pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token);
+int gk20a_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token);
+int gk20a_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token);
+
+int gk20a_pmu_queue_head(struct nvgpu_pmu *pmu, struct pmu_queue *queue,
+	u32 *head, bool set);
+int gk20a_pmu_queue_tail(struct nvgpu_pmu *pmu, struct pmu_queue *queue,
+	u32 *tail, bool set);
+
 int gk20a_pmu_destroy(struct gk20a *g);
 int gk20a_pmu_load_norm(struct gk20a *g, u32 *load);
 int gk20a_pmu_load_update(struct gk20a *g);
@@ -157,7 +149,6 @@ int pmu_bootstrap(struct nvgpu_pmu *pmu);
 int gk20a_init_pmu(struct nvgpu_pmu *pmu);
 void pmu_dump_falcon_stats(struct nvgpu_pmu *pmu);
 void gk20a_remove_pmu_support(struct nvgpu_pmu *pmu);
-void pmu_seq_init(struct nvgpu_pmu *pmu);
 
 int gk20a_init_pmu(struct nvgpu_pmu *pmu);
 