diff options
author | Xiangliang Yu <Xiangliang.Yu@amd.com> | 2017-03-10 01:18:17 -0500 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2017-03-29 23:55:05 -0400 |
commit | c9c9de93a33ccdbf7eaef0c86a5e60551f3410bd (patch) | |
tree | d6fadb30b4b5be6b17a173a9a18d6c69d94cee63 /drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | |
parent | ebe0a8097913659e29c4b89a37c93fc0a88c6d4e (diff) |
drm/amdgpu/virt: impl mailbox for ai
Implement mailbox protocol for AI so that guest vf can communicate
with GPU hypervisor.
Signed-off-by: Xiangliang Yu <Xiangliang.Yu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Monk Liu <Monk.Liu@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 207 |
1 files changed, 207 insertions, 0 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c new file mode 100644 index 000000000000..cfd5e54777bb --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | |||
@@ -0,0 +1,207 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #include "amdgpu.h" | ||
25 | #include "vega10/soc15ip.h" | ||
26 | #include "vega10/NBIO/nbio_6_1_offset.h" | ||
27 | #include "vega10/NBIO/nbio_6_1_sh_mask.h" | ||
28 | #include "vega10/GC/gc_9_0_offset.h" | ||
29 | #include "vega10/GC/gc_9_0_sh_mask.h" | ||
30 | #include "soc15.h" | ||
31 | #include "soc15_common.h" | ||
32 | #include "mxgpu_ai.h" | ||
33 | |||
34 | static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev) | ||
35 | { | ||
36 | u32 reg; | ||
37 | int timeout = AI_MAILBOX_TIMEDOUT; | ||
38 | u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_VALID); | ||
39 | |||
40 | reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, | ||
41 | mmBIF_BX_PF0_MAILBOX_CONTROL)); | ||
42 | reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_ACK, 1); | ||
43 | WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, | ||
44 | mmBIF_BX_PF0_MAILBOX_CONTROL), reg); | ||
45 | |||
46 | /*Wait for RCV_MSG_VALID to be 0*/ | ||
47 | reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, | ||
48 | mmBIF_BX_PF0_MAILBOX_CONTROL)); | ||
49 | while (reg & mask) { | ||
50 | if (timeout <= 0) { | ||
51 | pr_err("RCV_MSG_VALID is not cleared\n"); | ||
52 | break; | ||
53 | } | ||
54 | mdelay(1); | ||
55 | timeout -=1; | ||
56 | |||
57 | reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, | ||
58 | mmBIF_BX_PF0_MAILBOX_CONTROL)); | ||
59 | } | ||
60 | } | ||
61 | |||
62 | static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val) | ||
63 | { | ||
64 | u32 reg; | ||
65 | |||
66 | reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, | ||
67 | mmBIF_BX_PF0_MAILBOX_CONTROL)); | ||
68 | reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_CONTROL, | ||
69 | TRN_MSG_VALID, val ? 1 : 0); | ||
70 | WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_CONTROL), | ||
71 | reg); | ||
72 | } | ||
73 | |||
74 | static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev, | ||
75 | enum idh_request req) | ||
76 | { | ||
77 | u32 reg; | ||
78 | |||
79 | reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, | ||
80 | mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0)); | ||
81 | reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0, | ||
82 | MSGBUF_DATA, req); | ||
83 | WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0), | ||
84 | reg); | ||
85 | |||
86 | xgpu_ai_mailbox_set_valid(adev, true); | ||
87 | } | ||
88 | |||
/*
 * Check whether @event has arrived from the GPU hypervisor.
 *
 * Returns 0 if receive data word 0 equals @event (the message is then
 * acked so the PF can reuse the mailbox), -ENOENT if no valid message
 * is pending or a different message is pending.
 *
 * The RCV_MSG_VALID check is skipped for IDH_FLR_NOTIFICATION_CMPL.
 * NOTE(review): presumably the FLR-complete notification does not raise
 * the valid bit — confirm against the host mailbox protocol.
 */
static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;
	u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_VALID);

	if (event != IDH_FLR_NOTIFICATION_CMPL) {
		reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
						     mmBIF_BX_PF0_MAILBOX_CONTROL));
		if (!(reg & mask))
			return -ENOENT;
	}

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
	if (reg != event)
		return -ENOENT;

	/* Ack the message so the mailbox is freed for the next one. */
	xgpu_ai_mailbox_send_ack(adev);

	return 0;
}
111 | |||
112 | static int xgpu_ai_poll_ack(struct amdgpu_device *adev) | ||
113 | { | ||
114 | int r = 0, timeout = AI_MAILBOX_TIMEDOUT; | ||
115 | u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, TRN_MSG_ACK); | ||
116 | u32 reg; | ||
117 | |||
118 | reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, | ||
119 | mmBIF_BX_PF0_MAILBOX_CONTROL)); | ||
120 | while (!(reg & mask)) { | ||
121 | if (timeout <= 0) { | ||
122 | pr_err("Doesn't get ack from pf.\n"); | ||
123 | r = -ETIME; | ||
124 | break; | ||
125 | } | ||
126 | msleep(1); | ||
127 | timeout -= 1; | ||
128 | |||
129 | reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, | ||
130 | mmBIF_BX_PF0_MAILBOX_CONTROL)); | ||
131 | } | ||
132 | |||
133 | return r; | ||
134 | } | ||
135 | |||
136 | static int xgpu_vi_poll_msg(struct amdgpu_device *adev, enum idh_event event) | ||
137 | { | ||
138 | int r = 0, timeout = AI_MAILBOX_TIMEDOUT; | ||
139 | |||
140 | r = xgpu_ai_mailbox_rcv_msg(adev, event); | ||
141 | while (r) { | ||
142 | if (timeout <= 0) { | ||
143 | pr_err("Doesn't get ack from pf.\n"); | ||
144 | r = -ETIME; | ||
145 | break; | ||
146 | } | ||
147 | msleep(1); | ||
148 | timeout -= 1; | ||
149 | |||
150 | r = xgpu_ai_mailbox_rcv_msg(adev, event); | ||
151 | } | ||
152 | |||
153 | return r; | ||
154 | } | ||
155 | |||
156 | |||
/*
 * Send @req to the GPU hypervisor and wait for it to be acknowledged.
 * For the three requests that grant the VF the GPU (init/fini/reset
 * access), additionally wait for the READY_TO_ACCESS_GPU reply.
 * Returns 0 on success, -ETIME if the ack or the reply times out.
 */
static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r;

	xgpu_ai_mailbox_trans_msg(adev, req);

	/* start to poll ack */
	r = xgpu_ai_poll_ack(adev);
	if (r)
		return r;

	/* drop TRN_MSG_VALID now that the request has been acked */
	xgpu_ai_mailbox_set_valid(adev, false);

	/* wait for the ready notification on init/fini/reset access requests */
	if (req == IDH_REQ_GPU_INIT_ACCESS ||
	    req == IDH_REQ_GPU_FINI_ACCESS ||
	    req == IDH_REQ_GPU_RESET_ACCESS) {
		r = xgpu_vi_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
		if (r)
			return r;
	}

	return 0;
}
182 | |||
183 | static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev, | ||
184 | bool init) | ||
185 | { | ||
186 | enum idh_request req; | ||
187 | |||
188 | req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS; | ||
189 | return xgpu_ai_send_access_requests(adev, req); | ||
190 | } | ||
191 | |||
192 | static int xgpu_ai_release_full_gpu_access(struct amdgpu_device *adev, | ||
193 | bool init) | ||
194 | { | ||
195 | enum idh_request req; | ||
196 | int r = 0; | ||
197 | |||
198 | req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS; | ||
199 | r = xgpu_ai_send_access_requests(adev, req); | ||
200 | |||
201 | return r; | ||
202 | } | ||
203 | |||
/* VF mailbox hooks exported to the amdgpu virtualization layer. */
const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
	.req_full_gpu = xgpu_ai_request_full_gpu_access,
	.rel_full_gpu = xgpu_ai_release_full_gpu_access,
};