Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 838
1 file changed, 838 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
new file mode 100644
index 000000000000..6b1243f9f86d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -0,0 +1,838 @@
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <drm/drmP.h>
#include <linux/firmware.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "cgs_linux.h"
#include "atom.h"
#include "amdgpu_ucode.h"

struct amdgpu_cgs_device {
        struct cgs_device base;
        struct amdgpu_device *adev;
};

#define CGS_FUNC_ADEV \
        struct amdgpu_device *adev = \
                ((struct amdgpu_cgs_device *)cgs_device)->adev

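/*
 * Every callback below receives the opaque cgs_device pointer and
 * recovers the amdgpu_device through this macro. Expanded, a callback
 * body therefore starts with (illustrative only):
 *
 *      struct amdgpu_device *adev =
 *              ((struct amdgpu_cgs_device *)cgs_device)->adev;
 */
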
static int amdgpu_cgs_gpu_mem_info(void *cgs_device, enum cgs_gpu_mem_type type,
                                   uint64_t *mc_start, uint64_t *mc_size,
                                   uint64_t *mem_size)
{
        CGS_FUNC_ADEV;
        switch (type) {
        case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
        case CGS_GPU_MEM_TYPE__VISIBLE_FB:
                *mc_start = 0;
                *mc_size = adev->mc.visible_vram_size;
                *mem_size = adev->mc.visible_vram_size - adev->vram_pin_size;
                break;
        case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
        case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
                *mc_start = adev->mc.visible_vram_size;
                *mc_size = adev->mc.real_vram_size - adev->mc.visible_vram_size;
                *mem_size = *mc_size;
                break;
        case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
        case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
                *mc_start = adev->mc.gtt_start;
                *mc_size = adev->mc.gtt_size;
                *mem_size = adev->mc.gtt_size - adev->gart_pin_size;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

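/*
 * Worked example (hypothetical board, not from this patch): with 4 GB
 * of VRAM of which 256 MB is CPU-visible and nothing pinned, the
 * visible types report mc_start = 0 and mc_size = mem_size = 256 MB,
 * while the invisible types report mc_start = 256 MB and
 * mc_size = mem_size = 3840 MB. For the visible and GART ranges,
 * pinned memory shrinks mem_size but never mc_size.
 */
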
static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem,
                                uint64_t size,
                                uint64_t min_offset, uint64_t max_offset,
                                cgs_handle_t *kmem_handle, uint64_t *mcaddr)
{
        CGS_FUNC_ADEV;
        int ret;
        struct amdgpu_bo *bo;
        struct page *kmem_page = vmalloc_to_page(kmem);
        int npages = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;

        struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages);
        if (IS_ERR(sg))
                return PTR_ERR(sg);
        ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
                               AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo);
        if (ret)
                return ret;
        ret = amdgpu_bo_reserve(bo, false);
        if (unlikely(ret != 0)) {
                amdgpu_bo_unref(&bo);
                return ret;
        }

        /* pin the buffer into GTT so the MC address stays valid */
        ret = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_GTT,
                                       min_offset, max_offset, mcaddr);
        amdgpu_bo_unreserve(bo);

        *kmem_handle = (cgs_handle_t)bo;
        return ret;
}

static int amdgpu_cgs_gunmap_kmem(void *cgs_device, cgs_handle_t kmem_handle)
{
        struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle;

        if (obj) {
                int r = amdgpu_bo_reserve(obj, false);
                if (likely(r == 0)) {
                        amdgpu_bo_unpin(obj);
                        amdgpu_bo_unreserve(obj);
                }
                amdgpu_bo_unref(&obj);
        }
        return 0;
}

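/*
 * gmap_kmem/gunmap_kmem pair up: the first wraps a kernel allocation
 * in a GTT-pinned buffer object, the second unpins and drops it. A
 * minimal sketch, assuming "buf" is a hypothetical page-sized
 * vmalloc() allocation:
 *
 *      cgs_handle_t handle;
 *      uint64_t mcaddr;
 *
 *      if (!amdgpu_cgs_gmap_kmem(cgs_device, buf, PAGE_SIZE, 0,
 *                                adev->mc.gtt_size, &handle, &mcaddr)) {
 *              ... hand mcaddr to the hardware ...
 *              amdgpu_cgs_gunmap_kmem(cgs_device, handle);
 *      }
 */
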
static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
                                    enum cgs_gpu_mem_type type,
                                    uint64_t size, uint64_t align,
                                    uint64_t min_offset, uint64_t max_offset,
                                    cgs_handle_t *handle)
{
        CGS_FUNC_ADEV;
        uint16_t flags = 0;
        int ret = 0;
        uint32_t domain = 0;
        struct amdgpu_bo *obj;
        struct ttm_placement placement;
        struct ttm_place place;

        if (WARN_ON(min_offset > max_offset))
                return -EINVAL;

        /* fail if the alignment is not a power of 2 */
        if (((align != 1) && (align & (align - 1)))
            || size == 0 || align == 0)
                return -EINVAL;

        switch (type) {
        case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
        case CGS_GPU_MEM_TYPE__VISIBLE_FB:
                flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
                domain = AMDGPU_GEM_DOMAIN_VRAM;
                if (max_offset > adev->mc.real_vram_size)
                        return -EINVAL;
                place.fpfn = min_offset >> PAGE_SHIFT;
                place.lpfn = max_offset >> PAGE_SHIFT;
                place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
                        TTM_PL_FLAG_VRAM;
                break;
        case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
        case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
                flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
                domain = AMDGPU_GEM_DOMAIN_VRAM;
                if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
                        place.fpfn =
                                max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT;
                        place.lpfn =
                                min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT;
                        place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
                                TTM_PL_FLAG_VRAM;
                } else {
                        /* all of VRAM is CPU-visible, so there is no
                         * invisible range to place the buffer in */
                        return -EINVAL;
                }
                break;
        case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
                domain = AMDGPU_GEM_DOMAIN_GTT;
                place.fpfn = min_offset >> PAGE_SHIFT;
                place.lpfn = max_offset >> PAGE_SHIFT;
                place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
                break;
        case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
                flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
                domain = AMDGPU_GEM_DOMAIN_GTT;
                place.fpfn = min_offset >> PAGE_SHIFT;
                place.lpfn = max_offset >> PAGE_SHIFT;
                place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
                        TTM_PL_FLAG_UNCACHED;
                break;
        default:
                return -EINVAL;
        }

        *handle = 0;

        placement.placement = &place;
        placement.num_placement = 1;
        placement.busy_placement = &place;
        placement.num_busy_placement = 1;

        ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
                                          true, domain, flags,
                                          NULL, &placement, &obj);
        if (ret) {
                DRM_ERROR("(%d) bo create failed\n", ret);
                return ret;
        }
        *handle = (cgs_handle_t)obj;

        return ret;
}

static int amdgpu_cgs_import_gpu_mem(void *cgs_device, int dmabuf_fd,
                                     cgs_handle_t *handle)
{
        CGS_FUNC_ADEV;
        int r;
        uint32_t dma_handle;
        struct drm_gem_object *obj;
        struct amdgpu_bo *bo;
        struct drm_device *dev = adev->ddev;
        struct drm_file *file_priv = NULL, *priv;

        /* find the drm_file belonging to the calling process */
        mutex_lock(&dev->struct_mutex);
        list_for_each_entry(priv, &dev->filelist, lhead) {
                rcu_read_lock();
                if (priv->pid == task_pid(current))
                        file_priv = priv;
                rcu_read_unlock();
                if (file_priv)
                        break;
        }
        mutex_unlock(&dev->struct_mutex);
        if (!file_priv)
                return -EINVAL;
        r = dev->driver->prime_fd_to_handle(dev,
                                            file_priv, dmabuf_fd,
                                            &dma_handle);
        if (r)
                return r;
        spin_lock(&file_priv->table_lock);

        /* Check if we currently have a reference on the object */
        obj = idr_find(&file_priv->object_idr, dma_handle);
        if (obj == NULL) {
                spin_unlock(&file_priv->table_lock);
                return -EINVAL;
        }
        spin_unlock(&file_priv->table_lock);
        bo = gem_to_amdgpu_bo(obj);
        *handle = (cgs_handle_t)bo;
        return 0;
}

static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
        struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

        if (obj) {
                int r = amdgpu_bo_reserve(obj, false);
                if (likely(r == 0)) {
                        amdgpu_bo_kunmap(obj);
                        amdgpu_bo_unpin(obj);
                        amdgpu_bo_unreserve(obj);
                }
                amdgpu_bo_unref(&obj);
        }
        return 0;
}

static int amdgpu_cgs_gmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
                                   uint64_t *mcaddr)
{
        int r;
        u64 min_offset, max_offset;
        struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

        WARN_ON_ONCE(obj->placement.num_placement > 1);

        /* widen before shifting: fpfn/lpfn are 32-bit page numbers */
        min_offset = (u64)obj->placements[0].fpfn << PAGE_SHIFT;
        max_offset = (u64)obj->placements[0].lpfn << PAGE_SHIFT;

        r = amdgpu_bo_reserve(obj, false);
        if (unlikely(r != 0))
                return r;
        r = amdgpu_bo_pin_restricted(obj, AMDGPU_GEM_DOMAIN_GTT,
                                     min_offset, max_offset, mcaddr);
        amdgpu_bo_unreserve(obj);
        return r;
}

static int amdgpu_cgs_gunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
        int r;
        struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

        r = amdgpu_bo_reserve(obj, false);
        if (unlikely(r != 0))
                return r;
        r = amdgpu_bo_unpin(obj);
        amdgpu_bo_unreserve(obj);
        return r;
}

static int amdgpu_cgs_kmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
                                   void **map)
{
        int r;
        struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

        r = amdgpu_bo_reserve(obj, false);
        if (unlikely(r != 0))
                return r;
        r = amdgpu_bo_kmap(obj, map);
        amdgpu_bo_unreserve(obj);
        return r;
}

static int amdgpu_cgs_kunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
        int r;
        struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

        r = amdgpu_bo_reserve(obj, false);
        if (unlikely(r != 0))
                return r;
        amdgpu_bo_kunmap(obj);
        amdgpu_bo_unreserve(obj);
        return r;
}

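/*
 * The helpers above form one buffer lifecycle:
 * alloc -> gmap (GPU address) -> kmap (CPU address) -> use ->
 * kunmap -> gunmap -> free. A minimal sketch with hypothetical
 * parameters, error handling elided (every call returns 0 on
 * success):
 *
 *      cgs_handle_t handle;
 *      uint64_t mcaddr;
 *      void *cpu_ptr;
 *
 *      amdgpu_cgs_alloc_gpu_mem(cgs_device,
 *                               CGS_GPU_MEM_TYPE__GART_CACHEABLE,
 *                               PAGE_SIZE, PAGE_SIZE,
 *                               0, adev->mc.gtt_size, &handle);
 *      amdgpu_cgs_gmap_gpu_mem(cgs_device, handle, &mcaddr);
 *      amdgpu_cgs_kmap_gpu_mem(cgs_device, handle, &cpu_ptr);
 *      memset(cpu_ptr, 0, PAGE_SIZE);
 *      amdgpu_cgs_kunmap_gpu_mem(cgs_device, handle);
 *      amdgpu_cgs_gunmap_gpu_mem(cgs_device, handle);
 *      amdgpu_cgs_free_gpu_mem(cgs_device, handle);
 */
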
static uint32_t amdgpu_cgs_read_register(void *cgs_device, unsigned offset)
{
        CGS_FUNC_ADEV;
        return RREG32(offset);
}

static void amdgpu_cgs_write_register(void *cgs_device, unsigned offset,
                                      uint32_t value)
{
        CGS_FUNC_ADEV;
        WREG32(offset, value);
}

static uint32_t amdgpu_cgs_read_ind_register(void *cgs_device,
                                             enum cgs_ind_reg space,
                                             unsigned index)
{
        CGS_FUNC_ADEV;
        switch (space) {
        case CGS_IND_REG__MMIO:
                return RREG32_IDX(index);
        case CGS_IND_REG__PCIE:
                return RREG32_PCIE(index);
        case CGS_IND_REG__SMC:
                return RREG32_SMC(index);
        case CGS_IND_REG__UVD_CTX:
                return RREG32_UVD_CTX(index);
        case CGS_IND_REG__DIDT:
                return RREG32_DIDT(index);
        case CGS_IND_REG__AUDIO_ENDPT:
                DRM_ERROR("audio endpt register access not implemented.\n");
                return 0;
        }
        WARN(1, "Invalid indirect register space");
        return 0;
}

static void amdgpu_cgs_write_ind_register(void *cgs_device,
                                          enum cgs_ind_reg space,
                                          unsigned index, uint32_t value)
{
        CGS_FUNC_ADEV;
        switch (space) {
        case CGS_IND_REG__MMIO:
                return WREG32_IDX(index, value);
        case CGS_IND_REG__PCIE:
                return WREG32_PCIE(index, value);
        case CGS_IND_REG__SMC:
                return WREG32_SMC(index, value);
        case CGS_IND_REG__UVD_CTX:
                return WREG32_UVD_CTX(index, value);
        case CGS_IND_REG__DIDT:
                return WREG32_DIDT(index, value);
        case CGS_IND_REG__AUDIO_ENDPT:
                DRM_ERROR("audio endpt register access not implemented.\n");
                return;
        }
        WARN(1, "Invalid indirect register space");
}

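/*
 * Direct MMIO registers go through read/write_register; banked
 * register files (SMC, PCIE, DIDT, ...) go through the _ind_
 * variants, which dispatch on the cgs_ind_reg space. A hypothetical
 * read-modify-write of an SMC register, with "reg" and "mask" purely
 * illustrative:
 *
 *      uint32_t v;
 *
 *      v = amdgpu_cgs_read_ind_register(cgs_device, CGS_IND_REG__SMC, reg);
 *      v |= mask;
 *      amdgpu_cgs_write_ind_register(cgs_device, CGS_IND_REG__SMC, reg, v);
 */
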
static uint8_t amdgpu_cgs_read_pci_config_byte(void *cgs_device, unsigned addr)
{
        CGS_FUNC_ADEV;
        uint8_t val;
        int ret = pci_read_config_byte(adev->pdev, addr, &val);
        if (WARN(ret, "pci_read_config_byte error"))
                return 0;
        return val;
}

static uint16_t amdgpu_cgs_read_pci_config_word(void *cgs_device, unsigned addr)
{
        CGS_FUNC_ADEV;
        uint16_t val;
        int ret = pci_read_config_word(adev->pdev, addr, &val);
        if (WARN(ret, "pci_read_config_word error"))
                return 0;
        return val;
}

static uint32_t amdgpu_cgs_read_pci_config_dword(void *cgs_device,
                                                 unsigned addr)
{
        CGS_FUNC_ADEV;
        uint32_t val;
        int ret = pci_read_config_dword(adev->pdev, addr, &val);
        if (WARN(ret, "pci_read_config_dword error"))
                return 0;
        return val;
}

static void amdgpu_cgs_write_pci_config_byte(void *cgs_device, unsigned addr,
                                             uint8_t value)
{
        CGS_FUNC_ADEV;
        int ret = pci_write_config_byte(adev->pdev, addr, value);
        WARN(ret, "pci_write_config_byte error");
}

static void amdgpu_cgs_write_pci_config_word(void *cgs_device, unsigned addr,
                                             uint16_t value)
{
        CGS_FUNC_ADEV;
        int ret = pci_write_config_word(adev->pdev, addr, value);
        WARN(ret, "pci_write_config_word error");
}

static void amdgpu_cgs_write_pci_config_dword(void *cgs_device, unsigned addr,
                                              uint32_t value)
{
        CGS_FUNC_ADEV;
        int ret = pci_write_config_dword(adev->pdev, addr, value);
        WARN(ret, "pci_write_config_dword error");
}

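/*
 * The PCI config helpers mirror pci_read/write_config_*() on the
 * device's own config space. Illustrative only: offset 0x0 is the
 * standard 16-bit vendor ID, so
 *
 *      uint16_t vendor =
 *              amdgpu_cgs_read_pci_config_word(cgs_device, 0x0);
 *
 * would return 0x1002 on an AMD part (or 0 if the read fails).
 */
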
static const void *amdgpu_cgs_atom_get_data_table(void *cgs_device,
                                                  unsigned table, uint16_t *size,
                                                  uint8_t *frev, uint8_t *crev)
{
        CGS_FUNC_ADEV;
        uint16_t data_start;

        if (amdgpu_atom_parse_data_header(
                    adev->mode_info.atom_context, table, size,
                    frev, crev, &data_start))
                return (uint8_t *)adev->mode_info.atom_context->bios +
                        data_start;

        return NULL;
}

static int amdgpu_cgs_atom_get_cmd_table_revs(void *cgs_device, unsigned table,
                                              uint8_t *frev, uint8_t *crev)
{
        CGS_FUNC_ADEV;

        if (amdgpu_atom_parse_cmd_header(
                    adev->mode_info.atom_context, table,
                    frev, crev))
                return 0;

        return -EINVAL;
}

static int amdgpu_cgs_atom_exec_cmd_table(void *cgs_device, unsigned table,
                                          void *args)
{
        CGS_FUNC_ADEV;

        return amdgpu_atom_execute_table(
                adev->mode_info.atom_context, table, args);
}

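/*
 * The ATOM hooks expose the BIOS interpreter: get_data_table returns
 * a pointer into the BIOS image, get_cmd_table_revs reports a command
 * table's revisions, and exec_cmd_table runs one. A hedged sketch,
 * where "table" and "args" stand in for a real table index and its
 * parameter structure:
 *
 *      uint8_t frev, crev;
 *
 *      if (!amdgpu_cgs_atom_get_cmd_table_revs(cgs_device, table,
 *                                              &frev, &crev))
 *              amdgpu_cgs_atom_exec_cmd_table(cgs_device, table, &args);
 */
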
static int amdgpu_cgs_create_pm_request(void *cgs_device, cgs_handle_t *request)
{
        /* TODO */
        return 0;
}

static int amdgpu_cgs_destroy_pm_request(void *cgs_device, cgs_handle_t request)
{
        /* TODO */
        return 0;
}

static int amdgpu_cgs_set_pm_request(void *cgs_device, cgs_handle_t request,
                                     int active)
{
        /* TODO */
        return 0;
}

static int amdgpu_cgs_pm_request_clock(void *cgs_device, cgs_handle_t request,
                                       enum cgs_clock clock, unsigned freq)
{
        /* TODO */
        return 0;
}

static int amdgpu_cgs_pm_request_engine(void *cgs_device, cgs_handle_t request,
                                        enum cgs_engine engine, int powered)
{
        /* TODO */
        return 0;
}

static int amdgpu_cgs_pm_query_clock_limits(void *cgs_device,
                                            enum cgs_clock clock,
                                            struct cgs_clock_limits *limits)
{
        /* TODO */
        return 0;
}

static int amdgpu_cgs_set_camera_voltages(void *cgs_device, uint32_t mask,
                                          const uint32_t *voltages)
{
        DRM_ERROR("not implemented\n");
        return -EPERM;
}

struct cgs_irq_params {
        unsigned src_id;
        cgs_irq_source_set_func_t set;
        cgs_irq_handler_func_t handler;
        void *private_data;
};

static int cgs_set_irq_state(struct amdgpu_device *adev,
                             struct amdgpu_irq_src *src,
                             unsigned type,
                             enum amdgpu_interrupt_state state)
{
        struct cgs_irq_params *irq_params =
                (struct cgs_irq_params *)src->data;
        if (!irq_params)
                return -EINVAL;
        if (!irq_params->set)
                return -EINVAL;
        return irq_params->set(irq_params->private_data,
                               irq_params->src_id,
                               type,
                               (int)state);
}

static int cgs_process_irq(struct amdgpu_device *adev,
                           struct amdgpu_irq_src *source,
                           struct amdgpu_iv_entry *entry)
{
        struct cgs_irq_params *irq_params =
                (struct cgs_irq_params *)source->data;
        if (!irq_params)
                return -EINVAL;
        if (!irq_params->handler)
                return -EINVAL;
        return irq_params->handler(irq_params->private_data,
                                   irq_params->src_id,
                                   entry->iv_entry);
}

static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
        .set = cgs_set_irq_state,
        .process = cgs_process_irq,
};

static int amdgpu_cgs_add_irq_source(void *cgs_device, unsigned src_id,
                                     unsigned num_types,
                                     cgs_irq_source_set_func_t set,
                                     cgs_irq_handler_func_t handler,
                                     void *private_data)
{
        CGS_FUNC_ADEV;
        int ret = 0;
        struct cgs_irq_params *irq_params;
        struct amdgpu_irq_src *source =
                kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
        if (!source)
                return -ENOMEM;
        irq_params =
                kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL);
        if (!irq_params) {
                kfree(source);
                return -ENOMEM;
        }
        source->num_types = num_types;
        source->funcs = &cgs_irq_funcs;
        irq_params->src_id = src_id;
        irq_params->set = set;
        irq_params->handler = handler;
        irq_params->private_data = private_data;
        source->data = (void *)irq_params;
        ret = amdgpu_irq_add_id(adev, src_id, source);
        if (ret) {
                kfree(irq_params);
                kfree(source);
        }

        return ret;
}

static int amdgpu_cgs_irq_get(void *cgs_device, unsigned src_id, unsigned type)
{
        CGS_FUNC_ADEV;
        return amdgpu_irq_get(adev, adev->irq.sources[src_id], type);
}

static int amdgpu_cgs_irq_put(void *cgs_device, unsigned src_id, unsigned type)
{
        CGS_FUNC_ADEV;
        return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
}

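/*
 * add_irq_source stores the client's callbacks in a cgs_irq_params
 * trampoline (dispatched by cgs_set_irq_state/cgs_process_irq above);
 * irq_get/irq_put then enable and disable one type of the registered
 * source. A minimal sketch, where my_set, my_handler and my_data are
 * hypothetical client-supplied callbacks and context:
 *
 *      amdgpu_cgs_add_irq_source(cgs_device, src_id, 1,
 *                                my_set, my_handler, my_data);
 *      amdgpu_cgs_irq_get(cgs_device, src_id, 0);
 *      ... interrupts are now delivered to my_handler ...
 *      amdgpu_cgs_irq_put(cgs_device, src_id, 0);
 */
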
int amdgpu_cgs_set_clockgating_state(void *cgs_device,
                                     enum amd_ip_block_type block_type,
                                     enum amd_clockgating_state state)
{
        CGS_FUNC_ADEV;
        int i, r = -EINVAL;     /* returned when no matching IP block is found */

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_block_status[i].valid)
                        continue;

                if (adev->ip_blocks[i].type == block_type) {
                        r = adev->ip_blocks[i].funcs->set_clockgating_state(
                                (void *)adev,
                                state);
                        break;
                }
        }
        return r;
}

int amdgpu_cgs_set_powergating_state(void *cgs_device,
                                     enum amd_ip_block_type block_type,
                                     enum amd_powergating_state state)
{
        CGS_FUNC_ADEV;
        int i, r = -EINVAL;     /* returned when no matching IP block is found */

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_block_status[i].valid)
                        continue;

                if (adev->ip_blocks[i].type == block_type) {
                        r = adev->ip_blocks[i].funcs->set_powergating_state(
                                (void *)adev,
                                state);
                        break;
                }
        }
        return r;
}

static uint32_t fw_type_convert(void *cgs_device, uint32_t fw_type)
{
        CGS_FUNC_ADEV;
        enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;

        switch (fw_type) {
        case CGS_UCODE_ID_SDMA0:
                result = AMDGPU_UCODE_ID_SDMA0;
                break;
        case CGS_UCODE_ID_SDMA1:
                result = AMDGPU_UCODE_ID_SDMA1;
                break;
        case CGS_UCODE_ID_CP_CE:
                result = AMDGPU_UCODE_ID_CP_CE;
                break;
        case CGS_UCODE_ID_CP_PFP:
                result = AMDGPU_UCODE_ID_CP_PFP;
                break;
        case CGS_UCODE_ID_CP_ME:
                result = AMDGPU_UCODE_ID_CP_ME;
                break;
        case CGS_UCODE_ID_CP_MEC:
        case CGS_UCODE_ID_CP_MEC_JT1:
                result = AMDGPU_UCODE_ID_CP_MEC1;
                break;
        case CGS_UCODE_ID_CP_MEC_JT2:
                if (adev->asic_type == CHIP_TONGA)
                        result = AMDGPU_UCODE_ID_CP_MEC2;
                else if (adev->asic_type == CHIP_CARRIZO)
                        result = AMDGPU_UCODE_ID_CP_MEC1;
                break;
        case CGS_UCODE_ID_RLC_G:
                result = AMDGPU_UCODE_ID_RLC_G;
                break;
        default:
                DRM_ERROR("Firmware type not supported\n");
        }
        return result;
}

static int amdgpu_cgs_get_firmware_info(void *cgs_device,
                                        enum cgs_ucode_id type,
                                        struct cgs_firmware_info *info)
{
        CGS_FUNC_ADEV;

        if (type != CGS_UCODE_ID_SMU) {
                uint64_t gpu_addr;
                uint32_t data_size;
                const struct gfx_firmware_header_v1_0 *header;
                enum AMDGPU_UCODE_ID id;
                struct amdgpu_firmware_info *ucode;

                id = fw_type_convert(cgs_device, type);
                if (id >= AMDGPU_UCODE_ID_MAXIMUM)
                        return -EINVAL;
                ucode = &adev->firmware.ucode[id];
                if (ucode->fw == NULL)
                        return -EINVAL;

                gpu_addr = ucode->mc_addr;
                header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
                data_size = le32_to_cpu(header->header.ucode_size_bytes);

                if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
                    (type == CGS_UCODE_ID_CP_MEC_JT2)) {
                        gpu_addr += le32_to_cpu(header->jt_offset) << 2;
                        data_size = le32_to_cpu(header->jt_size) << 2;
                }
                info->mc_addr = gpu_addr;
                info->image_size = data_size;
                info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
                info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
        } else {
                char fw_name[30] = {0};
                int err = 0;
                uint32_t ucode_size;
                uint32_t ucode_start_address;
                const uint8_t *src;
                const struct smc_firmware_header_v1_0 *hdr;

                switch (adev->asic_type) {
                case CHIP_TONGA:
                        strcpy(fw_name, "amdgpu/tonga_smc.bin");
                        break;
                default:
                        DRM_ERROR("SMC firmware not supported\n");
                        return -EINVAL;
                }

                err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
                if (err) {
                        DRM_ERROR("Failed to request firmware\n");
                        return err;
                }

                err = amdgpu_ucode_validate(adev->pm.fw);
                if (err) {
                        DRM_ERROR("Failed to load firmware \"%s\"\n", fw_name);
                        release_firmware(adev->pm.fw);
                        adev->pm.fw = NULL;
                        return err;
                }

                hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
                adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
                ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
                ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
                src = (const uint8_t *)(adev->pm.fw->data +
                                        le32_to_cpu(hdr->header.ucode_array_offset_bytes));

                info->version = adev->pm.fw_version;
                info->image_size = ucode_size;
                info->kptr = (void *)src;
        }
        return 0;
}

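/*
 * Usage sketch for get_firmware_info (illustrative only): for the
 * non-SMC ucode types the GPU address and size of the already loaded
 * firmware come back, while CGS_UCODE_ID_SMU fetches the SMC image
 * from userspace firmware loading and returns a CPU pointer instead:
 *
 *      struct cgs_firmware_info info;
 *
 *      if (!amdgpu_cgs_get_firmware_info(cgs_device,
 *                                        CGS_UCODE_ID_CP_ME, &info))
 *              ... program info.mc_addr / info.image_size ...
 */
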
/* positional initializers: entries must stay in struct cgs_ops order */
static const struct cgs_ops amdgpu_cgs_ops = {
        amdgpu_cgs_gpu_mem_info,
        amdgpu_cgs_gmap_kmem,
        amdgpu_cgs_gunmap_kmem,
        amdgpu_cgs_alloc_gpu_mem,
        amdgpu_cgs_free_gpu_mem,
        amdgpu_cgs_gmap_gpu_mem,
        amdgpu_cgs_gunmap_gpu_mem,
        amdgpu_cgs_kmap_gpu_mem,
        amdgpu_cgs_kunmap_gpu_mem,
        amdgpu_cgs_read_register,
        amdgpu_cgs_write_register,
        amdgpu_cgs_read_ind_register,
        amdgpu_cgs_write_ind_register,
        amdgpu_cgs_read_pci_config_byte,
        amdgpu_cgs_read_pci_config_word,
        amdgpu_cgs_read_pci_config_dword,
        amdgpu_cgs_write_pci_config_byte,
        amdgpu_cgs_write_pci_config_word,
        amdgpu_cgs_write_pci_config_dword,
        amdgpu_cgs_atom_get_data_table,
        amdgpu_cgs_atom_get_cmd_table_revs,
        amdgpu_cgs_atom_exec_cmd_table,
        amdgpu_cgs_create_pm_request,
        amdgpu_cgs_destroy_pm_request,
        amdgpu_cgs_set_pm_request,
        amdgpu_cgs_pm_request_clock,
        amdgpu_cgs_pm_request_engine,
        amdgpu_cgs_pm_query_clock_limits,
        amdgpu_cgs_set_camera_voltages,
        amdgpu_cgs_get_firmware_info,
        amdgpu_cgs_set_powergating_state,
        amdgpu_cgs_set_clockgating_state
};

static const struct cgs_os_ops amdgpu_cgs_os_ops = {
        amdgpu_cgs_import_gpu_mem,
        amdgpu_cgs_add_irq_source,
        amdgpu_cgs_irq_get,
        amdgpu_cgs_irq_put
};

void *amdgpu_cgs_create_device(struct amdgpu_device *adev)
{
        struct amdgpu_cgs_device *cgs_device =
                kmalloc(sizeof(*cgs_device), GFP_KERNEL);

        if (!cgs_device) {
                DRM_ERROR("Couldn't allocate CGS device structure\n");
                return NULL;
        }

        cgs_device->base.ops = &amdgpu_cgs_ops;
        cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
        cgs_device->adev = adev;

        return cgs_device;
}

void amdgpu_cgs_destroy_device(void *cgs_device)
{
        kfree(cgs_device);
}
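
/*
 * A minimal usage sketch (hypothetical caller): the returned pointer
 * doubles as the opaque cgs_device argument that every callback in
 * this file expects.
 *
 *      void *cgs_device = amdgpu_cgs_create_device(adev);
 *
 *      if (cgs_device) {
 *              ... hand cgs_device to a CGS client, e.g. an SMU module ...
 *              amdgpu_cgs_destroy_device(cgs_device);
 *      }
 */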