diff options
author | Alex Deucher <alexander.deucher@amd.com> | 2015-04-20 16:55:21 -0400 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2015-06-03 21:03:15 -0400 |
commit | d38ceaf99ed015f2a0b9af3499791bd3a3daae21 (patch) | |
tree | c8e237ea218e8ed8a5f64c1654fc01fe5d2239cb /drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | |
parent | 97b2e202fba05b87d720318a6500a337100dab4d (diff) |
drm/amdgpu: add core driver (v4)
This adds the non-asic specific core driver code.
v2: remove extra kconfig option
v3: implement minor fixes from Fengguang Wu
v4: fix cast in amdgpu_ucode.c
Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Jammy Zhou <Jammy.Zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 674 |
1 file changed, 674 insertions, 0 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c new file mode 100644 index 000000000000..c271da34998d --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | |||
@@ -0,0 +1,674 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
3 | * Copyright 2008 Red Hat Inc. | ||
4 | * Copyright 2009 Jerome Glisse. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice shall be included in | ||
14 | * all copies or substantial portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | * Authors: Dave Airlie | ||
25 | * Alex Deucher | ||
26 | * Jerome Glisse | ||
27 | */ | ||
28 | #include <drm/drmP.h> | ||
29 | #include "amdgpu.h" | ||
30 | #include <drm/amdgpu_drm.h> | ||
31 | #include "amdgpu_uvd.h" | ||
32 | #include "amdgpu_vce.h" | ||
33 | |||
34 | #include <linux/vga_switcheroo.h> | ||
35 | #include <linux/slab.h> | ||
36 | #include <linux/pm_runtime.h> | ||
37 | |||
38 | #if defined(CONFIG_VGA_SWITCHEROO) | ||
39 | bool amdgpu_has_atpx(void); | ||
40 | #else | ||
41 | static inline bool amdgpu_has_atpx(void) { return false; } | ||
42 | #endif | ||
43 | |||
44 | /** | ||
45 | * amdgpu_driver_unload_kms - Main unload function for KMS. | ||
46 | * | ||
47 | * @dev: drm dev pointer | ||
48 | * | ||
49 | * This is the main unload function for KMS (all asics). | ||
50 | * Returns 0 on success. | ||
51 | */ | ||
52 | int amdgpu_driver_unload_kms(struct drm_device *dev) | ||
53 | { | ||
54 | struct amdgpu_device *adev = dev->dev_private; | ||
55 | |||
56 | if (adev == NULL) | ||
57 | return 0; | ||
58 | |||
59 | if (adev->rmmio == NULL) | ||
60 | goto done_free; | ||
61 | |||
62 | pm_runtime_get_sync(dev->dev); | ||
63 | |||
64 | amdgpu_acpi_fini(adev); | ||
65 | |||
66 | amdgpu_device_fini(adev); | ||
67 | |||
68 | done_free: | ||
69 | kfree(adev); | ||
70 | dev->dev_private = NULL; | ||
71 | return 0; | ||
72 | } | ||
73 | |||
74 | /** | ||
75 | * amdgpu_driver_load_kms - Main load function for KMS. | ||
76 | * | ||
77 | * @dev: drm dev pointer | ||
78 | * @flags: device flags | ||
79 | * | ||
80 | * This is the main load function for KMS (all asics). | ||
81 | * Returns 0 on success, error on failure. | ||
82 | */ | ||
83 | int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) | ||
84 | { | ||
85 | struct amdgpu_device *adev; | ||
86 | int r, acpi_status; | ||
87 | |||
88 | adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL); | ||
89 | if (adev == NULL) { | ||
90 | return -ENOMEM; | ||
91 | } | ||
92 | dev->dev_private = (void *)adev; | ||
93 | |||
94 | if ((amdgpu_runtime_pm != 0) && | ||
95 | amdgpu_has_atpx() && | ||
96 | ((flags & AMDGPU_IS_APU) == 0)) | ||
97 | flags |= AMDGPU_IS_PX; | ||
98 | |||
99 | /* amdgpu_device_init should report only fatal error | ||
100 | * like memory allocation failure or iomapping failure, | ||
101 | * or memory manager initialization failure, it must | ||
102 | * properly initialize the GPU MC controller and permit | ||
103 | * VRAM allocation | ||
104 | */ | ||
105 | r = amdgpu_device_init(adev, dev, dev->pdev, flags); | ||
106 | if (r) { | ||
107 | dev_err(&dev->pdev->dev, "Fatal error during GPU init\n"); | ||
108 | goto out; | ||
109 | } | ||
110 | |||
111 | /* Call ACPI methods: require modeset init | ||
112 | * but failure is not fatal | ||
113 | */ | ||
114 | if (!r) { | ||
115 | acpi_status = amdgpu_acpi_init(adev); | ||
116 | if (acpi_status) | ||
117 | dev_dbg(&dev->pdev->dev, | ||
118 | "Error during ACPI methods call\n"); | ||
119 | } | ||
120 | |||
121 | if (amdgpu_device_is_px(dev)) { | ||
122 | pm_runtime_use_autosuspend(dev->dev); | ||
123 | pm_runtime_set_autosuspend_delay(dev->dev, 5000); | ||
124 | pm_runtime_set_active(dev->dev); | ||
125 | pm_runtime_allow(dev->dev); | ||
126 | pm_runtime_mark_last_busy(dev->dev); | ||
127 | pm_runtime_put_autosuspend(dev->dev); | ||
128 | } | ||
129 | |||
130 | out: | ||
131 | if (r) | ||
132 | amdgpu_driver_unload_kms(dev); | ||
133 | |||
134 | |||
135 | return r; | ||
136 | } | ||
137 | |||
138 | /* | ||
139 | * Userspace get information ioctl | ||
140 | */ | ||
141 | /** | ||
142 | * amdgpu_info_ioctl - answer a device specific request. | ||
143 | * | ||
144 | * @adev: amdgpu device pointer | ||
145 | * @data: request object | ||
146 | * @filp: drm filp | ||
147 | * | ||
148 | * This function is used to pass device specific parameters to the userspace | ||
149 | * drivers. Examples include: pci device id, pipeline parms, tiling params, | ||
150 | * etc. (all asics). | ||
151 | * Returns 0 on success, -EINVAL on failure. | ||
152 | */ | ||
153 | static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | ||
154 | { | ||
155 | struct amdgpu_device *adev = dev->dev_private; | ||
156 | struct drm_amdgpu_info *info = data; | ||
157 | struct amdgpu_mode_info *minfo = &adev->mode_info; | ||
158 | void __user *out = (void __user *)(long)info->return_pointer; | ||
159 | uint32_t size = info->return_size; | ||
160 | struct drm_crtc *crtc; | ||
161 | uint32_t ui32 = 0; | ||
162 | uint64_t ui64 = 0; | ||
163 | int i, found; | ||
164 | |||
165 | if (!info->return_size || !info->return_pointer) | ||
166 | return -EINVAL; | ||
167 | |||
168 | switch (info->query) { | ||
169 | case AMDGPU_INFO_ACCEL_WORKING: | ||
170 | ui32 = adev->accel_working; | ||
171 | return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0; | ||
172 | case AMDGPU_INFO_CRTC_FROM_ID: | ||
173 | for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) { | ||
174 | crtc = (struct drm_crtc *)minfo->crtcs[i]; | ||
175 | if (crtc && crtc->base.id == info->mode_crtc.id) { | ||
176 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
177 | ui32 = amdgpu_crtc->crtc_id; | ||
178 | found = 1; | ||
179 | break; | ||
180 | } | ||
181 | } | ||
182 | if (!found) { | ||
183 | DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id); | ||
184 | return -EINVAL; | ||
185 | } | ||
186 | return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0; | ||
187 | case AMDGPU_INFO_HW_IP_INFO: { | ||
188 | struct drm_amdgpu_info_hw_ip ip = {}; | ||
189 | enum amdgpu_ip_block_type type; | ||
190 | uint32_t ring_mask = 0; | ||
191 | |||
192 | if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT) | ||
193 | return -EINVAL; | ||
194 | |||
195 | switch (info->query_hw_ip.type) { | ||
196 | case AMDGPU_HW_IP_GFX: | ||
197 | type = AMDGPU_IP_BLOCK_TYPE_GFX; | ||
198 | for (i = 0; i < adev->gfx.num_gfx_rings; i++) | ||
199 | ring_mask |= ((adev->gfx.gfx_ring[i].ready ? 1 : 0) << i); | ||
200 | break; | ||
201 | case AMDGPU_HW_IP_COMPUTE: | ||
202 | type = AMDGPU_IP_BLOCK_TYPE_GFX; | ||
203 | for (i = 0; i < adev->gfx.num_compute_rings; i++) | ||
204 | ring_mask |= ((adev->gfx.compute_ring[i].ready ? 1 : 0) << i); | ||
205 | break; | ||
206 | case AMDGPU_HW_IP_DMA: | ||
207 | type = AMDGPU_IP_BLOCK_TYPE_SDMA; | ||
208 | ring_mask = adev->sdma[0].ring.ready ? 1 : 0; | ||
209 | ring_mask |= ((adev->sdma[1].ring.ready ? 1 : 0) << 1); | ||
210 | break; | ||
211 | case AMDGPU_HW_IP_UVD: | ||
212 | type = AMDGPU_IP_BLOCK_TYPE_UVD; | ||
213 | ring_mask = adev->uvd.ring.ready ? 1 : 0; | ||
214 | break; | ||
215 | case AMDGPU_HW_IP_VCE: | ||
216 | type = AMDGPU_IP_BLOCK_TYPE_VCE; | ||
217 | for (i = 0; i < AMDGPU_MAX_VCE_RINGS; i++) | ||
218 | ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i); | ||
219 | break; | ||
220 | default: | ||
221 | return -EINVAL; | ||
222 | } | ||
223 | |||
224 | for (i = 0; i < adev->num_ip_blocks; i++) { | ||
225 | if (adev->ip_blocks[i].type == type && | ||
226 | adev->ip_block_enabled[i]) { | ||
227 | ip.hw_ip_version_major = adev->ip_blocks[i].major; | ||
228 | ip.hw_ip_version_minor = adev->ip_blocks[i].minor; | ||
229 | ip.capabilities_flags = 0; | ||
230 | ip.available_rings = ring_mask; | ||
231 | break; | ||
232 | } | ||
233 | } | ||
234 | return copy_to_user(out, &ip, | ||
235 | min((size_t)size, sizeof(ip))) ? -EFAULT : 0; | ||
236 | } | ||
237 | case AMDGPU_INFO_HW_IP_COUNT: { | ||
238 | enum amdgpu_ip_block_type type; | ||
239 | uint32_t count = 0; | ||
240 | |||
241 | switch (info->query_hw_ip.type) { | ||
242 | case AMDGPU_HW_IP_GFX: | ||
243 | type = AMDGPU_IP_BLOCK_TYPE_GFX; | ||
244 | break; | ||
245 | case AMDGPU_HW_IP_COMPUTE: | ||
246 | type = AMDGPU_IP_BLOCK_TYPE_GFX; | ||
247 | break; | ||
248 | case AMDGPU_HW_IP_DMA: | ||
249 | type = AMDGPU_IP_BLOCK_TYPE_SDMA; | ||
250 | break; | ||
251 | case AMDGPU_HW_IP_UVD: | ||
252 | type = AMDGPU_IP_BLOCK_TYPE_UVD; | ||
253 | break; | ||
254 | case AMDGPU_HW_IP_VCE: | ||
255 | type = AMDGPU_IP_BLOCK_TYPE_VCE; | ||
256 | break; | ||
257 | default: | ||
258 | return -EINVAL; | ||
259 | } | ||
260 | |||
261 | for (i = 0; i < adev->num_ip_blocks; i++) | ||
262 | if (adev->ip_blocks[i].type == type && | ||
263 | adev->ip_block_enabled[i] && | ||
264 | count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT) | ||
265 | count++; | ||
266 | |||
267 | return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0; | ||
268 | } | ||
269 | case AMDGPU_INFO_TIMESTAMP: | ||
270 | ui64 = amdgpu_asic_get_gpu_clock_counter(adev); | ||
271 | return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; | ||
272 | case AMDGPU_INFO_FW_VERSION: { | ||
273 | struct drm_amdgpu_info_firmware fw_info; | ||
274 | |||
275 | /* We only support one instance of each IP block right now. */ | ||
276 | if (info->query_fw.ip_instance != 0) | ||
277 | return -EINVAL; | ||
278 | |||
279 | switch (info->query_fw.fw_type) { | ||
280 | case AMDGPU_INFO_FW_VCE: | ||
281 | fw_info.ver = adev->vce.fw_version; | ||
282 | fw_info.feature = adev->vce.fb_version; | ||
283 | break; | ||
284 | case AMDGPU_INFO_FW_UVD: | ||
285 | fw_info.ver = 0; | ||
286 | fw_info.feature = 0; | ||
287 | break; | ||
288 | case AMDGPU_INFO_FW_GMC: | ||
289 | fw_info.ver = adev->mc.fw_version; | ||
290 | fw_info.feature = 0; | ||
291 | break; | ||
292 | case AMDGPU_INFO_FW_GFX_ME: | ||
293 | fw_info.ver = adev->gfx.me_fw_version; | ||
294 | fw_info.feature = 0; | ||
295 | break; | ||
296 | case AMDGPU_INFO_FW_GFX_PFP: | ||
297 | fw_info.ver = adev->gfx.pfp_fw_version; | ||
298 | fw_info.feature = 0; | ||
299 | break; | ||
300 | case AMDGPU_INFO_FW_GFX_CE: | ||
301 | fw_info.ver = adev->gfx.ce_fw_version; | ||
302 | fw_info.feature = 0; | ||
303 | break; | ||
304 | case AMDGPU_INFO_FW_GFX_RLC: | ||
305 | fw_info.ver = adev->gfx.rlc_fw_version; | ||
306 | fw_info.feature = 0; | ||
307 | break; | ||
308 | case AMDGPU_INFO_FW_GFX_MEC: | ||
309 | if (info->query_fw.index == 0) | ||
310 | fw_info.ver = adev->gfx.mec_fw_version; | ||
311 | else if (info->query_fw.index == 1) | ||
312 | fw_info.ver = adev->gfx.mec2_fw_version; | ||
313 | else | ||
314 | return -EINVAL; | ||
315 | fw_info.feature = 0; | ||
316 | break; | ||
317 | case AMDGPU_INFO_FW_SMC: | ||
318 | fw_info.ver = adev->pm.fw_version; | ||
319 | fw_info.feature = 0; | ||
320 | break; | ||
321 | case AMDGPU_INFO_FW_SDMA: | ||
322 | if (info->query_fw.index >= 2) | ||
323 | return -EINVAL; | ||
324 | fw_info.ver = adev->sdma[info->query_fw.index].fw_version; | ||
325 | fw_info.feature = 0; | ||
326 | break; | ||
327 | default: | ||
328 | return -EINVAL; | ||
329 | } | ||
330 | return copy_to_user(out, &fw_info, | ||
331 | min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0; | ||
332 | } | ||
333 | case AMDGPU_INFO_NUM_BYTES_MOVED: | ||
334 | ui64 = atomic64_read(&adev->num_bytes_moved); | ||
335 | return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; | ||
336 | case AMDGPU_INFO_VRAM_USAGE: | ||
337 | ui64 = atomic64_read(&adev->vram_usage); | ||
338 | return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; | ||
339 | case AMDGPU_INFO_VIS_VRAM_USAGE: | ||
340 | ui64 = atomic64_read(&adev->vram_vis_usage); | ||
341 | return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; | ||
342 | case AMDGPU_INFO_GTT_USAGE: | ||
343 | ui64 = atomic64_read(&adev->gtt_usage); | ||
344 | return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; | ||
345 | case AMDGPU_INFO_GDS_CONFIG: { | ||
346 | struct drm_amdgpu_info_gds gds_info; | ||
347 | |||
348 | gds_info.gds_gfx_partition_size = adev->gds.mem.gfx_partition_size >> AMDGPU_GDS_SHIFT; | ||
349 | gds_info.compute_partition_size = adev->gds.mem.cs_partition_size >> AMDGPU_GDS_SHIFT; | ||
350 | gds_info.gds_total_size = adev->gds.mem.total_size >> AMDGPU_GDS_SHIFT; | ||
351 | gds_info.gws_per_gfx_partition = adev->gds.gws.gfx_partition_size >> AMDGPU_GWS_SHIFT; | ||
352 | gds_info.gws_per_compute_partition = adev->gds.gws.cs_partition_size >> AMDGPU_GWS_SHIFT; | ||
353 | gds_info.oa_per_gfx_partition = adev->gds.oa.gfx_partition_size >> AMDGPU_OA_SHIFT; | ||
354 | gds_info.oa_per_compute_partition = adev->gds.oa.cs_partition_size >> AMDGPU_OA_SHIFT; | ||
355 | return copy_to_user(out, &gds_info, | ||
356 | min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0; | ||
357 | } | ||
358 | case AMDGPU_INFO_VRAM_GTT: { | ||
359 | struct drm_amdgpu_info_vram_gtt vram_gtt; | ||
360 | |||
361 | vram_gtt.vram_size = adev->mc.real_vram_size; | ||
362 | vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size; | ||
363 | vram_gtt.vram_cpu_accessible_size -= adev->vram_pin_size; | ||
364 | vram_gtt.gtt_size = adev->mc.gtt_size; | ||
365 | vram_gtt.gtt_size -= adev->gart_pin_size; | ||
366 | return copy_to_user(out, &vram_gtt, | ||
367 | min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0; | ||
368 | } | ||
369 | case AMDGPU_INFO_READ_MMR_REG: { | ||
370 | unsigned n, alloc_size = info->read_mmr_reg.count * 4; | ||
371 | uint32_t *regs; | ||
372 | unsigned se_num = (info->read_mmr_reg.instance >> | ||
373 | AMDGPU_INFO_MMR_SE_INDEX_SHIFT) & | ||
374 | AMDGPU_INFO_MMR_SE_INDEX_MASK; | ||
375 | unsigned sh_num = (info->read_mmr_reg.instance >> | ||
376 | AMDGPU_INFO_MMR_SH_INDEX_SHIFT) & | ||
377 | AMDGPU_INFO_MMR_SH_INDEX_MASK; | ||
378 | |||
379 | /* set full masks if the userspace set all bits | ||
380 | * in the bitfields */ | ||
381 | if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK) | ||
382 | se_num = 0xffffffff; | ||
383 | if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) | ||
384 | sh_num = 0xffffffff; | ||
385 | |||
386 | regs = kmalloc(alloc_size, GFP_KERNEL); | ||
387 | if (!regs) | ||
388 | return -ENOMEM; | ||
389 | |||
390 | for (i = 0; i < info->read_mmr_reg.count; i++) | ||
391 | if (amdgpu_asic_read_register(adev, se_num, sh_num, | ||
392 | info->read_mmr_reg.dword_offset + i, | ||
393 | ®s[i])) { | ||
394 | DRM_DEBUG_KMS("unallowed offset %#x\n", | ||
395 | info->read_mmr_reg.dword_offset + i); | ||
396 | kfree(regs); | ||
397 | return -EFAULT; | ||
398 | } | ||
399 | n = copy_to_user(out, regs, min(size, alloc_size)); | ||
400 | kfree(regs); | ||
401 | return n ? -EFAULT : 0; | ||
402 | } | ||
403 | case AMDGPU_INFO_DEV_INFO: { | ||
404 | struct drm_amdgpu_info_device dev_info; | ||
405 | struct amdgpu_cu_info cu_info; | ||
406 | |||
407 | dev_info.device_id = dev->pdev->device; | ||
408 | dev_info.chip_rev = adev->rev_id; | ||
409 | dev_info.external_rev = adev->external_rev_id; | ||
410 | dev_info.pci_rev = dev->pdev->revision; | ||
411 | dev_info.family = adev->family; | ||
412 | dev_info.num_shader_engines = adev->gfx.config.max_shader_engines; | ||
413 | dev_info.num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se; | ||
414 | /* return all clocks in KHz */ | ||
415 | dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10; | ||
416 | if (adev->pm.dpm_enabled) | ||
417 | dev_info.max_engine_clock = | ||
418 | adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10; | ||
419 | else | ||
420 | dev_info.max_engine_clock = adev->pm.default_sclk * 10; | ||
421 | dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask; | ||
422 | dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se * | ||
423 | adev->gfx.config.max_shader_engines; | ||
424 | dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts; | ||
425 | dev_info._pad = 0; | ||
426 | dev_info.ids_flags = 0; | ||
427 | if (adev->flags & AMDGPU_IS_APU) | ||
428 | dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION; | ||
429 | dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE; | ||
430 | dev_info.virtual_address_alignment = max(PAGE_SIZE, 0x10000UL); | ||
431 | dev_info.pte_fragment_size = (1 << AMDGPU_LOG2_PAGES_PER_FRAG) * | ||
432 | AMDGPU_GPU_PAGE_SIZE; | ||
433 | dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE; | ||
434 | |||
435 | amdgpu_asic_get_cu_info(adev, &cu_info); | ||
436 | dev_info.cu_active_number = cu_info.number; | ||
437 | dev_info.cu_ao_mask = cu_info.ao_cu_mask; | ||
438 | memcpy(&dev_info.cu_bitmap[0], &cu_info.bitmap[0], sizeof(cu_info.bitmap)); | ||
439 | |||
440 | return copy_to_user(out, &dev_info, | ||
441 | min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0; | ||
442 | } | ||
443 | default: | ||
444 | DRM_DEBUG_KMS("Invalid request %d\n", info->query); | ||
445 | return -EINVAL; | ||
446 | } | ||
447 | return 0; | ||
448 | } | ||
449 | |||
450 | |||
/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * amdgpu_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga switcheroo state after last close (all asics).
 * (Kernel-doc previously named this "firstopen"; corrected to match
 * the actual function.)
 */
void amdgpu_driver_lastclose_kms(struct drm_device *dev)
{
	vga_switcheroo_process_delayed_switch();
}
465 | |||
466 | /** | ||
467 | * amdgpu_driver_open_kms - drm callback for open | ||
468 | * | ||
469 | * @dev: drm dev pointer | ||
470 | * @file_priv: drm file | ||
471 | * | ||
472 | * On device open, init vm on cayman+ (all asics). | ||
473 | * Returns 0 on success, error on failure. | ||
474 | */ | ||
475 | int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) | ||
476 | { | ||
477 | struct amdgpu_device *adev = dev->dev_private; | ||
478 | struct amdgpu_fpriv *fpriv; | ||
479 | int r; | ||
480 | |||
481 | file_priv->driver_priv = NULL; | ||
482 | |||
483 | r = pm_runtime_get_sync(dev->dev); | ||
484 | if (r < 0) | ||
485 | return r; | ||
486 | |||
487 | fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); | ||
488 | if (unlikely(!fpriv)) | ||
489 | return -ENOMEM; | ||
490 | |||
491 | r = amdgpu_vm_init(adev, &fpriv->vm); | ||
492 | if (r) | ||
493 | goto error_free; | ||
494 | |||
495 | mutex_init(&fpriv->bo_list_lock); | ||
496 | idr_init(&fpriv->bo_list_handles); | ||
497 | |||
498 | /* init context manager */ | ||
499 | mutex_init(&fpriv->ctx_mgr.hlock); | ||
500 | idr_init(&fpriv->ctx_mgr.ctx_handles); | ||
501 | fpriv->ctx_mgr.adev = adev; | ||
502 | |||
503 | file_priv->driver_priv = fpriv; | ||
504 | |||
505 | pm_runtime_mark_last_busy(dev->dev); | ||
506 | pm_runtime_put_autosuspend(dev->dev); | ||
507 | return 0; | ||
508 | |||
509 | error_free: | ||
510 | kfree(fpriv); | ||
511 | |||
512 | return r; | ||
513 | } | ||
514 | |||
515 | /** | ||
516 | * amdgpu_driver_postclose_kms - drm callback for post close | ||
517 | * | ||
518 | * @dev: drm dev pointer | ||
519 | * @file_priv: drm file | ||
520 | * | ||
521 | * On device post close, tear down vm on cayman+ (all asics). | ||
522 | */ | ||
523 | void amdgpu_driver_postclose_kms(struct drm_device *dev, | ||
524 | struct drm_file *file_priv) | ||
525 | { | ||
526 | struct amdgpu_device *adev = dev->dev_private; | ||
527 | struct amdgpu_fpriv *fpriv = file_priv->driver_priv; | ||
528 | struct amdgpu_bo_list *list; | ||
529 | int handle; | ||
530 | |||
531 | if (!fpriv) | ||
532 | return; | ||
533 | |||
534 | amdgpu_vm_fini(adev, &fpriv->vm); | ||
535 | |||
536 | idr_for_each_entry(&fpriv->bo_list_handles, list, handle) | ||
537 | amdgpu_bo_list_free(list); | ||
538 | |||
539 | idr_destroy(&fpriv->bo_list_handles); | ||
540 | mutex_destroy(&fpriv->bo_list_lock); | ||
541 | |||
542 | /* release context */ | ||
543 | amdgpu_ctx_fini(fpriv); | ||
544 | |||
545 | kfree(fpriv); | ||
546 | file_priv->driver_priv = NULL; | ||
547 | } | ||
548 | |||
/**
 * amdgpu_driver_preclose_kms - drm callback for pre close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device pre close, free any UVD and VCE handles still owned by
 * this file (all asics).
 * (Previous comment about "hyperz and cmask filps on r1xx-r5xx" was a
 * radeon-era leftover and did not describe this code.)
 */
void amdgpu_driver_preclose_kms(struct drm_device *dev,
				struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;

	amdgpu_uvd_free_handles(adev, file_priv);
	amdgpu_vce_free_handles(adev, file_priv);
}
566 | |||
567 | /* | ||
568 | * VBlank related functions. | ||
569 | */ | ||
570 | /** | ||
571 | * amdgpu_get_vblank_counter_kms - get frame count | ||
572 | * | ||
573 | * @dev: drm dev pointer | ||
574 | * @crtc: crtc to get the frame count from | ||
575 | * | ||
576 | * Gets the frame count on the requested crtc (all asics). | ||
577 | * Returns frame count on success, -EINVAL on failure. | ||
578 | */ | ||
579 | u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, int crtc) | ||
580 | { | ||
581 | struct amdgpu_device *adev = dev->dev_private; | ||
582 | |||
583 | if (crtc < 0 || crtc >= adev->mode_info.num_crtc) { | ||
584 | DRM_ERROR("Invalid crtc %d\n", crtc); | ||
585 | return -EINVAL; | ||
586 | } | ||
587 | |||
588 | return amdgpu_display_vblank_get_counter(adev, crtc); | ||
589 | } | ||
590 | |||
591 | /** | ||
592 | * amdgpu_enable_vblank_kms - enable vblank interrupt | ||
593 | * | ||
594 | * @dev: drm dev pointer | ||
595 | * @crtc: crtc to enable vblank interrupt for | ||
596 | * | ||
597 | * Enable the interrupt on the requested crtc (all asics). | ||
598 | * Returns 0 on success, -EINVAL on failure. | ||
599 | */ | ||
600 | int amdgpu_enable_vblank_kms(struct drm_device *dev, int crtc) | ||
601 | { | ||
602 | struct amdgpu_device *adev = dev->dev_private; | ||
603 | int idx = amdgpu_crtc_idx_to_irq_type(adev, crtc); | ||
604 | |||
605 | return amdgpu_irq_get(adev, &adev->crtc_irq, idx); | ||
606 | } | ||
607 | |||
608 | /** | ||
609 | * amdgpu_disable_vblank_kms - disable vblank interrupt | ||
610 | * | ||
611 | * @dev: drm dev pointer | ||
612 | * @crtc: crtc to disable vblank interrupt for | ||
613 | * | ||
614 | * Disable the interrupt on the requested crtc (all asics). | ||
615 | */ | ||
616 | void amdgpu_disable_vblank_kms(struct drm_device *dev, int crtc) | ||
617 | { | ||
618 | struct amdgpu_device *adev = dev->dev_private; | ||
619 | int idx = amdgpu_crtc_idx_to_irq_type(adev, crtc); | ||
620 | |||
621 | amdgpu_irq_put(adev, &adev->crtc_irq, idx); | ||
622 | } | ||
623 | |||
624 | /** | ||
625 | * amdgpu_get_vblank_timestamp_kms - get vblank timestamp | ||
626 | * | ||
627 | * @dev: drm dev pointer | ||
628 | * @crtc: crtc to get the timestamp for | ||
629 | * @max_error: max error | ||
630 | * @vblank_time: time value | ||
631 | * @flags: flags passed to the driver | ||
632 | * | ||
633 | * Gets the timestamp on the requested crtc based on the | ||
634 | * scanout position. (all asics). | ||
635 | * Returns postive status flags on success, negative error on failure. | ||
636 | */ | ||
637 | int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, int crtc, | ||
638 | int *max_error, | ||
639 | struct timeval *vblank_time, | ||
640 | unsigned flags) | ||
641 | { | ||
642 | struct drm_crtc *drmcrtc; | ||
643 | struct amdgpu_device *adev = dev->dev_private; | ||
644 | |||
645 | if (crtc < 0 || crtc >= dev->num_crtcs) { | ||
646 | DRM_ERROR("Invalid crtc %d\n", crtc); | ||
647 | return -EINVAL; | ||
648 | } | ||
649 | |||
650 | /* Get associated drm_crtc: */ | ||
651 | drmcrtc = &adev->mode_info.crtcs[crtc]->base; | ||
652 | |||
653 | /* Helper routine in DRM core does all the work: */ | ||
654 | return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error, | ||
655 | vblank_time, flags, | ||
656 | drmcrtc, &drmcrtc->hwmode); | ||
657 | } | ||
658 | |||
/* Table of amdgpu-specific ioctls exposed to userspace. All entries
 * require DRM_AUTH and allow render nodes (DRM_RENDER_ALLOW).
 */
const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	/* KMS */
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
/* number of entries in the table above, exported for the drm driver struct */
int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);