| author | James Morris <james.l.morris@oracle.com> | 2017-07-24 20:44:18 -0400 |
|---|---|---|
| committer | James Morris <james.l.morris@oracle.com> | 2017-07-24 20:44:18 -0400 |
| commit | 53a2ebaaabc1eb8458796fec3bc1e0e80746b642 (patch) | |
| tree | 9d1f9227b49392cdd2edcc01057517da4f4b09c2 /drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | |
| parent | 3cf29931453215536916d0c4da953fce1911ced3 (diff) | |
| parent | 520eccdfe187591a51ea9ab4c1a024ae4d0f68d9 (diff) | |
sync to Linus v4.13-rc2 for subsystem developers to work against
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c')
| -rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 206 |
|---|---|---|

1 file changed, 206 insertions, 0 deletions
```diff
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 19943356cca7..e26108aad3fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -108,3 +108,209 @@ void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_s
 		p = next + 1;
 	}
 }
+
+void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
+{
+	int i, queue, pipe, mec;
+
+	/* policy for amdgpu compute queue ownership */
+	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
+		queue = i % adev->gfx.mec.num_queue_per_pipe;
+		pipe = (i / adev->gfx.mec.num_queue_per_pipe)
+			% adev->gfx.mec.num_pipe_per_mec;
+		mec = (i / adev->gfx.mec.num_queue_per_pipe)
+			/ adev->gfx.mec.num_pipe_per_mec;
+
+		/* we've run out of HW */
+		if (mec >= adev->gfx.mec.num_mec)
+			break;
+
+		if (adev->gfx.mec.num_mec > 1) {
+			/* policy: amdgpu owns the first two queues of the first MEC */
+			if (mec == 0 && queue < 2)
+				set_bit(i, adev->gfx.mec.queue_bitmap);
+		} else {
+			/* policy: amdgpu owns all queues in the first pipe */
+			if (mec == 0 && pipe == 0)
+				set_bit(i, adev->gfx.mec.queue_bitmap);
+		}
+	}
+
+	/* update the number of active compute rings */
+	adev->gfx.num_compute_rings =
+		bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
+
+	/* If you hit this case and edited the policy, you probably just
+	 * need to increase AMDGPU_MAX_COMPUTE_RINGS */
+	if (WARN_ON(adev->gfx.num_compute_rings > AMDGPU_MAX_COMPUTE_RINGS))
+		adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
+}
+
```
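The acquire loop above flattens each (mec, pipe, queue) triple into a single bit index of `queue_bitmap`. Below is a minimal, self-contained userspace sketch of that arithmetic; the 2-MEC / 4-pipe / 8-queue topology is only an illustrative assumption, since the real counts are filled into `adev->gfx.mec` by the IP-specific code:

```c
#include <stdio.h>

/* Illustrative sketch, not kernel code: decompose the linear queue index
 * the same way amdgpu_gfx_compute_queue_acquire() does, and print which
 * bits the multi-MEC policy ("first two queues of MEC 0") would claim. */
int main(void)
{
        const int num_mec = 2, num_pipe_per_mec = 4, num_queue_per_pipe = 8;
        const int total = num_mec * num_pipe_per_mec * num_queue_per_pipe;
        int i;

        for (i = 0; i < total; ++i) {
                int queue = i % num_queue_per_pipe;
                int pipe = (i / num_queue_per_pipe) % num_pipe_per_mec;
                int mec = (i / num_queue_per_pipe) / num_pipe_per_mec;

                if (mec == 0 && queue < 2)      /* the num_mec > 1 policy */
                        printf("bit %2d -> mec %d pipe %d queue %d (amdgpu-owned)\n",
                               i, mec, pipe, queue);
        }
        return 0;
}
```

With these example counts the policy claims eight bits (queues 0 and 1 on each of MEC 0's four pipes), so `num_compute_rings` would come out as 8.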
```diff
+static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
+				  struct amdgpu_ring *ring)
+{
+	int queue_bit;
+	int mec, pipe, queue;
+
+	queue_bit = adev->gfx.mec.num_mec
+		    * adev->gfx.mec.num_pipe_per_mec
+		    * adev->gfx.mec.num_queue_per_pipe;
+
+	while (queue_bit-- >= 0) {
+		if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap))
+			continue;
+
+		amdgpu_gfx_bit_to_queue(adev, queue_bit, &mec, &pipe, &queue);
+
+		/* Using pipes 2/3 from MEC 2 seems cause problems */
+		if (mec == 1 && pipe > 1)
+			continue;
+
+		ring->me = mec + 1;
+		ring->pipe = pipe;
+		ring->queue = queue;
+
+		return 0;
+	}
+
+	dev_err(adev->dev, "Failed to find a queue for KIQ\n");
+	return -EINVAL;
+}
+
```
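`amdgpu_gfx_bit_to_queue()` is not part of this hunk; it is a helper defined elsewhere in the amdgpu gfx code. For orientation, here is a sketch of the bit-to-triple split the KIQ acquire path relies on, i.e. the inverse of the flattening used in the acquire loop earlier (illustrative only, not the canonical definition):

```c
/* Illustrative inverse mapping: single bit index -> (mec, pipe, queue),
 * using the same per-pipe/per-mec counts as the acquire loop. */
static void bit_to_queue_sketch(int bit,
                                int num_queue_per_pipe, int num_pipe_per_mec,
                                int *mec, int *pipe, int *queue)
{
        *queue = bit % num_queue_per_pipe;
        *pipe = (bit / num_queue_per_pipe) % num_pipe_per_mec;
        *mec = (bit / num_queue_per_pipe) / num_pipe_per_mec;
}
```

Note that `ring->me` is stored 1-based (`mec + 1`), so the `mec == 1 && pipe > 1` check corresponds to the "pipes 2/3 from MEC 2" mentioned in the comment. The search also skips every bit already claimed by the driver, so the KIQ lands on a queue not used for regular compute rings.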
```diff
+int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
+			     struct amdgpu_ring *ring,
+			     struct amdgpu_irq_src *irq)
+{
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+	int r = 0;
+
+	mutex_init(&kiq->ring_mutex);
+
+	r = amdgpu_wb_get(adev, &adev->virt.reg_val_offs);
+	if (r)
+		return r;
+
+	ring->adev = NULL;
+	ring->ring_obj = NULL;
+	ring->use_doorbell = true;
+	ring->doorbell_index = AMDGPU_DOORBELL_KIQ;
+
+	r = amdgpu_gfx_kiq_acquire(adev, ring);
+	if (r)
+		return r;
+
+	ring->eop_gpu_addr = kiq->eop_gpu_addr;
+	sprintf(ring->name, "kiq_%d.%d.%d", ring->me, ring->pipe, ring->queue);
+	r = amdgpu_ring_init(adev, ring, 1024,
+			     irq, AMDGPU_CP_KIQ_IRQ_DRIVER0);
+	if (r)
+		dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);
+
+	return r;
+}
+
```
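A hedged sketch of how a GFX IP block's sw_init path might wire these helpers together. The ordering matters: `amdgpu_gfx_kiq_init()` creates the EOP buffer whose GPU address `amdgpu_gfx_kiq_init_ring()` copies into the ring. The HPD size constant, the caller name, and the use of the KIQ's own interrupt source are assumptions for illustration, not taken from this patch:

```c
#define EXAMPLE_MEC_HPD_SIZE 4096       /* placeholder; the real size is per-ASIC */

/* Hypothetical caller, for illustration only. */
static int example_kiq_sw_init(struct amdgpu_device *adev)
{
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        int r;

        /* allocate and clear the KIQ EOP buffer first ... */
        r = amdgpu_gfx_kiq_init(adev, EXAMPLE_MEC_HPD_SIZE);
        if (r)
                return r;

        /* ... then create the KIQ ring, which acquires an unclaimed queue
         * and picks up kiq->eop_gpu_addr set by the call above */
        return amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
}
```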
```diff
+void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring,
+			      struct amdgpu_irq_src *irq)
+{
+	amdgpu_wb_free(ring->adev, ring->adev->virt.reg_val_offs);
+	amdgpu_ring_fini(ring);
+}
+
+void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev)
+{
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+
+	amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
+}
+
```
```diff
+int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
+			unsigned hpd_size)
+{
+	int r;
+	u32 *hpd;
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+
+	r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
+				    AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
+				    &kiq->eop_gpu_addr, (void **)&hpd);
+	if (r) {
+		dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
+		return r;
+	}
+
+	memset(hpd, 0, hpd_size);
+
+	r = amdgpu_bo_reserve(kiq->eop_obj, true);
+	if (unlikely(r != 0))
+		dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
+	amdgpu_bo_kunmap(kiq->eop_obj);
+	amdgpu_bo_unreserve(kiq->eop_obj);
+
+	return 0;
+}
+
```
```diff
+/* create MQD for each compute queue */
+int amdgpu_gfx_compute_mqd_sw_init(struct amdgpu_device *adev,
+				   unsigned mqd_size)
+{
+	struct amdgpu_ring *ring = NULL;
+	int r, i;
+
+	/* create MQD for KIQ */
+	ring = &adev->gfx.kiq.ring;
+	if (!ring->mqd_obj) {
+		r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
+					    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
+					    &ring->mqd_gpu_addr, &ring->mqd_ptr);
+		if (r) {
+			dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
+			return r;
+		}
+
+		/* prepare MQD backup */
+		adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS] = kmalloc(mqd_size, GFP_KERNEL);
+		if (!adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS])
+			dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
+	}
+
+	/* create MQD for each KCQ */
+	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+		ring = &adev->gfx.compute_ring[i];
+		if (!ring->mqd_obj) {
+			r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
+						    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
+						    &ring->mqd_gpu_addr, &ring->mqd_ptr);
+			if (r) {
+				dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
+				return r;
+			}
+
+			/* prepare MQD backup */
+			adev->gfx.mec.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
+			if (!adev->gfx.mec.mqd_backup[i])
+				dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
+		}
+	}
+
+	return 0;
+}
+
```
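The backup slots allocated above follow a fixed layout: indices `0 .. num_compute_rings - 1` hold the per-KCQ MQD snapshots, and the extra slot at index `AMDGPU_MAX_COMPUTE_RINGS` holds the KIQ's. A layout sketch, with the struct name and the ring count as illustrative stand-ins rather than definitions from this patch:

```c
#define EXAMPLE_MAX_COMPUTE_RINGS 8     /* stand-in for AMDGPU_MAX_COMPUTE_RINGS */

/* Illustrative layout of the backup array used above:
 *   [0 .. num_compute_rings-1]     per-KCQ MQD snapshots
 *   [EXAMPLE_MAX_COMPUTE_RINGS]    KIQ MQD snapshot
 * hence one slot beyond the maximum KCQ count is required. */
struct example_mec_mqd_backup {
        void *mqd_backup[EXAMPLE_MAX_COMPUTE_RINGS + 1];
};
```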
```diff
+void amdgpu_gfx_compute_mqd_sw_fini(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring *ring = NULL;
+	int i;
+
+	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+		ring = &adev->gfx.compute_ring[i];
+		kfree(adev->gfx.mec.mqd_backup[i]);
+		amdgpu_bo_free_kernel(&ring->mqd_obj,
+				      &ring->mqd_gpu_addr,
+				      &ring->mqd_ptr);
+	}
+
+	ring = &adev->gfx.kiq.ring;
+	kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
+	amdgpu_bo_free_kernel(&ring->mqd_obj,
+			      &ring->mqd_gpu_addr,
+			      &ring->mqd_ptr);
+}
```
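For symmetry, a hedged sketch of the teardown order an IP block's sw_fini might use, releasing the allocations above in reverse; the caller name and the use of `kiq->irq` are assumptions for illustration:

```c
/* Hypothetical sw_fini counterpart to the init sketch shown earlier. */
static void example_kiq_sw_fini(struct amdgpu_device *adev)
{
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;

        /* free per-ring MQD BOs and the kmalloc'd backup copies */
        amdgpu_gfx_compute_mqd_sw_fini(adev);

        /* release the KIQ ring and its writeback register slot */
        amdgpu_gfx_kiq_free_ring(&kiq->ring, &kiq->irq);

        /* finally free the KIQ EOP buffer object */
        amdgpu_gfx_kiq_fini(adev);
}
```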
