author    Christian König <christian.koenig@amd.com>    2016-09-15 09:06:50 -0400
committer Alex Deucher <alexander.deucher@amd.com>      2016-09-28 16:16:20 -0400
commit    765e7fbf081d0e8bd22b35468a1c016358b46179 (patch)
tree      47bf14c467cfec99984a0bd4283bf42b06df4063 /drivers/gpu
parent    1927ffc0c19d7d42f775604f6984933bbb0d419b (diff)
drm/amdgpu: rename all rbo variable to abo v2
Just to clean up some radeon leftovers.
sed -i "s/rbo/abo/g" drivers/gpu/drm/amd/amdgpu/*.c
sed -i "s/rbo/abo/g" drivers/gpu/drm/amd/amdgpu/*.h
v2: rebased
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
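Since the rename is purely mechanical, a grep over the same directory is enough to confirm that the sed commands above caught every occurrence. The command below is only an illustrative sketch, not part of the applied patch; it assumes a git checkout of the tree, and should ideally print no matches for these files after the rename:

git grep -n "rbo" -- drivers/gpu/drm/amd/amdgpu/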
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h          |  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c  | 42
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c       | 44
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c      | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c   | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c      | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c      |  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c       | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c       | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c        | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c        | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_virtual.c     | 12
12 files changed, 159 insertions(+), 159 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 869d6ebc2ea3..4f29f84efae2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -703,7 +703,7 @@ struct amdgpu_flip_work {
 	u32 target_vblank;
 	uint64_t base;
 	struct drm_pending_vblank_event *event;
-	struct amdgpu_bo *old_rbo;
+	struct amdgpu_bo *old_abo;
 	struct fence *excl;
 	unsigned shared_count;
 	struct fence **shared;
@@ -2416,7 +2416,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
 int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 		       u32 ip_instance, u32 ring,
 		       struct amdgpu_ring **out_ring);
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain);
+void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
 bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
 int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 9af8d3c7ae8b..083e2b429872 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -123,17 +123,17 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
 	int r;
 
 	/* unpin of the old buffer */
-	r = amdgpu_bo_reserve(work->old_rbo, false);
+	r = amdgpu_bo_reserve(work->old_abo, false);
 	if (likely(r == 0)) {
-		r = amdgpu_bo_unpin(work->old_rbo);
+		r = amdgpu_bo_unpin(work->old_abo);
 		if (unlikely(r != 0)) {
 			DRM_ERROR("failed to unpin buffer after flip\n");
 		}
-		amdgpu_bo_unreserve(work->old_rbo);
+		amdgpu_bo_unreserve(work->old_abo);
 	} else
 		DRM_ERROR("failed to reserve buffer after flip\n");
 
-	amdgpu_bo_unref(&work->old_rbo);
+	amdgpu_bo_unref(&work->old_abo);
 	kfree(work->shared);
 	kfree(work);
 }
@@ -150,7 +150,7 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
 	struct amdgpu_framebuffer *new_amdgpu_fb;
 	struct drm_gem_object *obj;
 	struct amdgpu_flip_work *work;
-	struct amdgpu_bo *new_rbo;
+	struct amdgpu_bo *new_abo;
 	unsigned long flags;
 	u64 tiling_flags;
 	u64 base;
@@ -173,28 +173,28 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
 	obj = old_amdgpu_fb->obj;
 
 	/* take a reference to the old object */
-	work->old_rbo = gem_to_amdgpu_bo(obj);
-	amdgpu_bo_ref(work->old_rbo);
+	work->old_abo = gem_to_amdgpu_bo(obj);
+	amdgpu_bo_ref(work->old_abo);
 
 	new_amdgpu_fb = to_amdgpu_framebuffer(fb);
 	obj = new_amdgpu_fb->obj;
-	new_rbo = gem_to_amdgpu_bo(obj);
+	new_abo = gem_to_amdgpu_bo(obj);
 
 	/* pin the new buffer */
-	r = amdgpu_bo_reserve(new_rbo, false);
+	r = amdgpu_bo_reserve(new_abo, false);
 	if (unlikely(r != 0)) {
-		DRM_ERROR("failed to reserve new rbo buffer before flip\n");
+		DRM_ERROR("failed to reserve new abo buffer before flip\n");
 		goto cleanup;
 	}
 
-	r = amdgpu_bo_pin_restricted(new_rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, &base);
+	r = amdgpu_bo_pin_restricted(new_abo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, &base);
 	if (unlikely(r != 0)) {
 		r = -EINVAL;
-		DRM_ERROR("failed to pin new rbo buffer before flip\n");
+		DRM_ERROR("failed to pin new abo buffer before flip\n");
 		goto unreserve;
 	}
 
-	r = reservation_object_get_fences_rcu(new_rbo->tbo.resv, &work->excl,
+	r = reservation_object_get_fences_rcu(new_abo->tbo.resv, &work->excl,
 					      &work->shared_count,
 					      &work->shared);
 	if (unlikely(r != 0)) {
@@ -202,8 +202,8 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
 		goto unpin;
 	}
 
-	amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags);
-	amdgpu_bo_unreserve(new_rbo);
+	amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
+	amdgpu_bo_unreserve(new_abo);
 
 	work->base = base;
 	work->target_vblank = target - drm_crtc_vblank_count(crtc) +
@@ -231,19 +231,19 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
 	return 0;
 
 pflip_cleanup:
-	if (unlikely(amdgpu_bo_reserve(new_rbo, false) != 0)) {
-		DRM_ERROR("failed to reserve new rbo in error path\n");
+	if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
+		DRM_ERROR("failed to reserve new abo in error path\n");
 		goto cleanup;
 	}
 unpin:
-	if (unlikely(amdgpu_bo_unpin(new_rbo) != 0)) {
-		DRM_ERROR("failed to unpin new rbo in error path\n");
+	if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) {
+		DRM_ERROR("failed to unpin new abo in error path\n");
 	}
 unreserve:
-	amdgpu_bo_unreserve(new_rbo);
+	amdgpu_bo_unreserve(new_abo);
 
 cleanup:
-	amdgpu_bo_unref(&work->old_rbo);
+	amdgpu_bo_unref(&work->old_abo);
 	fence_put(work->excl);
 	for (i = 0; i < work->shared_count; ++i)
 		fence_put(work->shared[i]);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 3c527cc72bf0..aa4d15b20631 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -116,14 +116,14 @@ int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tile
 
 static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
 {
-	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(gobj);
+	struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
 	int ret;
 
-	ret = amdgpu_bo_reserve(rbo, false);
+	ret = amdgpu_bo_reserve(abo, false);
 	if (likely(ret == 0)) {
-		amdgpu_bo_kunmap(rbo);
-		amdgpu_bo_unpin(rbo);
-		amdgpu_bo_unreserve(rbo);
+		amdgpu_bo_kunmap(abo);
+		amdgpu_bo_unpin(abo);
+		amdgpu_bo_unreserve(abo);
 	}
 	drm_gem_object_unreference_unlocked(gobj);
 }
@@ -134,7 +134,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
 {
 	struct amdgpu_device *adev = rfbdev->adev;
 	struct drm_gem_object *gobj = NULL;
-	struct amdgpu_bo *rbo = NULL;
+	struct amdgpu_bo *abo = NULL;
 	bool fb_tiled = false; /* useful for testing */
 	u32 tiling_flags = 0;
 	int ret;
@@ -160,30 +160,30 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
 		       aligned_size);
 		return -ENOMEM;
 	}
-	rbo = gem_to_amdgpu_bo(gobj);
+	abo = gem_to_amdgpu_bo(gobj);
 
 	if (fb_tiled)
 		tiling_flags = AMDGPU_TILING_SET(ARRAY_MODE, GRPH_ARRAY_2D_TILED_THIN1);
 
-	ret = amdgpu_bo_reserve(rbo, false);
+	ret = amdgpu_bo_reserve(abo, false);
 	if (unlikely(ret != 0))
 		goto out_unref;
 
 	if (tiling_flags) {
-		ret = amdgpu_bo_set_tiling_flags(rbo,
+		ret = amdgpu_bo_set_tiling_flags(abo,
 						 tiling_flags);
 		if (ret)
 			dev_err(adev->dev, "FB failed to set tiling flags\n");
 	}
 
 
-	ret = amdgpu_bo_pin_restricted(rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, NULL);
+	ret = amdgpu_bo_pin_restricted(abo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, NULL);
 	if (ret) {
-		amdgpu_bo_unreserve(rbo);
+		amdgpu_bo_unreserve(abo);
 		goto out_unref;
 	}
-	ret = amdgpu_bo_kmap(rbo, NULL);
-	amdgpu_bo_unreserve(rbo);
+	ret = amdgpu_bo_kmap(abo, NULL);
+	amdgpu_bo_unreserve(abo);
 	if (ret) {
 		goto out_unref;
 	}
@@ -205,7 +205,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 	struct drm_framebuffer *fb = NULL;
 	struct drm_mode_fb_cmd2 mode_cmd;
 	struct drm_gem_object *gobj = NULL;
-	struct amdgpu_bo *rbo = NULL;
+	struct amdgpu_bo *abo = NULL;
 	int ret;
 	unsigned long tmp;
 
@@ -224,7 +224,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 		return ret;
 	}
 
-	rbo = gem_to_amdgpu_bo(gobj);
+	abo = gem_to_amdgpu_bo(gobj);
 
 	/* okay we have an object now allocate the framebuffer */
 	info = drm_fb_helper_alloc_fbi(helper);
@@ -247,7 +247,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 	/* setup helper */
 	rfbdev->helper.fb = fb;
 
-	memset_io(rbo->kptr, 0x0, amdgpu_bo_size(rbo));
+	memset_io(abo->kptr, 0x0, amdgpu_bo_size(abo));
 
 	strcpy(info->fix.id, "amdgpudrmfb");
 
@@ -256,11 +256,11 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
 	info->fbops = &amdgpufb_ops;
 
-	tmp = amdgpu_bo_gpu_offset(rbo) - adev->mc.vram_start;
+	tmp = amdgpu_bo_gpu_offset(abo) - adev->mc.vram_start;
 	info->fix.smem_start = adev->mc.aper_base + tmp;
-	info->fix.smem_len = amdgpu_bo_size(rbo);
-	info->screen_base = rbo->kptr;
-	info->screen_size = amdgpu_bo_size(rbo);
+	info->fix.smem_len = amdgpu_bo_size(abo);
+	info->screen_base = abo->kptr;
+	info->screen_size = amdgpu_bo_size(abo);
 
 	drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
 
@@ -277,7 +277,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 
 	DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
 	DRM_INFO("vram apper at 0x%lX\n", (unsigned long)adev->mc.aper_base);
-	DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(rbo));
+	DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(abo));
 	DRM_INFO("fb depth is %d\n", fb->depth);
 	DRM_INFO("   pitch is %d\n", fb->pitches[0]);
 
@@ -287,7 +287,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 out_destroy_fbi:
 	drm_fb_helper_release_fbi(helper);
 out_unref:
-	if (rbo) {
+	if (abo) {
 
 	}
 	if (fb && ret) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index f9f911623917..a7ea9a3b454e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -118,23 +118,23 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
  */
 int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
 {
-	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
-	struct amdgpu_device *adev = rbo->adev;
+	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
+	struct amdgpu_device *adev = abo->adev;
 	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_va *bo_va;
 	int r;
-	r = amdgpu_bo_reserve(rbo, false);
+	r = amdgpu_bo_reserve(abo, false);
 	if (r)
 		return r;
 
-	bo_va = amdgpu_vm_bo_find(vm, rbo);
+	bo_va = amdgpu_vm_bo_find(vm, abo);
 	if (!bo_va) {
-		bo_va = amdgpu_vm_bo_add(adev, vm, rbo);
+		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
 	} else {
 		++bo_va->ref_count;
 	}
-	amdgpu_bo_unreserve(rbo);
+	amdgpu_bo_unreserve(abo);
 	return 0;
 }
 
@@ -547,7 +547,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	struct drm_gem_object *gobj;
 	struct amdgpu_device *adev = dev->dev_private;
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
-	struct amdgpu_bo *rbo;
+	struct amdgpu_bo *abo;
 	struct amdgpu_bo_va *bo_va;
 	struct ttm_validate_buffer tv, tv_pd;
 	struct ww_acquire_ctx ticket;
@@ -587,10 +587,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	gobj = drm_gem_object_lookup(filp, args->handle);
 	if (gobj == NULL)
 		return -ENOENT;
-	rbo = gem_to_amdgpu_bo(gobj);
+	abo = gem_to_amdgpu_bo(gobj);
 	INIT_LIST_HEAD(&list);
 	INIT_LIST_HEAD(&duplicates);
-	tv.bo = &rbo->tbo;
+	tv.bo = &abo->tbo;
 	tv.shared = true;
 	list_add(&tv.head, &list);
 
@@ -604,7 +604,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 		return r;
 	}
 
-	bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
+	bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
 	if (!bo_va) {
 		ttm_eu_backoff_reservation(&ticket, &list);
 		drm_gem_object_unreference_unlocked(gobj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 171131f360cd..aa074fac0c7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -203,10 +203,10 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
 	placement->busy_placement = places;
 }
 
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain)
+void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 {
-	amdgpu_ttm_placement_init(rbo->adev, &rbo->placement,
-				  rbo->placements, domain, rbo->flags);
+	amdgpu_ttm_placement_init(abo->adev, &abo->placement,
				  abo->placements, domain, abo->flags);
 }
 
 static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
@@ -849,23 +849,23 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
 void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 			   struct ttm_mem_reg *new_mem)
 {
-	struct amdgpu_bo *rbo;
+	struct amdgpu_bo *abo;
 	struct ttm_mem_reg *old_mem = &bo->mem;
 
 	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
 		return;
 
-	rbo = container_of(bo, struct amdgpu_bo, tbo);
-	amdgpu_vm_bo_invalidate(rbo->adev, rbo);
+	abo = container_of(bo, struct amdgpu_bo, tbo);
+	amdgpu_vm_bo_invalidate(abo->adev, abo);
 
 	/* update statistics */
 	if (!new_mem)
 		return;
 
 	/* move_notify is called before move happens */
-	amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem);
+	amdgpu_update_memory_usage(abo->adev, &bo->mem, new_mem);
 
-	trace_amdgpu_ttm_bo_move(rbo, new_mem->mem_type, old_mem->mem_type);
+	trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
 }
 
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index e21e823f67a5..e30d3fda0f6d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -195,7 +195,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 			       struct ttm_placement *placement)
 {
-	struct amdgpu_bo *rbo;
+	struct amdgpu_bo *abo;
 	static struct ttm_place placements = {
 		.fpfn = 0,
 		.lpfn = 0,
@@ -210,43 +210,43 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 		placement->num_busy_placement = 1;
 		return;
 	}
-	rbo = container_of(bo, struct amdgpu_bo, tbo);
+	abo = container_of(bo, struct amdgpu_bo, tbo);
 	switch (bo->mem.mem_type) {
 	case TTM_PL_VRAM:
-		if (rbo->adev->mman.buffer_funcs_ring->ready == false) {
-			amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_CPU);
+		if (abo->adev->mman.buffer_funcs_ring->ready == false) {
+			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
 		} else {
-			amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_GTT);
-			for (i = 0; i < rbo->placement.num_placement; ++i) {
-				if (!(rbo->placements[i].flags &
+			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
+			for (i = 0; i < abo->placement.num_placement; ++i) {
+				if (!(abo->placements[i].flags &
 				      TTM_PL_FLAG_TT))
 					continue;
 
-				if (rbo->placements[i].lpfn)
+				if (abo->placements[i].lpfn)
 					continue;
 
 				/* set an upper limit to force directly
 				 * allocating address space for the BO.
 				 */
-				rbo->placements[i].lpfn =
-					rbo->adev->mc.gtt_size >> PAGE_SHIFT;
+				abo->placements[i].lpfn =
+					abo->adev->mc.gtt_size >> PAGE_SHIFT;
 			}
 		}
 		break;
 	case TTM_PL_TT:
 	default:
-		amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_CPU);
+		amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
 	}
-	*placement = rbo->placement;
+	*placement = abo->placement;
 }
 
 static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
-	struct amdgpu_bo *rbo = container_of(bo, struct amdgpu_bo, tbo);
+	struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo);
 
 	if (amdgpu_ttm_tt_get_usermm(bo->ttm))
 		return -EPERM;
-	return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
+	return drm_vma_node_verify_access(&abo->gem_base.vma_node, filp);
 }
 
 static void amdgpu_move_null(struct ttm_buffer_object *bo,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 25dd58a65905..4656f1b0a6dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -351,12 +351,12 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 	}
 }
 
-static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *rbo)
+static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
 {
 	int i;
-	for (i = 0; i < rbo->placement.num_placement; ++i) {
-		rbo->placements[i].fpfn = 0 >> PAGE_SHIFT;
-		rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
+	for (i = 0; i < abo->placement.num_placement; ++i) {
+		abo->placements[i].fpfn = 0 >> PAGE_SHIFT;
+		abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
 	}
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 55e346e0d1dc..98f4bad3a5f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2107,7 +2107,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
 	struct amdgpu_framebuffer *amdgpu_fb;
 	struct drm_framebuffer *target_fb;
 	struct drm_gem_object *obj;
-	struct amdgpu_bo *rbo;
+	struct amdgpu_bo *abo;
 	uint64_t fb_location, tiling_flags;
 	uint32_t fb_format, fb_pitch_pixels;
 	u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE);
@@ -2134,23 +2134,23 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
 	 * just update base pointers
 	 */
 	obj = amdgpu_fb->obj;
-	rbo = gem_to_amdgpu_bo(obj);
-	r = amdgpu_bo_reserve(rbo, false);
+	abo = gem_to_amdgpu_bo(obj);
+	r = amdgpu_bo_reserve(abo, false);
 	if (unlikely(r != 0))
 		return r;
 
 	if (atomic) {
-		fb_location = amdgpu_bo_gpu_offset(rbo);
+		fb_location = amdgpu_bo_gpu_offset(abo);
 	} else {
-		r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
 		if (unlikely(r != 0)) {
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unreserve(abo);
 			return -EINVAL;
 		}
 	}
 
-	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
-	amdgpu_bo_unreserve(rbo);
+	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+	amdgpu_bo_unreserve(abo);
 
 	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
 
@@ -2324,12 +2324,12 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		amdgpu_fb = to_amdgpu_framebuffer(fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r != 0))
 			return r;
-		amdgpu_bo_unpin(rbo);
-		amdgpu_bo_unreserve(rbo);
+		amdgpu_bo_unpin(abo);
+		amdgpu_bo_unreserve(abo);
 	}
 
 	/* Bytes per pixel may have changed */
@@ -2809,16 +2809,16 @@ static void dce_v10_0_crtc_disable(struct drm_crtc *crtc)
 	if (crtc->primary->fb) {
 		int r;
 		struct amdgpu_framebuffer *amdgpu_fb;
-		struct amdgpu_bo *rbo;
+		struct amdgpu_bo *abo;
 
 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r))
-			DRM_ERROR("failed to reserve rbo before unpin\n");
+			DRM_ERROR("failed to reserve abo before unpin\n");
 		else {
-			amdgpu_bo_unpin(rbo);
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unpin(abo);
+			amdgpu_bo_unreserve(abo);
 		}
 	}
 	/* disable the GRPH */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 5f26024f7d3b..e8a6919baf46 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2088,7 +2088,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
 	struct amdgpu_framebuffer *amdgpu_fb;
 	struct drm_framebuffer *target_fb;
 	struct drm_gem_object *obj;
-	struct amdgpu_bo *rbo;
+	struct amdgpu_bo *abo;
 	uint64_t fb_location, tiling_flags;
 	uint32_t fb_format, fb_pitch_pixels;
 	u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE);
@@ -2115,23 +2115,23 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
 	 * just update base pointers
 	 */
 	obj = amdgpu_fb->obj;
-	rbo = gem_to_amdgpu_bo(obj);
-	r = amdgpu_bo_reserve(rbo, false);
+	abo = gem_to_amdgpu_bo(obj);
+	r = amdgpu_bo_reserve(abo, false);
 	if (unlikely(r != 0))
 		return r;
 
 	if (atomic) {
-		fb_location = amdgpu_bo_gpu_offset(rbo);
+		fb_location = amdgpu_bo_gpu_offset(abo);
 	} else {
-		r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
 		if (unlikely(r != 0)) {
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unreserve(abo);
 			return -EINVAL;
 		}
 	}
 
-	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
-	amdgpu_bo_unreserve(rbo);
+	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+	amdgpu_bo_unreserve(abo);
 
 	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
 
@@ -2305,12 +2305,12 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		amdgpu_fb = to_amdgpu_framebuffer(fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r != 0))
 			return r;
-		amdgpu_bo_unpin(rbo);
-		amdgpu_bo_unreserve(rbo);
+		amdgpu_bo_unpin(abo);
+		amdgpu_bo_unreserve(abo);
 	}
 
 	/* Bytes per pixel may have changed */
@@ -2825,16 +2825,16 @@ static void dce_v11_0_crtc_disable(struct drm_crtc *crtc)
 	if (crtc->primary->fb) {
 		int r;
 		struct amdgpu_framebuffer *amdgpu_fb;
-		struct amdgpu_bo *rbo;
+		struct amdgpu_bo *abo;
 
 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r))
-			DRM_ERROR("failed to reserve rbo before unpin\n");
+			DRM_ERROR("failed to reserve abo before unpin\n");
 		else {
-			amdgpu_bo_unpin(rbo);
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unpin(abo);
+			amdgpu_bo_unreserve(abo);
 		}
 	}
 	/* disable the GRPH */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index fce9256fb066..42a4e7e745da 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1533,7 +1533,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
 	struct amdgpu_framebuffer *amdgpu_fb;
 	struct drm_framebuffer *target_fb;
 	struct drm_gem_object *obj;
-	struct amdgpu_bo *rbo;
+	struct amdgpu_bo *abo;
 	uint64_t fb_location, tiling_flags;
 	uint32_t fb_format, fb_pitch_pixels, pipe_config;
 	u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
@@ -1560,23 +1560,23 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
 	 * just update base pointers
 	 */
 	obj = amdgpu_fb->obj;
-	rbo = gem_to_amdgpu_bo(obj);
-	r = amdgpu_bo_reserve(rbo, false);
+	abo = gem_to_amdgpu_bo(obj);
+	r = amdgpu_bo_reserve(abo, false);
 	if (unlikely(r != 0))
 		return r;
 
 	if (atomic)
-		fb_location = amdgpu_bo_gpu_offset(rbo);
+		fb_location = amdgpu_bo_gpu_offset(abo);
 	else {
-		r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
 		if (unlikely(r != 0)) {
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unreserve(abo);
 			return -EINVAL;
 		}
 	}
 
-	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
-	amdgpu_bo_unreserve(rbo);
+	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+	amdgpu_bo_unreserve(abo);
 
 	switch (target_fb->pixel_format) {
 	case DRM_FORMAT_C8:
@@ -1728,12 +1728,12 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		amdgpu_fb = to_amdgpu_framebuffer(fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r != 0))
 			return r;
-		amdgpu_bo_unpin(rbo);
-		amdgpu_bo_unreserve(rbo);
+		amdgpu_bo_unpin(abo);
+		amdgpu_bo_unreserve(abo);
 	}
 
 	/* Bytes per pixel may have changed */
@@ -2181,16 +2181,16 @@ static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
 	if (crtc->primary->fb) {
 		int r;
 		struct amdgpu_framebuffer *amdgpu_fb;
-		struct amdgpu_bo *rbo;
+		struct amdgpu_bo *abo;
 
 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r))
-			DRM_ERROR("failed to reserve rbo before unpin\n");
+			DRM_ERROR("failed to reserve abo before unpin\n");
 		else {
-			amdgpu_bo_unpin(rbo);
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unpin(abo);
+			amdgpu_bo_unreserve(abo);
 		}
 	}
 	/* disable the GRPH */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index c6c4e073028e..c5b286617da4 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2022,7 +2022,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
 	struct amdgpu_framebuffer *amdgpu_fb;
 	struct drm_framebuffer *target_fb;
 	struct drm_gem_object *obj;
-	struct amdgpu_bo *rbo;
+	struct amdgpu_bo *abo;
 	uint64_t fb_location, tiling_flags;
 	uint32_t fb_format, fb_pitch_pixels;
 	u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
@@ -2049,23 +2049,23 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
 	 * just update base pointers
 	 */
 	obj = amdgpu_fb->obj;
-	rbo = gem_to_amdgpu_bo(obj);
-	r = amdgpu_bo_reserve(rbo, false);
+	abo = gem_to_amdgpu_bo(obj);
+	r = amdgpu_bo_reserve(abo, false);
 	if (unlikely(r != 0))
 		return r;
 
 	if (atomic) {
-		fb_location = amdgpu_bo_gpu_offset(rbo);
+		fb_location = amdgpu_bo_gpu_offset(abo);
 	} else {
-		r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
 		if (unlikely(r != 0)) {
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unreserve(abo);
 			return -EINVAL;
 		}
 	}
 
-	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
-	amdgpu_bo_unreserve(rbo);
+	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+	amdgpu_bo_unreserve(abo);
 
 	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
 
@@ -2220,12 +2220,12 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		amdgpu_fb = to_amdgpu_framebuffer(fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r != 0))
 			return r;
-		amdgpu_bo_unpin(rbo);
-		amdgpu_bo_unreserve(rbo);
+		amdgpu_bo_unpin(abo);
+		amdgpu_bo_unreserve(abo);
 	}
 
 	/* Bytes per pixel may have changed */
@@ -2697,16 +2697,16 @@ static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
 	if (crtc->primary->fb) {
 		int r;
 		struct amdgpu_framebuffer *amdgpu_fb;
-		struct amdgpu_bo *rbo;
+		struct amdgpu_bo *abo;
 
 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r))
-			DRM_ERROR("failed to reserve rbo before unpin\n");
+			DRM_ERROR("failed to reserve abo before unpin\n");
 		else {
-			amdgpu_bo_unpin(rbo);
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unpin(abo);
+			amdgpu_bo_unreserve(abo);
 		}
 	}
 	/* disable the GRPH */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index 2d02acd55829..23ff9f206fb4 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -229,16 +229,16 @@ static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
 	if (crtc->primary->fb) {
 		int r;
 		struct amdgpu_framebuffer *amdgpu_fb;
-		struct amdgpu_bo *rbo;
+		struct amdgpu_bo *abo;
 
 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r))
-			DRM_ERROR("failed to reserve rbo before unpin\n");
+			DRM_ERROR("failed to reserve abo before unpin\n");
 		else {
-			amdgpu_bo_unpin(rbo);
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unpin(abo);
+			amdgpu_bo_unreserve(abo);
 		}
 	}
 