author     Dave Airlie <airlied@redhat.com>    2016-03-25 02:02:06 -0400
committer  Dave Airlie <airlied@redhat.com>    2016-03-25 02:02:06 -0400
commit     4604202ca8d2a5e33b4bca0812b5d92975ed1bd8 (patch)
tree       520ea356b99bf0e8b314bafd1f09d0967fb3cd0a
parent     17efca93c8728445522dedafc033b3384a26a39d (diff)
parent     1135035d9275ef9aecad23fc715a69ff78904adf (diff)
Merge branch 'drm-next-4.6' of git://people.freedesktop.org/~agd5f/linux into drm-next
some amd fixes

* 'drm-next-4.6' of git://people.freedesktop.org/~agd5f/linux:
  drm/radeon/mst: cleanup code indentation
  drm/radeon/mst: fix regression in lane/link handling.
  drm/amdgpu: add invalidate_page callback for userptrs
  drm/amdgpu: Revert "remove the userptr rmn->lock"
  drm/amdgpu: clean up path handling for powerplay
  drm/amd/powerplay: fix memory leak of tdp_table
 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c                      | 120
 drivers/gpu/drm/amd/powerplay/Makefile                      |  14
 drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c |   4
 drivers/gpu/drm/radeon/radeon_dp_mst.c                      |  22
 4 files changed, 102 insertions(+), 58 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index d7ec9bd6755f..9f4a45cd2aab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -48,7 +48,8 @@ struct amdgpu_mn {
         /* protected by adev->mn_lock */
         struct hlist_node node;
 
-        /* objects protected by mm->mmap_sem */
+        /* objects protected by lock */
+        struct mutex lock;
         struct rb_root objects;
 };
 
@@ -72,7 +73,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
         struct amdgpu_bo *bo, *next_bo;
 
         mutex_lock(&adev->mn_lock);
-        down_write(&rmn->mm->mmap_sem);
+        mutex_lock(&rmn->lock);
         hash_del(&rmn->node);
         rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
                                              it.rb) {
@@ -82,7 +83,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
                 }
                 kfree(node);
         }
-        up_write(&rmn->mm->mmap_sem);
+        mutex_unlock(&rmn->lock);
         mutex_unlock(&adev->mn_lock);
         mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
         kfree(rmn);
@@ -105,6 +106,76 @@ static void amdgpu_mn_release(struct mmu_notifier *mn,
 }
 
 /**
+ * amdgpu_mn_invalidate_node - unmap all BOs of a node
+ *
+ * @node: the node with the BOs to unmap
+ *
+ * We block for all BOs and unmap them by moving them
+ * into the system domain again.
+ */
+static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
+                                      unsigned long start,
+                                      unsigned long end)
+{
+        struct amdgpu_bo *bo;
+        long r;
+
+        list_for_each_entry(bo, &node->bos, mn_list) {
+
+                if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
+                        continue;
+
+                r = amdgpu_bo_reserve(bo, true);
+                if (r) {
+                        DRM_ERROR("(%ld) failed to reserve user bo\n", r);
+                        continue;
+                }
+
+                r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
+                        true, false, MAX_SCHEDULE_TIMEOUT);
+                if (r <= 0)
+                        DRM_ERROR("(%ld) failed to wait for user bo\n", r);
+
+                amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+                r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+                if (r)
+                        DRM_ERROR("(%ld) failed to validate user bo\n", r);
+
+                amdgpu_bo_unreserve(bo);
+        }
+}
+
+/**
+ * amdgpu_mn_invalidate_page - callback to notify about mm change
+ *
+ * @mn: our notifier
+ * @mm: the mm this callback is about
+ * @address: address of the invalidated page
+ *
+ * Invalidation of a single page. Blocks for all BOs mapping it
+ * and unmaps them by moving them into the system domain again.
+ */
+static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn,
+                                      struct mm_struct *mm,
+                                      unsigned long address)
+{
+        struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+        struct interval_tree_node *it;
+
+        mutex_lock(&rmn->lock);
+
+        it = interval_tree_iter_first(&rmn->objects, address, address);
+        if (it) {
+                struct amdgpu_mn_node *node;
+
+                node = container_of(it, struct amdgpu_mn_node, it);
+                amdgpu_mn_invalidate_node(node, address, address);
+        }
+
+        mutex_unlock(&rmn->lock);
+}
+
+/**
  * amdgpu_mn_invalidate_range_start - callback to notify about mm change
  *
  * @mn: our notifier
@@ -126,44 +197,24 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
         /* notification is exclusive, but interval is inclusive */
         end -= 1;
 
+        mutex_lock(&rmn->lock);
+
         it = interval_tree_iter_first(&rmn->objects, start, end);
         while (it) {
                 struct amdgpu_mn_node *node;
-                struct amdgpu_bo *bo;
-                long r;
 
                 node = container_of(it, struct amdgpu_mn_node, it);
                 it = interval_tree_iter_next(it, start, end);
 
-                list_for_each_entry(bo, &node->bos, mn_list) {
-
-                        if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start,
-                                                          end))
-                                continue;
-
-                        r = amdgpu_bo_reserve(bo, true);
-                        if (r) {
-                                DRM_ERROR("(%ld) failed to reserve user bo\n", r);
-                                continue;
-                        }
-
-                        r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
-                                true, false, MAX_SCHEDULE_TIMEOUT);
-                        if (r <= 0)
-                                DRM_ERROR("(%ld) failed to wait for user bo\n", r);
-
-                        amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
-                        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
-                        if (r)
-                                DRM_ERROR("(%ld) failed to validate user bo\n", r);
-
-                        amdgpu_bo_unreserve(bo);
-                }
+                amdgpu_mn_invalidate_node(node, start, end);
         }
+
+        mutex_unlock(&rmn->lock);
 }
 
 static const struct mmu_notifier_ops amdgpu_mn_ops = {
         .release = amdgpu_mn_release,
+        .invalidate_page = amdgpu_mn_invalidate_page,
         .invalidate_range_start = amdgpu_mn_invalidate_range_start,
 };
 
@@ -196,6 +247,7 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
         rmn->adev = adev;
         rmn->mm = mm;
         rmn->mn.ops = &amdgpu_mn_ops;
+        mutex_init(&rmn->lock);
         rmn->objects = RB_ROOT;
 
         r = __mmu_notifier_register(&rmn->mn, mm);
@@ -242,7 +294,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 
         INIT_LIST_HEAD(&bos);
 
-        down_write(&rmn->mm->mmap_sem);
+        mutex_lock(&rmn->lock);
 
         while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
                 kfree(node);
@@ -256,7 +308,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
         if (!node) {
                 node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
                 if (!node) {
-                        up_write(&rmn->mm->mmap_sem);
+                        mutex_unlock(&rmn->lock);
                         return -ENOMEM;
                 }
         }
@@ -271,7 +323,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 
         interval_tree_insert(&node->it, &rmn->objects);
 
-        up_write(&rmn->mm->mmap_sem);
+        mutex_unlock(&rmn->lock);
 
         return 0;
 }
@@ -297,7 +349,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
                 return;
         }
 
-        down_write(&rmn->mm->mmap_sem);
+        mutex_lock(&rmn->lock);
 
         /* save the next list entry for later */
         head = bo->mn_list.next;
@@ -312,6 +364,6 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
                 kfree(node);
         }
 
-        up_write(&rmn->mm->mmap_sem);
+        mutex_unlock(&rmn->lock);
         mutex_unlock(&adev->mn_lock);
 }
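
The thrust of the amdgpu_mn.c change: the invalidation callbacks, in particular the new single-page one, may run without mmap_sem held for write, so the interval tree of userptr ranges gets back its own mutex instead of piggybacking on mmap_sem. A minimal sketch of that pattern, with illustrative my_* names rather than the actual amdgpu types; note that interval trees are inclusive, so a single page is the interval [address, address]:

#include <linux/interval_tree.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>

struct my_mn {
        struct mmu_notifier mn;
        struct mutex lock;      /* guards @objects; replaces mmap_sem here */
        struct rb_root objects; /* interval tree of registered ranges */
};

static void my_invalidate_page(struct mmu_notifier *mn,
                               struct mm_struct *mm,
                               unsigned long address)
{
        struct my_mn *rmn = container_of(mn, struct my_mn, mn);
        struct interval_tree_node *it;

        /* Private lock: safe regardless of how mmap_sem is held. */
        mutex_lock(&rmn->lock);
        it = interval_tree_iter_first(&rmn->objects, address, address);
        if (it) {
                /* tear down the GPU mapping covering this page */
        }
        mutex_unlock(&rmn->lock);
}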
diff --git a/drivers/gpu/drm/amd/powerplay/Makefile b/drivers/gpu/drm/amd/powerplay/Makefile
index e195bf59da86..043e6ebab575 100644
--- a/drivers/gpu/drm/amd/powerplay/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/Makefile
@@ -1,17 +1,17 @@
 
 subdir-ccflags-y += -Iinclude/drm \
-                -Idrivers/gpu/drm/amd/powerplay/inc/ \
-                -Idrivers/gpu/drm/amd/include/asic_reg \
-                -Idrivers/gpu/drm/amd/include \
-                -Idrivers/gpu/drm/amd/powerplay/smumgr\
-                -Idrivers/gpu/drm/amd/powerplay/hwmgr \
-                -Idrivers/gpu/drm/amd/powerplay/eventmgr
+                -I$(FULL_AMD_PATH)/powerplay/inc/ \
+                -I$(FULL_AMD_PATH)/include/asic_reg \
+                -I$(FULL_AMD_PATH)/include \
+                -I$(FULL_AMD_PATH)/powerplay/smumgr\
+                -I$(FULL_AMD_PATH)/powerplay/hwmgr \
+                -I$(FULL_AMD_PATH)/powerplay/eventmgr
 
 AMD_PP_PATH = ../powerplay
 
 PP_LIBS = smumgr hwmgr eventmgr
 
-AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix drivers/gpu/drm/amd/powerplay/,$(PP_LIBS)))
+AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/powerplay/,$(PP_LIBS)))
 
 include $(AMD_POWERPLAY)
 
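
The include paths are now rooted at $(FULL_AMD_PATH), so they stay valid when kbuild is invoked with a separate object directory; the bare drivers/gpu/drm/... paths only resolved relative to the kernel source root. This relies on the variable being set by the Makefile that includes this one; a sketch of the assumed definition in the parent amdgpu Makefile (the exact form in the tree may differ):

# drivers/gpu/drm/amd/amdgpu/Makefile (assumed definition, for context):
# $(src) is the kbuild-provided source path of the current directory, so
# this resolves to drivers/gpu/drm/amd regardless of where make ran.
FULL_AMD_PATH=$(src)/..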
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
index 34f4bef3691f..b156481b50e8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
@@ -512,8 +512,10 @@ static int get_cac_tdp_table(
 
         hwmgr->dyn_state.cac_dtp_table = kzalloc(table_size, GFP_KERNEL);
 
-        if (NULL == hwmgr->dyn_state.cac_dtp_table)
+        if (NULL == hwmgr->dyn_state.cac_dtp_table) {
+                kfree(tdp_table);
                 return -ENOMEM;
+        }
 
         memset(hwmgr->dyn_state.cac_dtp_table, 0x00, table_size);
 
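
The tonga hunk plugs a leak on the error path: if the second allocation fails, the earlier tdp_table allocation must be freed before returning. When several allocations stack up, kernel code usually prefers goto-based unwinding over freeing inline; a simplified sketch of the same fix in that style (identifiers and sizes abbreviated, surrounding logic elided, not the driver's actual code):

        tdp_table = kzalloc(table_size, GFP_KERNEL);
        if (!tdp_table)
                return -ENOMEM;

        hwmgr->dyn_state.cac_dtp_table = kzalloc(table_size, GFP_KERNEL);
        if (!hwmgr->dyn_state.cac_dtp_table) {
                result = -ENOMEM;
                goto err_free_tdp;      /* unwind the earlier allocation */
        }

        /* ... populate both tables ... */
        return 0;

err_free_tdp:
        kfree(tdp_table);
        return result;

As an aside, the memset() visible in the surrounding context is redundant: kzalloc() already returns zeroed memory.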
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index df7a1719c841..43cffb526b0c 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -510,6 +510,7 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,
 {
         struct radeon_encoder_mst *mst_enc;
         struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+        struct radeon_connector_atom_dig *dig_connector;
         int bpp = 24;
 
         mst_enc = radeon_encoder->enc_priv;
@@ -523,22 +524,11 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,
 
 
         drm_mode_set_crtcinfo(adjusted_mode, 0);
-        {
-                struct radeon_connector_atom_dig *dig_connector;
-                int ret;
-
-                dig_connector = mst_enc->connector->con_priv;
-                ret = radeon_dp_get_dp_link_config(&mst_enc->connector->base,
-                                                   dig_connector->dpcd, adjusted_mode->clock,
-                                                   &dig_connector->dp_lane_count,
-                                                   &dig_connector->dp_clock);
-                if (ret) {
-                        dig_connector->dp_lane_count = 0;
-                        dig_connector->dp_clock = 0;
-                }
-                DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector,
-                              dig_connector->dp_lane_count, dig_connector->dp_clock);
-        }
+        dig_connector = mst_enc->connector->con_priv;
+        dig_connector->dp_lane_count = drm_dp_max_lane_count(dig_connector->dpcd);
+        dig_connector->dp_clock = drm_dp_max_link_rate(dig_connector->dpcd);
+        DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector,
+                      dig_connector->dp_lane_count, dig_connector->dp_clock);
         return true;
 }
 
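
The radeon MST fix drops the per-mode link computation (radeon_dp_get_dp_link_config, which zeroed the lane count and clock on failure and regressed link handling) and instead always uses the sink's maximums, read straight from the DPCD receiver capability bytes. For reference, the two DRM helpers used above boil down to roughly the following (sketch modeled on include/drm/drm_dp_helper.h of this era; exact prototypes may differ):

/* Sketch: lane count is the low bits of DPCD byte 0x02; link rate is the
 * bandwidth code in DPCD byte 0x01, converted to a link rate in kHz. */
static inline u8 drm_dp_max_lane_count(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
        return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
}

static inline int drm_dp_max_link_rate(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
        return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
}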