Diffstat (limited to 'drivers/gpu/drm/radeon')
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h          44
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c       14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c   16
3 files changed, 62 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b316b301152f..85ef693850e7 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1142,6 +1142,48 @@ struct r600_vram_scratch {
         u64 gpu_addr;
 };
 
+
+/*
+ * Mutex which allows recursive locking from the same process.
+ */
+struct radeon_mutex {
+        struct mutex mutex;
+        struct task_struct *owner;
+        int level;
+};
+
+static inline void radeon_mutex_init(struct radeon_mutex *mutex)
+{
+        mutex_init(&mutex->mutex);
+        mutex->owner = NULL;
+        mutex->level = 0;
+}
+
+static inline void radeon_mutex_lock(struct radeon_mutex *mutex)
+{
+        if (mutex_trylock(&mutex->mutex)) {
+                /* The mutex was unlocked before, so it's ours now */
+                mutex->owner = current;
+        } else if (mutex->owner != current) {
+                /* Another process locked the mutex, take it */
+                mutex_lock(&mutex->mutex);
+                mutex->owner = current;
+        }
+        /* Otherwise the mutex was already locked by this process */
+
+        mutex->level++;
+}
+
+static inline void radeon_mutex_unlock(struct radeon_mutex *mutex)
+{
+        if (--mutex->level > 0)
+                return;
+
+        mutex->owner = NULL;
+        mutex_unlock(&mutex->mutex);
+}
+
+
 /*
  * Core structure, functions and helpers.
  */
@@ -1197,7 +1239,7 @@ struct radeon_device {
         struct radeon_gem gem;
         struct radeon_pm pm;
         uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH];
-        struct mutex cs_mutex;
+        struct radeon_mutex cs_mutex;
         struct radeon_wb wb;
         struct radeon_dummy_page dummy_page;
         bool gpu_lockup;
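
The helpers added above implement a recursive-style lock: mutex_trylock() claims a free mutex, a different owner falls back to a blocking mutex_lock(), and a repeat lock by the current owner only increments the nesting level. A minimal sketch of that nesting behaviour, assuming a struct radeon_device *rdev whose cs_mutex has already been set up with radeon_mutex_init() (the example function below is illustrative, not part of the patch):

/* Illustrative only: nested locking from the same task. */
static void example_nested_locking(struct radeon_device *rdev)
{
        radeon_mutex_lock(&rdev->cs_mutex);     /* level 0 -> 1, owner = current */
        radeon_mutex_lock(&rdev->cs_mutex);     /* owner == current: level 1 -> 2 only */

        /* ... critical section that may be entered recursively ... */

        radeon_mutex_unlock(&rdev->cs_mutex);   /* level 2 -> 1, mutex still held */
        radeon_mutex_unlock(&rdev->cs_mutex);   /* level 1 -> 0, owner cleared, released */
}
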
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index fae00c0d75aa..ccaa243c1442 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -222,7 +222,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
         struct radeon_cs_chunk *ib_chunk;
         int r;
 
-        mutex_lock(&rdev->cs_mutex);
+        radeon_mutex_lock(&rdev->cs_mutex);
         /* initialize parser */
         memset(&parser, 0, sizeof(struct radeon_cs_parser));
         parser.filp = filp;
@@ -233,14 +233,14 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
         if (r) {
                 DRM_ERROR("Failed to initialize parser !\n");
                 radeon_cs_parser_fini(&parser, r);
-                mutex_unlock(&rdev->cs_mutex);
+                radeon_mutex_unlock(&rdev->cs_mutex);
                 return r;
         }
         r = radeon_ib_get(rdev, &parser.ib);
         if (r) {
                 DRM_ERROR("Failed to get ib !\n");
                 radeon_cs_parser_fini(&parser, r);
-                mutex_unlock(&rdev->cs_mutex);
+                radeon_mutex_unlock(&rdev->cs_mutex);
                 return r;
         }
         r = radeon_cs_parser_relocs(&parser);
@@ -248,7 +248,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                 if (r != -ERESTARTSYS)
                         DRM_ERROR("Failed to parse relocation %d!\n", r);
                 radeon_cs_parser_fini(&parser, r);
-                mutex_unlock(&rdev->cs_mutex);
+                radeon_mutex_unlock(&rdev->cs_mutex);
                 return r;
         }
         /* Copy the packet into the IB, the parser will read from the
@@ -260,14 +260,14 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
         if (r || parser.parser_error) {
                 DRM_ERROR("Invalid command stream !\n");
                 radeon_cs_parser_fini(&parser, r);
-                mutex_unlock(&rdev->cs_mutex);
+                radeon_mutex_unlock(&rdev->cs_mutex);
                 return r;
         }
         r = radeon_cs_finish_pages(&parser);
         if (r) {
                 DRM_ERROR("Invalid command stream !\n");
                 radeon_cs_parser_fini(&parser, r);
-                mutex_unlock(&rdev->cs_mutex);
+                radeon_mutex_unlock(&rdev->cs_mutex);
                 return r;
         }
         r = radeon_ib_schedule(rdev, parser.ib);
@@ -275,7 +275,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                 DRM_ERROR("Failed to schedule IB !\n");
         }
         radeon_cs_parser_fini(&parser, r);
-        mutex_unlock(&rdev->cs_mutex);
+        radeon_mutex_unlock(&rdev->cs_mutex);
         return r;
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index c33bc914d93d..c4d00a171411 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -716,7 +716,7 @@ int radeon_device_init(struct radeon_device *rdev,
 
         /* mutex initialization are all done here so we
          * can recall function without having locking issues */
-        mutex_init(&rdev->cs_mutex);
+        radeon_mutex_init(&rdev->cs_mutex);
         mutex_init(&rdev->ib_pool.mutex);
         mutex_init(&rdev->cp.mutex);
         mutex_init(&rdev->dc_hw_i2c_mutex);
@@ -955,6 +955,9 @@ int radeon_gpu_reset(struct radeon_device *rdev)
         int r;
         int resched;
 
+        /* Prevent CS ioctl from interfering */
+        radeon_mutex_lock(&rdev->cs_mutex);
+
         radeon_save_bios_scratch_regs(rdev);
         /* block TTM */
         resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
@@ -967,10 +970,15 @@ int radeon_gpu_reset(struct radeon_device *rdev)
                 radeon_restore_bios_scratch_regs(rdev);
                 drm_helper_resume_force_mode(rdev->ddev);
                 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
-                return 0;
         }
-        /* bad news, how to tell it to userspace ? */
-        dev_info(rdev->dev, "GPU reset failed\n");
+
+        radeon_mutex_unlock(&rdev->cs_mutex);
+
+        if (r) {
+                /* bad news, how to tell it to userspace ? */
+                dev_info(rdev->dev, "GPU reset failed\n");
+        }
+
         return r;
 }
 
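
With radeon_gpu_reset() now taking cs_mutex itself, a task that already holds cs_mutex can still reach the reset path without deadlocking: the inner radeon_mutex_lock() sees a matching owner and only bumps the nesting level. A minimal sketch under that assumption (the wrapper function and the direct call into radeon_gpu_reset() are hypothetical, for illustration only):

/* Illustrative only: reset attempted while cs_mutex is already held. */
static int example_reset_under_cs_mutex(struct radeon_device *rdev)
{
        int r;

        radeon_mutex_lock(&rdev->cs_mutex);     /* outer lock: owner = current, level = 1 */

        /* radeon_gpu_reset() re-takes cs_mutex internally; since the owner
         * matches, its lock/unlock pair only moves level 1 -> 2 -> 1. */
        r = radeon_gpu_reset(rdev);

        radeon_mutex_unlock(&rdev->cs_mutex);   /* level 1 -> 0, mutex released */
        return r;
}
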