author     Daniel Vetter <daniel.vetter@ffwll.ch>  2013-10-10 06:44:43 -0400
committer  Daniel Vetter <daniel.vetter@ffwll.ch>  2013-10-10 06:44:43 -0400
commit     967ad7f1489da7babbe0746f81c283458ecd3f84 (patch)
tree       812608fd6efcfe81096bd51b1ec1c2a4167385f6
parent     d7bf63f2465b3b6335dd66ffbf387768d81a59d5 (diff)
parent     6aba5b6cf098ba305fc31b23cc14114a16768d22 (diff)
Merge remote-tracking branch 'airlied/drm-next' into drm-intel-next
The conflict in intel_drv.h tripped me up a bit since a patch in dinq
moves all the functions around, but another one in drm-next removes a
single function. So I've figured that folding this into a backmerge
would be good.
i915_dma.c just has adjacent lines changed, nothing nefarious there.
Conflicts:
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/intel_drv.h
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
85 files changed, 625 insertions(+), 873 deletions(-)
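A large part of the drm-next side of this merge replaces the separate per-crtc vblank arrays (vbl_queue, _vblank_count, vblank_refcount, vblank_enabled, last_vblank, last_vblank_wait, vblank_inmodeset, _vblank_time) with a single array of per-crtc structures, as the drm_info.c and drm_irq.c hunks below show. As a rough sketch only: the accessors used in those hunks imply a structure along these lines (the field names are taken from the diff; the exact types, ordering, and header placement are assumptions, not the verbatim kernel declaration):

    /* Sketch of the consolidated per-crtc vblank state implied by the
     * dev->vblank[crtc].* accessors in the hunks below. */
    struct drm_vblank_crtc {
            wait_queue_head_t queue;     /* vblank wait queue (was dev->vbl_queue[crtc]) */
            struct timeval time[DRM_VBLANKTIME_RBSIZE];
                                         /* timestamp ringbuffer (was dev->_vblank_time) */
            atomic_t count;              /* vblank counter (was dev->_vblank_count[crtc]) */
            atomic_t refcount;           /* users of vblank irqs (was dev->vblank_refcount[crtc]) */
            u32 last;                    /* last hw counter, for wrap handling (was dev->last_vblank[crtc]) */
            u32 last_wait;               /* last sequence waited on (was dev->last_vblank_wait[crtc]) */
            unsigned int inmodeset;      /* modeset bookkeeping flags (was dev->vblank_inmodeset[crtc]) */
            bool enabled;                /* vblank irqs enabled (was dev->vblank_enabled[crtc]) */
    };

Collapsing the parallel arrays into one struct also lets drm_vblank_init() do a single kcalloc() instead of eight separate allocations, which is what the drm_irq.c hunk below reduces to.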
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 32e270dc714e..5137f15dba19 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -211,7 +211,6 @@ static struct drm_driver driver = { | |||
211 | .minor = DRIVER_MINOR, | 211 | .minor = DRIVER_MINOR, |
212 | .patchlevel = DRIVER_PATCHLEVEL, | 212 | .patchlevel = DRIVER_PATCHLEVEL, |
213 | 213 | ||
214 | .gem_init_object = ast_gem_init_object, | ||
215 | .gem_free_object = ast_gem_free_object, | 214 | .gem_free_object = ast_gem_free_object, |
216 | .dumb_create = ast_dumb_create, | 215 | .dumb_create = ast_dumb_create, |
217 | .dumb_map_offset = ast_dumb_mmap_offset, | 216 | .dumb_map_offset = ast_dumb_mmap_offset, |
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 8492b68e873c..9833a1b1acc1 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -323,7 +323,6 @@ extern int ast_dumb_create(struct drm_file *file, | |||
323 | struct drm_device *dev, | 323 | struct drm_device *dev, |
324 | struct drm_mode_create_dumb *args); | 324 | struct drm_mode_create_dumb *args); |
325 | 325 | ||
326 | extern int ast_gem_init_object(struct drm_gem_object *obj); | ||
327 | extern void ast_gem_free_object(struct drm_gem_object *obj); | 326 | extern void ast_gem_free_object(struct drm_gem_object *obj); |
328 | extern int ast_dumb_mmap_offset(struct drm_file *file, | 327 | extern int ast_dumb_mmap_offset(struct drm_file *file, |
329 | struct drm_device *dev, | 328 | struct drm_device *dev, |
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 7f6152d374ca..af0b868a9dfd 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -449,12 +449,6 @@ int ast_dumb_create(struct drm_file *file, | |||
449 | return 0; | 449 | return 0; |
450 | } | 450 | } |
451 | 451 | ||
452 | int ast_gem_init_object(struct drm_gem_object *obj) | ||
453 | { | ||
454 | BUG(); | ||
455 | return 0; | ||
456 | } | ||
457 | |||
458 | void ast_bo_unref(struct ast_bo **bo) | 452 | void ast_bo_unref(struct ast_bo **bo) |
459 | { | 453 | { |
460 | struct ttm_buffer_object *tbo; | 454 | struct ttm_buffer_object *tbo; |
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 138364d91782..953fc8aea69c 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -97,7 +97,6 @@ static struct drm_driver driver = { | |||
97 | .major = DRIVER_MAJOR, | 97 | .major = DRIVER_MAJOR, |
98 | .minor = DRIVER_MINOR, | 98 | .minor = DRIVER_MINOR, |
99 | .patchlevel = DRIVER_PATCHLEVEL, | 99 | .patchlevel = DRIVER_PATCHLEVEL, |
100 | .gem_init_object = cirrus_gem_init_object, | ||
101 | .gem_free_object = cirrus_gem_free_object, | 100 | .gem_free_object = cirrus_gem_free_object, |
102 | .dumb_create = cirrus_dumb_create, | 101 | .dumb_create = cirrus_dumb_create, |
103 | .dumb_map_offset = cirrus_dumb_mmap_offset, | 102 | .dumb_map_offset = cirrus_dumb_mmap_offset, |
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 9b0bb9184afd..b6aded73838b 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -191,7 +191,6 @@ int cirrus_device_init(struct cirrus_device *cdev, | |||
191 | struct pci_dev *pdev, | 191 | struct pci_dev *pdev, |
192 | uint32_t flags); | 192 | uint32_t flags); |
193 | void cirrus_device_fini(struct cirrus_device *cdev); | 193 | void cirrus_device_fini(struct cirrus_device *cdev); |
194 | int cirrus_gem_init_object(struct drm_gem_object *obj); | ||
195 | void cirrus_gem_free_object(struct drm_gem_object *obj); | 194 | void cirrus_gem_free_object(struct drm_gem_object *obj); |
196 | int cirrus_dumb_mmap_offset(struct drm_file *file, | 195 | int cirrus_dumb_mmap_offset(struct drm_file *file, |
197 | struct drm_device *dev, | 196 | struct drm_device *dev, |
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index f130a533a512..78e76f24343d 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -255,12 +255,6 @@ int cirrus_dumb_create(struct drm_file *file, | |||
255 | return 0; | 255 | return 0; |
256 | } | 256 | } |
257 | 257 | ||
258 | int cirrus_gem_init_object(struct drm_gem_object *obj) | ||
259 | { | ||
260 | BUG(); | ||
261 | return 0; | ||
262 | } | ||
263 | |||
264 | void cirrus_bo_unref(struct cirrus_bo **bo) | 258 | void cirrus_bo_unref(struct cirrus_bo **bo) |
265 | { | 259 | { |
266 | struct ttm_buffer_object *tbo; | 260 | struct ttm_buffer_object *tbo; |
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 224ff965bcf7..a4b017b6849e 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -334,7 +334,6 @@ int drm_addctx(struct drm_device *dev, void *data, | |||
334 | 334 | ||
335 | mutex_lock(&dev->ctxlist_mutex); | 335 | mutex_lock(&dev->ctxlist_mutex); |
336 | list_add(&ctx_entry->head, &dev->ctxlist); | 336 | list_add(&ctx_entry->head, &dev->ctxlist); |
337 | ++dev->ctx_count; | ||
338 | mutex_unlock(&dev->ctxlist_mutex); | 337 | mutex_unlock(&dev->ctxlist_mutex); |
339 | 338 | ||
340 | return 0; | 339 | return 0; |
@@ -432,7 +431,6 @@ int drm_rmctx(struct drm_device *dev, void *data, | |||
432 | if (pos->handle == ctx->handle) { | 431 | if (pos->handle == ctx->handle) { |
433 | list_del(&pos->head); | 432 | list_del(&pos->head); |
434 | kfree(pos); | 433 | kfree(pos); |
435 | --dev->ctx_count; | ||
436 | } | 434 | } |
437 | } | 435 | } |
438 | } | 436 | } |
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 4280e37f34c5..5fcb9d487672 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -109,9 +109,9 @@ static void drm_mode_validate_flag(struct drm_connector *connector, | |||
109 | * then culled (based on validity and the @maxX, @maxY parameters) and put into | 109 | * then culled (based on validity and the @maxX, @maxY parameters) and put into |
110 | * the normal modes list. | 110 | * the normal modes list. |
111 | * | 111 | * |
112 | * Intended to be use as a generic implementation of the ->probe() @connector | 112 | * Intended to be use as a generic implementation of the ->fill_modes() |
113 | * callback for drivers that use the crtc helpers for output mode filtering and | 113 | * @connector vfunc for drivers that use the crtc helpers for output mode |
114 | * detection. | 114 | * filtering and detection. |
115 | * | 115 | * |
116 | * RETURNS: | 116 | * RETURNS: |
117 | * Number of modes found on @connector. | 117 | * Number of modes found on @connector. |
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 89e196627160..9e978aae8972 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -228,12 +228,12 @@ i2c_dp_aux_add_bus(struct i2c_adapter *adapter) | |||
228 | EXPORT_SYMBOL(i2c_dp_aux_add_bus); | 228 | EXPORT_SYMBOL(i2c_dp_aux_add_bus); |
229 | 229 | ||
230 | /* Helpers for DP link training */ | 230 | /* Helpers for DP link training */ |
231 | static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r) | 231 | static u8 dp_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r) |
232 | { | 232 | { |
233 | return link_status[r - DP_LANE0_1_STATUS]; | 233 | return link_status[r - DP_LANE0_1_STATUS]; |
234 | } | 234 | } |
235 | 235 | ||
236 | static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE], | 236 | static u8 dp_get_lane_status(const u8 link_status[DP_LINK_STATUS_SIZE], |
237 | int lane) | 237 | int lane) |
238 | { | 238 | { |
239 | int i = DP_LANE0_1_STATUS + (lane >> 1); | 239 | int i = DP_LANE0_1_STATUS + (lane >> 1); |
@@ -242,7 +242,7 @@ static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE], | |||
242 | return (l >> s) & 0xf; | 242 | return (l >> s) & 0xf; |
243 | } | 243 | } |
244 | 244 | ||
245 | bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE], | 245 | bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE], |
246 | int lane_count) | 246 | int lane_count) |
247 | { | 247 | { |
248 | u8 lane_align; | 248 | u8 lane_align; |
@@ -262,7 +262,7 @@ bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE], | |||
262 | } | 262 | } |
263 | EXPORT_SYMBOL(drm_dp_channel_eq_ok); | 263 | EXPORT_SYMBOL(drm_dp_channel_eq_ok); |
264 | 264 | ||
265 | bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE], | 265 | bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE], |
266 | int lane_count) | 266 | int lane_count) |
267 | { | 267 | { |
268 | int lane; | 268 | int lane; |
@@ -277,7 +277,7 @@ bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE], | |||
277 | } | 277 | } |
278 | EXPORT_SYMBOL(drm_dp_clock_recovery_ok); | 278 | EXPORT_SYMBOL(drm_dp_clock_recovery_ok); |
279 | 279 | ||
280 | u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE], | 280 | u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE], |
281 | int lane) | 281 | int lane) |
282 | { | 282 | { |
283 | int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); | 283 | int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); |
@@ -290,7 +290,7 @@ u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE], | |||
290 | } | 290 | } |
291 | EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage); | 291 | EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage); |
292 | 292 | ||
293 | u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE], | 293 | u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE], |
294 | int lane) | 294 | int lane) |
295 | { | 295 | { |
296 | int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); | 296 | int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); |
@@ -303,7 +303,7 @@ u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE], | |||
303 | } | 303 | } |
304 | EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis); | 304 | EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis); |
305 | 305 | ||
306 | void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) { | 306 | void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) { |
307 | if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0) | 307 | if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0) |
308 | udelay(100); | 308 | udelay(100); |
309 | else | 309 | else |
@@ -311,7 +311,7 @@ void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) { | |||
311 | } | 311 | } |
312 | EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay); | 312 | EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay); |
313 | 313 | ||
314 | void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) { | 314 | void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) { |
315 | if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0) | 315 | if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0) |
316 | udelay(400); | 316 | udelay(400); |
317 | else | 317 | else |
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index e79d8d9ca203..b55f138bd990 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -171,76 +171,6 @@ static const struct drm_ioctl_desc drm_ioctls[] = { | |||
171 | 171 | ||
172 | #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) | 172 | #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) |
173 | 173 | ||
174 | /** | ||
175 | * drm_legacy_dev_reinit | ||
176 | * | ||
177 | * Reinitializes a legacy/ums drm device in it's lastclose function. | ||
178 | */ | ||
179 | static void drm_legacy_dev_reinit(struct drm_device *dev) | ||
180 | { | ||
181 | int i; | ||
182 | |||
183 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | ||
184 | return; | ||
185 | |||
186 | atomic_set(&dev->ioctl_count, 0); | ||
187 | atomic_set(&dev->vma_count, 0); | ||
188 | |||
189 | for (i = 0; i < ARRAY_SIZE(dev->counts); i++) | ||
190 | atomic_set(&dev->counts[i], 0); | ||
191 | |||
192 | dev->sigdata.lock = NULL; | ||
193 | |||
194 | dev->context_flag = 0; | ||
195 | dev->last_context = 0; | ||
196 | dev->if_version = 0; | ||
197 | } | ||
198 | |||
199 | /** | ||
200 | * Take down the DRM device. | ||
201 | * | ||
202 | * \param dev DRM device structure. | ||
203 | * | ||
204 | * Frees every resource in \p dev. | ||
205 | * | ||
206 | * \sa drm_device | ||
207 | */ | ||
208 | int drm_lastclose(struct drm_device * dev) | ||
209 | { | ||
210 | struct drm_vma_entry *vma, *vma_temp; | ||
211 | |||
212 | DRM_DEBUG("\n"); | ||
213 | |||
214 | if (dev->driver->lastclose) | ||
215 | dev->driver->lastclose(dev); | ||
216 | DRM_DEBUG("driver lastclose completed\n"); | ||
217 | |||
218 | if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET)) | ||
219 | drm_irq_uninstall(dev); | ||
220 | |||
221 | mutex_lock(&dev->struct_mutex); | ||
222 | |||
223 | drm_agp_clear(dev); | ||
224 | |||
225 | drm_legacy_sg_cleanup(dev); | ||
226 | |||
227 | /* Clear vma list (only built for debugging) */ | ||
228 | list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) { | ||
229 | list_del(&vma->head); | ||
230 | kfree(vma); | ||
231 | } | ||
232 | |||
233 | drm_legacy_dma_takedown(dev); | ||
234 | |||
235 | dev->dev_mapping = NULL; | ||
236 | mutex_unlock(&dev->struct_mutex); | ||
237 | |||
238 | drm_legacy_dev_reinit(dev); | ||
239 | |||
240 | DRM_DEBUG("lastclose completed\n"); | ||
241 | return 0; | ||
242 | } | ||
243 | |||
244 | /** File operations structure */ | 174 | /** File operations structure */ |
245 | static const struct file_operations drm_stub_fops = { | 175 | static const struct file_operations drm_stub_fops = { |
246 | .owner = THIS_MODULE, | 176 | .owner = THIS_MODULE, |
@@ -386,7 +316,6 @@ long drm_ioctl(struct file *filp, | |||
386 | return -ENODEV; | 316 | return -ENODEV; |
387 | 317 | ||
388 | atomic_inc(&dev->ioctl_count); | 318 | atomic_inc(&dev->ioctl_count); |
389 | atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]); | ||
390 | ++file_priv->ioctl_count; | 319 | ++file_priv->ioctl_count; |
391 | 320 | ||
392 | if ((nr >= DRM_CORE_IOCTL_COUNT) && | 321 | if ((nr >= DRM_CORE_IOCTL_COUNT) && |
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 7d1e8a90480a..9e81609b1e29 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1264,6 +1264,18 @@ struct edid *drm_get_edid(struct drm_connector *connector, | |||
1264 | } | 1264 | } |
1265 | EXPORT_SYMBOL(drm_get_edid); | 1265 | EXPORT_SYMBOL(drm_get_edid); |
1266 | 1266 | ||
1267 | /** | ||
1268 | * drm_edid_duplicate - duplicate an EDID and the extensions | ||
1269 | * @edid: EDID to duplicate | ||
1270 | * | ||
1271 | * Return duplicate edid or NULL on allocation failure. | ||
1272 | */ | ||
1273 | struct edid *drm_edid_duplicate(const struct edid *edid) | ||
1274 | { | ||
1275 | return kmemdup(edid, (edid->extensions + 1) * EDID_LENGTH, GFP_KERNEL); | ||
1276 | } | ||
1277 | EXPORT_SYMBOL(drm_edid_duplicate); | ||
1278 | |||
1267 | /*** EDID parsing ***/ | 1279 | /*** EDID parsing ***/ |
1268 | 1280 | ||
1269 | /** | 1281 | /** |
@@ -3013,6 +3025,8 @@ int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb) | |||
3013 | /* Speaker Allocation Data Block */ | 3025 | /* Speaker Allocation Data Block */ |
3014 | if (dbl == 3) { | 3026 | if (dbl == 3) { |
3015 | *sadb = kmalloc(dbl, GFP_KERNEL); | 3027 | *sadb = kmalloc(dbl, GFP_KERNEL); |
3028 | if (!*sadb) | ||
3029 | return -ENOMEM; | ||
3016 | memcpy(*sadb, &db[1], dbl); | 3030 | memcpy(*sadb, &db[1], dbl); |
3017 | count = dbl; | 3031 | count = dbl; |
3018 | break; | 3032 | break; |
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 271b42bbfb72..9081172ef057 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -32,7 +32,7 @@ MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob " | |||
32 | "from built-in data or /lib/firmware instead. "); | 32 | "from built-in data or /lib/firmware instead. "); |
33 | 33 | ||
34 | #define GENERIC_EDIDS 5 | 34 | #define GENERIC_EDIDS 5 |
35 | static char *generic_edid_name[GENERIC_EDIDS] = { | 35 | static const char *generic_edid_name[GENERIC_EDIDS] = { |
36 | "edid/1024x768.bin", | 36 | "edid/1024x768.bin", |
37 | "edid/1280x1024.bin", | 37 | "edid/1280x1024.bin", |
38 | "edid/1600x1200.bin", | 38 | "edid/1600x1200.bin", |
@@ -40,7 +40,7 @@ static char *generic_edid_name[GENERIC_EDIDS] = { | |||
40 | "edid/1920x1080.bin", | 40 | "edid/1920x1080.bin", |
41 | }; | 41 | }; |
42 | 42 | ||
43 | static u8 generic_edid[GENERIC_EDIDS][128] = { | 43 | static const u8 generic_edid[GENERIC_EDIDS][128] = { |
44 | { | 44 | { |
45 | 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, | 45 | 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, |
46 | 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | 46 | 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, |
@@ -133,63 +133,68 @@ static u8 generic_edid[GENERIC_EDIDS][128] = { | |||
133 | }, | 133 | }, |
134 | }; | 134 | }; |
135 | 135 | ||
136 | static int edid_size(const u8 *edid, int data_size) | ||
137 | { | ||
138 | if (data_size < EDID_LENGTH) | ||
139 | return 0; | ||
140 | |||
141 | return (edid[0x7e] + 1) * EDID_LENGTH; | ||
142 | } | ||
143 | |||
136 | static u8 *edid_load(struct drm_connector *connector, const char *name, | 144 | static u8 *edid_load(struct drm_connector *connector, const char *name, |
137 | const char *connector_name) | 145 | const char *connector_name) |
138 | { | 146 | { |
139 | const struct firmware *fw; | 147 | const struct firmware *fw = NULL; |
140 | struct platform_device *pdev; | 148 | const u8 *fwdata; |
141 | u8 *fwdata = NULL, *edid, *new_edid; | 149 | u8 *edid; |
142 | int fwsize, expected; | 150 | int fwsize, builtin; |
143 | int builtin = 0, err = 0; | ||
144 | int i, valid_extensions = 0; | 151 | int i, valid_extensions = 0; |
145 | bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS); | 152 | bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS); |
146 | 153 | ||
147 | pdev = platform_device_register_simple(connector_name, -1, NULL, 0); | 154 | builtin = 0; |
148 | if (IS_ERR(pdev)) { | 155 | for (i = 0; i < GENERIC_EDIDS; i++) { |
149 | DRM_ERROR("Failed to register EDID firmware platform device " | 156 | if (strcmp(name, generic_edid_name[i]) == 0) { |
150 | "for connector \"%s\"\n", connector_name); | ||
151 | err = -EINVAL; | ||
152 | goto out; | ||
153 | } | ||
154 | |||
155 | err = request_firmware(&fw, name, &pdev->dev); | ||
156 | platform_device_unregister(pdev); | ||
157 | |||
158 | if (err) { | ||
159 | i = 0; | ||
160 | while (i < GENERIC_EDIDS && strcmp(name, generic_edid_name[i])) | ||
161 | i++; | ||
162 | if (i < GENERIC_EDIDS) { | ||
163 | err = 0; | ||
164 | builtin = 1; | ||
165 | fwdata = generic_edid[i]; | 157 | fwdata = generic_edid[i]; |
166 | fwsize = sizeof(generic_edid[i]); | 158 | fwsize = sizeof(generic_edid[i]); |
159 | builtin = 1; | ||
160 | break; | ||
167 | } | 161 | } |
168 | } | 162 | } |
163 | if (!builtin) { | ||
164 | struct platform_device *pdev; | ||
165 | int err; | ||
169 | 166 | ||
170 | if (err) { | 167 | pdev = platform_device_register_simple(connector_name, -1, NULL, 0); |
171 | DRM_ERROR("Requesting EDID firmware \"%s\" failed (err=%d)\n", | 168 | if (IS_ERR(pdev)) { |
172 | name, err); | 169 | DRM_ERROR("Failed to register EDID firmware platform device " |
173 | goto out; | 170 | "for connector \"%s\"\n", connector_name); |
174 | } | 171 | return ERR_CAST(pdev); |
172 | } | ||
173 | |||
174 | err = request_firmware(&fw, name, &pdev->dev); | ||
175 | platform_device_unregister(pdev); | ||
176 | if (err) { | ||
177 | DRM_ERROR("Requesting EDID firmware \"%s\" failed (err=%d)\n", | ||
178 | name, err); | ||
179 | return ERR_PTR(err); | ||
180 | } | ||
175 | 181 | ||
176 | if (fwdata == NULL) { | 182 | fwdata = fw->data; |
177 | fwdata = (u8 *) fw->data; | ||
178 | fwsize = fw->size; | 183 | fwsize = fw->size; |
179 | } | 184 | } |
180 | 185 | ||
181 | expected = (fwdata[0x7e] + 1) * EDID_LENGTH; | 186 | if (edid_size(fwdata, fwsize) != fwsize) { |
182 | if (expected != fwsize) { | ||
183 | DRM_ERROR("Size of EDID firmware \"%s\" is invalid " | 187 | DRM_ERROR("Size of EDID firmware \"%s\" is invalid " |
184 | "(expected %d, got %d)\n", name, expected, (int) fwsize); | 188 | "(expected %d, got %d\n", name, |
185 | err = -EINVAL; | 189 | edid_size(fwdata, fwsize), (int)fwsize); |
186 | goto relfw_out; | 190 | edid = ERR_PTR(-EINVAL); |
191 | goto out; | ||
187 | } | 192 | } |
188 | 193 | ||
189 | edid = kmemdup(fwdata, fwsize, GFP_KERNEL); | 194 | edid = kmemdup(fwdata, fwsize, GFP_KERNEL); |
190 | if (edid == NULL) { | 195 | if (edid == NULL) { |
191 | err = -ENOMEM; | 196 | edid = ERR_PTR(-ENOMEM); |
192 | goto relfw_out; | 197 | goto out; |
193 | } | 198 | } |
194 | 199 | ||
195 | if (!drm_edid_block_valid(edid, 0, print_bad_edid)) { | 200 | if (!drm_edid_block_valid(edid, 0, print_bad_edid)) { |
@@ -197,8 +202,8 @@ static u8 *edid_load(struct drm_connector *connector, const char *name, | |||
197 | DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ", | 202 | DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ", |
198 | name); | 203 | name); |
199 | kfree(edid); | 204 | kfree(edid); |
200 | err = -EINVAL; | 205 | edid = ERR_PTR(-EINVAL); |
201 | goto relfw_out; | 206 | goto out; |
202 | } | 207 | } |
203 | 208 | ||
204 | for (i = 1; i <= edid[0x7e]; i++) { | 209 | for (i = 1; i <= edid[0x7e]; i++) { |
@@ -210,19 +215,18 @@ static u8 *edid_load(struct drm_connector *connector, const char *name, | |||
210 | } | 215 | } |
211 | 216 | ||
212 | if (valid_extensions != edid[0x7e]) { | 217 | if (valid_extensions != edid[0x7e]) { |
218 | u8 *new_edid; | ||
219 | |||
213 | edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions; | 220 | edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions; |
214 | DRM_INFO("Found %d valid extensions instead of %d in EDID data " | 221 | DRM_INFO("Found %d valid extensions instead of %d in EDID data " |
215 | "\"%s\" for connector \"%s\"\n", valid_extensions, | 222 | "\"%s\" for connector \"%s\"\n", valid_extensions, |
216 | edid[0x7e], name, connector_name); | 223 | edid[0x7e], name, connector_name); |
217 | edid[0x7e] = valid_extensions; | 224 | edid[0x7e] = valid_extensions; |
225 | |||
218 | new_edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH, | 226 | new_edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH, |
219 | GFP_KERNEL); | 227 | GFP_KERNEL); |
220 | if (new_edid == NULL) { | 228 | if (new_edid) |
221 | err = -ENOMEM; | 229 | edid = new_edid; |
222 | kfree(edid); | ||
223 | goto relfw_out; | ||
224 | } | ||
225 | edid = new_edid; | ||
226 | } | 230 | } |
227 | 231 | ||
228 | DRM_INFO("Got %s EDID base block and %d extension%s from " | 232 | DRM_INFO("Got %s EDID base block and %d extension%s from " |
@@ -230,13 +234,9 @@ static u8 *edid_load(struct drm_connector *connector, const char *name, | |||
230 | "external", valid_extensions, valid_extensions == 1 ? "" : "s", | 234 | "external", valid_extensions, valid_extensions == 1 ? "" : "s", |
231 | name, connector_name); | 235 | name, connector_name); |
232 | 236 | ||
233 | relfw_out: | ||
234 | release_firmware(fw); | ||
235 | |||
236 | out: | 237 | out: |
237 | if (err) | 238 | if (fw) |
238 | return ERR_PTR(err); | 239 | release_firmware(fw); |
239 | |||
240 | return edid; | 240 | return edid; |
241 | } | 241 | } |
242 | 242 | ||
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index f6f6cc7fc133..21742a81cb9c 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -852,7 +852,6 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, | |||
852 | struct drm_fb_helper *fb_helper = info->par; | 852 | struct drm_fb_helper *fb_helper = info->par; |
853 | struct drm_device *dev = fb_helper->dev; | 853 | struct drm_device *dev = fb_helper->dev; |
854 | struct drm_mode_set *modeset; | 854 | struct drm_mode_set *modeset; |
855 | struct drm_crtc *crtc; | ||
856 | int ret = 0; | 855 | int ret = 0; |
857 | int i; | 856 | int i; |
858 | 857 | ||
@@ -863,8 +862,6 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, | |||
863 | } | 862 | } |
864 | 863 | ||
865 | for (i = 0; i < fb_helper->crtc_count; i++) { | 864 | for (i = 0; i < fb_helper->crtc_count; i++) { |
866 | crtc = fb_helper->crtc_info[i].mode_set.crtc; | ||
867 | |||
868 | modeset = &fb_helper->crtc_info[i].mode_set; | 865 | modeset = &fb_helper->crtc_info[i].mode_set; |
869 | 866 | ||
870 | modeset->x = var->xoffset; | 867 | modeset->x = var->xoffset; |
@@ -1360,7 +1357,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper, | |||
1360 | struct drm_connector *connector; | 1357 | struct drm_connector *connector; |
1361 | struct drm_connector_helper_funcs *connector_funcs; | 1358 | struct drm_connector_helper_funcs *connector_funcs; |
1362 | struct drm_encoder *encoder; | 1359 | struct drm_encoder *encoder; |
1363 | struct drm_fb_helper_crtc *best_crtc; | ||
1364 | int my_score, best_score, score; | 1360 | int my_score, best_score, score; |
1365 | struct drm_fb_helper_crtc **crtcs, *crtc; | 1361 | struct drm_fb_helper_crtc **crtcs, *crtc; |
1366 | struct drm_fb_helper_connector *fb_helper_conn; | 1362 | struct drm_fb_helper_connector *fb_helper_conn; |
@@ -1372,7 +1368,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper, | |||
1372 | connector = fb_helper_conn->connector; | 1368 | connector = fb_helper_conn->connector; |
1373 | 1369 | ||
1374 | best_crtcs[n] = NULL; | 1370 | best_crtcs[n] = NULL; |
1375 | best_crtc = NULL; | ||
1376 | best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height); | 1371 | best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height); |
1377 | if (modes[n] == NULL) | 1372 | if (modes[n] == NULL) |
1378 | return best_score; | 1373 | return best_score; |
@@ -1421,7 +1416,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper, | |||
1421 | score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1, | 1416 | score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1, |
1422 | width, height); | 1417 | width, height); |
1423 | if (score > best_score) { | 1418 | if (score > best_score) { |
1424 | best_crtc = crtc; | ||
1425 | best_score = score; | 1419 | best_score = score; |
1426 | memcpy(best_crtcs, crtcs, | 1420 | memcpy(best_crtcs, crtcs, |
1427 | dev->mode_config.num_connector * | 1421 | dev->mode_config.num_connector * |
@@ -1588,8 +1582,7 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config); | |||
1588 | int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) | 1582 | int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) |
1589 | { | 1583 | { |
1590 | struct drm_device *dev = fb_helper->dev; | 1584 | struct drm_device *dev = fb_helper->dev; |
1591 | int count = 0; | 1585 | u32 max_width, max_height; |
1592 | u32 max_width, max_height, bpp_sel; | ||
1593 | 1586 | ||
1594 | if (!fb_helper->fb) | 1587 | if (!fb_helper->fb) |
1595 | return 0; | 1588 | return 0; |
@@ -1604,10 +1597,8 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) | |||
1604 | 1597 | ||
1605 | max_width = fb_helper->fb->width; | 1598 | max_width = fb_helper->fb->width; |
1606 | max_height = fb_helper->fb->height; | 1599 | max_height = fb_helper->fb->height; |
1607 | bpp_sel = fb_helper->fb->bits_per_pixel; | ||
1608 | 1600 | ||
1609 | count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, | 1601 | drm_fb_helper_probe_connector_modes(fb_helper, max_width, max_height); |
1610 | max_height); | ||
1611 | mutex_unlock(&fb_helper->dev->mode_config.mutex); | 1602 | mutex_unlock(&fb_helper->dev->mode_config.mutex); |
1612 | 1603 | ||
1613 | drm_modeset_lock_all(dev); | 1604 | drm_modeset_lock_all(dev); |
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 3f84277d7036..d0e27667a4eb 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -113,7 +113,6 @@ int drm_open(struct inode *inode, struct file *filp) | |||
113 | retcode = drm_open_helper(inode, filp, dev); | 113 | retcode = drm_open_helper(inode, filp, dev); |
114 | if (retcode) | 114 | if (retcode) |
115 | goto err_undo; | 115 | goto err_undo; |
116 | atomic_inc(&dev->counts[_DRM_STAT_OPENS]); | ||
117 | if (need_setup) { | 116 | if (need_setup) { |
118 | retcode = drm_setup(dev); | 117 | retcode = drm_setup(dev); |
119 | if (retcode) | 118 | if (retcode) |
@@ -386,6 +385,71 @@ static void drm_events_release(struct drm_file *file_priv) | |||
386 | } | 385 | } |
387 | 386 | ||
388 | /** | 387 | /** |
388 | * drm_legacy_dev_reinit | ||
389 | * | ||
390 | * Reinitializes a legacy/ums drm device in it's lastclose function. | ||
391 | */ | ||
392 | static void drm_legacy_dev_reinit(struct drm_device *dev) | ||
393 | { | ||
394 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | ||
395 | return; | ||
396 | |||
397 | atomic_set(&dev->ioctl_count, 0); | ||
398 | atomic_set(&dev->vma_count, 0); | ||
399 | |||
400 | dev->sigdata.lock = NULL; | ||
401 | |||
402 | dev->context_flag = 0; | ||
403 | dev->last_context = 0; | ||
404 | dev->if_version = 0; | ||
405 | } | ||
406 | |||
407 | /** | ||
408 | * Take down the DRM device. | ||
409 | * | ||
410 | * \param dev DRM device structure. | ||
411 | * | ||
412 | * Frees every resource in \p dev. | ||
413 | * | ||
414 | * \sa drm_device | ||
415 | */ | ||
416 | int drm_lastclose(struct drm_device * dev) | ||
417 | { | ||
418 | struct drm_vma_entry *vma, *vma_temp; | ||
419 | |||
420 | DRM_DEBUG("\n"); | ||
421 | |||
422 | if (dev->driver->lastclose) | ||
423 | dev->driver->lastclose(dev); | ||
424 | DRM_DEBUG("driver lastclose completed\n"); | ||
425 | |||
426 | if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET)) | ||
427 | drm_irq_uninstall(dev); | ||
428 | |||
429 | mutex_lock(&dev->struct_mutex); | ||
430 | |||
431 | drm_agp_clear(dev); | ||
432 | |||
433 | drm_legacy_sg_cleanup(dev); | ||
434 | |||
435 | /* Clear vma list (only built for debugging) */ | ||
436 | list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) { | ||
437 | list_del(&vma->head); | ||
438 | kfree(vma); | ||
439 | } | ||
440 | |||
441 | drm_legacy_dma_takedown(dev); | ||
442 | |||
443 | dev->dev_mapping = NULL; | ||
444 | mutex_unlock(&dev->struct_mutex); | ||
445 | |||
446 | drm_legacy_dev_reinit(dev); | ||
447 | |||
448 | DRM_DEBUG("lastclose completed\n"); | ||
449 | return 0; | ||
450 | } | ||
451 | |||
452 | /** | ||
389 | * Release file. | 453 | * Release file. |
390 | * | 454 | * |
391 | * \param inode device inode | 455 | * \param inode device inode |
@@ -454,7 +518,6 @@ int drm_release(struct inode *inode, struct file *filp) | |||
454 | 518 | ||
455 | list_del(&pos->head); | 519 | list_del(&pos->head); |
456 | kfree(pos); | 520 | kfree(pos); |
457 | --dev->ctx_count; | ||
458 | } | 521 | } |
459 | } | 522 | } |
460 | } | 523 | } |
@@ -516,7 +579,6 @@ int drm_release(struct inode *inode, struct file *filp) | |||
516 | * End inline drm_release | 579 | * End inline drm_release |
517 | */ | 580 | */ |
518 | 581 | ||
519 | atomic_inc(&dev->counts[_DRM_STAT_CLOSES]); | ||
520 | if (!--dev->open_count) { | 582 | if (!--dev->open_count) { |
521 | if (atomic_read(&dev->ioctl_count)) { | 583 | if (atomic_read(&dev->ioctl_count)) { |
522 | DRM_ERROR("Device busy: %d\n", | 584 | DRM_ERROR("Device busy: %d\n", |
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 49293bdc972a..4761adedad2a 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -160,35 +160,6 @@ void drm_gem_private_object_init(struct drm_device *dev, | |||
160 | } | 160 | } |
161 | EXPORT_SYMBOL(drm_gem_private_object_init); | 161 | EXPORT_SYMBOL(drm_gem_private_object_init); |
162 | 162 | ||
163 | /** | ||
164 | * Allocate a GEM object of the specified size with shmfs backing store | ||
165 | */ | ||
166 | struct drm_gem_object * | ||
167 | drm_gem_object_alloc(struct drm_device *dev, size_t size) | ||
168 | { | ||
169 | struct drm_gem_object *obj; | ||
170 | |||
171 | obj = kzalloc(sizeof(*obj), GFP_KERNEL); | ||
172 | if (!obj) | ||
173 | goto free; | ||
174 | |||
175 | if (drm_gem_object_init(dev, obj, size) != 0) | ||
176 | goto free; | ||
177 | |||
178 | if (dev->driver->gem_init_object != NULL && | ||
179 | dev->driver->gem_init_object(obj) != 0) { | ||
180 | goto fput; | ||
181 | } | ||
182 | return obj; | ||
183 | fput: | ||
184 | /* Object_init mangles the global counters - readjust them. */ | ||
185 | fput(obj->filp); | ||
186 | free: | ||
187 | kfree(obj); | ||
188 | return NULL; | ||
189 | } | ||
190 | EXPORT_SYMBOL(drm_gem_object_alloc); | ||
191 | |||
192 | static void | 163 | static void |
193 | drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp) | 164 | drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp) |
194 | { | 165 | { |
diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
index f7311162a61d..3d2e91c4d78e 100644
--- a/drivers/gpu/drm/drm_global.c
+++ b/drivers/gpu/drm/drm_global.c
@@ -67,7 +67,6 @@ int drm_global_item_ref(struct drm_global_reference *ref) | |||
67 | { | 67 | { |
68 | int ret; | 68 | int ret; |
69 | struct drm_global_item *item = &glob[ref->global_type]; | 69 | struct drm_global_item *item = &glob[ref->global_type]; |
70 | void *object; | ||
71 | 70 | ||
72 | mutex_lock(&item->mutex); | 71 | mutex_lock(&item->mutex); |
73 | if (item->refcount == 0) { | 72 | if (item->refcount == 0) { |
@@ -85,7 +84,6 @@ int drm_global_item_ref(struct drm_global_reference *ref) | |||
85 | } | 84 | } |
86 | ++item->refcount; | 85 | ++item->refcount; |
87 | ref->object = item->object; | 86 | ref->object = item->object; |
88 | object = item->object; | ||
89 | mutex_unlock(&item->mutex); | 87 | mutex_unlock(&item->mutex); |
90 | return 0; | 88 | return 0; |
91 | out_err: | 89 | out_err: |
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 53298320080b..7d5a152eeb02 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -163,13 +163,13 @@ int drm_vblank_info(struct seq_file *m, void *data) | |||
163 | mutex_lock(&dev->struct_mutex); | 163 | mutex_lock(&dev->struct_mutex); |
164 | for (crtc = 0; crtc < dev->num_crtcs; crtc++) { | 164 | for (crtc = 0; crtc < dev->num_crtcs; crtc++) { |
165 | seq_printf(m, "CRTC %d enable: %d\n", | 165 | seq_printf(m, "CRTC %d enable: %d\n", |
166 | crtc, atomic_read(&dev->vblank_refcount[crtc])); | 166 | crtc, atomic_read(&dev->vblank[crtc].refcount)); |
167 | seq_printf(m, "CRTC %d counter: %d\n", | 167 | seq_printf(m, "CRTC %d counter: %d\n", |
168 | crtc, drm_vblank_count(dev, crtc)); | 168 | crtc, drm_vblank_count(dev, crtc)); |
169 | seq_printf(m, "CRTC %d last wait: %d\n", | 169 | seq_printf(m, "CRTC %d last wait: %d\n", |
170 | crtc, dev->last_vblank_wait[crtc]); | 170 | crtc, dev->vblank[crtc].last_wait); |
171 | seq_printf(m, "CRTC %d in modeset: %d\n", | 171 | seq_printf(m, "CRTC %d in modeset: %d\n", |
172 | crtc, dev->vblank_inmodeset[crtc]); | 172 | crtc, dev->vblank[crtc].inmodeset); |
173 | } | 173 | } |
174 | mutex_unlock(&dev->struct_mutex); | 174 | mutex_unlock(&dev->struct_mutex); |
175 | return 0; | 175 | return 0; |
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index f92da0a32f0d..f9af048828ea 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -43,9 +43,8 @@ | |||
43 | #include <linux/export.h> | 43 | #include <linux/export.h> |
44 | 44 | ||
45 | /* Access macro for slots in vblank timestamp ringbuffer. */ | 45 | /* Access macro for slots in vblank timestamp ringbuffer. */ |
46 | #define vblanktimestamp(dev, crtc, count) ( \ | 46 | #define vblanktimestamp(dev, crtc, count) \ |
47 | (dev)->_vblank_time[(crtc) * DRM_VBLANKTIME_RBSIZE + \ | 47 | ((dev)->vblank[crtc].time[(count) % DRM_VBLANKTIME_RBSIZE]) |
48 | ((count) % DRM_VBLANKTIME_RBSIZE)]) | ||
49 | 48 | ||
50 | /* Retry timestamp calculation up to 3 times to satisfy | 49 | /* Retry timestamp calculation up to 3 times to satisfy |
51 | * drm_timestamp_precision before giving up. | 50 | * drm_timestamp_precision before giving up. |
@@ -89,8 +88,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data, | |||
89 | */ | 88 | */ |
90 | static void clear_vblank_timestamps(struct drm_device *dev, int crtc) | 89 | static void clear_vblank_timestamps(struct drm_device *dev, int crtc) |
91 | { | 90 | { |
92 | memset(&dev->_vblank_time[crtc * DRM_VBLANKTIME_RBSIZE], 0, | 91 | memset(dev->vblank[crtc].time, 0, sizeof(dev->vblank[crtc].time)); |
93 | DRM_VBLANKTIME_RBSIZE * sizeof(struct timeval)); | ||
94 | } | 92 | } |
95 | 93 | ||
96 | /* | 94 | /* |
@@ -115,7 +113,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc) | |||
115 | spin_lock_irqsave(&dev->vblank_time_lock, irqflags); | 113 | spin_lock_irqsave(&dev->vblank_time_lock, irqflags); |
116 | 114 | ||
117 | dev->driver->disable_vblank(dev, crtc); | 115 | dev->driver->disable_vblank(dev, crtc); |
118 | dev->vblank_enabled[crtc] = 0; | 116 | dev->vblank[crtc].enabled = false; |
119 | 117 | ||
120 | /* No further vblank irq's will be processed after | 118 | /* No further vblank irq's will be processed after |
121 | * this point. Get current hardware vblank count and | 119 | * this point. Get current hardware vblank count and |
@@ -130,9 +128,9 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc) | |||
130 | * delayed gpu counter increment. | 128 | * delayed gpu counter increment. |
131 | */ | 129 | */ |
132 | do { | 130 | do { |
133 | dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc); | 131 | dev->vblank[crtc].last = dev->driver->get_vblank_counter(dev, crtc); |
134 | vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0); | 132 | vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0); |
135 | } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc); | 133 | } while (dev->vblank[crtc].last != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc); |
136 | 134 | ||
137 | if (!count) | 135 | if (!count) |
138 | vblrc = 0; | 136 | vblrc = 0; |
@@ -140,7 +138,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc) | |||
140 | /* Compute time difference to stored timestamp of last vblank | 138 | /* Compute time difference to stored timestamp of last vblank |
141 | * as updated by last invocation of drm_handle_vblank() in vblank irq. | 139 | * as updated by last invocation of drm_handle_vblank() in vblank irq. |
142 | */ | 140 | */ |
143 | vblcount = atomic_read(&dev->_vblank_count[crtc]); | 141 | vblcount = atomic_read(&dev->vblank[crtc].count); |
144 | diff_ns = timeval_to_ns(&tvblank) - | 142 | diff_ns = timeval_to_ns(&tvblank) - |
145 | timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount)); | 143 | timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount)); |
146 | 144 | ||
@@ -157,7 +155,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc) | |||
157 | * hope for the best. | 155 | * hope for the best. |
158 | */ | 156 | */ |
159 | if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) { | 157 | if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) { |
160 | atomic_inc(&dev->_vblank_count[crtc]); | 158 | atomic_inc(&dev->vblank[crtc].count); |
161 | smp_mb__after_atomic_inc(); | 159 | smp_mb__after_atomic_inc(); |
162 | } | 160 | } |
163 | 161 | ||
@@ -178,8 +176,8 @@ static void vblank_disable_fn(unsigned long arg) | |||
178 | 176 | ||
179 | for (i = 0; i < dev->num_crtcs; i++) { | 177 | for (i = 0; i < dev->num_crtcs; i++) { |
180 | spin_lock_irqsave(&dev->vbl_lock, irqflags); | 178 | spin_lock_irqsave(&dev->vbl_lock, irqflags); |
181 | if (atomic_read(&dev->vblank_refcount[i]) == 0 && | 179 | if (atomic_read(&dev->vblank[i].refcount) == 0 && |
182 | dev->vblank_enabled[i]) { | 180 | dev->vblank[i].enabled) { |
183 | DRM_DEBUG("disabling vblank on crtc %d\n", i); | 181 | DRM_DEBUG("disabling vblank on crtc %d\n", i); |
184 | vblank_disable_and_save(dev, i); | 182 | vblank_disable_and_save(dev, i); |
185 | } | 183 | } |
@@ -197,14 +195,7 @@ void drm_vblank_cleanup(struct drm_device *dev) | |||
197 | 195 | ||
198 | vblank_disable_fn((unsigned long)dev); | 196 | vblank_disable_fn((unsigned long)dev); |
199 | 197 | ||
200 | kfree(dev->vbl_queue); | 198 | kfree(dev->vblank); |
201 | kfree(dev->_vblank_count); | ||
202 | kfree(dev->vblank_refcount); | ||
203 | kfree(dev->vblank_enabled); | ||
204 | kfree(dev->last_vblank); | ||
205 | kfree(dev->last_vblank_wait); | ||
206 | kfree(dev->vblank_inmodeset); | ||
207 | kfree(dev->_vblank_time); | ||
208 | 199 | ||
209 | dev->num_crtcs = 0; | 200 | dev->num_crtcs = 0; |
210 | } | 201 | } |
@@ -221,40 +212,12 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs) | |||
221 | 212 | ||
222 | dev->num_crtcs = num_crtcs; | 213 | dev->num_crtcs = num_crtcs; |
223 | 214 | ||
224 | dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs, | 215 | dev->vblank = kcalloc(num_crtcs, sizeof(*dev->vblank), GFP_KERNEL); |
225 | GFP_KERNEL); | 216 | if (!dev->vblank) |
226 | if (!dev->vbl_queue) | ||
227 | goto err; | 217 | goto err; |
228 | 218 | ||
229 | dev->_vblank_count = kmalloc(sizeof(atomic_t) * num_crtcs, GFP_KERNEL); | 219 | for (i = 0; i < num_crtcs; i++) |
230 | if (!dev->_vblank_count) | 220 | init_waitqueue_head(&dev->vblank[i].queue); |
231 | goto err; | ||
232 | |||
233 | dev->vblank_refcount = kmalloc(sizeof(atomic_t) * num_crtcs, | ||
234 | GFP_KERNEL); | ||
235 | if (!dev->vblank_refcount) | ||
236 | goto err; | ||
237 | |||
238 | dev->vblank_enabled = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL); | ||
239 | if (!dev->vblank_enabled) | ||
240 | goto err; | ||
241 | |||
242 | dev->last_vblank = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL); | ||
243 | if (!dev->last_vblank) | ||
244 | goto err; | ||
245 | |||
246 | dev->last_vblank_wait = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL); | ||
247 | if (!dev->last_vblank_wait) | ||
248 | goto err; | ||
249 | |||
250 | dev->vblank_inmodeset = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL); | ||
251 | if (!dev->vblank_inmodeset) | ||
252 | goto err; | ||
253 | |||
254 | dev->_vblank_time = kcalloc(num_crtcs * DRM_VBLANKTIME_RBSIZE, | ||
255 | sizeof(struct timeval), GFP_KERNEL); | ||
256 | if (!dev->_vblank_time) | ||
257 | goto err; | ||
258 | 221 | ||
259 | DRM_INFO("Supports vblank timestamp caching Rev 1 (10.10.2010).\n"); | 222 | DRM_INFO("Supports vblank timestamp caching Rev 1 (10.10.2010).\n"); |
260 | 223 | ||
@@ -264,14 +227,8 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs) | |||
264 | else | 227 | else |
265 | DRM_INFO("No driver support for vblank timestamp query.\n"); | 228 | DRM_INFO("No driver support for vblank timestamp query.\n"); |
266 | 229 | ||
267 | /* Zero per-crtc vblank stuff */ | 230 | dev->vblank_disable_allowed = false; |
268 | for (i = 0; i < num_crtcs; i++) { | ||
269 | init_waitqueue_head(&dev->vbl_queue[i]); | ||
270 | atomic_set(&dev->_vblank_count[i], 0); | ||
271 | atomic_set(&dev->vblank_refcount[i], 0); | ||
272 | } | ||
273 | 231 | ||
274 | dev->vblank_disable_allowed = 0; | ||
275 | return 0; | 232 | return 0; |
276 | 233 | ||
277 | err: | 234 | err: |
@@ -336,7 +293,7 @@ int drm_irq_install(struct drm_device *dev) | |||
336 | mutex_unlock(&dev->struct_mutex); | 293 | mutex_unlock(&dev->struct_mutex); |
337 | return -EBUSY; | 294 | return -EBUSY; |
338 | } | 295 | } |
339 | dev->irq_enabled = 1; | 296 | dev->irq_enabled = true; |
340 | mutex_unlock(&dev->struct_mutex); | 297 | mutex_unlock(&dev->struct_mutex); |
341 | 298 | ||
342 | DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev)); | 299 | DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev)); |
@@ -359,7 +316,7 @@ int drm_irq_install(struct drm_device *dev) | |||
359 | 316 | ||
360 | if (ret < 0) { | 317 | if (ret < 0) { |
361 | mutex_lock(&dev->struct_mutex); | 318 | mutex_lock(&dev->struct_mutex); |
362 | dev->irq_enabled = 0; | 319 | dev->irq_enabled = false; |
363 | mutex_unlock(&dev->struct_mutex); | 320 | mutex_unlock(&dev->struct_mutex); |
364 | return ret; | 321 | return ret; |
365 | } | 322 | } |
@@ -373,7 +330,7 @@ int drm_irq_install(struct drm_device *dev) | |||
373 | 330 | ||
374 | if (ret < 0) { | 331 | if (ret < 0) { |
375 | mutex_lock(&dev->struct_mutex); | 332 | mutex_lock(&dev->struct_mutex); |
376 | dev->irq_enabled = 0; | 333 | dev->irq_enabled = false; |
377 | mutex_unlock(&dev->struct_mutex); | 334 | mutex_unlock(&dev->struct_mutex); |
378 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | 335 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
379 | vga_client_register(dev->pdev, NULL, NULL, NULL); | 336 | vga_client_register(dev->pdev, NULL, NULL, NULL); |
@@ -394,14 +351,15 @@ EXPORT_SYMBOL(drm_irq_install); | |||
394 | int drm_irq_uninstall(struct drm_device *dev) | 351 | int drm_irq_uninstall(struct drm_device *dev) |
395 | { | 352 | { |
396 | unsigned long irqflags; | 353 | unsigned long irqflags; |
397 | int irq_enabled, i; | 354 | bool irq_enabled; |
355 | int i; | ||
398 | 356 | ||
399 | if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) | 357 | if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) |
400 | return -EINVAL; | 358 | return -EINVAL; |
401 | 359 | ||
402 | mutex_lock(&dev->struct_mutex); | 360 | mutex_lock(&dev->struct_mutex); |
403 | irq_enabled = dev->irq_enabled; | 361 | irq_enabled = dev->irq_enabled; |
404 | dev->irq_enabled = 0; | 362 | dev->irq_enabled = false; |
405 | mutex_unlock(&dev->struct_mutex); | 363 | mutex_unlock(&dev->struct_mutex); |
406 | 364 | ||
407 | /* | 365 | /* |
@@ -410,9 +368,9 @@ int drm_irq_uninstall(struct drm_device *dev) | |||
410 | if (dev->num_crtcs) { | 368 | if (dev->num_crtcs) { |
411 | spin_lock_irqsave(&dev->vbl_lock, irqflags); | 369 | spin_lock_irqsave(&dev->vbl_lock, irqflags); |
412 | for (i = 0; i < dev->num_crtcs; i++) { | 370 | for (i = 0; i < dev->num_crtcs; i++) { |
413 | DRM_WAKEUP(&dev->vbl_queue[i]); | 371 | DRM_WAKEUP(&dev->vblank[i].queue); |
414 | dev->vblank_enabled[i] = 0; | 372 | dev->vblank[i].enabled = false; |
415 | dev->last_vblank[i] = | 373 | dev->vblank[i].last = |
416 | dev->driver->get_vblank_counter(dev, i); | 374 | dev->driver->get_vblank_counter(dev, i); |
417 | } | 375 | } |
418 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); | 376 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); |
@@ -795,7 +753,7 @@ EXPORT_SYMBOL(drm_get_last_vbltimestamp); | |||
795 | */ | 753 | */ |
796 | u32 drm_vblank_count(struct drm_device *dev, int crtc) | 754 | u32 drm_vblank_count(struct drm_device *dev, int crtc) |
797 | { | 755 | { |
798 | return atomic_read(&dev->_vblank_count[crtc]); | 756 | return atomic_read(&dev->vblank[crtc].count); |
799 | } | 757 | } |
800 | EXPORT_SYMBOL(drm_vblank_count); | 758 | EXPORT_SYMBOL(drm_vblank_count); |
801 | 759 | ||
@@ -824,10 +782,10 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc, | |||
824 | * a seqlock. | 782 | * a seqlock. |
825 | */ | 783 | */ |
826 | do { | 784 | do { |
827 | cur_vblank = atomic_read(&dev->_vblank_count[crtc]); | 785 | cur_vblank = atomic_read(&dev->vblank[crtc].count); |
828 | *vblanktime = vblanktimestamp(dev, crtc, cur_vblank); | 786 | *vblanktime = vblanktimestamp(dev, crtc, cur_vblank); |
829 | smp_rmb(); | 787 | smp_rmb(); |
830 | } while (cur_vblank != atomic_read(&dev->_vblank_count[crtc])); | 788 | } while (cur_vblank != atomic_read(&dev->vblank[crtc].count)); |
831 | 789 | ||
832 | return cur_vblank; | 790 | return cur_vblank; |
833 | } | 791 | } |
@@ -914,12 +872,12 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc) | |||
914 | } while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc)); | 872 | } while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc)); |
915 | 873 | ||
916 | /* Deal with counter wrap */ | 874 | /* Deal with counter wrap */ |
917 | diff = cur_vblank - dev->last_vblank[crtc]; | 875 | diff = cur_vblank - dev->vblank[crtc].last; |
918 | if (cur_vblank < dev->last_vblank[crtc]) { | 876 | if (cur_vblank < dev->vblank[crtc].last) { |
919 | diff += dev->max_vblank_count; | 877 | diff += dev->max_vblank_count; |
920 | 878 | ||
921 | DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n", | 879 | DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n", |
922 | crtc, dev->last_vblank[crtc], cur_vblank, diff); | 880 | crtc, dev->vblank[crtc].last, cur_vblank, diff); |
923 | } | 881 | } |
924 | 882 | ||
925 | DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n", | 883 | DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n", |
@@ -930,12 +888,12 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc) | |||
930 | * reinitialize delayed at next vblank interrupt in that case. | 888 | * reinitialize delayed at next vblank interrupt in that case. |
931 | */ | 889 | */ |
932 | if (rc) { | 890 | if (rc) { |
933 | tslot = atomic_read(&dev->_vblank_count[crtc]) + diff; | 891 | tslot = atomic_read(&dev->vblank[crtc].count) + diff; |
934 | vblanktimestamp(dev, crtc, tslot) = t_vblank; | 892 | vblanktimestamp(dev, crtc, tslot) = t_vblank; |
935 | } | 893 | } |
936 | 894 | ||
937 | smp_mb__before_atomic_inc(); | 895 | smp_mb__before_atomic_inc(); |
938 | atomic_add(diff, &dev->_vblank_count[crtc]); | 896 | atomic_add(diff, &dev->vblank[crtc].count); |
939 | smp_mb__after_atomic_inc(); | 897 | smp_mb__after_atomic_inc(); |
940 | } | 898 | } |
941 | 899 | ||
@@ -957,9 +915,9 @@ int drm_vblank_get(struct drm_device *dev, int crtc) | |||
957 | 915 | ||
958 | spin_lock_irqsave(&dev->vbl_lock, irqflags); | 916 | spin_lock_irqsave(&dev->vbl_lock, irqflags); |
959 | /* Going from 0->1 means we have to enable interrupts again */ | 917 | /* Going from 0->1 means we have to enable interrupts again */ |
960 | if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) { | 918 | if (atomic_add_return(1, &dev->vblank[crtc].refcount) == 1) { |
961 | spin_lock_irqsave(&dev->vblank_time_lock, irqflags2); | 919 | spin_lock_irqsave(&dev->vblank_time_lock, irqflags2); |
962 | if (!dev->vblank_enabled[crtc]) { | 920 | if (!dev->vblank[crtc].enabled) { |
963 | /* Enable vblank irqs under vblank_time_lock protection. | 921 | /* Enable vblank irqs under vblank_time_lock protection. |
964 | * All vblank count & timestamp updates are held off | 922 | * All vblank count & timestamp updates are held off |
965 | * until we are done reinitializing master counter and | 923 | * until we are done reinitializing master counter and |
@@ -970,16 +928,16 @@ int drm_vblank_get(struct drm_device *dev, int crtc) | |||
970 | DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", | 928 | DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", |
971 | crtc, ret); | 929 | crtc, ret); |
972 | if (ret) | 930 | if (ret) |
973 | atomic_dec(&dev->vblank_refcount[crtc]); | 931 | atomic_dec(&dev->vblank[crtc].refcount); |
974 | else { | 932 | else { |
975 | dev->vblank_enabled[crtc] = 1; | 933 | dev->vblank[crtc].enabled = true; |
976 | drm_update_vblank_count(dev, crtc); | 934 | drm_update_vblank_count(dev, crtc); |
977 | } | 935 | } |
978 | } | 936 | } |
979 | spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2); | 937 | spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2); |
980 | } else { | 938 | } else { |
981 | if (!dev->vblank_enabled[crtc]) { | 939 | if (!dev->vblank[crtc].enabled) { |
982 | atomic_dec(&dev->vblank_refcount[crtc]); | 940 | atomic_dec(&dev->vblank[crtc].refcount); |
983 | ret = -EINVAL; | 941 | ret = -EINVAL; |
984 | } | 942 | } |
985 | } | 943 | } |
@@ -999,10 +957,10 @@ EXPORT_SYMBOL(drm_vblank_get); | |||
999 | */ | 957 | */ |
1000 | void drm_vblank_put(struct drm_device *dev, int crtc) | 958 | void drm_vblank_put(struct drm_device *dev, int crtc) |
1001 | { | 959 | { |
1002 | BUG_ON(atomic_read(&dev->vblank_refcount[crtc]) == 0); | 960 | BUG_ON(atomic_read(&dev->vblank[crtc].refcount) == 0); |
1003 | 961 | ||
1004 | /* Last user schedules interrupt disable */ | 962 | /* Last user schedules interrupt disable */ |
1005 | if (atomic_dec_and_test(&dev->vblank_refcount[crtc]) && | 963 | if (atomic_dec_and_test(&dev->vblank[crtc].refcount) && |
1006 | (drm_vblank_offdelay > 0)) | 964 | (drm_vblank_offdelay > 0)) |
1007 | mod_timer(&dev->vblank_disable_timer, | 965 | mod_timer(&dev->vblank_disable_timer, |
1008 | jiffies + ((drm_vblank_offdelay * DRM_HZ)/1000)); | 966 | jiffies + ((drm_vblank_offdelay * DRM_HZ)/1000)); |
@@ -1025,7 +983,7 @@ void drm_vblank_off(struct drm_device *dev, int crtc) | |||
1025 | 983 | ||
1026 | spin_lock_irqsave(&dev->vbl_lock, irqflags); | 984 | spin_lock_irqsave(&dev->vbl_lock, irqflags); |
1027 | vblank_disable_and_save(dev, crtc); | 985 | vblank_disable_and_save(dev, crtc); |
1028 | DRM_WAKEUP(&dev->vbl_queue[crtc]); | 986 | DRM_WAKEUP(&dev->vblank[crtc].queue); |
1029 | 987 | ||
1030 | /* Send any queued vblank events, lest the natives grow disquiet */ | 988 | /* Send any queued vblank events, lest the natives grow disquiet */ |
1031 | seq = drm_vblank_count_and_time(dev, crtc, &now); | 989 | seq = drm_vblank_count_and_time(dev, crtc, &now); |
@@ -1067,10 +1025,10 @@ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc) | |||
1067 | * to avoid corrupting the count if multiple, mismatch calls occur), | 1025 | * to avoid corrupting the count if multiple, mismatch calls occur), |
1068 | * so that interrupts remain enabled in the interim. | 1026 | * so that interrupts remain enabled in the interim. |
1069 | */ | 1027 | */ |
1070 | if (!dev->vblank_inmodeset[crtc]) { | 1028 | if (!dev->vblank[crtc].inmodeset) { |
1071 | dev->vblank_inmodeset[crtc] = 0x1; | 1029 | dev->vblank[crtc].inmodeset = 0x1; |
1072 | if (drm_vblank_get(dev, crtc) == 0) | 1030 | if (drm_vblank_get(dev, crtc) == 0) |
1073 | dev->vblank_inmodeset[crtc] |= 0x2; | 1031 | dev->vblank[crtc].inmodeset |= 0x2; |
1074 | } | 1032 | } |
1075 | } | 1033 | } |
1076 | EXPORT_SYMBOL(drm_vblank_pre_modeset); | 1034 | EXPORT_SYMBOL(drm_vblank_pre_modeset); |
@@ -1083,15 +1041,15 @@ void drm_vblank_post_modeset(struct drm_device *dev, int crtc) | |||
1083 | if (!dev->num_crtcs) | 1041 | if (!dev->num_crtcs) |
1084 | return; | 1042 | return; |
1085 | 1043 | ||
1086 | if (dev->vblank_inmodeset[crtc]) { | 1044 | if (dev->vblank[crtc].inmodeset) { |
1087 | spin_lock_irqsave(&dev->vbl_lock, irqflags); | 1045 | spin_lock_irqsave(&dev->vbl_lock, irqflags); |
1088 | dev->vblank_disable_allowed = 1; | 1046 | dev->vblank_disable_allowed = true; |
1089 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); | 1047 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); |
1090 | 1048 | ||
1091 | if (dev->vblank_inmodeset[crtc] & 0x2) | 1049 | if (dev->vblank[crtc].inmodeset & 0x2) |
1092 | drm_vblank_put(dev, crtc); | 1050 | drm_vblank_put(dev, crtc); |
1093 | 1051 | ||
1094 | dev->vblank_inmodeset[crtc] = 0; | 1052 | dev->vblank[crtc].inmodeset = 0; |
1095 | } | 1053 | } |
1096 | } | 1054 | } |
1097 | EXPORT_SYMBOL(drm_vblank_post_modeset); | 1055 | EXPORT_SYMBOL(drm_vblank_post_modeset); |
@@ -1288,8 +1246,8 @@ int drm_wait_vblank(struct drm_device *dev, void *data, | |||
1288 | 1246 | ||
1289 | DRM_DEBUG("waiting on vblank count %d, crtc %d\n", | 1247 | DRM_DEBUG("waiting on vblank count %d, crtc %d\n", |
1290 | vblwait->request.sequence, crtc); | 1248 | vblwait->request.sequence, crtc); |
1291 | dev->last_vblank_wait[crtc] = vblwait->request.sequence; | 1249 | dev->vblank[crtc].last_wait = vblwait->request.sequence; |
1292 | DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ, | 1250 | DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * DRM_HZ, |
1293 | (((drm_vblank_count(dev, crtc) - | 1251 | (((drm_vblank_count(dev, crtc) - |
1294 | vblwait->request.sequence) <= (1 << 23)) || | 1252 | vblwait->request.sequence) <= (1 << 23)) || |
1295 | !dev->irq_enabled)); | 1253 | !dev->irq_enabled)); |
@@ -1367,7 +1325,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc) | |||
1367 | spin_lock_irqsave(&dev->vblank_time_lock, irqflags); | 1325 | spin_lock_irqsave(&dev->vblank_time_lock, irqflags); |
1368 | 1326 | ||
1369 | /* Vblank irq handling disabled. Nothing to do. */ | 1327 | /* Vblank irq handling disabled. Nothing to do. */ |
1370 | if (!dev->vblank_enabled[crtc]) { | 1328 | if (!dev->vblank[crtc].enabled) { |
1371 | spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); | 1329 | spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); |
1372 | return false; | 1330 | return false; |
1373 | } | 1331 | } |
@@ -1377,7 +1335,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc) | |||
1377 | */ | 1335 | */ |
1378 | 1336 | ||
1379 | /* Get current timestamp and count. */ | 1337 | /* Get current timestamp and count. */ |
1380 | vblcount = atomic_read(&dev->_vblank_count[crtc]); | 1338 | vblcount = atomic_read(&dev->vblank[crtc].count); |
1381 | drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ); | 1339 | drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ); |
1382 | 1340 | ||
1383 | /* Compute time difference to timestamp of last vblank */ | 1341 | /* Compute time difference to timestamp of last vblank */ |
@@ -1401,14 +1359,14 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc) | |||
1401 | * the timestamp computed above. | 1359 | * the timestamp computed above. |
1402 | */ | 1360 | */ |
1403 | smp_mb__before_atomic_inc(); | 1361 | smp_mb__before_atomic_inc(); |
1404 | atomic_inc(&dev->_vblank_count[crtc]); | 1362 | atomic_inc(&dev->vblank[crtc].count); |
1405 | smp_mb__after_atomic_inc(); | 1363 | smp_mb__after_atomic_inc(); |
1406 | } else { | 1364 | } else { |
1407 | DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n", | 1365 | DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n", |
1408 | crtc, (int) diff_ns); | 1366 | crtc, (int) diff_ns); |
1409 | } | 1367 | } |
1410 | 1368 | ||
1411 | DRM_WAKEUP(&dev->vbl_queue[crtc]); | 1369 | DRM_WAKEUP(&dev->vblank[crtc].queue); |
1412 | drm_handle_vblank_events(dev, crtc); | 1370 | drm_handle_vblank_events(dev, crtc); |
1413 | 1371 | ||
1414 | spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); | 1372 | spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); |
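
The drm_irq.c hunks above fold the old per-CRTC parallel arrays (vblank_refcount[], vbl_queue[], _vblank_count[], vblank_enabled[], vblank_inmodeset[], last_vblank_wait[]) into a single per-CRTC structure, dev->vblank[crtc]. Reconstructed purely from the accessors visible in this diff, the consolidated state looks roughly like the sketch below; the exact layout and any further members are not shown in this section.

struct drm_vblank_crtc {
	wait_queue_head_t queue;	/* was dev->vbl_queue[crtc] */
	atomic_t count;			/* was dev->_vblank_count[crtc] */
	atomic_t refcount;		/* was dev->vblank_refcount[crtc] */
	u32 last_wait;			/* was dev->last_vblank_wait[crtc] */
	unsigned int inmodeset;		/* was dev->vblank_inmodeset[crtc] */
	bool enabled;			/* was dev->vblank_enabled[crtc] */
};

/* struct drm_device then carries one entry per CRTC:
 *	struct drm_vblank_crtc *vblank;		sized to dev->num_crtcs
 */
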
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c index d752c96d6090..f6452682141b 100644 --- a/drivers/gpu/drm/drm_lock.c +++ b/drivers/gpu/drm/drm_lock.c | |||
@@ -86,7 +86,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) | |||
86 | if (drm_lock_take(&master->lock, lock->context)) { | 86 | if (drm_lock_take(&master->lock, lock->context)) { |
87 | master->lock.file_priv = file_priv; | 87 | master->lock.file_priv = file_priv; |
88 | master->lock.lock_time = jiffies; | 88 | master->lock.lock_time = jiffies; |
89 | atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); | ||
90 | break; /* Got lock */ | 89 | break; /* Got lock */ |
91 | } | 90 | } |
92 | 91 | ||
@@ -157,8 +156,6 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) | |||
157 | return -EINVAL; | 156 | return -EINVAL; |
158 | } | 157 | } |
159 | 158 | ||
160 | atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]); | ||
161 | |||
162 | if (drm_lock_free(&master->lock, lock->context)) { | 159 | if (drm_lock_free(&master->lock, lock->context)) { |
163 | /* FIXME: Should really bail out here. */ | 160 | /* FIXME: Should really bail out here. */ |
164 | } | 161 | } |
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index 1f96cee6eee8..f00d7a9671ea 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c | |||
@@ -322,83 +322,36 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent, | |||
322 | 322 | ||
323 | DRM_DEBUG("\n"); | 323 | DRM_DEBUG("\n"); |
324 | 324 | ||
325 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); | 325 | dev = drm_dev_alloc(driver, &pdev->dev); |
326 | if (!dev) | 326 | if (!dev) |
327 | return -ENOMEM; | 327 | return -ENOMEM; |
328 | 328 | ||
329 | ret = pci_enable_device(pdev); | 329 | ret = pci_enable_device(pdev); |
330 | if (ret) | 330 | if (ret) |
331 | goto err_g1; | 331 | goto err_free; |
332 | 332 | ||
333 | dev->pdev = pdev; | 333 | dev->pdev = pdev; |
334 | dev->dev = &pdev->dev; | ||
335 | |||
336 | dev->pci_device = pdev->device; | ||
337 | dev->pci_vendor = pdev->vendor; | ||
338 | |||
339 | #ifdef __alpha__ | 334 | #ifdef __alpha__ |
340 | dev->hose = pdev->sysdata; | 335 | dev->hose = pdev->sysdata; |
341 | #endif | 336 | #endif |
342 | 337 | ||
343 | mutex_lock(&drm_global_mutex); | 338 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
344 | |||
345 | if ((ret = drm_fill_in_dev(dev, ent, driver))) { | ||
346 | printk(KERN_ERR "DRM: Fill_in_dev failed.\n"); | ||
347 | goto err_g2; | ||
348 | } | ||
349 | |||
350 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | ||
351 | pci_set_drvdata(pdev, dev); | 339 | pci_set_drvdata(pdev, dev); |
352 | ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL); | ||
353 | if (ret) | ||
354 | goto err_g2; | ||
355 | } | ||
356 | |||
357 | if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) { | ||
358 | ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER); | ||
359 | if (ret) | ||
360 | goto err_g21; | ||
361 | } | ||
362 | |||
363 | if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY))) | ||
364 | goto err_g3; | ||
365 | |||
366 | if (dev->driver->load) { | ||
367 | ret = dev->driver->load(dev, ent->driver_data); | ||
368 | if (ret) | ||
369 | goto err_g4; | ||
370 | } | ||
371 | 340 | ||
372 | /* setup the grouping for the legacy output */ | 341 | ret = drm_dev_register(dev, ent->driver_data); |
373 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 342 | if (ret) |
374 | ret = drm_mode_group_init_legacy_group(dev, | 343 | goto err_pci; |
375 | &dev->primary->mode_group); | ||
376 | if (ret) | ||
377 | goto err_g4; | ||
378 | } | ||
379 | |||
380 | list_add_tail(&dev->driver_item, &driver->device_list); | ||
381 | 344 | ||
382 | DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n", | 345 | DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n", |
383 | driver->name, driver->major, driver->minor, driver->patchlevel, | 346 | driver->name, driver->major, driver->minor, driver->patchlevel, |
384 | driver->date, pci_name(pdev), dev->primary->index); | 347 | driver->date, pci_name(pdev), dev->primary->index); |
385 | 348 | ||
386 | mutex_unlock(&drm_global_mutex); | ||
387 | return 0; | 349 | return 0; |
388 | 350 | ||
389 | err_g4: | 351 | err_pci: |
390 | drm_put_minor(&dev->primary); | ||
391 | err_g3: | ||
392 | if (dev->render) | ||
393 | drm_put_minor(&dev->render); | ||
394 | err_g21: | ||
395 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | ||
396 | drm_put_minor(&dev->control); | ||
397 | err_g2: | ||
398 | pci_disable_device(pdev); | 352 | pci_disable_device(pdev); |
399 | err_g1: | 353 | err_free: |
400 | kfree(dev); | 354 | drm_dev_free(dev); |
401 | mutex_unlock(&drm_global_mutex); | ||
402 | return ret; | 355 | return ret; |
403 | } | 356 | } |
404 | EXPORT_SYMBOL(drm_get_pci_dev); | 357 | EXPORT_SYMBOL(drm_get_pci_dev); |
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c index f7a18c6ba4c4..fc24fee8ec83 100644 --- a/drivers/gpu/drm/drm_platform.c +++ b/drivers/gpu/drm/drm_platform.c | |||
@@ -47,55 +47,15 @@ static int drm_get_platform_dev(struct platform_device *platdev, | |||
47 | 47 | ||
48 | DRM_DEBUG("\n"); | 48 | DRM_DEBUG("\n"); |
49 | 49 | ||
50 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); | 50 | dev = drm_dev_alloc(driver, &platdev->dev); |
51 | if (!dev) | 51 | if (!dev) |
52 | return -ENOMEM; | 52 | return -ENOMEM; |
53 | 53 | ||
54 | dev->platformdev = platdev; | 54 | dev->platformdev = platdev; |
55 | dev->dev = &platdev->dev; | ||
56 | 55 | ||
57 | mutex_lock(&drm_global_mutex); | 56 | ret = drm_dev_register(dev, 0); |
58 | |||
59 | ret = drm_fill_in_dev(dev, NULL, driver); | ||
60 | |||
61 | if (ret) { | ||
62 | printk(KERN_ERR "DRM: Fill_in_dev failed.\n"); | ||
63 | goto err_g1; | ||
64 | } | ||
65 | |||
66 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | ||
67 | ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL); | ||
68 | if (ret) | ||
69 | goto err_g1; | ||
70 | } | ||
71 | |||
72 | if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) { | ||
73 | ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER); | ||
74 | if (ret) | ||
75 | goto err_g11; | ||
76 | } | ||
77 | |||
78 | ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY); | ||
79 | if (ret) | 57 | if (ret) |
80 | goto err_g2; | 58 | goto err_free; |
81 | |||
82 | if (dev->driver->load) { | ||
83 | ret = dev->driver->load(dev, 0); | ||
84 | if (ret) | ||
85 | goto err_g3; | ||
86 | } | ||
87 | |||
88 | /* setup the grouping for the legacy output */ | ||
89 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | ||
90 | ret = drm_mode_group_init_legacy_group(dev, | ||
91 | &dev->primary->mode_group); | ||
92 | if (ret) | ||
93 | goto err_g3; | ||
94 | } | ||
95 | |||
96 | list_add_tail(&dev->driver_item, &driver->device_list); | ||
97 | |||
98 | mutex_unlock(&drm_global_mutex); | ||
99 | 59 | ||
100 | DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", | 60 | DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", |
101 | driver->name, driver->major, driver->minor, driver->patchlevel, | 61 | driver->name, driver->major, driver->minor, driver->patchlevel, |
@@ -103,17 +63,8 @@ static int drm_get_platform_dev(struct platform_device *platdev, | |||
103 | 63 | ||
104 | return 0; | 64 | return 0; |
105 | 65 | ||
106 | err_g3: | 66 | err_free: |
107 | drm_put_minor(&dev->primary); | 67 | drm_dev_free(dev); |
108 | err_g2: | ||
109 | if (dev->render) | ||
110 | drm_put_minor(&dev->render); | ||
111 | err_g11: | ||
112 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | ||
113 | drm_put_minor(&dev->control); | ||
114 | err_g1: | ||
115 | kfree(dev); | ||
116 | mutex_unlock(&drm_global_mutex); | ||
117 | return ret; | 68 | return ret; |
118 | } | 69 | } |
119 | 70 | ||
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 276d470f7b3e..56805c39c906 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c | |||
@@ -637,14 +637,13 @@ int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages, | |||
637 | unsigned count; | 637 | unsigned count; |
638 | struct scatterlist *sg; | 638 | struct scatterlist *sg; |
639 | struct page *page; | 639 | struct page *page; |
640 | u32 len, offset; | 640 | u32 len; |
641 | int pg_index; | 641 | int pg_index; |
642 | dma_addr_t addr; | 642 | dma_addr_t addr; |
643 | 643 | ||
644 | pg_index = 0; | 644 | pg_index = 0; |
645 | for_each_sg(sgt->sgl, sg, sgt->nents, count) { | 645 | for_each_sg(sgt->sgl, sg, sgt->nents, count) { |
646 | len = sg->length; | 646 | len = sg->length; |
647 | offset = sg->offset; | ||
648 | page = sg_page(sg); | 647 | page = sg_page(sg); |
649 | addr = sg_dma_address(sg); | 648 | addr = sg_dma_address(sg); |
650 | 649 | ||
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index 39d864576be4..26055abf94ee 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c | |||
@@ -254,70 +254,6 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data, | |||
254 | return 0; | 254 | return 0; |
255 | } | 255 | } |
256 | 256 | ||
257 | int drm_fill_in_dev(struct drm_device *dev, | ||
258 | const struct pci_device_id *ent, | ||
259 | struct drm_driver *driver) | ||
260 | { | ||
261 | int retcode; | ||
262 | |||
263 | INIT_LIST_HEAD(&dev->filelist); | ||
264 | INIT_LIST_HEAD(&dev->ctxlist); | ||
265 | INIT_LIST_HEAD(&dev->vmalist); | ||
266 | INIT_LIST_HEAD(&dev->maplist); | ||
267 | INIT_LIST_HEAD(&dev->vblank_event_list); | ||
268 | |||
269 | spin_lock_init(&dev->count_lock); | ||
270 | spin_lock_init(&dev->event_lock); | ||
271 | mutex_init(&dev->struct_mutex); | ||
272 | mutex_init(&dev->ctxlist_mutex); | ||
273 | |||
274 | if (drm_ht_create(&dev->map_hash, 12)) { | ||
275 | return -ENOMEM; | ||
276 | } | ||
277 | |||
278 | /* the DRM has 6 basic counters */ | ||
279 | dev->counters = 6; | ||
280 | dev->types[0] = _DRM_STAT_LOCK; | ||
281 | dev->types[1] = _DRM_STAT_OPENS; | ||
282 | dev->types[2] = _DRM_STAT_CLOSES; | ||
283 | dev->types[3] = _DRM_STAT_IOCTLS; | ||
284 | dev->types[4] = _DRM_STAT_LOCKS; | ||
285 | dev->types[5] = _DRM_STAT_UNLOCKS; | ||
286 | |||
287 | dev->driver = driver; | ||
288 | |||
289 | if (dev->driver->bus->agp_init) { | ||
290 | retcode = dev->driver->bus->agp_init(dev); | ||
291 | if (retcode) | ||
292 | goto error_out_unreg; | ||
293 | } | ||
294 | |||
295 | |||
296 | |||
297 | retcode = drm_ctxbitmap_init(dev); | ||
298 | if (retcode) { | ||
299 | DRM_ERROR("Cannot allocate memory for context bitmap.\n"); | ||
300 | goto error_out_unreg; | ||
301 | } | ||
302 | |||
303 | if (driver->driver_features & DRIVER_GEM) { | ||
304 | retcode = drm_gem_init(dev); | ||
305 | if (retcode) { | ||
306 | DRM_ERROR("Cannot initialize graphics execution " | ||
307 | "manager (GEM)\n"); | ||
308 | goto error_out_unreg; | ||
309 | } | ||
310 | } | ||
311 | |||
312 | return 0; | ||
313 | |||
314 | error_out_unreg: | ||
315 | drm_lastclose(dev); | ||
316 | return retcode; | ||
317 | } | ||
318 | EXPORT_SYMBOL(drm_fill_in_dev); | ||
319 | |||
320 | |||
321 | /** | 257 | /** |
322 | * Get a secondary minor number. | 258 | * Get a secondary minor number. |
323 | * | 259 | * |
@@ -427,66 +363,237 @@ static void drm_unplug_minor(struct drm_minor *minor) | |||
427 | */ | 363 | */ |
428 | void drm_put_dev(struct drm_device *dev) | 364 | void drm_put_dev(struct drm_device *dev) |
429 | { | 365 | { |
430 | struct drm_driver *driver; | ||
431 | struct drm_map_list *r_list, *list_temp; | ||
432 | |||
433 | DRM_DEBUG("\n"); | 366 | DRM_DEBUG("\n"); |
434 | 367 | ||
435 | if (!dev) { | 368 | if (!dev) { |
436 | DRM_ERROR("cleanup called no dev\n"); | 369 | DRM_ERROR("cleanup called no dev\n"); |
437 | return; | 370 | return; |
438 | } | 371 | } |
439 | driver = dev->driver; | ||
440 | 372 | ||
441 | drm_lastclose(dev); | 373 | drm_dev_unregister(dev); |
374 | drm_dev_free(dev); | ||
375 | } | ||
376 | EXPORT_SYMBOL(drm_put_dev); | ||
442 | 377 | ||
443 | if (dev->driver->unload) | 378 | void drm_unplug_dev(struct drm_device *dev) |
444 | dev->driver->unload(dev); | 379 | { |
380 | /* for a USB device */ | ||
381 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | ||
382 | drm_unplug_minor(dev->control); | ||
383 | if (dev->render) | ||
384 | drm_unplug_minor(dev->render); | ||
385 | drm_unplug_minor(dev->primary); | ||
445 | 386 | ||
446 | if (dev->driver->bus->agp_destroy) | 387 | mutex_lock(&drm_global_mutex); |
447 | dev->driver->bus->agp_destroy(dev); | ||
448 | 388 | ||
449 | drm_vblank_cleanup(dev); | 389 | drm_device_set_unplugged(dev); |
450 | 390 | ||
451 | list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) | 391 | if (dev->open_count == 0) { |
452 | drm_rmmap(dev, r_list->map); | 392 | drm_put_dev(dev); |
453 | drm_ht_remove(&dev->map_hash); | 393 | } |
394 | mutex_unlock(&drm_global_mutex); | ||
395 | } | ||
396 | EXPORT_SYMBOL(drm_unplug_dev); | ||
454 | 397 | ||
455 | drm_ctxbitmap_cleanup(dev); | 398 | /** |
399 | * drm_dev_alloc - Allocate new drm device | ||
400 | * @driver: DRM driver to allocate device for | ||
401 | * @parent: Parent device object | ||
402 | * | ||
403 | * Allocate and initialize a new DRM device. No device registration is done. | ||
404 | * Call drm_dev_register() to advertise the device to user space and register it | ||
405 | * with other core subsystems. | ||
406 | * | ||
407 | * RETURNS: | ||
408 | * Pointer to new DRM device, or NULL if out of memory. | ||
409 | */ | ||
410 | struct drm_device *drm_dev_alloc(struct drm_driver *driver, | ||
411 | struct device *parent) | ||
412 | { | ||
413 | struct drm_device *dev; | ||
414 | int ret; | ||
456 | 415 | ||
457 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 416 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); |
458 | drm_put_minor(&dev->control); | 417 | if (!dev) |
418 | return NULL; | ||
459 | 419 | ||
460 | if (dev->render) | 420 | dev->dev = parent; |
461 | drm_put_minor(&dev->render); | 421 | dev->driver = driver; |
422 | |||
423 | INIT_LIST_HEAD(&dev->filelist); | ||
424 | INIT_LIST_HEAD(&dev->ctxlist); | ||
425 | INIT_LIST_HEAD(&dev->vmalist); | ||
426 | INIT_LIST_HEAD(&dev->maplist); | ||
427 | INIT_LIST_HEAD(&dev->vblank_event_list); | ||
428 | |||
429 | spin_lock_init(&dev->count_lock); | ||
430 | spin_lock_init(&dev->event_lock); | ||
431 | mutex_init(&dev->struct_mutex); | ||
432 | mutex_init(&dev->ctxlist_mutex); | ||
433 | |||
434 | if (drm_ht_create(&dev->map_hash, 12)) | ||
435 | goto err_free; | ||
462 | 436 | ||
463 | if (driver->driver_features & DRIVER_GEM) | 437 | ret = drm_ctxbitmap_init(dev); |
438 | if (ret) { | ||
439 | DRM_ERROR("Cannot allocate memory for context bitmap.\n"); | ||
440 | goto err_ht; | ||
441 | } | ||
442 | |||
443 | if (driver->driver_features & DRIVER_GEM) { | ||
444 | ret = drm_gem_init(dev); | ||
445 | if (ret) { | ||
446 | DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n"); | ||
447 | goto err_ctxbitmap; | ||
448 | } | ||
449 | } | ||
450 | |||
451 | return dev; | ||
452 | |||
453 | err_ctxbitmap: | ||
454 | drm_ctxbitmap_cleanup(dev); | ||
455 | err_ht: | ||
456 | drm_ht_remove(&dev->map_hash); | ||
457 | err_free: | ||
458 | kfree(dev); | ||
459 | return NULL; | ||
460 | } | ||
461 | EXPORT_SYMBOL(drm_dev_alloc); | ||
462 | |||
463 | /** | ||
464 | * drm_dev_free - Free DRM device | ||
465 | * @dev: DRM device to free | ||
466 | * | ||
467 | * Free a DRM device that has previously been allocated via drm_dev_alloc(). | ||
468 | * You must not use kfree() instead or you will leak memory. | ||
469 | * | ||
470 | * This must not be called once the device has been registered. Use | ||
471 | * drm_put_dev() instead, which then calls drm_dev_free(). | ||
472 | */ | ||
473 | void drm_dev_free(struct drm_device *dev) | ||
474 | { | ||
475 | if (dev->driver->driver_features & DRIVER_GEM) | ||
464 | drm_gem_destroy(dev); | 476 | drm_gem_destroy(dev); |
465 | 477 | ||
466 | drm_put_minor(&dev->primary); | 478 | drm_ctxbitmap_cleanup(dev); |
479 | drm_ht_remove(&dev->map_hash); | ||
467 | 480 | ||
468 | list_del(&dev->driver_item); | ||
469 | kfree(dev->devname); | 481 | kfree(dev->devname); |
470 | kfree(dev); | 482 | kfree(dev); |
471 | } | 483 | } |
472 | EXPORT_SYMBOL(drm_put_dev); | 484 | EXPORT_SYMBOL(drm_dev_free); |
473 | 485 | ||
474 | void drm_unplug_dev(struct drm_device *dev) | 486 | /** |
487 | * drm_dev_register - Register DRM device | ||
488 | * @dev: Device to register | ||
489 | * | ||
490 | * Register the DRM device @dev with the system, advertise the device to | ||
491 | * user-space and start normal device operation. @dev must have been allocated | ||
492 | * previously via drm_dev_alloc(). | ||
493 | * | ||
494 | * Never call this twice on any device! | ||
495 | * | ||
496 | * RETURNS: | ||
497 | * 0 on success, negative error code on failure. | ||
498 | */ | ||
499 | int drm_dev_register(struct drm_device *dev, unsigned long flags) | ||
475 | { | 500 | { |
476 | /* for a USB device */ | 501 | int ret; |
477 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | ||
478 | drm_unplug_minor(dev->control); | ||
479 | if (dev->render) | ||
480 | drm_unplug_minor(dev->render); | ||
481 | drm_unplug_minor(dev->primary); | ||
482 | 502 | ||
483 | mutex_lock(&drm_global_mutex); | 503 | mutex_lock(&drm_global_mutex); |
484 | 504 | ||
485 | drm_device_set_unplugged(dev); | 505 | if (dev->driver->bus->agp_init) { |
506 | ret = dev->driver->bus->agp_init(dev); | ||
507 | if (ret) | ||
508 | goto out_unlock; | ||
509 | } | ||
486 | 510 | ||
487 | if (dev->open_count == 0) { | 511 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
488 | drm_put_dev(dev); | 512 | ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL); |
513 | if (ret) | ||
514 | goto err_agp; | ||
489 | } | 515 | } |
516 | |||
517 | if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) { | ||
518 | ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER); | ||
519 | if (ret) | ||
520 | goto err_control_node; | ||
521 | } | ||
522 | |||
523 | ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY); | ||
524 | if (ret) | ||
525 | goto err_render_node; | ||
526 | |||
527 | if (dev->driver->load) { | ||
528 | ret = dev->driver->load(dev, flags); | ||
529 | if (ret) | ||
530 | goto err_primary_node; | ||
531 | } | ||
532 | |||
533 | /* setup grouping for legacy outputs */ | ||
534 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | ||
535 | ret = drm_mode_group_init_legacy_group(dev, | ||
536 | &dev->primary->mode_group); | ||
537 | if (ret) | ||
538 | goto err_unload; | ||
539 | } | ||
540 | |||
541 | list_add_tail(&dev->driver_item, &dev->driver->device_list); | ||
542 | |||
543 | ret = 0; | ||
544 | goto out_unlock; | ||
545 | |||
546 | err_unload: | ||
547 | if (dev->driver->unload) | ||
548 | dev->driver->unload(dev); | ||
549 | err_primary_node: | ||
550 | drm_put_minor(&dev->primary); | ||
551 | err_render_node: | ||
552 | if (dev->render) | ||
553 | drm_put_minor(&dev->render); | ||
554 | err_control_node: | ||
555 | if (dev->control) | ||
556 | drm_put_minor(&dev->control); | ||
557 | err_agp: | ||
558 | if (dev->driver->bus->agp_destroy) | ||
559 | dev->driver->bus->agp_destroy(dev); | ||
560 | out_unlock: | ||
490 | mutex_unlock(&drm_global_mutex); | 561 | mutex_unlock(&drm_global_mutex); |
562 | return ret; | ||
491 | } | 563 | } |
492 | EXPORT_SYMBOL(drm_unplug_dev); | 564 | EXPORT_SYMBOL(drm_dev_register); |
565 | |||
566 | /** | ||
567 | * drm_dev_unregister - Unregister DRM device | ||
568 | * @dev: Device to unregister | ||
569 | * | ||
570 | * Unregister the DRM device from the system. This does the reverse of | ||
571 | * drm_dev_register() but does not deallocate the device. The caller must call | ||
572 | * drm_dev_free() to free all resources. | ||
573 | */ | ||
574 | void drm_dev_unregister(struct drm_device *dev) | ||
575 | { | ||
576 | struct drm_map_list *r_list, *list_temp; | ||
577 | |||
578 | drm_lastclose(dev); | ||
579 | |||
580 | if (dev->driver->unload) | ||
581 | dev->driver->unload(dev); | ||
582 | |||
583 | if (dev->driver->bus->agp_destroy) | ||
584 | dev->driver->bus->agp_destroy(dev); | ||
585 | |||
586 | drm_vblank_cleanup(dev); | ||
587 | |||
588 | list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) | ||
589 | drm_rmmap(dev, r_list->map); | ||
590 | |||
591 | if (dev->control) | ||
592 | drm_put_minor(&dev->control); | ||
593 | if (dev->render) | ||
594 | drm_put_minor(&dev->render); | ||
595 | drm_put_minor(&dev->primary); | ||
596 | |||
597 | list_del(&dev->driver_item); | ||
598 | } | ||
599 | EXPORT_SYMBOL(drm_dev_unregister); | ||
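
Taken together, the drm_stub.c changes split device setup into four steps: drm_dev_alloc(), drm_dev_register(), drm_dev_unregister() and drm_dev_free(). The drm_pci.c, drm_platform.c and drm_usb.c hunks all collapse their probe paths onto this pattern. A minimal sketch of how a bus glue layer is expected to use the new helpers follows; the my_* names and the bus device type are placeholders, not part of this patch.

static int my_bus_probe(struct my_bus_device *bdev, struct drm_driver *driver)
{
	struct drm_device *dev;
	int ret;

	/* allocate and initialize, but do not register yet */
	dev = drm_dev_alloc(driver, &bdev->dev);
	if (!dev)
		return -ENOMEM;

	/* bus-specific wiring goes here, e.g. dev->platformdev = ... */

	/* create the minors, run ->load(), add to the driver's device list */
	ret = drm_dev_register(dev, 0);
	if (ret)
		goto err_free;

	return 0;

err_free:
	drm_dev_free(dev);	/* never plain kfree() on a drm_device */
	return ret;
}

static void my_bus_remove(struct drm_device *dev)
{
	drm_dev_unregister(dev);	/* reverse of drm_dev_register() */
	drm_dev_free(dev);		/* or simply drm_put_dev(dev), which does both */
}
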
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c index 87664723b9ce..b179b70e7853 100644 --- a/drivers/gpu/drm/drm_usb.c +++ b/drivers/gpu/drm/drm_usb.c | |||
@@ -7,57 +7,20 @@ int drm_get_usb_dev(struct usb_interface *interface, | |||
7 | struct drm_driver *driver) | 7 | struct drm_driver *driver) |
8 | { | 8 | { |
9 | struct drm_device *dev; | 9 | struct drm_device *dev; |
10 | struct usb_device *usbdev; | ||
11 | int ret; | 10 | int ret; |
12 | 11 | ||
13 | DRM_DEBUG("\n"); | 12 | DRM_DEBUG("\n"); |
14 | 13 | ||
15 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); | 14 | dev = drm_dev_alloc(driver, &interface->dev); |
16 | if (!dev) | 15 | if (!dev) |
17 | return -ENOMEM; | 16 | return -ENOMEM; |
18 | 17 | ||
19 | usbdev = interface_to_usbdev(interface); | 18 | dev->usbdev = interface_to_usbdev(interface); |
20 | dev->usbdev = usbdev; | ||
21 | dev->dev = &interface->dev; | ||
22 | |||
23 | mutex_lock(&drm_global_mutex); | ||
24 | |||
25 | ret = drm_fill_in_dev(dev, NULL, driver); | ||
26 | if (ret) { | ||
27 | printk(KERN_ERR "DRM: Fill_in_dev failed.\n"); | ||
28 | goto err_g1; | ||
29 | } | ||
30 | |||
31 | usb_set_intfdata(interface, dev); | 19 | usb_set_intfdata(interface, dev); |
32 | ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL); | ||
33 | if (ret) | ||
34 | goto err_g1; | ||
35 | |||
36 | if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) { | ||
37 | ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER); | ||
38 | if (ret) | ||
39 | goto err_g11; | ||
40 | } | ||
41 | 20 | ||
42 | ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY); | 21 | ret = drm_dev_register(dev, 0); |
43 | if (ret) | 22 | if (ret) |
44 | goto err_g2; | 23 | goto err_free; |
45 | |||
46 | if (dev->driver->load) { | ||
47 | ret = dev->driver->load(dev, 0); | ||
48 | if (ret) | ||
49 | goto err_g3; | ||
50 | } | ||
51 | |||
52 | /* setup the grouping for the legacy output */ | ||
53 | ret = drm_mode_group_init_legacy_group(dev, | ||
54 | &dev->primary->mode_group); | ||
55 | if (ret) | ||
56 | goto err_g3; | ||
57 | |||
58 | list_add_tail(&dev->driver_item, &driver->device_list); | ||
59 | |||
60 | mutex_unlock(&drm_global_mutex); | ||
61 | 24 | ||
62 | DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", | 25 | DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", |
63 | driver->name, driver->major, driver->minor, driver->patchlevel, | 26 | driver->name, driver->major, driver->minor, driver->patchlevel, |
@@ -65,16 +28,8 @@ int drm_get_usb_dev(struct usb_interface *interface, | |||
65 | 28 | ||
66 | return 0; | 29 | return 0; |
67 | 30 | ||
68 | err_g3: | 31 | err_free: |
69 | drm_put_minor(&dev->primary); | 32 | drm_dev_free(dev); |
70 | err_g2: | ||
71 | if (dev->render) | ||
72 | drm_put_minor(&dev->render); | ||
73 | err_g11: | ||
74 | drm_put_minor(&dev->control); | ||
75 | err_g1: | ||
76 | kfree(dev); | ||
77 | mutex_unlock(&drm_global_mutex); | ||
78 | return ret; | 33 | return ret; |
79 | 34 | ||
80 | } | 35 | } |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index bb82ef78ca85..3a1e6d9b25f7 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c | |||
@@ -264,7 +264,6 @@ static struct drm_driver exynos_drm_driver = { | |||
264 | .get_vblank_counter = drm_vblank_count, | 264 | .get_vblank_counter = drm_vblank_count, |
265 | .enable_vblank = exynos_drm_crtc_enable_vblank, | 265 | .enable_vblank = exynos_drm_crtc_enable_vblank, |
266 | .disable_vblank = exynos_drm_crtc_disable_vblank, | 266 | .disable_vblank = exynos_drm_crtc_disable_vblank, |
267 | .gem_init_object = exynos_drm_gem_init_object, | ||
268 | .gem_free_object = exynos_drm_gem_free_object, | 267 | .gem_free_object = exynos_drm_gem_free_object, |
269 | .gem_vm_ops = &exynos_drm_gem_vm_ops, | 268 | .gem_vm_ops = &exynos_drm_gem_vm_ops, |
270 | .dumb_create = exynos_drm_gem_dumb_create, | 269 | .dumb_create = exynos_drm_gem_dumb_create, |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 868a14d52995..23da72b5eae9 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c | |||
@@ -716,20 +716,20 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev) | |||
716 | { | 716 | { |
717 | /* | 717 | /* |
718 | * enable drm irq mode. | 718 | * enable drm irq mode. |
719 | * - with irq_enabled = 1, we can use the vblank feature. | 719 | * - with irq_enabled = true, we can use the vblank feature. |
720 | * | 720 | * |
721 | * Note that we do not use the drm core irq handler but | 721 | * Note that we do not use the drm core irq handler but |
722 | * the driver's own handler instead, because the | 722 | * the driver's own handler instead, because the |
723 | * drm framework supports only one irq handler. | 723 | * drm framework supports only one irq handler. |
724 | */ | 724 | */ |
725 | drm_dev->irq_enabled = 1; | 725 | drm_dev->irq_enabled = true; |
726 | 726 | ||
727 | /* | 727 | /* |
728 | * with vblank_disable_allowed = 1, vblank interrupt will be disabled | 728 | * with vblank_disable_allowed = true, vblank interrupt will be disabled |
729 | * by the drm timer once the current process gives up ownership of | 729 | * by the drm timer once the current process gives up ownership of |
730 | * the vblank event (after drm_vblank_put() is called). | 730 | * the vblank event (after drm_vblank_put() is called). |
731 | */ | 731 | */ |
732 | drm_dev->vblank_disable_allowed = 1; | 732 | drm_dev->vblank_disable_allowed = true; |
733 | 733 | ||
734 | /* attach this sub driver to iommu mapping if supported. */ | 734 | /* attach this sub driver to iommu mapping if supported. */ |
735 | if (is_drm_iommu_supported(drm_dev)) | 735 | if (is_drm_iommu_supported(drm_dev)) |
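
The comment block above describes the reference counting behind vblank_disable_allowed: each consumer takes a reference before relying on the vblank interrupt and drops it when done, and only the final drm_vblank_put() arms the off-delay disable timer shown in the drm_irq.c hunk at the top of this diff. A hypothetical consumer, for illustration only:

static void sample_vblank_user(struct drm_device *drm_dev, int crtc)
{
	/* first reference enables the vblank irq if it was off */
	if (drm_vblank_get(drm_dev, crtc))
		return;

	/* ... consume vblank events, e.g. sleep on drm_dev->vblank[crtc].queue ... */

	/* last reference lets the core start the drm_vblank_offdelay timer */
	drm_vblank_put(drm_dev, crtc);
}
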
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index 49f9cd232757..1ade191d84f4 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c | |||
@@ -630,11 +630,6 @@ void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev, | |||
630 | dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir); | 630 | dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir); |
631 | } | 631 | } |
632 | 632 | ||
633 | int exynos_drm_gem_init_object(struct drm_gem_object *obj) | ||
634 | { | ||
635 | return 0; | ||
636 | } | ||
637 | |||
638 | void exynos_drm_gem_free_object(struct drm_gem_object *obj) | 633 | void exynos_drm_gem_free_object(struct drm_gem_object *obj) |
639 | { | 634 | { |
640 | struct exynos_drm_gem_obj *exynos_gem_obj; | 635 | struct exynos_drm_gem_obj *exynos_gem_obj; |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index 09555afdfe9c..702ec3abe85c 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h | |||
@@ -135,9 +135,6 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev, | |||
135 | unsigned int gem_handle, | 135 | unsigned int gem_handle, |
136 | struct drm_file *file_priv); | 136 | struct drm_file *file_priv); |
137 | 137 | ||
138 | /* initialize gem object. */ | ||
139 | int exynos_drm_gem_init_object(struct drm_gem_object *obj); | ||
140 | |||
141 | /* free gem object. */ | 138 | /* free gem object. */ |
142 | void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj); | 139 | void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj); |
143 | 140 | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index 4400330e4449..ddaaedde173d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c | |||
@@ -101,7 +101,6 @@ static struct edid *vidi_get_edid(struct device *dev, | |||
101 | { | 101 | { |
102 | struct vidi_context *ctx = get_vidi_context(dev); | 102 | struct vidi_context *ctx = get_vidi_context(dev); |
103 | struct edid *edid; | 103 | struct edid *edid; |
104 | int edid_len; | ||
105 | 104 | ||
106 | /* | 105 | /* |
107 | * the edid data comes from user side and it would be set | 106 | * the edid data comes from user side and it would be set |
@@ -112,8 +111,7 @@ static struct edid *vidi_get_edid(struct device *dev, | |||
112 | return ERR_PTR(-EFAULT); | 111 | return ERR_PTR(-EFAULT); |
113 | } | 112 | } |
114 | 113 | ||
115 | edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH; | 114 | edid = drm_edid_duplicate(ctx->raw_edid); |
116 | edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL); | ||
117 | if (!edid) { | 115 | if (!edid) { |
118 | DRM_DEBUG_KMS("failed to allocate edid\n"); | 116 | DRM_DEBUG_KMS("failed to allocate edid\n"); |
119 | return ERR_PTR(-ENOMEM); | 117 | return ERR_PTR(-ENOMEM); |
@@ -385,20 +383,20 @@ static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev) | |||
385 | { | 383 | { |
386 | /* | 384 | /* |
387 | * enable drm irq mode. | 385 | * enable drm irq mode. |
388 | * - with irq_enabled = 1, we can use the vblank feature. | 386 | * - with irq_enabled = true, we can use the vblank feature. |
389 | * | 387 | * |
390 | * Note that we do not use the drm core irq handler but | 388 | * Note that we do not use the drm core irq handler but |
391 | * the driver's own handler instead, because the | 389 | * the driver's own handler instead, because the |
392 | * drm framework supports only one irq handler. | 390 | * drm framework supports only one irq handler. |
393 | */ | 391 | */ |
394 | drm_dev->irq_enabled = 1; | 392 | drm_dev->irq_enabled = true; |
395 | 393 | ||
396 | /* | 394 | /* |
397 | * with vblank_disable_allowed = 1, vblank interrupt will be disabled | 395 | * with vblank_disable_allowed = true, vblank interrupt will be disabled |
398 | * by the drm timer once the current process gives up ownership of | 396 | * by the drm timer once the current process gives up ownership of |
399 | * the vblank event (after drm_vblank_put() is called). | 397 | * the vblank event (after drm_vblank_put() is called). |
400 | */ | 398 | */ |
401 | drm_dev->vblank_disable_allowed = 1; | 399 | drm_dev->vblank_disable_allowed = true; |
402 | 400 | ||
403 | return 0; | 401 | return 0; |
404 | } | 402 | } |
@@ -485,7 +483,6 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data, | |||
485 | struct exynos_drm_manager *manager; | 483 | struct exynos_drm_manager *manager; |
486 | struct exynos_drm_display_ops *display_ops; | 484 | struct exynos_drm_display_ops *display_ops; |
487 | struct drm_exynos_vidi_connection *vidi = data; | 485 | struct drm_exynos_vidi_connection *vidi = data; |
488 | int edid_len; | ||
489 | 486 | ||
490 | if (!vidi) { | 487 | if (!vidi) { |
491 | DRM_DEBUG_KMS("user data for vidi is null.\n"); | 488 | DRM_DEBUG_KMS("user data for vidi is null.\n"); |
@@ -524,8 +521,7 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data, | |||
524 | DRM_DEBUG_KMS("edid data is invalid.\n"); | 521 | DRM_DEBUG_KMS("edid data is invalid.\n"); |
525 | return -EINVAL; | 522 | return -EINVAL; |
526 | } | 523 | } |
527 | edid_len = (1 + raw_edid->extensions) * EDID_LENGTH; | 524 | ctx->raw_edid = drm_edid_duplicate(raw_edid); |
528 | ctx->raw_edid = kmemdup(raw_edid, edid_len, GFP_KERNEL); | ||
529 | if (!ctx->raw_edid) { | 525 | if (!ctx->raw_edid) { |
530 | DRM_DEBUG_KMS("failed to allocate raw_edid.\n"); | 526 | DRM_DEBUG_KMS("failed to allocate raw_edid.\n"); |
531 | return -ENOMEM; | 527 | return -ENOMEM; |
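
Both vidi hunks above (and the matching vidi_get_edid change) drop the open-coded kmemdup of (1 + extensions) * EDID_LENGTH in favour of a shared drm_edid_duplicate() helper. Its implementation is not part of this section, but judging from the lines it replaces it boils down to something like:

struct edid *drm_edid_duplicate(const struct edid *edid)
{
	/* the base EDID block plus its extension blocks, each EDID_LENGTH bytes */
	return kmemdup(edid, (edid->extensions + 1) * EDID_LENGTH, GFP_KERNEL);
}
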
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c index 10ae8c52d06f..e2db48a81ed0 100644 --- a/drivers/gpu/drm/gma500/gem.c +++ b/drivers/gpu/drm/gma500/gem.c | |||
@@ -29,11 +29,6 @@ | |||
29 | #include <drm/drm_vma_manager.h> | 29 | #include <drm/drm_vma_manager.h> |
30 | #include "psb_drv.h" | 30 | #include "psb_drv.h" |
31 | 31 | ||
32 | int psb_gem_init_object(struct drm_gem_object *obj) | ||
33 | { | ||
34 | return -EINVAL; | ||
35 | } | ||
36 | |||
37 | void psb_gem_free_object(struct drm_gem_object *obj) | 32 | void psb_gem_free_object(struct drm_gem_object *obj) |
38 | { | 33 | { |
39 | struct gtt_range *gtt = container_of(obj, struct gtt_range, gem); | 34 | struct gtt_range *gtt = container_of(obj, struct gtt_range, gem); |
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index fcb4e9ff1f20..dd607f820a26 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c | |||
@@ -359,7 +359,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset) | |||
359 | 359 | ||
360 | drm_irq_install(dev); | 360 | drm_irq_install(dev); |
361 | 361 | ||
362 | dev->vblank_disable_allowed = 1; | 362 | dev->vblank_disable_allowed = true; |
363 | 363 | ||
364 | dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ | 364 | dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ |
365 | 365 | ||
@@ -646,7 +646,6 @@ static struct drm_driver driver = { | |||
646 | .preclose = psb_driver_preclose, | 646 | .preclose = psb_driver_preclose, |
647 | .postclose = psb_driver_close, | 647 | .postclose = psb_driver_close, |
648 | 648 | ||
649 | .gem_init_object = psb_gem_init_object, | ||
650 | .gem_free_object = psb_gem_free_object, | 649 | .gem_free_object = psb_gem_free_object, |
651 | .gem_vm_ops = &psb_gem_vm_ops, | 650 | .gem_vm_ops = &psb_gem_vm_ops, |
652 | .dumb_create = psb_gem_dumb_create, | 651 | .dumb_create = psb_gem_dumb_create, |
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h index 4535ac7708f8..0bab46bd73d2 100644 --- a/drivers/gpu/drm/gma500/psb_drv.h +++ b/drivers/gpu/drm/gma500/psb_drv.h | |||
@@ -44,10 +44,10 @@ enum { | |||
44 | CHIP_MFLD_0130 = 3, /* Medfield */ | 44 | CHIP_MFLD_0130 = 3, /* Medfield */ |
45 | }; | 45 | }; |
46 | 46 | ||
47 | #define IS_PSB(dev) (((dev)->pci_device & 0xfffe) == 0x8108) | 47 | #define IS_PSB(dev) (((dev)->pdev->device & 0xfffe) == 0x8108) |
48 | #define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100) | 48 | #define IS_MRST(dev) (((dev)->pdev->device & 0xfffc) == 0x4100) |
49 | #define IS_MFLD(dev) (((dev)->pci_device & 0xfff8) == 0x0130) | 49 | #define IS_MFLD(dev) (((dev)->pdev->device & 0xfff8) == 0x0130) |
50 | #define IS_CDV(dev) (((dev)->pci_device & 0xfff0) == 0x0be0) | 50 | #define IS_CDV(dev) (((dev)->pdev->device & 0xfff0) == 0x0be0) |
51 | 51 | ||
52 | /* | 52 | /* |
53 | * Driver definitions | 53 | * Driver definitions |
@@ -837,7 +837,6 @@ extern const struct drm_connector_helper_funcs | |||
837 | extern const struct drm_connector_funcs psb_intel_lvds_connector_funcs; | 837 | extern const struct drm_connector_funcs psb_intel_lvds_connector_funcs; |
838 | 838 | ||
839 | /* gem.c */ | 839 | /* gem.c */ |
840 | extern int psb_gem_init_object(struct drm_gem_object *obj); | ||
841 | extern void psb_gem_free_object(struct drm_gem_object *obj); | 840 | extern void psb_gem_free_object(struct drm_gem_object *obj); |
842 | extern int psb_gem_get_aperture(struct drm_device *dev, void *data, | 841 | extern int psb_gem_get_aperture(struct drm_device *dev, void *data, |
843 | struct drm_file *file); | 842 | struct drm_file *file); |
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c index 029eccf30137..ba4830342d34 100644 --- a/drivers/gpu/drm/gma500/psb_irq.c +++ b/drivers/gpu/drm/gma500/psb_irq.c | |||
@@ -271,15 +271,15 @@ void psb_irq_preinstall(struct drm_device *dev) | |||
271 | 271 | ||
272 | if (gma_power_is_on(dev)) | 272 | if (gma_power_is_on(dev)) |
273 | PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); | 273 | PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); |
274 | if (dev->vblank_enabled[0]) | 274 | if (dev->vblank[0].enabled) |
275 | dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG; | 275 | dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG; |
276 | if (dev->vblank_enabled[1]) | 276 | if (dev->vblank[1].enabled) |
277 | dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG; | 277 | dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG; |
278 | 278 | ||
279 | /* FIXME: Handle Medfield irq mask | 279 | /* FIXME: Handle Medfield irq mask |
280 | if (dev->vblank_enabled[1]) | 280 | if (dev->vblank[1].enabled) |
281 | dev_priv->vdc_irq_mask |= _MDFLD_PIPEB_EVENT_FLAG; | 281 | dev_priv->vdc_irq_mask |= _MDFLD_PIPEB_EVENT_FLAG; |
282 | if (dev->vblank_enabled[2]) | 282 | if (dev->vblank[2].enabled) |
283 | dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG; | 283 | dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG; |
284 | */ | 284 | */ |
285 | 285 | ||
@@ -305,17 +305,17 @@ int psb_irq_postinstall(struct drm_device *dev) | |||
305 | PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R); | 305 | PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R); |
306 | PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); | 306 | PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); |
307 | 307 | ||
308 | if (dev->vblank_enabled[0]) | 308 | if (dev->vblank[0].enabled) |
309 | psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE); | 309 | psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE); |
310 | else | 310 | else |
311 | psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE); | 311 | psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE); |
312 | 312 | ||
313 | if (dev->vblank_enabled[1]) | 313 | if (dev->vblank[1].enabled) |
314 | psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE); | 314 | psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE); |
315 | else | 315 | else |
316 | psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE); | 316 | psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE); |
317 | 317 | ||
318 | if (dev->vblank_enabled[2]) | 318 | if (dev->vblank[2].enabled) |
319 | psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE); | 319 | psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE); |
320 | else | 320 | else |
321 | psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE); | 321 | psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE); |
@@ -339,13 +339,13 @@ void psb_irq_uninstall(struct drm_device *dev) | |||
339 | 339 | ||
340 | PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); | 340 | PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); |
341 | 341 | ||
342 | if (dev->vblank_enabled[0]) | 342 | if (dev->vblank[0].enabled) |
343 | psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE); | 343 | psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE); |
344 | 344 | ||
345 | if (dev->vblank_enabled[1]) | 345 | if (dev->vblank[1].enabled) |
346 | psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE); | 346 | psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE); |
347 | 347 | ||
348 | if (dev->vblank_enabled[2]) | 348 | if (dev->vblank[2].enabled) |
349 | psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE); | 349 | psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE); |
350 | 350 | ||
351 | dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG | | 351 | dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG | |
@@ -456,7 +456,7 @@ static int psb_vblank_do_wait(struct drm_device *dev, | |||
456 | { | 456 | { |
457 | unsigned int cur_vblank; | 457 | unsigned int cur_vblank; |
458 | int ret = 0; | 458 | int ret = 0; |
459 | DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, | 459 | DRM_WAIT_ON(ret, dev->vblank.queue, 3 * DRM_HZ, |
460 | (((cur_vblank = atomic_read(counter)) | 460 | (((cur_vblank = atomic_read(counter)) |
461 | - *sequence) <= (1 << 23))); | 461 | - *sequence) <= (1 << 23))); |
462 | *sequence = cur_vblank; | 462 | *sequence = cur_vblank; |
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c index ab1892eb1074..249fdff305c6 100644 --- a/drivers/gpu/drm/i810/i810_dma.c +++ b/drivers/gpu/drm/i810/i810_dma.c | |||
@@ -944,8 +944,6 @@ static int i810_dma_vertex(struct drm_device *dev, void *data, | |||
944 | dma->buflist[vertex->idx], | 944 | dma->buflist[vertex->idx], |
945 | vertex->discard, vertex->used); | 945 | vertex->discard, vertex->used); |
946 | 946 | ||
947 | atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]); | ||
948 | atomic_inc(&dev->counts[_DRM_STAT_DMA]); | ||
949 | sarea_priv->last_enqueue = dev_priv->counter - 1; | 947 | sarea_priv->last_enqueue = dev_priv->counter - 1; |
950 | sarea_priv->last_dispatch = (int)hw_status[5]; | 948 | sarea_priv->last_dispatch = (int)hw_status[5]; |
951 | 949 | ||
@@ -1105,8 +1103,6 @@ static int i810_dma_mc(struct drm_device *dev, void *data, | |||
1105 | i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used, | 1103 | i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used, |
1106 | mc->last_render); | 1104 | mc->last_render); |
1107 | 1105 | ||
1108 | atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]); | ||
1109 | atomic_inc(&dev->counts[_DRM_STAT_DMA]); | ||
1110 | sarea_priv->last_enqueue = dev_priv->counter - 1; | 1106 | sarea_priv->last_enqueue = dev_priv->counter - 1; |
1111 | sarea_priv->last_dispatch = (int)hw_status[5]; | 1107 | sarea_priv->last_dispatch = (int)hw_status[5]; |
1112 | 1108 | ||
@@ -1197,13 +1193,6 @@ static int i810_flip_bufs(struct drm_device *dev, void *data, | |||
1197 | 1193 | ||
1198 | int i810_driver_load(struct drm_device *dev, unsigned long flags) | 1194 | int i810_driver_load(struct drm_device *dev, unsigned long flags) |
1199 | { | 1195 | { |
1200 | /* i810 has 4 more counters */ | ||
1201 | dev->counters += 4; | ||
1202 | dev->types[6] = _DRM_STAT_IRQ; | ||
1203 | dev->types[7] = _DRM_STAT_PRIMARY; | ||
1204 | dev->types[8] = _DRM_STAT_SECONDARY; | ||
1205 | dev->types[9] = _DRM_STAT_DMA; | ||
1206 | |||
1207 | pci_set_master(dev->pdev); | 1196 | pci_set_master(dev->pdev); |
1208 | 1197 | ||
1209 | return 0; | 1198 | return 0; |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index f221631cdc86..b3873c945d1b 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -931,7 +931,7 @@ static int i915_getparam(struct drm_device *dev, void *data, | |||
931 | value = READ_BREADCRUMB(dev_priv); | 931 | value = READ_BREADCRUMB(dev_priv); |
932 | break; | 932 | break; |
933 | case I915_PARAM_CHIPSET_ID: | 933 | case I915_PARAM_CHIPSET_ID: |
934 | value = dev->pci_device; | 934 | value = dev->pdev->device; |
935 | break; | 935 | break; |
936 | case I915_PARAM_HAS_GEM: | 936 | case I915_PARAM_HAS_GEM: |
937 | value = 1; | 937 | value = 1; |
@@ -1333,7 +1333,7 @@ static int i915_load_modeset_init(struct drm_device *dev) | |||
1333 | 1333 | ||
1334 | /* Always safe in the mode setting case. */ | 1334 | /* Always safe in the mode setting case. */ |
1335 | /* FIXME: do pre/post-mode set stuff in core KMS code */ | 1335 | /* FIXME: do pre/post-mode set stuff in core KMS code */ |
1336 | dev->vblank_disable_allowed = 1; | 1336 | dev->vblank_disable_allowed = true; |
1337 | if (INTEL_INFO(dev)->num_pipes == 0) { | 1337 | if (INTEL_INFO(dev)->num_pipes == 0) { |
1338 | intel_display_power_put(dev, POWER_DOMAIN_VGA); | 1338 | intel_display_power_put(dev, POWER_DOMAIN_VGA); |
1339 | return 0; | 1339 | return 0; |
@@ -1480,13 +1480,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1480 | if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) | 1480 | if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) |
1481 | return -ENODEV; | 1481 | return -ENODEV; |
1482 | 1482 | ||
1483 | /* i915 has 4 more counters */ | ||
1484 | dev->counters += 4; | ||
1485 | dev->types[6] = _DRM_STAT_IRQ; | ||
1486 | dev->types[7] = _DRM_STAT_PRIMARY; | ||
1487 | dev->types[8] = _DRM_STAT_SECONDARY; | ||
1488 | dev->types[9] = _DRM_STAT_DMA; | ||
1489 | |||
1490 | dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); | 1483 | dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); |
1491 | if (dev_priv == NULL) | 1484 | if (dev_priv == NULL) |
1492 | return -ENOMEM; | 1485 | return -ENOMEM; |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 0fc96586acf3..96f230497cbe 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -940,7 +940,6 @@ static struct drm_driver driver = { | |||
940 | .debugfs_init = i915_debugfs_init, | 940 | .debugfs_init = i915_debugfs_init, |
941 | .debugfs_cleanup = i915_debugfs_cleanup, | 941 | .debugfs_cleanup = i915_debugfs_cleanup, |
942 | #endif | 942 | #endif |
943 | .gem_init_object = i915_gem_init_object, | ||
944 | .gem_free_object = i915_gem_free_object, | 943 | .gem_free_object = i915_gem_free_object, |
945 | .gem_vm_ops = &i915_gem_vm_ops, | 944 | .gem_vm_ops = &i915_gem_vm_ops, |
946 | 945 | ||
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ed8653fd97ad..6a5b7ab0c3fa 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -1615,39 +1615,39 @@ struct drm_i915_file_private { | |||
1615 | 1615 | ||
1616 | #define INTEL_INFO(dev) (to_i915(dev)->info) | 1616 | #define INTEL_INFO(dev) (to_i915(dev)->info) |
1617 | 1617 | ||
1618 | #define IS_I830(dev) ((dev)->pci_device == 0x3577) | 1618 | #define IS_I830(dev) ((dev)->pdev->device == 0x3577) |
1619 | #define IS_845G(dev) ((dev)->pci_device == 0x2562) | 1619 | #define IS_845G(dev) ((dev)->pdev->device == 0x2562) |
1620 | #define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) | 1620 | #define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) |
1621 | #define IS_I865G(dev) ((dev)->pci_device == 0x2572) | 1621 | #define IS_I865G(dev) ((dev)->pdev->device == 0x2572) |
1622 | #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) | 1622 | #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) |
1623 | #define IS_I915GM(dev) ((dev)->pci_device == 0x2592) | 1623 | #define IS_I915GM(dev) ((dev)->pdev->device == 0x2592) |
1624 | #define IS_I945G(dev) ((dev)->pci_device == 0x2772) | 1624 | #define IS_I945G(dev) ((dev)->pdev->device == 0x2772) |
1625 | #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) | 1625 | #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) |
1626 | #define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) | 1626 | #define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) |
1627 | #define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) | 1627 | #define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) |
1628 | #define IS_GM45(dev) ((dev)->pci_device == 0x2A42) | 1628 | #define IS_GM45(dev) ((dev)->pdev->device == 0x2A42) |
1629 | #define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) | 1629 | #define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) |
1630 | #define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001) | 1630 | #define IS_PINEVIEW_G(dev) ((dev)->pdev->device == 0xa001) |
1631 | #define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011) | 1631 | #define IS_PINEVIEW_M(dev) ((dev)->pdev->device == 0xa011) |
1632 | #define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) | 1632 | #define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) |
1633 | #define IS_G33(dev) (INTEL_INFO(dev)->is_g33) | 1633 | #define IS_G33(dev) (INTEL_INFO(dev)->is_g33) |
1634 | #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) | 1634 | #define IS_IRONLAKE_M(dev) ((dev)->pdev->device == 0x0046) |
1635 | #define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) | 1635 | #define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) |
1636 | #define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \ | 1636 | #define IS_IVB_GT1(dev) ((dev)->pdev->device == 0x0156 || \ |
1637 | (dev)->pci_device == 0x0152 || \ | 1637 | (dev)->pdev->device == 0x0152 || \ |
1638 | (dev)->pci_device == 0x015a) | 1638 | (dev)->pdev->device == 0x015a) |
1639 | #define IS_SNB_GT1(dev) ((dev)->pci_device == 0x0102 || \ | 1639 | #define IS_SNB_GT1(dev) ((dev)->pdev->device == 0x0102 || \ |
1640 | (dev)->pci_device == 0x0106 || \ | 1640 | (dev)->pdev->device == 0x0106 || \ |
1641 | (dev)->pci_device == 0x010A) | 1641 | (dev)->pdev->device == 0x010A) |
1642 | #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) | 1642 | #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) |
1643 | #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) | 1643 | #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) |
1644 | #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) | 1644 | #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) |
1645 | #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ | 1645 | #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ |
1646 | ((dev)->pci_device & 0xFF00) == 0x0C00) | 1646 | ((dev)->pdev->device & 0xFF00) == 0x0C00) |
1647 | #define IS_ULT(dev) (IS_HASWELL(dev) && \ | 1647 | #define IS_ULT(dev) (IS_HASWELL(dev) && \ |
1648 | ((dev)->pci_device & 0xFF00) == 0x0A00) | 1648 | ((dev)->pdev->device & 0xFF00) == 0x0A00) |
1649 | #define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \ | 1649 | #define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \ |
1650 | ((dev)->pci_device & 0x00F0) == 0x0020) | 1650 | ((dev)->pdev->device & 0x00F0) == 0x0020) |
1651 | #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) | 1651 | #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) |
1652 | 1652 | ||
1653 | /* | 1653 | /* |
@@ -1874,7 +1874,6 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data, | |||
1874 | void i915_gem_load(struct drm_device *dev); | 1874 | void i915_gem_load(struct drm_device *dev); |
1875 | void *i915_gem_object_alloc(struct drm_device *dev); | 1875 | void *i915_gem_object_alloc(struct drm_device *dev); |
1876 | void i915_gem_object_free(struct drm_i915_gem_object *obj); | 1876 | void i915_gem_object_free(struct drm_i915_gem_object *obj); |
1877 | int i915_gem_init_object(struct drm_gem_object *obj); | ||
1878 | void i915_gem_object_init(struct drm_i915_gem_object *obj, | 1877 | void i915_gem_object_init(struct drm_i915_gem_object *obj, |
1879 | const struct drm_i915_gem_object_ops *ops); | 1878 | const struct drm_i915_gem_object_ops *ops); |
1880 | struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, | 1879 | struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 19ecfa8ad2e1..13c885d66383 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -4148,13 +4148,6 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, | |||
4148 | return obj; | 4148 | return obj; |
4149 | } | 4149 | } |
4150 | 4150 | ||
4151 | int i915_gem_init_object(struct drm_gem_object *obj) | ||
4152 | { | ||
4153 | BUG(); | ||
4154 | |||
4155 | return 0; | ||
4156 | } | ||
4157 | |||
4158 | void i915_gem_free_object(struct drm_gem_object *gem_obj) | 4151 | void i915_gem_free_object(struct drm_gem_object *gem_obj) |
4159 | { | 4152 | { |
4160 | struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); | 4153 | struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); |
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index da1022a328e3..915c8ca08969 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c | |||
@@ -304,7 +304,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, | |||
304 | err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, | 304 | err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, |
305 | error->time.tv_usec); | 305 | error->time.tv_usec); |
306 | err_printf(m, "Kernel: " UTS_RELEASE "\n"); | 306 | err_printf(m, "Kernel: " UTS_RELEASE "\n"); |
307 | err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); | 307 | err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device); |
308 | err_printf(m, "EIR: 0x%08x\n", error->eir); | 308 | err_printf(m, "EIR: 0x%08x\n", error->eir); |
309 | err_printf(m, "IER: 0x%08x\n", error->ier); | 309 | err_printf(m, "IER: 0x%08x\n", error->ier); |
310 | err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); | 310 | err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); |
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index a43ac3a17bdc..6d335f8ca343 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
@@ -316,9 +316,6 @@ static void intel_ddi_mode_set(struct intel_encoder *encoder) | |||
316 | DRM_DEBUG_DRIVER("DP audio: write eld information\n"); | 316 | DRM_DEBUG_DRIVER("DP audio: write eld information\n"); |
317 | intel_write_eld(&encoder->base, adjusted_mode); | 317 | intel_write_eld(&encoder->base, adjusted_mode); |
318 | } | 318 | } |
319 | |||
320 | intel_dp_init_link_config(intel_dp); | ||
321 | |||
322 | } else if (type == INTEL_OUTPUT_HDMI) { | 319 | } else if (type == INTEL_OUTPUT_HDMI) { |
323 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); | 320 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); |
324 | 321 | ||
@@ -1222,7 +1219,7 @@ void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder) | |||
1222 | 1219 | ||
1223 | val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST | | 1220 | val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST | |
1224 | DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE; | 1221 | DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE; |
1225 | if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) | 1222 | if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) |
1226 | val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE; | 1223 | val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE; |
1227 | I915_WRITE(DP_TP_CTL(port), val); | 1224 | I915_WRITE(DP_TP_CTL(port), val); |
1228 | POSTING_READ(DP_TP_CTL(port)); | 1225 | POSTING_READ(DP_TP_CTL(port)); |
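
The intel_ddi.c hunk above (and the intel_dp.c hunks below) replace the open-coded check on intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN with a drm_dp_enhanced_frame_cap(intel_dp->dpcd) helper. The helper itself is not shown in this diff, but the intel_dp_init_link_config() code it makes redundant suggests it evaluates roughly the following condition:

static inline bool drm_dp_enhanced_frame_cap(const u8 dpcd[])
{
	/* DPCD 1.1+ sinks advertise enhanced framing via MAX_LANE_COUNT */
	return dpcd[DP_DPCD_REV] >= 0x11 &&
	       (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP);
}
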
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 0f77b8ce64d1..4f52ec75b39f 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -884,21 +884,6 @@ found: | |||
884 | return true; | 884 | return true; |
885 | } | 885 | } |
886 | 886 | ||
887 | void intel_dp_init_link_config(struct intel_dp *intel_dp) | ||
888 | { | ||
889 | memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); | ||
890 | intel_dp->link_configuration[0] = intel_dp->link_bw; | ||
891 | intel_dp->link_configuration[1] = intel_dp->lane_count; | ||
892 | intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B; | ||
893 | /* | ||
894 | * Check for DPCD version > 1.1 and enhanced framing support | ||
895 | */ | ||
896 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && | ||
897 | (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) { | ||
898 | intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; | ||
899 | } | ||
900 | } | ||
901 | |||
902 | static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp) | 887 | static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp) |
903 | { | 888 | { |
904 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); | 889 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
@@ -971,8 +956,6 @@ static void intel_dp_mode_set(struct intel_encoder *encoder) | |||
971 | intel_write_eld(&encoder->base, adjusted_mode); | 956 | intel_write_eld(&encoder->base, adjusted_mode); |
972 | } | 957 | } |
973 | 958 | ||
974 | intel_dp_init_link_config(intel_dp); | ||
975 | |||
976 | /* Split out the IBX/CPU vs CPT settings */ | 959 | /* Split out the IBX/CPU vs CPT settings */ |
977 | 960 | ||
978 | if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) { | 961 | if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) { |
@@ -982,7 +965,7 @@ static void intel_dp_mode_set(struct intel_encoder *encoder) | |||
982 | intel_dp->DP |= DP_SYNC_VS_HIGH; | 965 | intel_dp->DP |= DP_SYNC_VS_HIGH; |
983 | intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; | 966 | intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; |
984 | 967 | ||
985 | if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) | 968 | if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) |
986 | intel_dp->DP |= DP_ENHANCED_FRAMING; | 969 | intel_dp->DP |= DP_ENHANCED_FRAMING; |
987 | 970 | ||
988 | intel_dp->DP |= crtc->pipe << 29; | 971 | intel_dp->DP |= crtc->pipe << 29; |
@@ -996,7 +979,7 @@ static void intel_dp_mode_set(struct intel_encoder *encoder) | |||
996 | intel_dp->DP |= DP_SYNC_VS_HIGH; | 979 | intel_dp->DP |= DP_SYNC_VS_HIGH; |
997 | intel_dp->DP |= DP_LINK_TRAIN_OFF; | 980 | intel_dp->DP |= DP_LINK_TRAIN_OFF; |
998 | 981 | ||
999 | if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) | 982 | if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) |
1000 | intel_dp->DP |= DP_ENHANCED_FRAMING; | 983 | intel_dp->DP |= DP_ENHANCED_FRAMING; |
1001 | 984 | ||
1002 | if (crtc->pipe == 1) | 985 | if (crtc->pipe == 1) |
@@ -2474,14 +2457,21 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
2474 | uint8_t voltage; | 2457 | uint8_t voltage; |
2475 | int voltage_tries, loop_tries; | 2458 | int voltage_tries, loop_tries; |
2476 | uint32_t DP = intel_dp->DP; | 2459 | uint32_t DP = intel_dp->DP; |
2460 | uint8_t link_config[2]; | ||
2477 | 2461 | ||
2478 | if (HAS_DDI(dev)) | 2462 | if (HAS_DDI(dev)) |
2479 | intel_ddi_prepare_link_retrain(encoder); | 2463 | intel_ddi_prepare_link_retrain(encoder); |
2480 | 2464 | ||
2481 | /* Write the link configuration data */ | 2465 | /* Write the link configuration data */ |
2482 | intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, | 2466 | link_config[0] = intel_dp->link_bw; |
2483 | intel_dp->link_configuration, | 2467 | link_config[1] = intel_dp->lane_count; |
2484 | DP_LINK_CONFIGURATION_SIZE); | 2468 | if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) |
2469 | link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; | ||
2470 | intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, link_config, 2); | ||
2471 | |||
2472 | link_config[0] = 0; | ||
2473 | link_config[1] = DP_SET_ANSI_8B10B; | ||
2474 | intel_dp_aux_native_write(intel_dp, DP_DOWNSPREAD_CTRL, link_config, 2); | ||
2485 | 2475 | ||
2486 | DP |= DP_PORT_EN; | 2476 | DP |= DP_PORT_EN; |
2487 | 2477 | ||
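With the cached 9-byte link_configuration gone, the link parameters are written as two 2-byte AUX transfers to consecutive DPCD registers. Assuming the standard drm_dp_helper.h addresses, the registers touched are:

	#define DP_LINK_BW_SET                  0x100	/* link_config[0]: link rate */
	#define DP_LANE_COUNT_SET               0x101	/* link_config[1]: lanes (+ enhanced-framing bit) */
	#define DP_DOWNSPREAD_CTRL              0x107	/* link_config[0] in the second write */
	#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108	/* link_config[1]: DP_SET_ANSI_8B10B */

so the second write lands DP_SET_ANSI_8B10B in the channel-coding register that immediately follows DP_DOWNSPREAD_CTRL.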
@@ -2862,7 +2852,6 @@ static enum drm_connector_status | |||
2862 | intel_dp_detect_dpcd(struct intel_dp *intel_dp) | 2852 | intel_dp_detect_dpcd(struct intel_dp *intel_dp) |
2863 | { | 2853 | { |
2864 | uint8_t *dpcd = intel_dp->dpcd; | 2854 | uint8_t *dpcd = intel_dp->dpcd; |
2865 | bool hpd; | ||
2866 | uint8_t type; | 2855 | uint8_t type; |
2867 | 2856 | ||
2868 | if (!intel_dp_get_dpcd(intel_dp)) | 2857 | if (!intel_dp_get_dpcd(intel_dp)) |
@@ -2873,8 +2862,8 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp) | |||
2873 | return connector_status_connected; | 2862 | return connector_status_connected; |
2874 | 2863 | ||
2875 | /* If we're HPD-aware, SINK_COUNT changes dynamically */ | 2864 | /* If we're HPD-aware, SINK_COUNT changes dynamically */ |
2876 | hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD); | 2865 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && |
2877 | if (hpd) { | 2866 | intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) { |
2878 | uint8_t reg; | 2867 | uint8_t reg; |
2879 | if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT, | 2868 | if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT, |
2880 | ®, 1)) | 2869 | ®, 1)) |
@@ -2888,9 +2877,18 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp) | |||
2888 | return connector_status_connected; | 2877 | return connector_status_connected; |
2889 | 2878 | ||
2890 | /* Well we tried, say unknown for unreliable port types */ | 2879 | /* Well we tried, say unknown for unreliable port types */ |
2891 | type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; | 2880 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) { |
2892 | if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID) | 2881 | type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; |
2893 | return connector_status_unknown; | 2882 | if (type == DP_DS_PORT_TYPE_VGA || |
2883 | type == DP_DS_PORT_TYPE_NON_EDID) | ||
2884 | return connector_status_unknown; | ||
2885 | } else { | ||
2886 | type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & | ||
2887 | DP_DWN_STRM_PORT_TYPE_MASK; | ||
2888 | if (type == DP_DWN_STRM_PORT_TYPE_ANALOG || | ||
2889 | type == DP_DWN_STRM_PORT_TYPE_OTHER) | ||
2890 | return connector_status_unknown; | ||
2891 | } | ||
2894 | 2892 | ||
2895 | /* Anything else is out of spec, warn and ignore */ | 2893 | /* Anything else is out of spec, warn and ignore */ |
2896 | DRM_DEBUG_KMS("Broken DP branch device, ignoring\n"); | 2894 | DRM_DEBUG_KMS("Broken DP branch device, ignoring\n"); |
@@ -2964,19 +2962,11 @@ intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) | |||
2964 | 2962 | ||
2965 | /* use cached edid if we have one */ | 2963 | /* use cached edid if we have one */ |
2966 | if (intel_connector->edid) { | 2964 | if (intel_connector->edid) { |
2967 | struct edid *edid; | ||
2968 | int size; | ||
2969 | |||
2970 | /* invalid edid */ | 2965 | /* invalid edid */ |
2971 | if (IS_ERR(intel_connector->edid)) | 2966 | if (IS_ERR(intel_connector->edid)) |
2972 | return NULL; | 2967 | return NULL; |
2973 | 2968 | ||
2974 | size = (intel_connector->edid->extensions + 1) * EDID_LENGTH; | 2969 | return drm_edid_duplicate(intel_connector->edid); |
2975 | edid = kmemdup(intel_connector->edid, size, GFP_KERNEL); | ||
2976 | if (!edid) | ||
2977 | return NULL; | ||
2978 | |||
2979 | return edid; | ||
2980 | } | 2970 | } |
2981 | 2971 | ||
2982 | return drm_get_edid(connector, adapter); | 2972 | return drm_get_edid(connector, adapter); |
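drm_edid_duplicate() takes over the open-coded kmemdup of the cached EDID; from the code it replaces here, the helper amounts to something like:

	/* sketch, assuming the drm core helper mirrors the removed code */
	struct edid *drm_edid_duplicate(const struct edid *edid)
	{
		return kmemdup(edid, (edid->extensions + 1) * EDID_LENGTH, GFP_KERNEL);
	}

The caller still gets NULL on allocation failure, so the surrounding error handling is unchanged.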
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 922c5d752cc3..eaf0003ddfd9 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -436,13 +436,11 @@ struct intel_hdmi { | |||
436 | }; | 436 | }; |
437 | 437 | ||
438 | #define DP_MAX_DOWNSTREAM_PORTS 0x10 | 438 | #define DP_MAX_DOWNSTREAM_PORTS 0x10 |
439 | #define DP_LINK_CONFIGURATION_SIZE 9 | ||
440 | 439 | ||
441 | struct intel_dp { | 440 | struct intel_dp { |
442 | uint32_t output_reg; | 441 | uint32_t output_reg; |
443 | uint32_t aux_ch_ctl_reg; | 442 | uint32_t aux_ch_ctl_reg; |
444 | uint32_t DP; | 443 | uint32_t DP; |
445 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; | ||
446 | bool has_audio; | 444 | bool has_audio; |
447 | enum hdmi_force_audio force_audio; | 445 | enum hdmi_force_audio force_audio; |
448 | uint32_t color_range; | 446 | uint32_t color_range; |
@@ -685,7 +683,6 @@ void i915_disable_vga_mem(struct drm_device *dev); | |||
685 | void intel_dp_init(struct drm_device *dev, int output_reg, enum port port); | 683 | void intel_dp_init(struct drm_device *dev, int output_reg, enum port port); |
686 | bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port, | 684 | bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port, |
687 | struct intel_connector *intel_connector); | 685 | struct intel_connector *intel_connector); |
688 | void intel_dp_init_link_config(struct intel_dp *intel_dp); | ||
689 | void intel_dp_start_link_train(struct intel_dp *intel_dp); | 686 | void intel_dp_start_link_train(struct intel_dp *intel_dp); |
690 | void intel_dp_complete_link_train(struct intel_dp *intel_dp); | 687 | void intel_dp_complete_link_train(struct intel_dp *intel_dp); |
691 | void intel_dp_stop_link_train(struct intel_dp *intel_dp); | 688 | void intel_dp_stop_link_train(struct intel_dp *intel_dp); |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index b08a96cd9c94..008ec0bb017f 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -5423,7 +5423,7 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable) | |||
5423 | spin_lock_irqsave(&dev->vbl_lock, irqflags); | 5423 | spin_lock_irqsave(&dev->vbl_lock, irqflags); |
5424 | for_each_pipe(p) | 5424 | for_each_pipe(p) |
5425 | if (p != PIPE_A) | 5425 | if (p != PIPE_A) |
5426 | dev->last_vblank[p] = 0; | 5426 | dev->vblank[p].last = 0; |
5427 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); | 5427 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); |
5428 | } | 5428 | } |
5429 | } | 5429 | } |
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 92895f92a738..d61aec23a523 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -1044,7 +1044,7 @@ static void intel_tv_mode_set(struct intel_encoder *encoder) | |||
1044 | tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT; | 1044 | tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT; |
1045 | 1045 | ||
1046 | /* Enable two fixes for the chips that need them. */ | 1046 | /* Enable two fixes for the chips that need them. */ |
1047 | if (dev->pci_device < 0x2772) | 1047 | if (dev->pdev->device < 0x2772) |
1048 | tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX; | 1048 | tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX; |
1049 | 1049 | ||
1050 | I915_WRITE(TV_H_CTL_1, hctl1); | 1050 | I915_WRITE(TV_H_CTL_1, hctl1); |
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c index cc3166dd445a..087db33f6cff 100644 --- a/drivers/gpu/drm/mga/mga_dma.c +++ b/drivers/gpu/drm/mga/mga_dma.c | |||
@@ -406,11 +406,6 @@ int mga_driver_load(struct drm_device *dev, unsigned long flags) | |||
406 | dev_priv->mmio_base = pci_resource_start(dev->pdev, 1); | 406 | dev_priv->mmio_base = pci_resource_start(dev->pdev, 1); |
407 | dev_priv->mmio_size = pci_resource_len(dev->pdev, 1); | 407 | dev_priv->mmio_size = pci_resource_len(dev->pdev, 1); |
408 | 408 | ||
409 | dev->counters += 3; | ||
410 | dev->types[6] = _DRM_STAT_IRQ; | ||
411 | dev->types[7] = _DRM_STAT_PRIMARY; | ||
412 | dev->types[8] = _DRM_STAT_SECONDARY; | ||
413 | |||
414 | ret = drm_vblank_init(dev, 1); | 409 | ret = drm_vblank_init(dev, 1); |
415 | 410 | ||
416 | if (ret) { | 411 | if (ret) { |
diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c index 598c281def0a..2b0ceb8dc11b 100644 --- a/drivers/gpu/drm/mga/mga_irq.c +++ b/drivers/gpu/drm/mga/mga_irq.c | |||
@@ -169,5 +169,5 @@ void mga_driver_irq_uninstall(struct drm_device *dev) | |||
169 | /* Disable *all* interrupts */ | 169 | /* Disable *all* interrupts */ |
170 | MGA_WRITE(MGA_IEN, 0); | 170 | MGA_WRITE(MGA_IEN, 0); |
171 | 171 | ||
172 | dev->irq_enabled = 0; | 172 | dev->irq_enabled = false; |
173 | } | 173 | } |
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index fcce7b2f8011..f15ea3c4a90a 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c | |||
@@ -99,7 +99,6 @@ static struct drm_driver driver = { | |||
99 | .minor = DRIVER_MINOR, | 99 | .minor = DRIVER_MINOR, |
100 | .patchlevel = DRIVER_PATCHLEVEL, | 100 | .patchlevel = DRIVER_PATCHLEVEL, |
101 | 101 | ||
102 | .gem_init_object = mgag200_gem_init_object, | ||
103 | .gem_free_object = mgag200_gem_free_object, | 102 | .gem_free_object = mgag200_gem_free_object, |
104 | .dumb_create = mgag200_dumb_create, | 103 | .dumb_create = mgag200_dumb_create, |
105 | .dumb_map_offset = mgag200_dumb_mmap_offset, | 104 | .dumb_map_offset = mgag200_dumb_mmap_offset, |
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h index baaae19332e2..cf11ee68a6d9 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.h +++ b/drivers/gpu/drm/mgag200/mgag200_drv.h | |||
@@ -260,7 +260,6 @@ int mgag200_driver_unload(struct drm_device *dev); | |||
260 | int mgag200_gem_create(struct drm_device *dev, | 260 | int mgag200_gem_create(struct drm_device *dev, |
261 | u32 size, bool iskernel, | 261 | u32 size, bool iskernel, |
262 | struct drm_gem_object **obj); | 262 | struct drm_gem_object **obj); |
263 | int mgag200_gem_init_object(struct drm_gem_object *obj); | ||
264 | int mgag200_dumb_create(struct drm_file *file, | 263 | int mgag200_dumb_create(struct drm_file *file, |
265 | struct drm_device *dev, | 264 | struct drm_device *dev, |
266 | struct drm_mode_create_dumb *args); | 265 | struct drm_mode_create_dumb *args); |
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c index 0f8b861b10b3..b1120cb1db6d 100644 --- a/drivers/gpu/drm/mgag200/mgag200_main.c +++ b/drivers/gpu/drm/mgag200/mgag200_main.c | |||
@@ -310,12 +310,6 @@ int mgag200_dumb_create(struct drm_file *file, | |||
310 | return 0; | 310 | return 0; |
311 | } | 311 | } |
312 | 312 | ||
313 | int mgag200_gem_init_object(struct drm_gem_object *obj) | ||
314 | { | ||
315 | BUG(); | ||
316 | return 0; | ||
317 | } | ||
318 | |||
319 | void mgag200_bo_unref(struct mgag200_bo **bo) | 313 | void mgag200_bo_unref(struct mgag200_bo **bo) |
320 | { | 314 | { |
321 | struct ttm_buffer_object *tbo; | 315 | struct ttm_buffer_object *tbo; |
diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c index 2e70462883e8..2a15b98b4d2b 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/arb.c +++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c | |||
@@ -210,8 +210,8 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp, | |||
210 | sim_data.nvclk_khz = NVClk; | 210 | sim_data.nvclk_khz = NVClk; |
211 | sim_data.bpp = bpp; | 211 | sim_data.bpp = bpp; |
212 | sim_data.two_heads = nv_two_heads(dev); | 212 | sim_data.two_heads = nv_two_heads(dev); |
213 | if ((dev->pci_device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ || | 213 | if ((dev->pdev->device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ || |
214 | (dev->pci_device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) { | 214 | (dev->pdev->device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) { |
215 | uint32_t type; | 215 | uint32_t type; |
216 | 216 | ||
217 | pci_read_config_dword(pci_get_bus_and_slot(0, 1), 0x7c, &type); | 217 | pci_read_config_dword(pci_get_bus_and_slot(0, 1), 0x7c, &type); |
@@ -256,8 +256,8 @@ nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm | |||
256 | 256 | ||
257 | if (nv_device(drm->device)->card_type < NV_20) | 257 | if (nv_device(drm->device)->card_type < NV_20) |
258 | nv04_update_arb(dev, vclk, bpp, burst, lwm); | 258 | nv04_update_arb(dev, vclk, bpp, burst, lwm); |
259 | else if ((dev->pci_device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ || | 259 | else if ((dev->pdev->device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ || |
260 | (dev->pci_device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) { | 260 | (dev->pdev->device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) { |
261 | *burst = 128; | 261 | *burst = 128; |
262 | *lwm = 0x0480; | 262 | *lwm = 0x0480; |
263 | } else | 263 | } else |
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c index 93dd23ff0093..59d1c040b84f 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c +++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c | |||
@@ -490,8 +490,8 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode) | |||
490 | /* BIOS scripts usually take care of the backlight, thanks | 490 | /* BIOS scripts usually take care of the backlight, thanks |
491 | * Apple for your consistency. | 491 | * Apple for your consistency. |
492 | */ | 492 | */ |
493 | if (dev->pci_device == 0x0174 || dev->pci_device == 0x0179 || | 493 | if (dev->pdev->device == 0x0174 || dev->pdev->device == 0x0179 || |
494 | dev->pci_device == 0x0189 || dev->pci_device == 0x0329) { | 494 | dev->pdev->device == 0x0189 || dev->pdev->device == 0x0329) { |
495 | if (mode == DRM_MODE_DPMS_ON) { | 495 | if (mode == DRM_MODE_DPMS_ON) { |
496 | nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31); | 496 | nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31); |
497 | nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 1); | 497 | nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 1); |
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h index 9928187f0a7d..2cf65e0b517e 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.h +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h | |||
@@ -127,7 +127,7 @@ static inline bool | |||
127 | nv_two_heads(struct drm_device *dev) | 127 | nv_two_heads(struct drm_device *dev) |
128 | { | 128 | { |
129 | struct nouveau_drm *drm = nouveau_drm(dev); | 129 | struct nouveau_drm *drm = nouveau_drm(dev); |
130 | const int impl = dev->pci_device & 0x0ff0; | 130 | const int impl = dev->pdev->device & 0x0ff0; |
131 | 131 | ||
132 | if (nv_device(drm->device)->card_type >= NV_10 && impl != 0x0100 && | 132 | if (nv_device(drm->device)->card_type >= NV_10 && impl != 0x0100 && |
133 | impl != 0x0150 && impl != 0x01a0 && impl != 0x0200) | 133 | impl != 0x0150 && impl != 0x01a0 && impl != 0x0200) |
@@ -139,14 +139,14 @@ nv_two_heads(struct drm_device *dev) | |||
139 | static inline bool | 139 | static inline bool |
140 | nv_gf4_disp_arch(struct drm_device *dev) | 140 | nv_gf4_disp_arch(struct drm_device *dev) |
141 | { | 141 | { |
142 | return nv_two_heads(dev) && (dev->pci_device & 0x0ff0) != 0x0110; | 142 | return nv_two_heads(dev) && (dev->pdev->device & 0x0ff0) != 0x0110; |
143 | } | 143 | } |
144 | 144 | ||
145 | static inline bool | 145 | static inline bool |
146 | nv_two_reg_pll(struct drm_device *dev) | 146 | nv_two_reg_pll(struct drm_device *dev) |
147 | { | 147 | { |
148 | struct nouveau_drm *drm = nouveau_drm(dev); | 148 | struct nouveau_drm *drm = nouveau_drm(dev); |
149 | const int impl = dev->pci_device & 0x0ff0; | 149 | const int impl = dev->pdev->device & 0x0ff0; |
150 | 150 | ||
151 | if (impl == 0x0310 || impl == 0x0340 || nv_device(drm->device)->card_type >= NV_40) | 151 | if (impl == 0x0310 || impl == 0x0340 || nv_device(drm->device)->card_type >= NV_40) |
152 | return true; | 152 | return true; |
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c index 973056b86207..f8dee834527f 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/hw.c +++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c | |||
@@ -220,7 +220,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype) | |||
220 | int ret; | 220 | int ret; |
221 | 221 | ||
222 | if (plltype == PLL_MEMORY && | 222 | if (plltype == PLL_MEMORY && |
223 | (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) { | 223 | (dev->pdev->device & 0x0ff0) == CHIPSET_NFORCE) { |
224 | uint32_t mpllP; | 224 | uint32_t mpllP; |
225 | 225 | ||
226 | pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP); | 226 | pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP); |
@@ -230,7 +230,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype) | |||
230 | return 400000 / mpllP; | 230 | return 400000 / mpllP; |
231 | } else | 231 | } else |
232 | if (plltype == PLL_MEMORY && | 232 | if (plltype == PLL_MEMORY && |
233 | (dev->pci_device & 0xff0) == CHIPSET_NFORCE2) { | 233 | (dev->pdev->device & 0xff0) == CHIPSET_NFORCE2) { |
234 | uint32_t clock; | 234 | uint32_t clock; |
235 | 235 | ||
236 | pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock); | 236 | pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index 8f467e7bfd19..72055a35f845 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c | |||
@@ -130,7 +130,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16, | |||
130 | if (chan->ntfy) { | 130 | if (chan->ntfy) { |
131 | nouveau_bo_vma_del(chan->ntfy, &chan->ntfy_vma); | 131 | nouveau_bo_vma_del(chan->ntfy, &chan->ntfy_vma); |
132 | nouveau_bo_unpin(chan->ntfy); | 132 | nouveau_bo_unpin(chan->ntfy); |
133 | drm_gem_object_unreference_unlocked(chan->ntfy->gem); | 133 | drm_gem_object_unreference_unlocked(&chan->ntfy->gem); |
134 | } | 134 | } |
135 | 135 | ||
136 | if (chan->heap.block_size) | 136 | if (chan->heap.block_size) |
@@ -178,10 +178,10 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS) | |||
178 | getparam->value = device->chipset; | 178 | getparam->value = device->chipset; |
179 | break; | 179 | break; |
180 | case NOUVEAU_GETPARAM_PCI_VENDOR: | 180 | case NOUVEAU_GETPARAM_PCI_VENDOR: |
181 | getparam->value = dev->pci_vendor; | 181 | getparam->value = dev->pdev->vendor; |
182 | break; | 182 | break; |
183 | case NOUVEAU_GETPARAM_PCI_DEVICE: | 183 | case NOUVEAU_GETPARAM_PCI_DEVICE: |
184 | getparam->value = dev->pci_device; | 184 | getparam->value = dev->pdev->device; |
185 | break; | 185 | break; |
186 | case NOUVEAU_GETPARAM_BUS_TYPE: | 186 | case NOUVEAU_GETPARAM_BUS_TYPE: |
187 | if (drm_pci_device_is_agp(dev)) | 187 | if (drm_pci_device_is_agp(dev)) |
@@ -320,7 +320,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) | |||
320 | goto done; | 320 | goto done; |
321 | } | 321 | } |
322 | 322 | ||
323 | ret = drm_gem_handle_create(file_priv, chan->ntfy->gem, | 323 | ret = drm_gem_handle_create(file_priv, &chan->ntfy->gem, |
324 | &init->notifier_handle); | 324 | &init->notifier_handle); |
325 | if (ret) | 325 | if (ret) |
326 | goto done; | 326 | goto done; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 3e7287675ecf..4c3feaaa1037 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c | |||
@@ -127,8 +127,8 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_outp | |||
127 | #ifdef __powerpc__ | 127 | #ifdef __powerpc__ |
128 | /* Powerbook specific quirks */ | 128 | /* Powerbook specific quirks */ |
129 | if (script == LVDS_RESET && | 129 | if (script == LVDS_RESET && |
130 | (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 || | 130 | (dev->pdev->device == 0x0179 || dev->pdev->device == 0x0189 || |
131 | dev->pci_device == 0x0329)) | 131 | dev->pdev->device == 0x0329)) |
132 | nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72); | 132 | nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72); |
133 | #endif | 133 | #endif |
134 | 134 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 755c38d06271..4172854d4365 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c | |||
@@ -146,7 +146,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo) | |||
146 | struct drm_device *dev = drm->dev; | 146 | struct drm_device *dev = drm->dev; |
147 | struct nouveau_bo *nvbo = nouveau_bo(bo); | 147 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
148 | 148 | ||
149 | if (unlikely(nvbo->gem)) | 149 | if (unlikely(nvbo->gem.filp)) |
150 | DRM_ERROR("bo %p still attached to GEM object\n", bo); | 150 | DRM_ERROR("bo %p still attached to GEM object\n", bo); |
151 | WARN_ON(nvbo->pin_refcnt > 0); | 151 | WARN_ON(nvbo->pin_refcnt > 0); |
152 | nv10_bo_put_tile_region(dev, nvbo->tile, NULL); | 152 | nv10_bo_put_tile_region(dev, nvbo->tile, NULL); |
@@ -1267,7 +1267,7 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) | |||
1267 | { | 1267 | { |
1268 | struct nouveau_bo *nvbo = nouveau_bo(bo); | 1268 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
1269 | 1269 | ||
1270 | return drm_vma_node_verify_access(&nvbo->gem->vma_node, filp); | 1270 | return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp); |
1271 | } | 1271 | } |
1272 | 1272 | ||
1273 | static int | 1273 | static int |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h index 653dbbbd4fa1..ff17c1f432fc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.h +++ b/drivers/gpu/drm/nouveau/nouveau_bo.h | |||
@@ -27,7 +27,10 @@ struct nouveau_bo { | |||
27 | u32 tile_flags; | 27 | u32 tile_flags; |
28 | struct nouveau_drm_tile *tile; | 28 | struct nouveau_drm_tile *tile; |
29 | 29 | ||
30 | struct drm_gem_object *gem; | 30 | /* Only valid if allocated via nouveau_gem_new() and iff you hold a |
31 | * gem reference to it! For debugging, use gem.filp != NULL to test | ||
32 | * whether it is valid. */ | ||
33 | struct drm_gem_object gem; | ||
31 | 34 | ||
32 | /* protect by the ttm reservation lock */ | 35 | /* protect by the ttm reservation lock */ |
33 | int pin_refcnt; | 36 | int pin_refcnt; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index c5b36f9e9a10..2136d0038252 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c | |||
@@ -215,8 +215,8 @@ nouveau_connector_set_encoder(struct drm_connector *connector, | |||
215 | connector->doublescan_allowed = true; | 215 | connector->doublescan_allowed = true; |
216 | if (nv_device(drm->device)->card_type == NV_20 || | 216 | if (nv_device(drm->device)->card_type == NV_20 || |
217 | (nv_device(drm->device)->card_type == NV_10 && | 217 | (nv_device(drm->device)->card_type == NV_10 && |
218 | (dev->pci_device & 0x0ff0) != 0x0100 && | 218 | (dev->pdev->device & 0x0ff0) != 0x0100 && |
219 | (dev->pci_device & 0x0ff0) != 0x0150)) | 219 | (dev->pdev->device & 0x0ff0) != 0x0150)) |
220 | /* HW is broken */ | 220 | /* HW is broken */ |
221 | connector->interlace_allowed = false; | 221 | connector->interlace_allowed = false; |
222 | else | 222 | else |
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 7848590f5568..bdd5cf71a24c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c | |||
@@ -50,7 +50,7 @@ nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb) | |||
50 | struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); | 50 | struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); |
51 | 51 | ||
52 | if (fb->nvbo) | 52 | if (fb->nvbo) |
53 | drm_gem_object_unreference_unlocked(fb->nvbo->gem); | 53 | drm_gem_object_unreference_unlocked(&fb->nvbo->gem); |
54 | 54 | ||
55 | drm_framebuffer_cleanup(drm_fb); | 55 | drm_framebuffer_cleanup(drm_fb); |
56 | kfree(fb); | 56 | kfree(fb); |
@@ -63,7 +63,7 @@ nouveau_user_framebuffer_create_handle(struct drm_framebuffer *drm_fb, | |||
63 | { | 63 | { |
64 | struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); | 64 | struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); |
65 | 65 | ||
66 | return drm_gem_handle_create(file_priv, fb->nvbo->gem, handle); | 66 | return drm_gem_handle_create(file_priv, &fb->nvbo->gem, handle); |
67 | } | 67 | } |
68 | 68 | ||
69 | static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = { | 69 | static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = { |
@@ -674,8 +674,8 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev, | |||
674 | if (ret) | 674 | if (ret) |
675 | return ret; | 675 | return ret; |
676 | 676 | ||
677 | ret = drm_gem_handle_create(file_priv, bo->gem, &args->handle); | 677 | ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle); |
678 | drm_gem_object_unreference_unlocked(bo->gem); | 678 | drm_gem_object_unreference_unlocked(&bo->gem); |
679 | return ret; | 679 | return ret; |
680 | } | 680 | } |
681 | 681 | ||
@@ -688,7 +688,7 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv, | |||
688 | 688 | ||
689 | gem = drm_gem_object_lookup(dev, file_priv, handle); | 689 | gem = drm_gem_object_lookup(dev, file_priv, handle); |
690 | if (gem) { | 690 | if (gem) { |
691 | struct nouveau_bo *bo = gem->driver_private; | 691 | struct nouveau_bo *bo = nouveau_gem_object(gem); |
692 | *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node); | 692 | *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node); |
693 | drm_gem_object_unreference_unlocked(gem); | 693 | drm_gem_object_unreference_unlocked(gem); |
694 | return 0; | 694 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index e893c5362402..428d818be775 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c | |||
@@ -834,7 +834,6 @@ driver = { | |||
834 | .gem_prime_vmap = nouveau_gem_prime_vmap, | 834 | .gem_prime_vmap = nouveau_gem_prime_vmap, |
835 | .gem_prime_vunmap = nouveau_gem_prime_vunmap, | 835 | .gem_prime_vunmap = nouveau_gem_prime_vunmap, |
836 | 836 | ||
837 | .gem_init_object = nouveau_gem_object_new, | ||
838 | .gem_free_object = nouveau_gem_object_del, | 837 | .gem_free_object = nouveau_gem_object_del, |
839 | .gem_open_object = nouveau_gem_object_open, | 838 | .gem_open_object = nouveau_gem_object_open, |
840 | .gem_close_object = nouveau_gem_object_close, | 839 | .gem_close_object = nouveau_gem_object_close, |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index a86ecf65c164..c80b519b513a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
@@ -420,7 +420,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon) | |||
420 | nouveau_bo_unmap(nouveau_fb->nvbo); | 420 | nouveau_bo_unmap(nouveau_fb->nvbo); |
421 | nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma); | 421 | nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma); |
422 | nouveau_bo_unpin(nouveau_fb->nvbo); | 422 | nouveau_bo_unpin(nouveau_fb->nvbo); |
423 | drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem); | 423 | drm_gem_object_unreference_unlocked(&nouveau_fb->nvbo->gem); |
424 | nouveau_fb->nvbo = NULL; | 424 | nouveau_fb->nvbo = NULL; |
425 | } | 425 | } |
426 | drm_fb_helper_fini(&fbcon->helper); | 426 | drm_fb_helper_fini(&fbcon->helper); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index f32b71238c03..418a6177a653 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c | |||
@@ -34,29 +34,20 @@ | |||
34 | #include "nouveau_ttm.h" | 34 | #include "nouveau_ttm.h" |
35 | #include "nouveau_gem.h" | 35 | #include "nouveau_gem.h" |
36 | 36 | ||
37 | int | ||
38 | nouveau_gem_object_new(struct drm_gem_object *gem) | ||
39 | { | ||
40 | return 0; | ||
41 | } | ||
42 | |||
43 | void | 37 | void |
44 | nouveau_gem_object_del(struct drm_gem_object *gem) | 38 | nouveau_gem_object_del(struct drm_gem_object *gem) |
45 | { | 39 | { |
46 | struct nouveau_bo *nvbo = gem->driver_private; | 40 | struct nouveau_bo *nvbo = nouveau_gem_object(gem); |
47 | struct ttm_buffer_object *bo = &nvbo->bo; | 41 | struct ttm_buffer_object *bo = &nvbo->bo; |
48 | 42 | ||
49 | if (!nvbo) | ||
50 | return; | ||
51 | nvbo->gem = NULL; | ||
52 | |||
53 | if (gem->import_attach) | 43 | if (gem->import_attach) |
54 | drm_prime_gem_destroy(gem, nvbo->bo.sg); | 44 | drm_prime_gem_destroy(gem, nvbo->bo.sg); |
55 | 45 | ||
56 | ttm_bo_unref(&bo); | ||
57 | |||
58 | drm_gem_object_release(gem); | 46 | drm_gem_object_release(gem); |
59 | kfree(gem); | 47 | |
48 | /* reset filp so nouveau_bo_del_ttm() can test for it */ | ||
49 | gem->filp = NULL; | ||
50 | ttm_bo_unref(&bo); | ||
60 | } | 51 | } |
61 | 52 | ||
62 | int | 53 | int |
@@ -186,14 +177,15 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain, | |||
186 | if (nv_device(drm->device)->card_type >= NV_50) | 177 | if (nv_device(drm->device)->card_type >= NV_50) |
187 | nvbo->valid_domains &= domain; | 178 | nvbo->valid_domains &= domain; |
188 | 179 | ||
189 | nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size); | 180 | /* Initialize the embedded gem-object. We return a single gem-reference |
190 | if (!nvbo->gem) { | 181 | * to the caller, instead of a normal nouveau_bo ttm reference. */ |
182 | ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size); | ||
183 | if (ret) { | ||
191 | nouveau_bo_ref(NULL, pnvbo); | 184 | nouveau_bo_ref(NULL, pnvbo); |
192 | return -ENOMEM; | 185 | return -ENOMEM; |
193 | } | 186 | } |
194 | 187 | ||
195 | nvbo->bo.persistent_swap_storage = nvbo->gem->filp; | 188 | nvbo->bo.persistent_swap_storage = nvbo->gem.filp; |
196 | nvbo->gem->driver_private = nvbo; | ||
197 | return 0; | 189 | return 0; |
198 | } | 190 | } |
199 | 191 | ||
@@ -250,15 +242,15 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data, | |||
250 | if (ret) | 242 | if (ret) |
251 | return ret; | 243 | return ret; |
252 | 244 | ||
253 | ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); | 245 | ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle); |
254 | if (ret == 0) { | 246 | if (ret == 0) { |
255 | ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info); | 247 | ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info); |
256 | if (ret) | 248 | if (ret) |
257 | drm_gem_handle_delete(file_priv, req->info.handle); | 249 | drm_gem_handle_delete(file_priv, req->info.handle); |
258 | } | 250 | } |
259 | 251 | ||
260 | /* drop reference from allocate - handle holds it now */ | 252 | /* drop reference from allocate - handle holds it now */ |
261 | drm_gem_object_unreference_unlocked(nvbo->gem); | 253 | drm_gem_object_unreference_unlocked(&nvbo->gem); |
262 | return ret; | 254 | return ret; |
263 | } | 255 | } |
264 | 256 | ||
@@ -266,7 +258,7 @@ static int | |||
266 | nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains, | 258 | nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains, |
267 | uint32_t write_domains, uint32_t valid_domains) | 259 | uint32_t write_domains, uint32_t valid_domains) |
268 | { | 260 | { |
269 | struct nouveau_bo *nvbo = gem->driver_private; | 261 | struct nouveau_bo *nvbo = nouveau_gem_object(gem); |
270 | struct ttm_buffer_object *bo = &nvbo->bo; | 262 | struct ttm_buffer_object *bo = &nvbo->bo; |
271 | uint32_t domains = valid_domains & nvbo->valid_domains & | 263 | uint32_t domains = valid_domains & nvbo->valid_domains & |
272 | (write_domains ? write_domains : read_domains); | 264 | (write_domains ? write_domains : read_domains); |
@@ -327,7 +319,7 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence, | |||
327 | list_del(&nvbo->entry); | 319 | list_del(&nvbo->entry); |
328 | nvbo->reserved_by = NULL; | 320 | nvbo->reserved_by = NULL; |
329 | ttm_bo_unreserve_ticket(&nvbo->bo, ticket); | 321 | ttm_bo_unreserve_ticket(&nvbo->bo, ticket); |
330 | drm_gem_object_unreference_unlocked(nvbo->gem); | 322 | drm_gem_object_unreference_unlocked(&nvbo->gem); |
331 | } | 323 | } |
332 | } | 324 | } |
333 | 325 | ||
@@ -376,7 +368,7 @@ retry: | |||
376 | validate_fini(op, NULL); | 368 | validate_fini(op, NULL); |
377 | return -ENOENT; | 369 | return -ENOENT; |
378 | } | 370 | } |
379 | nvbo = gem->driver_private; | 371 | nvbo = nouveau_gem_object(gem); |
380 | if (nvbo == res_bo) { | 372 | if (nvbo == res_bo) { |
381 | res_bo = NULL; | 373 | res_bo = NULL; |
382 | drm_gem_object_unreference_unlocked(gem); | 374 | drm_gem_object_unreference_unlocked(gem); |
@@ -478,7 +470,7 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli, | |||
478 | return ret; | 470 | return ret; |
479 | } | 471 | } |
480 | 472 | ||
481 | ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains, | 473 | ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains, |
482 | b->write_domains, | 474 | b->write_domains, |
483 | b->valid_domains); | 475 | b->valid_domains); |
484 | if (unlikely(ret)) { | 476 | if (unlikely(ret)) { |
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h index 502e4290aa8f..7caca057bc38 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.h +++ b/drivers/gpu/drm/nouveau/nouveau_gem.h | |||
@@ -12,14 +12,13 @@ | |||
12 | static inline struct nouveau_bo * | 12 | static inline struct nouveau_bo * |
13 | nouveau_gem_object(struct drm_gem_object *gem) | 13 | nouveau_gem_object(struct drm_gem_object *gem) |
14 | { | 14 | { |
15 | return gem ? gem->driver_private : NULL; | 15 | return gem ? container_of(gem, struct nouveau_bo, gem) : NULL; |
16 | } | 16 | } |
17 | 17 | ||
18 | /* nouveau_gem.c */ | 18 | /* nouveau_gem.c */ |
19 | extern int nouveau_gem_new(struct drm_device *, int size, int align, | 19 | extern int nouveau_gem_new(struct drm_device *, int size, int align, |
20 | uint32_t domain, uint32_t tile_mode, | 20 | uint32_t domain, uint32_t tile_mode, |
21 | uint32_t tile_flags, struct nouveau_bo **); | 21 | uint32_t tile_flags, struct nouveau_bo **); |
22 | extern int nouveau_gem_object_new(struct drm_gem_object *); | ||
23 | extern void nouveau_gem_object_del(struct drm_gem_object *); | 22 | extern void nouveau_gem_object_del(struct drm_gem_object *); |
24 | extern int nouveau_gem_object_open(struct drm_gem_object *, struct drm_file *); | 23 | extern int nouveau_gem_object_open(struct drm_gem_object *, struct drm_file *); |
25 | extern void nouveau_gem_object_close(struct drm_gem_object *, | 24 | extern void nouveau_gem_object_close(struct drm_gem_object *, |
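The nouveau changes above follow the embed-the-GEM-object pattern: the drm_gem_object lives inside the driver buffer object, drm_gem_object_init() replaces drm_gem_object_alloc(), and gem->driver_private lookups become container_of(). A generic sketch of the shape (demo_bo is a made-up name, not nouveau code):

	struct demo_bo {
		struct ttm_buffer_object bo;
		struct drm_gem_object gem;	/* embedded, freed together with the bo */
	};

	static inline struct demo_bo *demo_gem_object(struct drm_gem_object *gem)
	{
		/* valid because gem is always a member of demo_bo, never a separate allocation */
		return gem ? container_of(gem, struct demo_bo, gem) : NULL;
	}

The single reference returned to callers is the gem reference; dropping it releases the gem object and, via the driver's free callback, the underlying ttm bo.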
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c index e90468d5e5c0..51a2cb102b44 100644 --- a/drivers/gpu/drm/nouveau/nouveau_prime.c +++ b/drivers/gpu/drm/nouveau/nouveau_prime.c | |||
@@ -71,14 +71,16 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev, | |||
71 | return ERR_PTR(ret); | 71 | return ERR_PTR(ret); |
72 | 72 | ||
73 | nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART; | 73 | nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART; |
74 | nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size); | 74 | |
75 | if (!nvbo->gem) { | 75 | /* Initialize the embedded gem-object. We return a single gem-reference |
76 | * to the caller, instead of a normal nouveau_bo ttm reference. */ | ||
77 | ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size); | ||
78 | if (ret) { | ||
76 | nouveau_bo_ref(NULL, &nvbo); | 79 | nouveau_bo_ref(NULL, &nvbo); |
77 | return ERR_PTR(-ENOMEM); | 80 | return ERR_PTR(-ENOMEM); |
78 | } | 81 | } |
79 | 82 | ||
80 | nvbo->gem->driver_private = nvbo; | 83 | return &nvbo->gem; |
81 | return nvbo->gem; | ||
82 | } | 84 | } |
83 | 85 | ||
84 | int nouveau_gem_prime_pin(struct drm_gem_object *obj) | 86 | int nouveau_gem_prime_pin(struct drm_gem_object *obj) |
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 2603d909f49c..e7fa3cd96743 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c | |||
@@ -620,7 +620,6 @@ static struct drm_driver omap_drm_driver = { | |||
620 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, | 620 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, |
621 | .gem_prime_export = omap_gem_prime_export, | 621 | .gem_prime_export = omap_gem_prime_export, |
622 | .gem_prime_import = omap_gem_prime_import, | 622 | .gem_prime_import = omap_gem_prime_import, |
623 | .gem_init_object = omap_gem_init_object, | ||
624 | .gem_free_object = omap_gem_free_object, | 623 | .gem_free_object = omap_gem_free_object, |
625 | .gem_vm_ops = &omap_gem_vm_ops, | 624 | .gem_vm_ops = &omap_gem_vm_ops, |
626 | .dumb_create = omap_gem_dumb_create, | 625 | .dumb_create = omap_gem_dumb_create, |
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h index 30b95b736658..07847693cf49 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.h +++ b/drivers/gpu/drm/omapdrm/omap_drv.h | |||
@@ -220,7 +220,6 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev, | |||
220 | int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file, | 220 | int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file, |
221 | union omap_gem_size gsize, uint32_t flags, uint32_t *handle); | 221 | union omap_gem_size gsize, uint32_t flags, uint32_t *handle); |
222 | void omap_gem_free_object(struct drm_gem_object *obj); | 222 | void omap_gem_free_object(struct drm_gem_object *obj); |
223 | int omap_gem_init_object(struct drm_gem_object *obj); | ||
224 | void *omap_gem_vaddr(struct drm_gem_object *obj); | 223 | void *omap_gem_vaddr(struct drm_gem_object *obj); |
225 | int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, | 224 | int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, |
226 | uint32_t handle, uint64_t *offset); | 225 | uint32_t handle, uint64_t *offset); |
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index 533f6ebec531..5aec3e81fe24 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c | |||
@@ -1274,11 +1274,6 @@ unlock: | |||
1274 | return ret; | 1274 | return ret; |
1275 | } | 1275 | } |
1276 | 1276 | ||
1277 | int omap_gem_init_object(struct drm_gem_object *obj) | ||
1278 | { | ||
1279 | return -EINVAL; /* unused */ | ||
1280 | } | ||
1281 | |||
1282 | /* don't call directly.. called from GEM core when it is time to actually | 1277 | /* don't call directly.. called from GEM core when it is time to actually |
1283 | * free the object.. | 1278 | * free the object.. |
1284 | */ | 1279 | */ |
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c index 9263db117ff8..cb858600185f 100644 --- a/drivers/gpu/drm/omapdrm/omap_irq.c +++ b/drivers/gpu/drm/omapdrm/omap_irq.c | |||
@@ -261,7 +261,7 @@ int omap_drm_irq_install(struct drm_device *dev) | |||
261 | mutex_unlock(&dev->struct_mutex); | 261 | mutex_unlock(&dev->struct_mutex); |
262 | return -EBUSY; | 262 | return -EBUSY; |
263 | } | 263 | } |
264 | dev->irq_enabled = 1; | 264 | dev->irq_enabled = true; |
265 | mutex_unlock(&dev->struct_mutex); | 265 | mutex_unlock(&dev->struct_mutex); |
266 | 266 | ||
267 | /* Before installing handler */ | 267 | /* Before installing handler */ |
@@ -272,7 +272,7 @@ int omap_drm_irq_install(struct drm_device *dev) | |||
272 | 272 | ||
273 | if (ret < 0) { | 273 | if (ret < 0) { |
274 | mutex_lock(&dev->struct_mutex); | 274 | mutex_lock(&dev->struct_mutex); |
275 | dev->irq_enabled = 0; | 275 | dev->irq_enabled = false; |
276 | mutex_unlock(&dev->struct_mutex); | 276 | mutex_unlock(&dev->struct_mutex); |
277 | return ret; | 277 | return ret; |
278 | } | 278 | } |
@@ -283,7 +283,7 @@ int omap_drm_irq_install(struct drm_device *dev) | |||
283 | 283 | ||
284 | if (ret < 0) { | 284 | if (ret < 0) { |
285 | mutex_lock(&dev->struct_mutex); | 285 | mutex_lock(&dev->struct_mutex); |
286 | dev->irq_enabled = 0; | 286 | dev->irq_enabled = false; |
287 | mutex_unlock(&dev->struct_mutex); | 287 | mutex_unlock(&dev->struct_mutex); |
288 | dispc_free_irq(dev); | 288 | dispc_free_irq(dev); |
289 | } | 289 | } |
@@ -294,11 +294,12 @@ int omap_drm_irq_install(struct drm_device *dev) | |||
294 | int omap_drm_irq_uninstall(struct drm_device *dev) | 294 | int omap_drm_irq_uninstall(struct drm_device *dev) |
295 | { | 295 | { |
296 | unsigned long irqflags; | 296 | unsigned long irqflags; |
297 | int irq_enabled, i; | 297 | bool irq_enabled; |
298 | int i; | ||
298 | 299 | ||
299 | mutex_lock(&dev->struct_mutex); | 300 | mutex_lock(&dev->struct_mutex); |
300 | irq_enabled = dev->irq_enabled; | 301 | irq_enabled = dev->irq_enabled; |
301 | dev->irq_enabled = 0; | 302 | dev->irq_enabled = false; |
302 | mutex_unlock(&dev->struct_mutex); | 303 | mutex_unlock(&dev->struct_mutex); |
303 | 304 | ||
304 | /* | 305 | /* |
@@ -307,9 +308,9 @@ int omap_drm_irq_uninstall(struct drm_device *dev) | |||
307 | if (dev->num_crtcs) { | 308 | if (dev->num_crtcs) { |
308 | spin_lock_irqsave(&dev->vbl_lock, irqflags); | 309 | spin_lock_irqsave(&dev->vbl_lock, irqflags); |
309 | for (i = 0; i < dev->num_crtcs; i++) { | 310 | for (i = 0; i < dev->num_crtcs; i++) { |
310 | DRM_WAKEUP(&dev->vbl_queue[i]); | 311 | DRM_WAKEUP(&dev->vblank[i].queue); |
311 | dev->vblank_enabled[i] = 0; | 312 | dev->vblank[i].enabled = false; |
312 | dev->last_vblank[i] = | 313 | dev->vblank[i].last = |
313 | dev->driver->get_vblank_counter(dev, i); | 314 | dev->driver->get_vblank_counter(dev, i); |
314 | } | 315 | } |
315 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); | 316 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); |
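The dev->vbl_queue[]/vblank_enabled[]/last_vblank[] arrays referenced by the old code have been folded into a per-CRTC vblank state array in drm-next; only the fields these hunks touch are shown, and the exact layout may differ:

	/* partial sketch inferred from the hunks above */
	struct drm_vblank_crtc {
		wait_queue_head_t queue;	/* was dev->vbl_queue[crtc] */
		u32 last;			/* was dev->last_vblank[crtc] */
		bool enabled;			/* was dev->vblank_enabled[crtc] */
		/* ... counters and refcounting omitted ... */
	};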
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index 514118ae72d4..fee8748bdca5 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c | |||
@@ -225,7 +225,6 @@ static struct drm_driver qxl_driver = { | |||
225 | .debugfs_init = qxl_debugfs_init, | 225 | .debugfs_init = qxl_debugfs_init, |
226 | .debugfs_cleanup = qxl_debugfs_takedown, | 226 | .debugfs_cleanup = qxl_debugfs_takedown, |
227 | #endif | 227 | #endif |
228 | .gem_init_object = qxl_gem_object_init, | ||
229 | .gem_free_object = qxl_gem_object_free, | 228 | .gem_free_object = qxl_gem_object_free, |
230 | .gem_open_object = qxl_gem_object_open, | 229 | .gem_open_object = qxl_gem_object_open, |
231 | .gem_close_object = qxl_gem_object_close, | 230 | .gem_close_object = qxl_gem_object_close, |
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index f7c9adde46a0..41d22ed26060 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h | |||
@@ -412,7 +412,6 @@ int qxl_gem_object_create_with_handle(struct qxl_device *qdev, | |||
412 | struct qxl_surface *surf, | 412 | struct qxl_surface *surf, |
413 | struct qxl_bo **qobj, | 413 | struct qxl_bo **qobj, |
414 | uint32_t *handle); | 414 | uint32_t *handle); |
415 | int qxl_gem_object_init(struct drm_gem_object *obj); | ||
416 | void qxl_gem_object_free(struct drm_gem_object *gobj); | 415 | void qxl_gem_object_free(struct drm_gem_object *gobj); |
417 | int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv); | 416 | int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv); |
418 | void qxl_gem_object_close(struct drm_gem_object *obj, | 417 | void qxl_gem_object_close(struct drm_gem_object *obj, |
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c index 1648e4125af7..b96f0c9d89b2 100644 --- a/drivers/gpu/drm/qxl/qxl_gem.c +++ b/drivers/gpu/drm/qxl/qxl_gem.c | |||
@@ -28,12 +28,6 @@ | |||
28 | #include "qxl_drv.h" | 28 | #include "qxl_drv.h" |
29 | #include "qxl_object.h" | 29 | #include "qxl_object.h" |
30 | 30 | ||
31 | int qxl_gem_object_init(struct drm_gem_object *obj) | ||
32 | { | ||
33 | /* we do nothings here */ | ||
34 | return 0; | ||
35 | } | ||
36 | |||
37 | void qxl_gem_object_free(struct drm_gem_object *gobj) | 31 | void qxl_gem_object_free(struct drm_gem_object *gobj) |
38 | { | 32 | { |
39 | struct qxl_bo *qobj = gem_to_qxl_bo(gobj); | 33 | struct qxl_bo *qobj = gem_to_qxl_bo(gobj); |
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 00885417ffff..fb3ae07a1469 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c | |||
@@ -690,8 +690,7 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info) | |||
690 | 690 | ||
691 | /* set the lane count on the sink */ | 691 | /* set the lane count on the sink */ |
692 | tmp = dp_info->dp_lane_count; | 692 | tmp = dp_info->dp_lane_count; |
693 | if (dp_info->dpcd[DP_DPCD_REV] >= 0x11 && | 693 | if (drm_dp_enhanced_frame_cap(dp_info->dpcd)) |
694 | dp_info->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP) | ||
695 | tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN; | 694 | tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN; |
696 | radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp); | 695 | radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp); |
697 | 696 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index 061b227dae0c..c155d6f3fa68 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c | |||
@@ -499,7 +499,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev) | |||
499 | crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL); | 499 | crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL); |
500 | fp2_gen_cntl = 0; | 500 | fp2_gen_cntl = 0; |
501 | 501 | ||
502 | if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) { | 502 | if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) { |
503 | fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); | 503 | fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); |
504 | } | 504 | } |
505 | 505 | ||
@@ -536,7 +536,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev) | |||
536 | (RADEON_CRTC_SYNC_TRISTAT | | 536 | (RADEON_CRTC_SYNC_TRISTAT | |
537 | RADEON_CRTC_DISPLAY_DIS))); | 537 | RADEON_CRTC_DISPLAY_DIS))); |
538 | 538 | ||
539 | if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) { | 539 | if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) { |
540 | WREG32(RADEON_FP2_GEN_CNTL, (fp2_gen_cntl & ~RADEON_FP2_ON)); | 540 | WREG32(RADEON_FP2_GEN_CNTL, (fp2_gen_cntl & ~RADEON_FP2_ON)); |
541 | } | 541 | } |
542 | 542 | ||
@@ -554,7 +554,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev) | |||
554 | WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); | 554 | WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); |
555 | } | 555 | } |
556 | WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl); | 556 | WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl); |
557 | if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) { | 557 | if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) { |
558 | WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); | 558 | WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); |
559 | } | 559 | } |
560 | return r; | 560 | return r; |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index cdd12dcd988b..22f685827b7e 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -100,7 +100,6 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev); | |||
100 | int radeon_driver_irq_postinstall_kms(struct drm_device *dev); | 100 | int radeon_driver_irq_postinstall_kms(struct drm_device *dev); |
101 | void radeon_driver_irq_uninstall_kms(struct drm_device *dev); | 101 | void radeon_driver_irq_uninstall_kms(struct drm_device *dev); |
102 | irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS); | 102 | irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS); |
103 | int radeon_gem_object_init(struct drm_gem_object *obj); | ||
104 | void radeon_gem_object_free(struct drm_gem_object *obj); | 103 | void radeon_gem_object_free(struct drm_gem_object *obj); |
105 | int radeon_gem_object_open(struct drm_gem_object *obj, | 104 | int radeon_gem_object_open(struct drm_gem_object *obj, |
106 | struct drm_file *file_priv); | 105 | struct drm_file *file_priv); |
@@ -408,7 +407,6 @@ static struct drm_driver kms_driver = { | |||
408 | .irq_uninstall = radeon_driver_irq_uninstall_kms, | 407 | .irq_uninstall = radeon_driver_irq_uninstall_kms, |
409 | .irq_handler = radeon_driver_irq_handler_kms, | 408 | .irq_handler = radeon_driver_irq_handler_kms, |
410 | .ioctls = radeon_ioctls_kms, | 409 | .ioctls = radeon_ioctls_kms, |
411 | .gem_init_object = radeon_gem_object_init, | ||
412 | .gem_free_object = radeon_gem_object_free, | 410 | .gem_free_object = radeon_gem_object_free, |
413 | .gem_open_object = radeon_gem_object_open, | 411 | .gem_open_object = radeon_gem_object_open, |
414 | .gem_close_object = radeon_gem_object_close, | 412 | .gem_close_object = radeon_gem_object_close, |
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index dce99c8a5835..805c5e566b9a 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
@@ -29,13 +29,6 @@ | |||
29 | #include <drm/radeon_drm.h> | 29 | #include <drm/radeon_drm.h> |
30 | #include "radeon.h" | 30 | #include "radeon.h" |
31 | 31 | ||
32 | int radeon_gem_object_init(struct drm_gem_object *obj) | ||
33 | { | ||
34 | BUG(); | ||
35 | |||
36 | return 0; | ||
37 | } | ||
38 | |||
39 | void radeon_gem_object_free(struct drm_gem_object *gobj) | 32 | void radeon_gem_object_free(struct drm_gem_object *gobj) |
40 | { | 33 | { |
41 | struct radeon_bo *robj = gem_to_radeon_bo(gobj); | 34 | struct radeon_bo *robj = gem_to_radeon_bo(gobj); |
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 61580ddc4eb2..d6b36766e8c9 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
@@ -191,7 +191,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
191 | 191 | ||
192 | switch (info->request) { | 192 | switch (info->request) { |
193 | case RADEON_INFO_DEVICE_ID: | 193 | case RADEON_INFO_DEVICE_ID: |
194 | *value = dev->pci_device; | 194 | *value = dev->pdev->device; |
195 | break; | 195 | break; |
196 | case RADEON_INFO_NUM_GB_PIPES: | 196 | case RADEON_INFO_NUM_GB_PIPES: |
197 | *value = rdev->num_gb_pipes; | 197 | *value = rdev->num_gb_pipes; |
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index 7650dc0d78ce..3ddd6cd98ac1 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c | |||
@@ -77,7 +77,6 @@ static struct drm_driver driver = { | |||
77 | .unload = udl_driver_unload, | 77 | .unload = udl_driver_unload, |
78 | 78 | ||
79 | /* gem hooks */ | 79 | /* gem hooks */ |
80 | .gem_init_object = udl_gem_init_object, | ||
81 | .gem_free_object = udl_gem_free_object, | 80 | .gem_free_object = udl_gem_free_object, |
82 | .gem_vm_ops = &udl_gem_vm_ops, | 81 | .gem_vm_ops = &udl_gem_vm_ops, |
83 | 82 | ||
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h index 56aec9409fa3..1fbf7b357f16 100644 --- a/drivers/gpu/drm/udl/udl_drv.h +++ b/drivers/gpu/drm/udl/udl_drv.h | |||
@@ -115,7 +115,6 @@ int udl_dumb_create(struct drm_file *file_priv, | |||
115 | int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev, | 115 | int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev, |
116 | uint32_t handle, uint64_t *offset); | 116 | uint32_t handle, uint64_t *offset); |
117 | 117 | ||
118 | int udl_gem_init_object(struct drm_gem_object *obj); | ||
119 | void udl_gem_free_object(struct drm_gem_object *gem_obj); | 118 | void udl_gem_free_object(struct drm_gem_object *gem_obj); |
120 | struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev, | 119 | struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev, |
121 | size_t size); | 120 | size_t size); |
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c index 8bf646183bac..24ffbe990736 100644 --- a/drivers/gpu/drm/udl/udl_gem.c +++ b/drivers/gpu/drm/udl/udl_gem.c | |||
@@ -107,13 +107,6 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
107 | } | 107 | } |
108 | } | 108 | } |
109 | 109 | ||
110 | int udl_gem_init_object(struct drm_gem_object *obj) | ||
111 | { | ||
112 | BUG(); | ||
113 | |||
114 | return 0; | ||
115 | } | ||
116 | |||
117 | static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask) | 110 | static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask) |
118 | { | 111 | { |
119 | struct page **pages; | 112 | struct page **pages; |
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c index 7e3ad87c366c..927889105483 100644 --- a/drivers/gpu/drm/via/via_mm.c +++ b/drivers/gpu/drm/via/via_mm.c | |||
@@ -79,7 +79,7 @@ int via_final_context(struct drm_device *dev, int context) | |||
79 | 79 | ||
80 | /* Linux specific until context tracking code gets ported to BSD */ | 80 | /* Linux specific until context tracking code gets ported to BSD */ |
81 | /* Last context, perform cleanup */ | 81 | /* Last context, perform cleanup */ |
82 | if (dev->ctx_count == 1 && dev->dev_private) { | 82 | if (list_is_singular(&dev->ctxlist) && dev->dev_private) { |
83 | DRM_DEBUG("Last Context\n"); | 83 | DRM_DEBUG("Last Context\n"); |
84 | drm_irq_uninstall(dev); | 84 | drm_irq_uninstall(dev); |
85 | via_cleanup_futex(dev_priv); | 85 | via_cleanup_futex(dev_priv); |
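list_is_singular() replaces the hand-maintained ctx_count; it is true exactly when the list holds a single entry, so the check cannot drift out of sync with dev->ctxlist. For reference, the helper in <linux/list.h> is essentially:

	static inline int list_is_singular(const struct list_head *head)
	{
		return !list_empty(head) && (head->next == head->prev);
	}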
diff --git a/drivers/gpu/host1x/drm/drm.c b/drivers/gpu/host1x/drm/drm.c index 8c61ceeaa12d..df7d90a3a4fa 100644 --- a/drivers/gpu/host1x/drm/drm.c +++ b/drivers/gpu/host1x/drm/drm.c | |||
@@ -264,7 +264,7 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags) | |||
264 | * core, so we need to set this manually in order to allow the | 264 | * core, so we need to set this manually in order to allow the |
265 | * DRM_IOCTL_WAIT_VBLANK to operate correctly. | 265 | * DRM_IOCTL_WAIT_VBLANK to operate correctly. |
266 | */ | 266 | */ |
267 | drm->irq_enabled = 1; | 267 | drm->irq_enabled = true; |
268 | 268 | ||
269 | err = drm_vblank_init(drm, drm->mode_config.num_crtc); | 269 | err = drm_vblank_init(drm, drm->mode_config.num_crtc); |
270 | if (err < 0) | 270 | if (err < 0) |
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c index a2e52a0c53c9..c1014eb2907d 100644 --- a/drivers/staging/imx-drm/imx-drm-core.c +++ b/drivers/staging/imx-drm/imx-drm-core.c | |||
@@ -396,14 +396,14 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags) | |||
396 | 396 | ||
397 | /* | 397 | /* |
398 | * enable drm irq mode. | 398 | * enable drm irq mode. |
399 | * - with irq_enabled = 1, we can use the vblank feature. | 399 | * - with irq_enabled = true, we can use the vblank feature. |
400 | * | 400 | * |
401 | * P.S. note that we wouldn't use drm irq handler but | 401 | * P.S. note that we wouldn't use drm irq handler but |
402 | * just specific driver own one instead because | 402 | * just specific driver own one instead because |
403 | * drm framework supports only one irq handler and | 403 | * drm framework supports only one irq handler and |
404 | * drivers can well take care of their interrupts | 404 | * drivers can well take care of their interrupts |
405 | */ | 405 | */ |
406 | drm->irq_enabled = 1; | 406 | drm->irq_enabled = true; |
407 | 407 | ||
408 | drm_mode_config_init(drm); | 408 | drm_mode_config_init(drm); |
409 | imx_drm_mode_config_init(drm); | 409 | imx_drm_mode_config_init(drm); |
@@ -423,11 +423,11 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags) | |||
423 | goto err_init; | 423 | goto err_init; |
424 | 424 | ||
425 | /* | 425 | /* |
426 | * with vblank_disable_allowed = 1, vblank interrupt will be disabled | 426 | * with vblank_disable_allowed = true, vblank interrupt will be disabled |
427 | * by drm timer once a current process gives up ownership of | 427 | * by drm timer once a current process gives up ownership of |
428 | * vblank event.(after drm_vblank_put function is called) | 428 | * vblank event.(after drm_vblank_put function is called) |
429 | */ | 429 | */ |
430 | imxdrm->drm->vblank_disable_allowed = 1; | 430 | imxdrm->drm->vblank_disable_allowed = true; |
431 | 431 | ||
432 | if (!imx_drm_device_get()) | 432 | if (!imx_drm_device_get()) |
433 | ret = -EINVAL; | 433 | ret = -EINVAL; |
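With irq_enabled and vblank_disable_allowed becoming bool in the drmP.h hunk below, load callbacks like the ones above assign true/false. A condensed sketch of the usual sequence (driver name illustrative, error paths shortened):

	static int foo_driver_load(struct drm_device *drm, unsigned long flags)
	{
		int ret;

		drm->irq_enabled = true;	/* vblank ioctls work even though the
						 * driver installs its own irq handler */

		ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
		if (ret < 0)
			return ret;

		drm->vblank_disable_allowed = true;	/* let the disable timer turn
							 * vblank irqs off when idle */
		return 0;
	}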
diff --git a/include/drm/drmP.h b/include/drm/drmP.h index c65f496ad6b6..2b954adf5bd4 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h | |||
@@ -670,8 +670,6 @@ struct drm_gem_object { | |||
670 | uint32_t pending_read_domains; | 670 | uint32_t pending_read_domains; |
671 | uint32_t pending_write_domain; | 671 | uint32_t pending_write_domain; |
672 | 672 | ||
673 | void *driver_private; | ||
674 | |||
675 | /** | 673 | /** |
676 | * dma_buf - dma buf associated with this GEM object | 674 | * dma_buf - dma buf associated with this GEM object |
677 | * | 675 | * |
@@ -925,7 +923,6 @@ struct drm_driver { | |||
925 | * | 923 | * |
926 | * Returns 0 on success. | 924 | * Returns 0 on success. |
927 | */ | 925 | */ |
928 | int (*gem_init_object) (struct drm_gem_object *obj); | ||
929 | void (*gem_free_object) (struct drm_gem_object *obj); | 926 | void (*gem_free_object) (struct drm_gem_object *obj); |
930 | int (*gem_open_object) (struct drm_gem_object *, struct drm_file *); | 927 | int (*gem_open_object) (struct drm_gem_object *, struct drm_file *); |
931 | void (*gem_close_object) (struct drm_gem_object *, struct drm_file *); | 928 | void (*gem_close_object) (struct drm_gem_object *, struct drm_file *); |
@@ -1084,6 +1081,19 @@ struct drm_pending_vblank_event { | |||
1084 | struct drm_event_vblank event; | 1081 | struct drm_event_vblank event; |
1085 | }; | 1082 | }; |
1086 | 1083 | ||
1084 | struct drm_vblank_crtc { | ||
1085 | wait_queue_head_t queue; /**< VBLANK wait queue */ | ||
1086 | struct timeval time[DRM_VBLANKTIME_RBSIZE]; /**< timestamp of current count */ | ||
1087 | atomic_t count; /**< number of VBLANK interrupts */ | ||
1088 | atomic_t refcount; /* number of users of vblank interrupts per crtc */ | ||
1089 | u32 last; /* protected by dev->vbl_lock, used */ | ||
1090 | /* for wraparound handling */ | ||
1091 | u32 last_wait; /* Last vblank seqno waited per CRTC */ | ||
1092 | unsigned int inmodeset; /* Display driver is setting mode */ | ||
1093 | bool enabled; /* so we don't call enable more than | ||
1094 | once per disable */ | ||
1095 | }; | ||
1096 | |||
1087 | /** | 1097 | /** |
1088 | * DRM device structure. This structure represent a complete card that | 1098 | * DRM device structure. This structure represent a complete card that |
1089 | * may contain multiple heads. | 1099 | * may contain multiple heads. |
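The new struct gathers the per-CRTC vblank state that used to live in the parallel arrays removed a few hunks below; dev->vblank becomes one array of these, sized num_crtcs. An illustrative access pattern (not verbatim drm_irq.c code):

	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];

	atomic_inc(&vblank->refcount);	/* was atomic_inc(&dev->vblank_refcount[crtc]) */
	wake_up(&vblank->queue);	/* was wake_up(&dev->vbl_queue[crtc]) */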
@@ -1108,25 +1118,16 @@ struct drm_device { | |||
1108 | atomic_t buf_alloc; /**< Buffer allocation in progress */ | 1118 | atomic_t buf_alloc; /**< Buffer allocation in progress */ |
1109 | /*@} */ | 1119 | /*@} */ |
1110 | 1120 | ||
1111 | /** \name Performance counters */ | ||
1112 | /*@{ */ | ||
1113 | unsigned long counters; | ||
1114 | enum drm_stat_type types[15]; | ||
1115 | atomic_t counts[15]; | ||
1116 | /*@} */ | ||
1117 | |||
1118 | struct list_head filelist; | 1121 | struct list_head filelist; |
1119 | 1122 | ||
1120 | /** \name Memory management */ | 1123 | /** \name Memory management */ |
1121 | /*@{ */ | 1124 | /*@{ */ |
1122 | struct list_head maplist; /**< Linked list of regions */ | 1125 | struct list_head maplist; /**< Linked list of regions */ |
1123 | int map_count; /**< Number of mappable regions */ | ||
1124 | struct drm_open_hash map_hash; /**< User token hash table for maps */ | 1126 | struct drm_open_hash map_hash; /**< User token hash table for maps */ |
1125 | 1127 | ||
1126 | /** \name Context handle management */ | 1128 | /** \name Context handle management */ |
1127 | /*@{ */ | 1129 | /*@{ */ |
1128 | struct list_head ctxlist; /**< Linked list of context handles */ | 1130 | struct list_head ctxlist; /**< Linked list of context handles */ |
1129 | int ctx_count; /**< Number of context handles */ | ||
1130 | struct mutex ctxlist_mutex; /**< For ctxlist */ | 1131 | struct mutex ctxlist_mutex; /**< For ctxlist */ |
1131 | 1132 | ||
1132 | struct idr ctx_idr; | 1133 | struct idr ctx_idr; |
@@ -1142,12 +1143,11 @@ struct drm_device { | |||
1142 | 1143 | ||
1143 | /** \name Context support */ | 1144 | /** \name Context support */ |
1144 | /*@{ */ | 1145 | /*@{ */ |
1145 | int irq_enabled; /**< True if irq handler is enabled */ | 1146 | bool irq_enabled; /**< True if irq handler is enabled */ |
1146 | __volatile__ long context_flag; /**< Context swapping flag */ | 1147 | __volatile__ long context_flag; /**< Context swapping flag */ |
1147 | int last_context; /**< Last current context */ | 1148 | int last_context; /**< Last current context */ |
1148 | /*@} */ | 1149 | /*@} */ |
1149 | 1150 | ||
1150 | struct work_struct work; | ||
1151 | /** \name VBLANK IRQ support */ | 1151 | /** \name VBLANK IRQ support */ |
1152 | /*@{ */ | 1152 | /*@{ */ |
1153 | 1153 | ||
@@ -1157,20 +1157,13 @@ struct drm_device { | |||
1157 | * Once the modeset ioctl *has* been called though, we can safely | 1157 | * Once the modeset ioctl *has* been called though, we can safely |
1158 | * disable them when unused. | 1158 | * disable them when unused. |
1159 | */ | 1159 | */ |
1160 | int vblank_disable_allowed; | 1160 | bool vblank_disable_allowed; |
1161 | |||
1162 | /* array of size num_crtcs */ | ||
1163 | struct drm_vblank_crtc *vblank; | ||
1161 | 1164 | ||
1162 | wait_queue_head_t *vbl_queue; /**< VBLANK wait queue */ | ||
1163 | atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */ | ||
1164 | struct timeval *_vblank_time; /**< timestamp of current vblank_count (drivers must alloc right number of fields) */ | ||
1165 | spinlock_t vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */ | 1165 | spinlock_t vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */ |
1166 | spinlock_t vbl_lock; | 1166 | spinlock_t vbl_lock; |
1167 | atomic_t *vblank_refcount; /* number of users of vblank interruptsper crtc */ | ||
1168 | u32 *last_vblank; /* protected by dev->vbl_lock, used */ | ||
1169 | /* for wraparound handling */ | ||
1170 | int *vblank_enabled; /* so we don't call enable more than | ||
1171 | once per disable */ | ||
1172 | int *vblank_inmodeset; /* Display driver is setting mode */ | ||
1173 | u32 *last_vblank_wait; /* Last vblank seqno waited per CRTC */ | ||
1174 | struct timer_list vblank_disable_timer; | 1167 | struct timer_list vblank_disable_timer; |
1175 | 1168 | ||
1176 | u32 max_vblank_count; /**< size of vblank counter register */ | 1169 | u32 max_vblank_count; /**< size of vblank counter register */ |
@@ -1187,8 +1180,6 @@ struct drm_device { | |||
1187 | 1180 | ||
1188 | struct device *dev; /**< Device structure */ | 1181 | struct device *dev; /**< Device structure */ |
1189 | struct pci_dev *pdev; /**< PCI device structure */ | 1182 | struct pci_dev *pdev; /**< PCI device structure */ |
1190 | int pci_vendor; /**< PCI vendor id */ | ||
1191 | int pci_device; /**< PCI device id */ | ||
1192 | #ifdef __alpha__ | 1183 | #ifdef __alpha__ |
1193 | struct pci_controller *hose; | 1184 | struct pci_controller *hose; |
1194 | #endif | 1185 | #endif |
@@ -1561,8 +1552,6 @@ int drm_gem_init(struct drm_device *dev); | |||
1561 | void drm_gem_destroy(struct drm_device *dev); | 1552 | void drm_gem_destroy(struct drm_device *dev); |
1562 | void drm_gem_object_release(struct drm_gem_object *obj); | 1553 | void drm_gem_object_release(struct drm_gem_object *obj); |
1563 | void drm_gem_object_free(struct kref *kref); | 1554 | void drm_gem_object_free(struct kref *kref); |
1564 | struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev, | ||
1565 | size_t size); | ||
1566 | int drm_gem_object_init(struct drm_device *dev, | 1555 | int drm_gem_object_init(struct drm_device *dev, |
1567 | struct drm_gem_object *obj, size_t size); | 1556 | struct drm_gem_object *obj, size_t size); |
1568 | void drm_gem_private_object_init(struct drm_device *dev, | 1557 | void drm_gem_private_object_init(struct drm_device *dev, |
@@ -1650,9 +1639,11 @@ static __inline__ void drm_core_dropmap(struct drm_local_map *map) | |||
1650 | 1639 | ||
1651 | #include <drm/drm_mem_util.h> | 1640 | #include <drm/drm_mem_util.h> |
1652 | 1641 | ||
1653 | extern int drm_fill_in_dev(struct drm_device *dev, | 1642 | struct drm_device *drm_dev_alloc(struct drm_driver *driver, |
1654 | const struct pci_device_id *ent, | 1643 | struct device *parent); |
1655 | struct drm_driver *driver); | 1644 | void drm_dev_free(struct drm_device *dev); |
1645 | int drm_dev_register(struct drm_device *dev, unsigned long flags); | ||
1646 | void drm_dev_unregister(struct drm_device *dev); | ||
1656 | int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type); | 1647 | int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type); |
1657 | /*@}*/ | 1648 | /*@}*/ |
1658 | 1649 | ||
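drm_fill_in_dev() gives way to a split allocate/register API. A hedged sketch of the new device lifecycle for a PCI-backed driver (foo_drm_driver and the surrounding probe/remove code are assumed, not part of this patch):

	struct drm_device *dev;
	int ret;

	dev = drm_dev_alloc(&foo_drm_driver, &pdev->dev);
	if (!dev)
		return -ENOMEM;

	ret = drm_dev_register(dev, 0);
	if (ret) {
		drm_dev_free(dev);
		return ret;
	}

	/* ... and on teardown ... */
	drm_dev_unregister(dev);
	drm_dev_free(dev);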
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 50cedadc9fcc..ba407f6b4f1f 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h | |||
@@ -973,6 +973,7 @@ extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_m | |||
973 | extern bool drm_probe_ddc(struct i2c_adapter *adapter); | 973 | extern bool drm_probe_ddc(struct i2c_adapter *adapter); |
974 | extern struct edid *drm_get_edid(struct drm_connector *connector, | 974 | extern struct edid *drm_get_edid(struct drm_connector *connector, |
975 | struct i2c_adapter *adapter); | 975 | struct i2c_adapter *adapter); |
976 | extern struct edid *drm_edid_duplicate(const struct edid *edid); | ||
976 | extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); | 977 | extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); |
977 | extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode); | 978 | extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode); |
978 | extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src); | 979 | extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src); |
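drm_edid_duplicate() hands back a freshly allocated copy of an EDID including its extension blocks. A minimal usage sketch, assuming the copy is released with kfree() like other EDID allocations:

	struct edid *copy = drm_edid_duplicate(edid);

	if (!copy)
		return -ENOMEM;
	/* ... inspect or cache the copy independently of the connector ... */
	kfree(copy);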
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index ae8dbfb1207c..a92c3754e3bb 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h | |||
@@ -77,10 +77,10 @@ | |||
77 | #define DP_DOWNSTREAMPORT_PRESENT 0x005 | 77 | #define DP_DOWNSTREAMPORT_PRESENT 0x005 |
78 | # define DP_DWN_STRM_PORT_PRESENT (1 << 0) | 78 | # define DP_DWN_STRM_PORT_PRESENT (1 << 0) |
79 | # define DP_DWN_STRM_PORT_TYPE_MASK 0x06 | 79 | # define DP_DWN_STRM_PORT_TYPE_MASK 0x06 |
80 | /* 00b = DisplayPort */ | 80 | # define DP_DWN_STRM_PORT_TYPE_DP (0 << 1) |
81 | /* 01b = Analog */ | 81 | # define DP_DWN_STRM_PORT_TYPE_ANALOG (1 << 1) |
82 | /* 10b = TMDS or HDMI */ | 82 | # define DP_DWN_STRM_PORT_TYPE_TMDS (2 << 1) |
83 | /* 11b = Other */ | 83 | # define DP_DWN_STRM_PORT_TYPE_OTHER (3 << 1) |
84 | # define DP_FORMAT_CONVERSION (1 << 3) | 84 | # define DP_FORMAT_CONVERSION (1 << 3) |
85 | # define DP_DETAILED_CAP_INFO_AVAILABLE (1 << 4) /* DPI */ | 85 | # define DP_DETAILED_CAP_INFO_AVAILABLE (1 << 4) /* DPI */ |
86 | 86 | ||
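Turning the former free-text comments into defines lets downstream-port handling switch on the type instead of open-coding the bit values. Illustrative use, assuming dpcd[] holds the sink's receiver capabilities:

	u8 port = dpcd[DP_DOWNSTREAMPORT_PRESENT];

	if (port & DP_DWN_STRM_PORT_PRESENT) {
		switch (port & DP_DWN_STRM_PORT_TYPE_MASK) {
		case DP_DWN_STRM_PORT_TYPE_TMDS:
			/* TMDS/HDMI branch device */
			break;
		case DP_DWN_STRM_PORT_TYPE_ANALOG:
			/* analog (VGA) dongle */
			break;
		default:
			break;
		}
	}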
@@ -333,20 +333,20 @@ i2c_dp_aux_add_bus(struct i2c_adapter *adapter); | |||
333 | 333 | ||
334 | 334 | ||
335 | #define DP_LINK_STATUS_SIZE 6 | 335 | #define DP_LINK_STATUS_SIZE 6 |
336 | bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE], | 336 | bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE], |
337 | int lane_count); | 337 | int lane_count); |
338 | bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE], | 338 | bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE], |
339 | int lane_count); | 339 | int lane_count); |
340 | u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE], | 340 | u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE], |
341 | int lane); | 341 | int lane); |
342 | u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE], | 342 | u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE], |
343 | int lane); | 343 | int lane); |
344 | 344 | ||
345 | #define DP_RECEIVER_CAP_SIZE 0xf | 345 | #define DP_RECEIVER_CAP_SIZE 0xf |
346 | #define EDP_PSR_RECEIVER_CAP_SIZE 2 | 346 | #define EDP_PSR_RECEIVER_CAP_SIZE 2 |
347 | 347 | ||
348 | void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]); | 348 | void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]); |
349 | void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]); | 349 | void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]); |
350 | 350 | ||
351 | u8 drm_dp_link_rate_to_bw_code(int link_rate); | 351 | u8 drm_dp_link_rate_to_bw_code(int link_rate); |
352 | int drm_dp_bw_code_to_link_rate(u8 link_bw); | 352 | int drm_dp_bw_code_to_link_rate(u8 link_bw); |
@@ -379,15 +379,22 @@ struct edp_vsc_psr { | |||
379 | #define EDP_VSC_PSR_CRC_VALUES_VALID (1<<2) | 379 | #define EDP_VSC_PSR_CRC_VALUES_VALID (1<<2) |
380 | 380 | ||
381 | static inline int | 381 | static inline int |
382 | drm_dp_max_link_rate(u8 dpcd[DP_RECEIVER_CAP_SIZE]) | 382 | drm_dp_max_link_rate(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) |
383 | { | 383 | { |
384 | return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]); | 384 | return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]); |
385 | } | 385 | } |
386 | 386 | ||
387 | static inline u8 | 387 | static inline u8 |
388 | drm_dp_max_lane_count(u8 dpcd[DP_RECEIVER_CAP_SIZE]) | 388 | drm_dp_max_lane_count(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) |
389 | { | 389 | { |
390 | return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; | 390 | return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; |
391 | } | 391 | } |
392 | 392 | ||
393 | static inline bool | ||
394 | drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) | ||
395 | { | ||
396 | return dpcd[DP_DPCD_REV] >= 0x11 && | ||
397 | (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP); | ||
398 | } | ||
399 | |||
393 | #endif /* _DRM_DP_HELPER_H_ */ | 400 | #endif /* _DRM_DP_HELPER_H_ */ |
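The new helper folds the DPCD revision check and the enhanced-framing capability bit into one place. A hedged sketch of how a driver might use it when programming DP_LANE_COUNT_SET (surrounding AUX code assumed):

	u8 lane_count_set = lane_count & DP_LANE_COUNT_MASK;

	if (drm_dp_enhanced_frame_cap(dpcd))
		lane_count_set |= DP_LANE_COUNT_ENHANCED_FRAME_EN;

	/* write lane_count_set to DP_LANE_COUNT_SET over the AUX channel */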