author    Linus Torvalds <torvalds@linux-foundation.org>  2010-02-18 11:10:21 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2010-02-18 11:10:21 -0500
commit    86404ab60df2ea65f39be936fc11762b642810c3
tree      9834aa4ba93dbdb398d007eaba639f27148d4bdf
parent    ab320af2244462703455dac59fe5ceede24d3662
parent    6b15835282f9c6a023e2625455bfdb822bb9cc64
Merge branch 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
drm/radeon/kms: fix bo's fence association
drm/radeon/kms: fix indirect buffer management V2
drm/edid: Fix interlaced detailed timings to be frame size, not field.
drm/vmwgfx: Use fb handover mechanism instead of stealth mode.
drm/radeon/kms: use udelay for short delays
drm/nouveau: Force TV encoder DPMS reinit after resume.
drm/nouveau: use mutex for vbios lock
 drivers/gpu/drm/drm_edid.c              |  47
 drivers/gpu/drm/nouveau/nouveau_bios.c  |   7
 drivers/gpu/drm/nouveau/nouveau_bios.h  |   2
 drivers/gpu/drm/nouveau/nv17_tv.c       |   2
 drivers/gpu/drm/radeon/atom.c           |   2
 drivers/gpu/drm/radeon/r600_blit_kms.c  |   3
 drivers/gpu/drm/radeon/radeon.h         |   9
 drivers/gpu/drm/radeon/radeon_cs.c      |  10
 drivers/gpu/drm/radeon/radeon_object.c  |  36
 drivers/gpu/drm/radeon/radeon_object.h  |   4
 drivers/gpu/drm/radeon/radeon_ring.c    | 105
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c     |  49
 drivers/gpu/drm/vmwgfx/vmwgfx_fb.c      |   3
 13 files changed, 137 insertions(+), 142 deletions(-)
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index f665b05592f3..ab6c97330412 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -598,6 +598,50 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
 	return mode;
 }
 
+/*
+ * EDID is delightfully ambiguous about how interlaced modes are to be
+ * encoded.  Our internal representation is of frame height, but some
+ * HDTV detailed timings are encoded as field height.
+ *
+ * The format list here is from CEA, in frame size.  Technically we
+ * should be checking refresh rate too.  Whatever.
+ */
+static void
+drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
+			    struct detailed_pixel_timing *pt)
+{
+	int i;
+	static const struct {
+		int w, h;
+	} cea_interlaced[] = {
+		{ 1920, 1080 },
+		{  720,  480 },
+		{ 1440,  480 },
+		{ 2880,  480 },
+		{  720,  576 },
+		{ 1440,  576 },
+		{ 2880,  576 },
+	};
+	static const int n_sizes =
+		sizeof(cea_interlaced)/sizeof(cea_interlaced[0]);
+
+	if (!(pt->misc & DRM_EDID_PT_INTERLACED))
+		return;
+
+	for (i = 0; i < n_sizes; i++) {
+		if ((mode->hdisplay == cea_interlaced[i].w) &&
+		    (mode->vdisplay == cea_interlaced[i].h / 2)) {
+			mode->vdisplay *= 2;
+			mode->vsync_start *= 2;
+			mode->vsync_end *= 2;
+			mode->vtotal *= 2;
+			mode->vtotal |= 1;
+		}
+	}
+
+	mode->flags |= DRM_MODE_FLAG_INTERLACE;
+}
+
 /**
  * drm_mode_detailed - create a new mode from an EDID detailed timing section
  * @dev: DRM device (needed to create new mode)
@@ -680,8 +724,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
 
 	drm_mode_set_name(mode);
 
-	if (pt->misc & DRM_EDID_PT_INTERLACED)
-		mode->flags |= DRM_MODE_FLAG_INTERLACE;
+	drm_mode_do_interlace_quirk(mode, pt);
 
 	if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
 		pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
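The quirk above rewrites field-height HDTV detailed timings into the frame-height representation the rest of DRM expects: the vertical values are doubled and vtotal is forced odd, since an interlaced frame covers an odd total line count (each field ends on a half line). A minimal standalone sketch of the same arithmetic, using hypothetical 1080i field-timing values (illustrative only, not taken from a real EDID):

#include <stdio.h>

/* Stand-in for the fields of drm_display_mode that the quirk touches. */
struct mode {
	int hdisplay, vdisplay, vsync_start, vsync_end, vtotal;
};

int main(void)
{
	/* Hypothetical 1080i detailed timing encoded as field height:
	 * 540 active lines and 562 total lines per field. */
	struct mode m = { 1920, 540, 542, 547, 562 };

	/* Same transform as drm_mode_do_interlace_quirk(): double the
	 * vertical timings to frame size and make vtotal odd. */
	m.vdisplay *= 2;
	m.vsync_start *= 2;
	m.vsync_end *= 2;
	m.vtotal *= 2;
	m.vtotal |= 1;

	printf("frame: %dx%d, vsync %d-%d, vtotal %d\n",
	       m.hdisplay, m.vdisplay, m.vsync_start, m.vsync_end, m.vtotal);
	/* prints: frame: 1920x1080, vsync 1084-1094, vtotal 1125 */
	return 0;
}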
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 2cd0fad17dac..0e9cd1d49130 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -5861,13 +5861,12 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nvbios *bios = &dev_priv->VBIOS;
 	struct init_exec iexec = { true, false };
-	unsigned long flags;
 
-	spin_lock_irqsave(&bios->lock, flags);
+	mutex_lock(&bios->lock);
 	bios->display.output = dcbent;
 	parse_init_table(bios, table, &iexec);
 	bios->display.output = NULL;
-	spin_unlock_irqrestore(&bios->lock, flags);
+	mutex_unlock(&bios->lock);
 }
 
 static bool NVInitVBIOS(struct drm_device *dev)
@@ -5876,7 +5875,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
 	struct nvbios *bios = &dev_priv->VBIOS;
 
 	memset(bios, 0, sizeof(struct nvbios));
-	spin_lock_init(&bios->lock);
+	mutex_init(&bios->lock);
 	bios->dev = dev;
 
 	if (!NVShadowVBIOS(dev, bios->data))
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 68446fd4146b..fd94bd6dc264 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -205,7 +205,7 @@ struct nvbios {
 	struct drm_device *dev;
 	struct nouveau_bios_info pub;
 
-	spinlock_t lock;
+	struct mutex lock;
 
 	uint8_t data[NV_PROM_SIZE];
 	unsigned int length;
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 58b917c3341b..21ac6e49b6ee 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -579,6 +579,8 @@ static void nv17_tv_restore(struct drm_encoder *encoder)
 			       nouveau_encoder(encoder)->restore.output);
 
 	nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state);
+
+	nouveau_encoder(encoder)->last_dpms = NV_DPMS_CLEARED;
 }
 
 static int nv17_tv_create_resources(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 2a3df5599ab4..7f152f66f196 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -643,7 +643,7 @@ static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
 	uint8_t count = U8((*ptr)++);
 	SDEBUG("   count: %d\n", count);
 	if (arg == ATOM_UNIT_MICROSEC)
-		schedule_timeout_uninterruptible(usecs_to_jiffies(count));
+		udelay(count);
 	else
 		schedule_timeout_uninterruptible(msecs_to_jiffies(count));
 }
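The udelay() switch matters because the delay operand here is a single byte (at most 255 microseconds), while schedule_timeout_uninterruptible() can only sleep whole jiffies, and usecs_to_jiffies() rounds any nonzero value up to at least one jiffy. A standalone sketch of that rounding, with an assumed HZ value (an approximation of the kernel helper's round-up behaviour, not its exact code):

#include <stdio.h>

#define HZ 250			/* assumed config; typically 100..1000 */
#define USEC_PER_SEC 1000000UL

/* Round a microsecond delay up to whole jiffies, like the kernel does. */
static unsigned long usecs_to_jiffies(unsigned long usec)
{
	return (usec * HZ + USEC_PER_SEC - 1) / USEC_PER_SEC;
}

int main(void)
{
	unsigned long count;

	for (count = 1; count <= 255; count *= 4) {
		unsigned long j = usecs_to_jiffies(count);
		printf("%3lu us -> %lu jiffy(ies) = %lu us slept\n",
		       count, j, j * (USEC_PER_SEC / HZ));
	}
	/* With HZ=250 every delay up to 255 us becomes a 4000 us sleep,
	 * up to 4000x too long; udelay(count) busy-waits the right time. */
	return 0;
}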
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index af1c3ca8a4cb..446b765ac72a 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -543,9 +543,6 @@ int r600_vb_ib_get(struct radeon_device *rdev)
 void r600_vb_ib_put(struct radeon_device *rdev)
 {
 	radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
-	mutex_lock(&rdev->ib_pool.mutex);
-	list_add_tail(&rdev->r600_blit.vb_ib->list, &rdev->ib_pool.scheduled_ibs);
-	mutex_unlock(&rdev->ib_pool.mutex);
 	radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index f57480ba1355..c0356bb193e5 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -96,6 +96,7 @@ extern int radeon_audio;
  * symbol;
  */
 #define RADEON_MAX_USEC_TIMEOUT		100000	/* 100 ms */
+/* RADEON_IB_POOL_SIZE must be a power of 2 */
 #define RADEON_IB_POOL_SIZE		16
 #define RADEON_DEBUGFS_MAX_NUM_FILES	32
 #define RADEONFB_CONN_LIMIT		4
@@ -363,11 +364,12 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
  */
 struct radeon_ib {
 	struct list_head	list;
-	unsigned long		idx;
+	unsigned		idx;
 	uint64_t		gpu_addr;
 	struct radeon_fence	*fence;
 	uint32_t		*ptr;
 	uint32_t		length_dw;
+	bool			free;
 };
 
 /*
@@ -377,10 +379,9 @@ struct radeon_ib {
 struct radeon_ib_pool {
 	struct mutex		mutex;
 	struct radeon_bo	*robj;
-	struct list_head	scheduled_ibs;
 	struct radeon_ib	ibs[RADEON_IB_POOL_SIZE];
 	bool			ready;
-	DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE);
+	unsigned		head_id;
 };
 
 struct radeon_cp {
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 1190148cf5e6..e9d085021c1f 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -86,7 +86,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 					  &p->validated);
 		}
 	}
-	return radeon_bo_list_validate(&p->validated, p->ib->fence);
+	return radeon_bo_list_validate(&p->validated);
 }
 
 int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
@@ -189,12 +189,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
 {
 	unsigned i;
 
-	if (error && parser->ib) {
-		radeon_bo_list_unvalidate(&parser->validated,
-					  parser->ib->fence);
-	} else {
-		radeon_bo_list_unreserve(&parser->validated);
+	if (!error && parser->ib) {
+		radeon_bo_list_fence(&parser->validated, parser->ib->fence);
 	}
+	radeon_bo_list_unreserve(&parser->validated);
 	for (i = 0; i < parser->nrelocs; i++) {
 		if (parser->relocs[i].gobj) {
 			mutex_lock(&parser->rdev->ddev->struct_mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d72a71bff218..f1da370928eb 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -306,11 +306,10 @@ void radeon_bo_list_unreserve(struct list_head *head)
 	}
 }
 
-int radeon_bo_list_validate(struct list_head *head, void *fence)
+int radeon_bo_list_validate(struct list_head *head)
 {
 	struct radeon_bo_list *lobj;
 	struct radeon_bo *bo;
-	struct radeon_fence *old_fence = NULL;
 	int r;
 
 	r = radeon_bo_list_reserve(head);
@@ -334,32 +333,27 @@ int radeon_bo_list_validate(struct list_head *head, void *fence)
 		}
 		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
 		lobj->tiling_flags = bo->tiling_flags;
-		if (fence) {
-			old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
-			bo->tbo.sync_obj = radeon_fence_ref(fence);
-			bo->tbo.sync_obj_arg = NULL;
-		}
-		if (old_fence) {
-			radeon_fence_unref(&old_fence);
-		}
 	}
 	return 0;
 }
 
-void radeon_bo_list_unvalidate(struct list_head *head, void *fence)
+void radeon_bo_list_fence(struct list_head *head, void *fence)
 {
 	struct radeon_bo_list *lobj;
-	struct radeon_fence *old_fence;
-
-	if (fence)
-		list_for_each_entry(lobj, head, list) {
-			old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj);
-			if (old_fence == fence) {
-				lobj->bo->tbo.sync_obj = NULL;
-				radeon_fence_unref(&old_fence);
-			}
+	struct radeon_bo *bo;
+	struct radeon_fence *old_fence = NULL;
+
+	list_for_each_entry(lobj, head, list) {
+		bo = lobj->bo;
+		spin_lock(&bo->tbo.lock);
+		old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
+		bo->tbo.sync_obj = radeon_fence_ref(fence);
+		bo->tbo.sync_obj_arg = NULL;
+		spin_unlock(&bo->tbo.lock);
+		if (old_fence) {
+			radeon_fence_unref(&old_fence);
 		}
-	radeon_bo_list_unreserve(head);
+	}
 }
 
 int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
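The new radeon_bo_list_fence() swaps a buffer's sync object under bo->tbo.lock: take a reference on the new fence before the pointer is replaced, and only drop the old reference afterwards, so readers never see a dangling fence. A toy refcounting sketch of that swap pattern (plain C with made-up types, not the TTM API):

#include <stdio.h>
#include <stdlib.h>

/* Toy refcounted fence, standing in for struct radeon_fence. */
struct fence {
	int refs;
	int seq;
};

static struct fence *fence_ref(struct fence *f)
{
	f->refs++;
	return f;
}

static void fence_unref(struct fence **f)
{
	if (*f && --(*f)->refs == 0) {
		printf("fence %d freed\n", (*f)->seq);
		free(*f);
	}
	*f = NULL;
}

struct bo {
	struct fence *sync_obj;	/* fence the BO is currently tied to */
};

/* The swap pattern: remember the old fence, install a reference to the
 * new one, drop the old reference last.  (In the kernel this happens
 * under bo->tbo.lock.) */
static void bo_fence(struct bo *bo, struct fence *fence)
{
	struct fence *old = bo->sync_obj;

	bo->sync_obj = fence_ref(fence);
	if (old)
		fence_unref(&old);
}

int main(void)
{
	struct fence *a = malloc(sizeof(*a)), *b = malloc(sizeof(*b));
	struct bo bo = { NULL };

	*a = (struct fence){ .refs = 1, .seq = 1 };
	*b = (struct fence){ .refs = 1, .seq = 2 };

	bo_fence(&bo, a);		/* bo holds a ref on fence 1 */
	bo_fence(&bo, b);		/* fence 1 replaced by fence 2 */
	fence_unref(&a);		/* drop our own ref; fence 1 freed */
	fence_unref(&b);
	fence_unref(&bo.sync_obj);	/* fence 2 freed here */
	return 0;
}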
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index a02f18011ad1..7ab43de1e244 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -156,8 +156,8 @@ extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
 				      struct list_head *head);
 extern int radeon_bo_list_reserve(struct list_head *head);
 extern void radeon_bo_list_unreserve(struct list_head *head);
-extern int radeon_bo_list_validate(struct list_head *head, void *fence);
-extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence);
+extern int radeon_bo_list_validate(struct list_head *head);
+extern void radeon_bo_list_fence(struct list_head *head, void *fence);
 extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
 				struct vm_area_struct *vma);
 extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 4d12b2d17b4d..694799f6fac1 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -41,68 +41,55 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
 {
 	struct radeon_fence *fence;
 	struct radeon_ib *nib;
-	unsigned long i;
-	int r = 0;
+	int r = 0, i, c;
 
 	*ib = NULL;
 	r = radeon_fence_create(rdev, &fence);
 	if (r) {
-		DRM_ERROR("failed to create fence for new IB\n");
+		dev_err(rdev->dev, "failed to create fence for new IB\n");
 		return r;
 	}
 	mutex_lock(&rdev->ib_pool.mutex);
-	i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
-	if (i < RADEON_IB_POOL_SIZE) {
-		set_bit(i, rdev->ib_pool.alloc_bm);
-		rdev->ib_pool.ibs[i].length_dw = 0;
-		*ib = &rdev->ib_pool.ibs[i];
-		mutex_unlock(&rdev->ib_pool.mutex);
-		goto out;
+	for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
+		i &= (RADEON_IB_POOL_SIZE - 1);
+		if (rdev->ib_pool.ibs[i].free) {
+			nib = &rdev->ib_pool.ibs[i];
+			break;
+		}
 	}
-	if (list_empty(&rdev->ib_pool.scheduled_ibs)) {
-		/* we go do nothings here */
+	if (nib == NULL) {
+		/* This should never happen, it means we allocated all
+		 * IB and haven't scheduled one yet, return EBUSY to
+		 * userspace hoping that on ioctl recall we get better
+		 * luck
+		 */
+		dev_err(rdev->dev, "no free indirect buffer !\n");
 		mutex_unlock(&rdev->ib_pool.mutex);
-		DRM_ERROR("all IB allocated none scheduled.\n");
-		r = -EINVAL;
-		goto out;
+		radeon_fence_unref(&fence);
+		return -EBUSY;
 	}
-	/* get the first ib on the scheduled list */
-	nib = list_entry(rdev->ib_pool.scheduled_ibs.next,
-			 struct radeon_ib, list);
-	if (nib->fence == NULL) {
-		/* we go do nothings here */
+	rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
+	nib->free = false;
+	if (nib->fence) {
 		mutex_unlock(&rdev->ib_pool.mutex);
-		DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx);
-		r = -EINVAL;
-		goto out;
-	}
-	mutex_unlock(&rdev->ib_pool.mutex);
-
-	r = radeon_fence_wait(nib->fence, false);
-	if (r) {
-		DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx,
-			  (unsigned long)nib->gpu_addr, nib->length_dw);
-		DRM_ERROR("radeon: GPU lockup detected, fail to get a IB\n");
-		goto out;
+		r = radeon_fence_wait(nib->fence, false);
+		if (r) {
+			dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
+				nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
+			mutex_lock(&rdev->ib_pool.mutex);
+			nib->free = true;
+			mutex_unlock(&rdev->ib_pool.mutex);
+			radeon_fence_unref(&fence);
+			return r;
+		}
+		mutex_lock(&rdev->ib_pool.mutex);
 	}
 	radeon_fence_unref(&nib->fence);
-
+	nib->fence = fence;
 	nib->length_dw = 0;
-
-	/* scheduled list is accessed here */
-	mutex_lock(&rdev->ib_pool.mutex);
-	list_del(&nib->list);
-	INIT_LIST_HEAD(&nib->list);
 	mutex_unlock(&rdev->ib_pool.mutex);
-
 	*ib = nib;
-out:
-	if (r) {
-		radeon_fence_unref(&fence);
-	} else {
-		(*ib)->fence = fence;
-	}
-	return r;
+	return 0;
 }
 
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
@@ -114,18 +101,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
 		return;
 	}
 	mutex_lock(&rdev->ib_pool.mutex);
-	if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) {
-		/* IB is scheduled & not signaled don't do anythings */
-		mutex_unlock(&rdev->ib_pool.mutex);
-		return;
-	}
-	list_del(&tmp->list);
-	INIT_LIST_HEAD(&tmp->list);
-	if (tmp->fence)
-		radeon_fence_unref(&tmp->fence);
-
-	tmp->length_dw = 0;
-	clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
+	tmp->free = true;
 	mutex_unlock(&rdev->ib_pool.mutex);
 }
 
@@ -135,7 +111,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 
 	if (!ib->length_dw || !rdev->cp.ready) {
 		/* TODO: Nothings in the ib we should report. */
-		DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx);
+		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
 		return -EINVAL;
 	}
 
@@ -148,7 +124,8 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 	radeon_ring_ib_execute(rdev, ib);
 	radeon_fence_emit(rdev, ib->fence);
 	mutex_lock(&rdev->ib_pool.mutex);
-	list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
+	/* once scheduled IB is considered free and protected by the fence */
+	ib->free = true;
 	mutex_unlock(&rdev->ib_pool.mutex);
 	radeon_ring_unlock_commit(rdev);
 	return 0;
@@ -164,7 +141,6 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
 	if (rdev->ib_pool.robj)
 		return 0;
 	/* Allocate 1M object buffer */
-	INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
 	r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
 			     true, RADEON_GEM_DOMAIN_GTT,
 			     &rdev->ib_pool.robj);
@@ -195,9 +171,9 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
 		rdev->ib_pool.ibs[i].ptr = ptr + offset;
 		rdev->ib_pool.ibs[i].idx = i;
 		rdev->ib_pool.ibs[i].length_dw = 0;
-		INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list);
+		rdev->ib_pool.ibs[i].free = true;
 	}
-	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
+	rdev->ib_pool.head_id = 0;
 	rdev->ib_pool.ready = true;
 	DRM_INFO("radeon: ib pool ready.\n");
 	if (radeon_debugfs_ib_init(rdev)) {
@@ -214,7 +190,6 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
 		return;
 	}
 	mutex_lock(&rdev->ib_pool.mutex);
-	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
 	if (rdev->ib_pool.robj) {
 		r = radeon_bo_reserve(rdev->ib_pool.robj, false);
 		if (likely(r == 0)) {
@@ -363,7 +338,7 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
 	if (ib == NULL) {
 		return 0;
 	}
-	seq_printf(m, "IB %04lu\n", ib->idx);
+	seq_printf(m, "IB %04u\n", ib->idx);
 	seq_printf(m, "IB fence %p\n", ib->fence);
 	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
 	for (i = 0; i < ib->length_dw; i++) {
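The rewritten allocator drops the bitmap and scheduled list in favour of a per-IB free flag plus a circular scan starting at head_id; because RADEON_IB_POOL_SIZE is a power of two (hence the new comment in radeon.h), wrap-around is a cheap mask rather than a modulo. A standalone sketch of the scan with a toy pool (hypothetical names, not the driver's structs):

#include <stdio.h>
#include <stdbool.h>

#define POOL_SIZE 16		/* must be a power of 2 for the masking trick */

struct ib {
	unsigned idx;
	bool free;
};

static struct ib pool[POOL_SIZE];
static unsigned head_id;

/* Scan at most POOL_SIZE slots starting at head_id, wrapping with a
 * mask; on success advance head_id past the slot just handed out. */
static struct ib *ib_get(void)
{
	unsigned i, c;
	struct ib *nib = NULL;

	for (i = head_id, c = 0; c < POOL_SIZE; c++, i++) {
		i &= (POOL_SIZE - 1);
		if (pool[i].free) {
			nib = &pool[i];
			break;
		}
	}
	if (nib == NULL)
		return NULL;	/* all slots busy: caller retries (-EBUSY) */
	head_id = (nib->idx + 1) & (POOL_SIZE - 1);
	nib->free = false;
	return nib;
}

int main(void)
{
	unsigned i;

	for (i = 0; i < POOL_SIZE; i++)
		pool[i] = (struct ib){ .idx = i, .free = true };

	/* exhaust the pool, then free one slot in the middle */
	for (i = 0; i < POOL_SIZE; i++)
		ib_get();
	printf("full pool: %p\n", (void *)ib_get());	/* NULL: the -EBUSY case */
	pool[5].free = true;				/* like radeon_ib_free() */
	printf("reused: %u\n", ib_get()->idx);		/* scan wraps to slot 5 */
	return 0;
}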
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index a6e8f687fa64..0c9c0811f42d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -348,22 +348,19 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		 */
 
 		DRM_INFO("It appears like vesafb is loaded. "
-			 "Ignore above error if any. Entering stealth mode.\n");
+			 "Ignore above error if any.\n");
 		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
 		if (unlikely(ret != 0)) {
 			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
 			goto out_no_device;
 		}
-		vmw_kms_init(dev_priv);
-		vmw_overlay_init(dev_priv);
-	} else {
-		ret = vmw_request_device(dev_priv);
-		if (unlikely(ret != 0))
-			goto out_no_device;
-		vmw_kms_init(dev_priv);
-		vmw_overlay_init(dev_priv);
-		vmw_fb_init(dev_priv);
 	}
+	ret = vmw_request_device(dev_priv);
+	if (unlikely(ret != 0))
+		goto out_no_device;
+	vmw_kms_init(dev_priv);
+	vmw_overlay_init(dev_priv);
+	vmw_fb_init(dev_priv);
 
 	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
 	register_pm_notifier(&dev_priv->pm_nb);
@@ -406,17 +403,15 @@ static int vmw_driver_unload(struct drm_device *dev)
 
 	unregister_pm_notifier(&dev_priv->pm_nb);
 
-	if (!dev_priv->stealth) {
-		vmw_fb_close(dev_priv);
-		vmw_kms_close(dev_priv);
-		vmw_overlay_close(dev_priv);
-		vmw_release_device(dev_priv);
-		pci_release_regions(dev->pdev);
-	} else {
-		vmw_kms_close(dev_priv);
-		vmw_overlay_close(dev_priv);
+	vmw_fb_close(dev_priv);
+	vmw_kms_close(dev_priv);
+	vmw_overlay_close(dev_priv);
+	vmw_release_device(dev_priv);
+	if (dev_priv->stealth)
 		pci_release_region(dev->pdev, 2);
-	}
+	else
+		pci_release_regions(dev->pdev);
+
 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
 		drm_irq_uninstall(dev_priv->dev);
 	if (dev->devname == vmw_devname)
@@ -585,11 +580,6 @@ static int vmw_master_set(struct drm_device *dev,
 	int ret = 0;
 
 	DRM_INFO("Master set.\n");
-	if (dev_priv->stealth) {
-		ret = vmw_request_device(dev_priv);
-		if (unlikely(ret != 0))
-			return ret;
-	}
 
 	if (active) {
 		BUG_ON(active != &dev_priv->fbdev_master);
@@ -649,18 +639,11 @@ static void vmw_master_drop(struct drm_device *dev,
 
 	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
 
-	if (dev_priv->stealth) {
-		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
-		if (unlikely(ret != 0))
-			DRM_ERROR("Unable to clean VRAM on master drop.\n");
-		vmw_release_device(dev_priv);
-	}
 	dev_priv->active_master = &dev_priv->fbdev_master;
 	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
 	ttm_vt_unlock(&dev_priv->fbdev_master.lock);
 
-	if (!dev_priv->stealth)
-		vmw_fb_on(dev_priv);
+	vmw_fb_on(dev_priv);
 }
 
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 4f4f6432be8b..a93367041cdc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -559,6 +559,9 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
 	info->pixmap.scan_align = 1;
 #endif
 
+	info->aperture_base = vmw_priv->vram_start;
+	info->aperture_size = vmw_priv->vram_size;
+
 	/*
 	 * Dirty & Deferred IO
 	 */
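Publishing info->aperture_base/aperture_size is what enables the fb handover named in the commit message: when vmwgfx registers its framebuffer, the fbdev core of this era compares apertures and unregisters a generic driver such as vesafb whose range overlaps, so the driver no longer needs stealth mode. A minimal sketch of the overlap test such a handover relies on (standalone C with hypothetical addresses, not the fbmem.c code itself):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct aperture {
	uint64_t base;
	uint64_t size;
};

/* Two apertures conflict if their address ranges intersect. */
static bool apertures_overlap(struct aperture a, struct aperture b)
{
	return a.base < b.base + b.size && b.base < a.base + a.size;
}

int main(void)
{
	/* hypothetical: vesafb sitting on the SVGA VRAM BAR */
	struct aperture vesafb = { 0xd0000000, 16 << 20 };
	struct aperture vmwgfx = { 0xd0000000, 64 << 20 };
	struct aperture other  = { 0xf0000000,  1 << 20 };

	printf("vesafb vs vmwgfx: %s\n",
	       apertures_overlap(vesafb, vmwgfx) ? "conflict, kick vesafb" : "keep");
	printf("other  vs vmwgfx: %s\n",
	       apertures_overlap(other, vmwgfx) ? "conflict" : "keep");
	return 0;
}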
