Diffstat (limited to 'drivers/gpu/drm')
40 files changed, 451 insertions, 252 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 785127cb281b..1368826ef284 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -9,7 +9,6 @@ menuconfig DRM | |||
9 | depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU | 9 | depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU |
10 | select I2C | 10 | select I2C |
11 | select I2C_ALGOBIT | 11 | select I2C_ALGOBIT |
12 | select SLOW_WORK | ||
13 | help | 12 | help |
14 | Kernel-level support for the Direct Rendering Infrastructure (DRI) | 13 | Kernel-level support for the Direct Rendering Infrastructure (DRI) |
15 | introduced in XFree86 4.0. If you say Y here, you need to select | 14 | introduced in XFree86 4.0. If you say Y here, you need to select |
@@ -96,6 +95,7 @@ config DRM_I915 | |||
96 | select FB_CFB_IMAGEBLIT | 95 | select FB_CFB_IMAGEBLIT |
97 | # i915 depends on ACPI_VIDEO when ACPI is enabled | 96 | # i915 depends on ACPI_VIDEO when ACPI is enabled |
98 | # but for select to work, need to select ACPI_VIDEO's dependencies, ick | 97 | # but for select to work, need to select ACPI_VIDEO's dependencies, ick |
98 | select BACKLIGHT_LCD_SUPPORT if ACPI | ||
99 | select BACKLIGHT_CLASS_DEVICE if ACPI | 99 | select BACKLIGHT_CLASS_DEVICE if ACPI |
100 | select VIDEO_OUTPUT_CONTROL if ACPI | 100 | select VIDEO_OUTPUT_CONTROL if ACPI |
101 | select INPUT if ACPI | 101 | select INPUT if ACPI |
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 9a2e2a14b3bb..405c63b9d539 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -2118,8 +2118,10 @@ struct drm_property *drm_property_create(struct drm_device *dev, int flags, | |||
2118 | property->num_values = num_values; | 2118 | property->num_values = num_values; |
2119 | INIT_LIST_HEAD(&property->enum_blob_list); | 2119 | INIT_LIST_HEAD(&property->enum_blob_list); |
2120 | 2120 | ||
2121 | if (name) | 2121 | if (name) { |
2122 | strncpy(property->name, name, DRM_PROP_NAME_LEN); | 2122 | strncpy(property->name, name, DRM_PROP_NAME_LEN); |
2123 | property->name[DRM_PROP_NAME_LEN-1] = '\0'; | ||
2124 | } | ||
2123 | 2125 | ||
2124 | list_add_tail(&property->head, &dev->mode_config.property_list); | 2126 | list_add_tail(&property->head, &dev->mode_config.property_list); |
2125 | return property; | 2127 | return property; |
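Note on the drm_crtc.c hunk above: it exists because strncpy() does not NUL-terminate the destination when the source string fills the whole buffer, so the extra assignment guarantees property->name is always a valid C string. A minimal sketch of the same idiom, not the kernel code itself (set_name and src are placeholder names, and DRM_PROP_NAME_LEN is assumed to be the kernel's value of 32):

	#include <string.h>

	#define DRM_PROP_NAME_LEN 32	/* assumed to match the kernel definition */

	static void set_name(char name[DRM_PROP_NAME_LEN], const char *src)
	{
		if (src) {
			strncpy(name, src, DRM_PROP_NAME_LEN);
			/* strncpy() leaves no terminator if src fills the buffer */
			name[DRM_PROP_NAME_LEN - 1] = '\0';
		}
	}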
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 2957636161e8..3969f7553fe7 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -484,6 +484,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) | |||
484 | struct drm_connector *save_connectors, *connector; | 484 | struct drm_connector *save_connectors, *connector; |
485 | int count = 0, ro, fail = 0; | 485 | int count = 0, ro, fail = 0; |
486 | struct drm_crtc_helper_funcs *crtc_funcs; | 486 | struct drm_crtc_helper_funcs *crtc_funcs; |
487 | struct drm_mode_set save_set; | ||
487 | int ret = 0; | 488 | int ret = 0; |
488 | int i; | 489 | int i; |
489 | 490 | ||
@@ -556,6 +557,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) | |||
556 | save_connectors[count++] = *connector; | 557 | save_connectors[count++] = *connector; |
557 | } | 558 | } |
558 | 559 | ||
560 | save_set.crtc = set->crtc; | ||
561 | save_set.mode = &set->crtc->mode; | ||
562 | save_set.x = set->crtc->x; | ||
563 | save_set.y = set->crtc->y; | ||
564 | save_set.fb = set->crtc->fb; | ||
565 | |||
559 | /* We should be able to check here if the fb has the same properties | 566 | /* We should be able to check here if the fb has the same properties |
560 | * and then just flip_or_move it */ | 567 | * and then just flip_or_move it */ |
561 | if (set->crtc->fb != set->fb) { | 568 | if (set->crtc->fb != set->fb) { |
@@ -721,6 +728,12 @@ fail: | |||
721 | *connector = save_connectors[count++]; | 728 | *connector = save_connectors[count++]; |
722 | } | 729 | } |
723 | 730 | ||
731 | /* Try to restore the config */ | ||
732 | if (mode_changed && | ||
733 | !drm_crtc_helper_set_mode(save_set.crtc, save_set.mode, save_set.x, | ||
734 | save_set.y, save_set.fb)) | ||
735 | DRM_ERROR("failed to restore config after modeset failure\n"); | ||
736 | |||
724 | kfree(save_connectors); | 737 | kfree(save_connectors); |
725 | kfree(save_encoders); | 738 | kfree(save_encoders); |
726 | kfree(save_crtcs); | 739 | kfree(save_crtcs); |
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index d067c12ba940..1c7a1c0d3edd 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -118,7 +118,10 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count, | |||
118 | tmp->minor = minor; | 118 | tmp->minor = minor; |
119 | tmp->dent = ent; | 119 | tmp->dent = ent; |
120 | tmp->info_ent = &files[i]; | 120 | tmp->info_ent = &files[i]; |
121 | list_add(&(tmp->list), &(minor->debugfs_nodes.list)); | 121 | |
122 | mutex_lock(&minor->debugfs_lock); | ||
123 | list_add(&tmp->list, &minor->debugfs_list); | ||
124 | mutex_unlock(&minor->debugfs_lock); | ||
122 | } | 125 | } |
123 | return 0; | 126 | return 0; |
124 | 127 | ||
@@ -146,7 +149,8 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id, | |||
146 | char name[64]; | 149 | char name[64]; |
147 | int ret; | 150 | int ret; |
148 | 151 | ||
149 | INIT_LIST_HEAD(&minor->debugfs_nodes.list); | 152 | INIT_LIST_HEAD(&minor->debugfs_list); |
153 | mutex_init(&minor->debugfs_lock); | ||
150 | sprintf(name, "%d", minor_id); | 154 | sprintf(name, "%d", minor_id); |
151 | minor->debugfs_root = debugfs_create_dir(name, root); | 155 | minor->debugfs_root = debugfs_create_dir(name, root); |
152 | if (!minor->debugfs_root) { | 156 | if (!minor->debugfs_root) { |
@@ -192,8 +196,9 @@ int drm_debugfs_remove_files(struct drm_info_list *files, int count, | |||
192 | struct drm_info_node *tmp; | 196 | struct drm_info_node *tmp; |
193 | int i; | 197 | int i; |
194 | 198 | ||
199 | mutex_lock(&minor->debugfs_lock); | ||
195 | for (i = 0; i < count; i++) { | 200 | for (i = 0; i < count; i++) { |
196 | list_for_each_safe(pos, q, &minor->debugfs_nodes.list) { | 201 | list_for_each_safe(pos, q, &minor->debugfs_list) { |
197 | tmp = list_entry(pos, struct drm_info_node, list); | 202 | tmp = list_entry(pos, struct drm_info_node, list); |
198 | if (tmp->info_ent == &files[i]) { | 203 | if (tmp->info_ent == &files[i]) { |
199 | debugfs_remove(tmp->dent); | 204 | debugfs_remove(tmp->dent); |
@@ -202,6 +207,7 @@ int drm_debugfs_remove_files(struct drm_info_list *files, int count, | |||
202 | } | 207 | } |
203 | } | 208 | } |
204 | } | 209 | } |
210 | mutex_unlock(&minor->debugfs_lock); | ||
205 | return 0; | 211 | return 0; |
206 | } | 212 | } |
207 | EXPORT_SYMBOL(drm_debugfs_remove_files); | 213 | EXPORT_SYMBOL(drm_debugfs_remove_files); |
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index fc81af9dbf42..40c187c60f44 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -125,7 +125,7 @@ static struct drm_ioctl_desc drm_ioctls[] = { | |||
125 | DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 125 | DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
126 | DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 126 | DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
127 | 127 | ||
128 | DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0), | 128 | DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED), |
129 | 129 | ||
130 | DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0), | 130 | DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0), |
131 | 131 | ||
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index cb3794a00f98..44a5d0ad8b7c 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -110,10 +110,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc) | |||
110 | /* Prevent vblank irq processing while disabling vblank irqs, | 110 | /* Prevent vblank irq processing while disabling vblank irqs, |
111 | * so no updates of timestamps or count can happen after we've | 111 | * so no updates of timestamps or count can happen after we've |
112 | * disabled. Needed to prevent races in case of delayed irq's. | 112 | * disabled. Needed to prevent races in case of delayed irq's. |
113 | * Disable preemption, so vblank_time_lock is held as short as | ||
114 | * possible, even under a kernel with PREEMPT_RT patches. | ||
115 | */ | 113 | */ |
116 | preempt_disable(); | ||
117 | spin_lock_irqsave(&dev->vblank_time_lock, irqflags); | 114 | spin_lock_irqsave(&dev->vblank_time_lock, irqflags); |
118 | 115 | ||
119 | dev->driver->disable_vblank(dev, crtc); | 116 | dev->driver->disable_vblank(dev, crtc); |
@@ -164,7 +161,6 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc) | |||
164 | clear_vblank_timestamps(dev, crtc); | 161 | clear_vblank_timestamps(dev, crtc); |
165 | 162 | ||
166 | spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); | 163 | spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); |
167 | preempt_enable(); | ||
168 | } | 164 | } |
169 | 165 | ||
170 | static void vblank_disable_fn(unsigned long arg) | 166 | static void vblank_disable_fn(unsigned long arg) |
@@ -407,13 +403,16 @@ int drm_irq_uninstall(struct drm_device *dev) | |||
407 | /* | 403 | /* |
408 | * Wake up any waiters so they don't hang. | 404 | * Wake up any waiters so they don't hang. |
409 | */ | 405 | */ |
410 | spin_lock_irqsave(&dev->vbl_lock, irqflags); | 406 | if (dev->num_crtcs) { |
411 | for (i = 0; i < dev->num_crtcs; i++) { | 407 | spin_lock_irqsave(&dev->vbl_lock, irqflags); |
412 | DRM_WAKEUP(&dev->vbl_queue[i]); | 408 | for (i = 0; i < dev->num_crtcs; i++) { |
413 | dev->vblank_enabled[i] = 0; | 409 | DRM_WAKEUP(&dev->vbl_queue[i]); |
414 | dev->last_vblank[i] = dev->driver->get_vblank_counter(dev, i); | 410 | dev->vblank_enabled[i] = 0; |
411 | dev->last_vblank[i] = | ||
412 | dev->driver->get_vblank_counter(dev, i); | ||
413 | } | ||
414 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); | ||
415 | } | 415 | } |
416 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); | ||
417 | 416 | ||
418 | if (!irq_enabled) | 417 | if (!irq_enabled) |
419 | return -EINVAL; | 418 | return -EINVAL; |
@@ -886,10 +885,6 @@ int drm_vblank_get(struct drm_device *dev, int crtc) | |||
886 | spin_lock_irqsave(&dev->vbl_lock, irqflags); | 885 | spin_lock_irqsave(&dev->vbl_lock, irqflags); |
887 | /* Going from 0->1 means we have to enable interrupts again */ | 886 | /* Going from 0->1 means we have to enable interrupts again */ |
888 | if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) { | 887 | if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) { |
889 | /* Disable preemption while holding vblank_time_lock. Do | ||
890 | * it explicitely to guard against PREEMPT_RT kernel. | ||
891 | */ | ||
892 | preempt_disable(); | ||
893 | spin_lock_irqsave(&dev->vblank_time_lock, irqflags2); | 888 | spin_lock_irqsave(&dev->vblank_time_lock, irqflags2); |
894 | if (!dev->vblank_enabled[crtc]) { | 889 | if (!dev->vblank_enabled[crtc]) { |
895 | /* Enable vblank irqs under vblank_time_lock protection. | 890 | /* Enable vblank irqs under vblank_time_lock protection. |
@@ -909,7 +904,6 @@ int drm_vblank_get(struct drm_device *dev, int crtc) | |||
909 | } | 904 | } |
910 | } | 905 | } |
911 | spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2); | 906 | spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2); |
912 | preempt_enable(); | ||
913 | } else { | 907 | } else { |
914 | if (!dev->vblank_enabled[crtc]) { | 908 | if (!dev->vblank_enabled[crtc]) { |
915 | atomic_dec(&dev->vblank_refcount[crtc]); | 909 | atomic_dec(&dev->vblank_refcount[crtc]); |
@@ -1125,6 +1119,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe, | |||
1125 | trace_drm_vblank_event_delivered(current->pid, pipe, | 1119 | trace_drm_vblank_event_delivered(current->pid, pipe, |
1126 | vblwait->request.sequence); | 1120 | vblwait->request.sequence); |
1127 | } else { | 1121 | } else { |
1122 | /* drm_handle_vblank_events will call drm_vblank_put */ | ||
1128 | list_add_tail(&e->base.link, &dev->vblank_event_list); | 1123 | list_add_tail(&e->base.link, &dev->vblank_event_list); |
1129 | vblwait->reply.sequence = vblwait->request.sequence; | 1124 | vblwait->reply.sequence = vblwait->request.sequence; |
1130 | } | 1125 | } |
@@ -1205,8 +1200,12 @@ int drm_wait_vblank(struct drm_device *dev, void *data, | |||
1205 | goto done; | 1200 | goto done; |
1206 | } | 1201 | } |
1207 | 1202 | ||
1208 | if (flags & _DRM_VBLANK_EVENT) | 1203 | if (flags & _DRM_VBLANK_EVENT) { |
1204 | /* must hold on to the vblank ref until the event fires | ||
1205 | * drm_vblank_put will be called asynchronously | ||
1206 | */ | ||
1209 | return drm_queue_vblank_event(dev, crtc, vblwait, file_priv); | 1207 | return drm_queue_vblank_event(dev, crtc, vblwait, file_priv); |
1208 | } | ||
1210 | 1209 | ||
1211 | if ((flags & _DRM_VBLANK_NEXTONMISS) && | 1210 | if ((flags & _DRM_VBLANK_NEXTONMISS) && |
1212 | (seq - vblwait->request.sequence) <= (1<<23)) { | 1211 | (seq - vblwait->request.sequence) <= (1<<23)) { |
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d14b44e13f51..4f40f1ce1d8e 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1506,7 +1506,10 @@ drm_add_fake_info_node(struct drm_minor *minor, | |||
1506 | node->minor = minor; | 1506 | node->minor = minor; |
1507 | node->dent = ent; | 1507 | node->dent = ent; |
1508 | node->info_ent = (void *) key; | 1508 | node->info_ent = (void *) key; |
1509 | list_add(&node->list, &minor->debugfs_nodes.list); | 1509 | |
1510 | mutex_lock(&minor->debugfs_lock); | ||
1511 | list_add(&node->list, &minor->debugfs_list); | ||
1512 | mutex_unlock(&minor->debugfs_lock); | ||
1510 | 1513 | ||
1511 | return 0; | 1514 | return 0; |
1512 | } | 1515 | } |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index cc531bb59c26..e9c2cfe45daa 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -789,8 +789,8 @@ static struct vm_operations_struct i915_gem_vm_ops = { | |||
789 | }; | 789 | }; |
790 | 790 | ||
791 | static struct drm_driver driver = { | 791 | static struct drm_driver driver = { |
792 | /* don't use mtrr's here, the Xserver or user space app should | 792 | /* Don't use MTRRs here; the Xserver or userspace app should |
793 | * deal with them for intel hardware. | 793 | * deal with them for Intel hardware. |
794 | */ | 794 | */ |
795 | .driver_features = | 795 | .driver_features = |
796 | DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/ | 796 | DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/ |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6651c36b6e8a..d18b07adcffa 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1396,7 +1396,7 @@ i915_gem_mmap_gtt(struct drm_file *file, | |||
1396 | 1396 | ||
1397 | if (obj->base.size > dev_priv->mm.gtt_mappable_end) { | 1397 | if (obj->base.size > dev_priv->mm.gtt_mappable_end) { |
1398 | ret = -E2BIG; | 1398 | ret = -E2BIG; |
1399 | goto unlock; | 1399 | goto out; |
1400 | } | 1400 | } |
1401 | 1401 | ||
1402 | if (obj->madv != I915_MADV_WILLNEED) { | 1402 | if (obj->madv != I915_MADV_WILLNEED) { |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 032a82098136..5fc201b49d30 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -640,10 +640,9 @@ static int | |||
640 | nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk) | 640 | nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk) |
641 | { | 641 | { |
642 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 642 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
643 | uint32_t reg0 = nv_rd32(dev, reg + 0); | ||
644 | uint32_t reg1 = nv_rd32(dev, reg + 4); | ||
645 | struct nouveau_pll_vals pll; | 643 | struct nouveau_pll_vals pll; |
646 | struct pll_lims pll_limits; | 644 | struct pll_lims pll_limits; |
645 | u32 ctrl, mask, coef; | ||
647 | int ret; | 646 | int ret; |
648 | 647 | ||
649 | ret = get_pll_limits(dev, reg, &pll_limits); | 648 | ret = get_pll_limits(dev, reg, &pll_limits); |
@@ -654,15 +653,20 @@ nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk) | |||
654 | if (!clk) | 653 | if (!clk) |
655 | return -ERANGE; | 654 | return -ERANGE; |
656 | 655 | ||
657 | reg0 = (reg0 & 0xfff8ffff) | (pll.log2P << 16); | 656 | coef = pll.N1 << 8 | pll.M1; |
658 | reg1 = (reg1 & 0xffff0000) | (pll.N1 << 8) | pll.M1; | 657 | ctrl = pll.log2P << 16; |
659 | 658 | mask = 0x00070000; | |
660 | if (dev_priv->vbios.execute) { | 659 | if (reg == 0x004008) { |
661 | still_alive(); | 660 | mask |= 0x01f80000; |
662 | nv_wr32(dev, reg + 4, reg1); | 661 | ctrl |= (pll_limits.log2p_bias << 19); |
663 | nv_wr32(dev, reg + 0, reg0); | 662 | ctrl |= (pll.log2P << 22); |
664 | } | 663 | } |
665 | 664 | ||
665 | if (!dev_priv->vbios.execute) | ||
666 | return 0; | ||
667 | |||
668 | nv_mask(dev, reg + 0, mask, ctrl); | ||
669 | nv_wr32(dev, reg + 4, coef); | ||
666 | return 0; | 670 | return 0; |
667 | } | 671 | } |
668 | 672 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 7226f419e178..7cc37e690860 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -148,7 +148,7 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type) | |||
148 | 148 | ||
149 | if (dev_priv->card_type == NV_10 && | 149 | if (dev_priv->card_type == NV_10 && |
150 | nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) && | 150 | nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) && |
151 | nvbo->bo.mem.num_pages < vram_pages / 2) { | 151 | nvbo->bo.mem.num_pages < vram_pages / 4) { |
152 | /* | 152 | /* |
153 | * Make sure that the color and depth buffers are handled | 153 | * Make sure that the color and depth buffers are handled |
154 | * by independent memory controller units. Up to a 9x | 154 | * by independent memory controller units. Up to a 9x |
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index a319d5646ea9..bb6ec9ef8676 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -158,6 +158,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, | |||
158 | INIT_LIST_HEAD(&chan->nvsw.vbl_wait); | 158 | INIT_LIST_HEAD(&chan->nvsw.vbl_wait); |
159 | INIT_LIST_HEAD(&chan->nvsw.flip); | 159 | INIT_LIST_HEAD(&chan->nvsw.flip); |
160 | INIT_LIST_HEAD(&chan->fence.pending); | 160 | INIT_LIST_HEAD(&chan->fence.pending); |
161 | spin_lock_init(&chan->fence.lock); | ||
161 | 162 | ||
162 | /* setup channel's memory and vm */ | 163 | /* setup channel's memory and vm */ |
163 | ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle); | 164 | ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index e0d275e1c96c..cea6696b1906 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -710,7 +710,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector, | |||
710 | case OUTPUT_DP: | 710 | case OUTPUT_DP: |
711 | max_clock = nv_encoder->dp.link_nr; | 711 | max_clock = nv_encoder->dp.link_nr; |
712 | max_clock *= nv_encoder->dp.link_bw; | 712 | max_clock *= nv_encoder->dp.link_bw; |
713 | clock = clock * nouveau_connector_bpp(connector) / 8; | 713 | clock = clock * nouveau_connector_bpp(connector) / 10; |
714 | break; | 714 | break; |
715 | default: | 715 | default: |
716 | BUG_ON(1); | 716 | BUG_ON(1); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 14a8627efe4d..3a4cc32b9e44 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -487,6 +487,7 @@ int nouveau_fbcon_init(struct drm_device *dev) | |||
487 | { | 487 | { |
488 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 488 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
489 | struct nouveau_fbdev *nfbdev; | 489 | struct nouveau_fbdev *nfbdev; |
490 | int preferred_bpp; | ||
490 | int ret; | 491 | int ret; |
491 | 492 | ||
492 | nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL); | 493 | nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL); |
@@ -505,7 +506,15 @@ int nouveau_fbcon_init(struct drm_device *dev) | |||
505 | } | 506 | } |
506 | 507 | ||
507 | drm_fb_helper_single_add_all_connectors(&nfbdev->helper); | 508 | drm_fb_helper_single_add_all_connectors(&nfbdev->helper); |
508 | drm_fb_helper_initial_config(&nfbdev->helper, 32); | 509 | |
510 | if (dev_priv->vram_size <= 32 * 1024 * 1024) | ||
511 | preferred_bpp = 8; | ||
512 | else if (dev_priv->vram_size <= 64 * 1024 * 1024) | ||
513 | preferred_bpp = 16; | ||
514 | else | ||
515 | preferred_bpp = 32; | ||
516 | |||
517 | drm_fb_helper_initial_config(&nfbdev->helper, preferred_bpp); | ||
509 | return 0; | 518 | return 0; |
510 | } | 519 | } |
511 | 520 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 81116cfea275..2f6daae68b9d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -539,8 +539,6 @@ nouveau_fence_channel_init(struct nouveau_channel *chan) | |||
539 | return ret; | 539 | return ret; |
540 | } | 540 | } |
541 | 541 | ||
542 | INIT_LIST_HEAD(&chan->fence.pending); | ||
543 | spin_lock_init(&chan->fence.lock); | ||
544 | atomic_set(&chan->fence.last_sequence_irq, 0); | 542 | atomic_set(&chan->fence.last_sequence_irq, 0); |
545 | return 0; | 543 | return 0; |
546 | } | 544 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index c6143df48b9f..d39b2202b197 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -333,7 +333,7 @@ nouveau_i2c_identify(struct drm_device *dev, const char *what, | |||
333 | 333 | ||
334 | NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index); | 334 | NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index); |
335 | 335 | ||
336 | for (i = 0; info[i].addr; i++) { | 336 | for (i = 0; i2c && info[i].addr; i++) { |
337 | if (nouveau_probe_i2c_addr(i2c, info[i].addr) && | 337 | if (nouveau_probe_i2c_addr(i2c, info[i].addr) && |
338 | (!match || match(i2c, &info[i]))) { | 338 | (!match || match(i2c, &info[i]))) { |
339 | NV_INFO(dev, "Detected %s: %s\n", what, info[i].type); | 339 | NV_INFO(dev, "Detected %s: %s\n", what, info[i].type); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
index 9f178aa94162..33d03fbf00df 100644
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -239,7 +239,7 @@ nouveau_perf_init(struct drm_device *dev) | |||
239 | if(version == 0x15) { | 239 | if(version == 0x15) { |
240 | memtimings->timing = | 240 | memtimings->timing = |
241 | kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL); | 241 | kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL); |
242 | if(!memtimings) { | 242 | if (!memtimings->timing) { |
243 | NV_WARN(dev,"Could not allocate memtiming table\n"); | 243 | NV_WARN(dev,"Could not allocate memtiming table\n"); |
244 | return; | 244 | return; |
245 | } | 245 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 82478e0998e5..d8831ab42bb9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -579,6 +579,14 @@ nouveau_card_init(struct drm_device *dev) | |||
579 | if (ret) | 579 | if (ret) |
580 | goto out_display_early; | 580 | goto out_display_early; |
581 | 581 | ||
582 | /* workaround an odd issue on nvc1 by disabling the device's | ||
583 | * nosnoop capability. hopefully won't cause issues until a | ||
584 | * better fix is found - assuming there is one... | ||
585 | */ | ||
586 | if (dev_priv->chipset == 0xc1) { | ||
587 | nv_mask(dev, 0x00088080, 0x00000800, 0x00000000); | ||
588 | } | ||
589 | |||
582 | nouveau_pm_init(dev); | 590 | nouveau_pm_init(dev); |
583 | 591 | ||
584 | ret = engine->vram.init(dev); | 592 | ret = engine->vram.init(dev); |
@@ -1102,12 +1110,13 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) | |||
1102 | dev_priv->noaccel = !!nouveau_noaccel; | 1110 | dev_priv->noaccel = !!nouveau_noaccel; |
1103 | if (nouveau_noaccel == -1) { | 1111 | if (nouveau_noaccel == -1) { |
1104 | switch (dev_priv->chipset) { | 1112 | switch (dev_priv->chipset) { |
1105 | case 0xc1: /* known broken */ | 1113 | #if 0 |
1106 | case 0xc8: /* never tested */ | 1114 | case 0xXX: /* known broken */ |
1107 | NV_INFO(dev, "acceleration disabled by default, pass " | 1115 | NV_INFO(dev, "acceleration disabled by default, pass " |
1108 | "noaccel=0 to force enable\n"); | 1116 | "noaccel=0 to force enable\n"); |
1109 | dev_priv->noaccel = true; | 1117 | dev_priv->noaccel = true; |
1110 | break; | 1118 | break; |
1119 | #endif | ||
1111 | default: | 1120 | default: |
1112 | dev_priv->noaccel = false; | 1121 | dev_priv->noaccel = false; |
1113 | break; | 1122 | break; |
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
index bbc0b9c7e1f7..e676b0d53478 100644
--- a/drivers/gpu/drm/nouveau/nv40_pm.c
+++ b/drivers/gpu/drm/nouveau/nv40_pm.c
@@ -57,12 +57,14 @@ read_pll_2(struct drm_device *dev, u32 reg) | |||
57 | int P = (ctrl & 0x00070000) >> 16; | 57 | int P = (ctrl & 0x00070000) >> 16; |
58 | u32 ref = 27000, clk = 0; | 58 | u32 ref = 27000, clk = 0; |
59 | 59 | ||
60 | if (ctrl & 0x80000000) | 60 | if ((ctrl & 0x80000000) && M1) { |
61 | clk = ref * N1 / M1; | 61 | clk = ref * N1 / M1; |
62 | 62 | if ((ctrl & 0x40000100) == 0x40000000) { | |
63 | if (!(ctrl & 0x00000100)) { | 63 | if (M2) |
64 | if (ctrl & 0x40000000) | 64 | clk = clk * N2 / M2; |
65 | clk = clk * N2 / M2; | 65 | else |
66 | clk = 0; | ||
67 | } | ||
66 | } | 68 | } |
67 | 69 | ||
68 | return clk >> P; | 70 | return clk >> P; |
@@ -177,6 +179,11 @@ nv40_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl) | |||
177 | } | 179 | } |
178 | 180 | ||
179 | /* memory clock */ | 181 | /* memory clock */ |
182 | if (!perflvl->memory) { | ||
183 | info->mpll_ctrl = 0x00000000; | ||
184 | goto out; | ||
185 | } | ||
186 | |||
180 | ret = nv40_calc_pll(dev, 0x004020, &pll, perflvl->memory, | 187 | ret = nv40_calc_pll(dev, 0x004020, &pll, perflvl->memory, |
181 | &N1, &M1, &N2, &M2, &log2P); | 188 | &N1, &M1, &N2, &M2, &log2P); |
182 | if (ret < 0) | 189 | if (ret < 0) |
@@ -264,6 +271,9 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state) | |||
264 | mdelay(5); | 271 | mdelay(5); |
265 | nv_mask(dev, 0x00c040, 0x00000333, info->ctrl); | 272 | nv_mask(dev, 0x00c040, 0x00000333, info->ctrl); |
266 | 273 | ||
274 | if (!info->mpll_ctrl) | ||
275 | goto resume; | ||
276 | |||
267 | /* wait for vblank start on active crtcs, disable memory access */ | 277 | /* wait for vblank start on active crtcs, disable memory access */ |
268 | for (i = 0; i < 2; i++) { | 278 | for (i = 0; i < 2; i++) { |
269 | if (!(crtc_mask & (1 << i))) | 279 | if (!(crtc_mask & (1 << i))) |
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 8c979b31ff61..ac601f7c4e1a 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -131,8 +131,8 @@ nv50_graph_init(struct drm_device *dev, int engine) | |||
131 | NV_DEBUG(dev, "\n"); | 131 | NV_DEBUG(dev, "\n"); |
132 | 132 | ||
133 | /* master reset */ | 133 | /* master reset */ |
134 | nv_mask(dev, 0x000200, 0x00200100, 0x00000000); | 134 | nv_mask(dev, 0x000200, 0x00201000, 0x00000000); |
135 | nv_mask(dev, 0x000200, 0x00200100, 0x00200100); | 135 | nv_mask(dev, 0x000200, 0x00201000, 0x00201000); |
136 | nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */ | 136 | nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */ |
137 | 137 | ||
138 | /* reset/enable traps and interrupts */ | 138 | /* reset/enable traps and interrupts */ |
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c
index d05c2c3b2444..4b46d6968566 100644
--- a/drivers/gpu/drm/nouveau/nv50_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv50_grctx.c
@@ -601,7 +601,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) | |||
601 | gr_def(ctx, offset + 0x1c, 0x00880000); | 601 | gr_def(ctx, offset + 0x1c, 0x00880000); |
602 | break; | 602 | break; |
603 | case 0x86: | 603 | case 0x86: |
604 | gr_def(ctx, offset + 0x1c, 0x008c0000); | 604 | gr_def(ctx, offset + 0x1c, 0x018c0000); |
605 | break; | 605 | break; |
606 | case 0x92: | 606 | case 0x92: |
607 | case 0x96: | 607 | case 0x96: |
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
index 9da23838e63e..2e45e57fd869 100644
--- a/drivers/gpu/drm/nouveau/nv50_vram.c
+++ b/drivers/gpu/drm/nouveau/nv50_vram.c
@@ -160,7 +160,7 @@ nv50_vram_rblock(struct drm_device *dev) | |||
160 | colbits = (r4 & 0x0000f000) >> 12; | 160 | colbits = (r4 & 0x0000f000) >> 12; |
161 | rowbitsa = ((r4 & 0x000f0000) >> 16) + 8; | 161 | rowbitsa = ((r4 & 0x000f0000) >> 16) + 8; |
162 | rowbitsb = ((r4 & 0x00f00000) >> 20) + 8; | 162 | rowbitsb = ((r4 & 0x00f00000) >> 20) + 8; |
163 | banks = ((r4 & 0x01000000) ? 8 : 4); | 163 | banks = 1 << (((r4 & 0x03000000) >> 24) + 2); |
164 | 164 | ||
165 | rowsize = parts * banks * (1 << colbits) * 8; | 165 | rowsize = parts * banks * (1 << colbits) * 8; |
166 | predicted = rowsize << rowbitsa; | 166 | predicted = rowsize << rowbitsa; |
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index bbdbc51830c8..a74e501afd25 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -157,8 +157,8 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan) | |||
157 | struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR); | 157 | struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR); |
158 | struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR]; | 158 | struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR]; |
159 | struct drm_device *dev = chan->dev; | 159 | struct drm_device *dev = chan->dev; |
160 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
160 | int i = 0, gpc, tp, ret; | 161 | int i = 0, gpc, tp, ret; |
161 | u32 magic; | ||
162 | 162 | ||
163 | ret = nouveau_gpuobj_new(dev, chan, 0x2000, 256, NVOBJ_FLAG_VM, | 163 | ret = nouveau_gpuobj_new(dev, chan, 0x2000, 256, NVOBJ_FLAG_VM, |
164 | &grch->unk408004); | 164 | &grch->unk408004); |
@@ -207,14 +207,37 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan) | |||
207 | nv_wo32(grch->mmio, i++ * 4, 0x0041880c); | 207 | nv_wo32(grch->mmio, i++ * 4, 0x0041880c); |
208 | nv_wo32(grch->mmio, i++ * 4, 0x80000018); | 208 | nv_wo32(grch->mmio, i++ * 4, 0x80000018); |
209 | 209 | ||
210 | magic = 0x02180000; | 210 | if (dev_priv->chipset != 0xc1) { |
211 | nv_wo32(grch->mmio, i++ * 4, 0x00405830); | 211 | u32 magic = 0x02180000; |
212 | nv_wo32(grch->mmio, i++ * 4, magic); | 212 | nv_wo32(grch->mmio, i++ * 4, 0x00405830); |
213 | for (gpc = 0; gpc < priv->gpc_nr; gpc++) { | 213 | nv_wo32(grch->mmio, i++ * 4, magic); |
214 | for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x0324) { | 214 | for (gpc = 0; gpc < priv->gpc_nr; gpc++) { |
215 | u32 reg = 0x504520 + (gpc * 0x8000) + (tp * 0x0800); | 215 | for (tp = 0; tp < priv->tp_nr[gpc]; tp++) { |
216 | nv_wo32(grch->mmio, i++ * 4, reg); | 216 | u32 reg = TP_UNIT(gpc, tp, 0x520); |
217 | nv_wo32(grch->mmio, i++ * 4, magic); | 217 | nv_wo32(grch->mmio, i++ * 4, reg); |
218 | nv_wo32(grch->mmio, i++ * 4, magic); | ||
219 | magic += 0x0324; | ||
220 | } | ||
221 | } | ||
222 | } else { | ||
223 | u32 magic = 0x02180000; | ||
224 | nv_wo32(grch->mmio, i++ * 4, 0x00405830); | ||
225 | nv_wo32(grch->mmio, i++ * 4, magic | 0x0000218); | ||
226 | nv_wo32(grch->mmio, i++ * 4, 0x004064c4); | ||
227 | nv_wo32(grch->mmio, i++ * 4, 0x0086ffff); | ||
228 | for (gpc = 0; gpc < priv->gpc_nr; gpc++) { | ||
229 | for (tp = 0; tp < priv->tp_nr[gpc]; tp++) { | ||
230 | u32 reg = TP_UNIT(gpc, tp, 0x520); | ||
231 | nv_wo32(grch->mmio, i++ * 4, reg); | ||
232 | nv_wo32(grch->mmio, i++ * 4, (1 << 28) | magic); | ||
233 | magic += 0x0324; | ||
234 | } | ||
235 | for (tp = 0; tp < priv->tp_nr[gpc]; tp++) { | ||
236 | u32 reg = TP_UNIT(gpc, tp, 0x544); | ||
237 | nv_wo32(grch->mmio, i++ * 4, reg); | ||
238 | nv_wo32(grch->mmio, i++ * 4, magic); | ||
239 | magic += 0x0324; | ||
240 | } | ||
218 | } | 241 | } |
219 | } | 242 | } |
220 | 243 | ||
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c
index dd0e6a736b3b..96b0b93d94ca 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grctx.c
+++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c
@@ -1812,6 +1812,7 @@ nvc0_grctx_generate(struct nouveau_channel *chan) | |||
1812 | /* calculate first set of magics */ | 1812 | /* calculate first set of magics */ |
1813 | memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); | 1813 | memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); |
1814 | 1814 | ||
1815 | gpc = -1; | ||
1815 | for (tp = 0; tp < priv->tp_total; tp++) { | 1816 | for (tp = 0; tp < priv->tp_total; tp++) { |
1816 | do { | 1817 | do { |
1817 | gpc = (gpc + 1) % priv->gpc_nr; | 1818 | gpc = (gpc + 1) % priv->gpc_nr; |
@@ -1861,30 +1862,26 @@ nvc0_grctx_generate(struct nouveau_channel *chan) | |||
1861 | 1862 | ||
1862 | if (1) { | 1863 | if (1) { |
1863 | u32 tp_mask = 0, tp_set = 0; | 1864 | u32 tp_mask = 0, tp_set = 0; |
1864 | u8 tpnr[GPC_MAX]; | 1865 | u8 tpnr[GPC_MAX], a, b; |
1865 | 1866 | ||
1866 | memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); | 1867 | memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); |
1867 | for (gpc = 0; gpc < priv->gpc_nr; gpc++) | 1868 | for (gpc = 0; gpc < priv->gpc_nr; gpc++) |
1868 | tp_mask |= ((1 << priv->tp_nr[gpc]) - 1) << (gpc * 8); | 1869 | tp_mask |= ((1 << priv->tp_nr[gpc]) - 1) << (gpc * 8); |
1869 | 1870 | ||
1870 | gpc = -1; | 1871 | for (i = 0, gpc = -1, b = -1; i < 32; i++) { |
1871 | for (i = 0, gpc = -1; i < 32; i++) { | 1872 | a = (i * (priv->tp_total - 1)) / 32; |
1872 | int ltp = i * (priv->tp_total - 1) / 32; | 1873 | if (a != b) { |
1873 | 1874 | b = a; | |
1874 | do { | 1875 | do { |
1875 | gpc = (gpc + 1) % priv->gpc_nr; | 1876 | gpc = (gpc + 1) % priv->gpc_nr; |
1876 | } while (!tpnr[gpc]); | 1877 | } while (!tpnr[gpc]); |
1877 | tp = priv->tp_nr[gpc] - tpnr[gpc]--; | 1878 | tp = priv->tp_nr[gpc] - tpnr[gpc]--; |
1878 | 1879 | ||
1879 | tp_set |= 1 << ((gpc * 8) + tp); | 1880 | tp_set |= 1 << ((gpc * 8) + tp); |
1881 | } | ||
1880 | 1882 | ||
1881 | do { | 1883 | nv_wr32(dev, 0x406800 + (i * 0x20), tp_set); |
1882 | nv_wr32(dev, 0x406800 + (i * 0x20), tp_set); | 1884 | nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set ^ tp_mask); |
1883 | tp_set ^= tp_mask; | ||
1884 | nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set); | ||
1885 | tp_set ^= tp_mask; | ||
1886 | } while (ltp == (++i * (priv->tp_total - 1) / 32)); | ||
1887 | i--; | ||
1888 | } | 1885 | } |
1889 | } | 1886 | } |
1890 | 1887 | ||
diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c
index edbfe9360ae2..ce984d573a51 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vram.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vram.c
@@ -43,7 +43,7 @@ static const u8 types[256] = { | |||
43 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | 43 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, |
44 | 0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0, | 44 | 0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0, |
45 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, | 45 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, |
46 | 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, | 46 | 3, 3, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, |
47 | 3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3, | 47 | 3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3, |
48 | 3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0, | 48 | 3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0, |
49 | 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0 | 49 | 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0 |
@@ -110,22 +110,26 @@ nvc0_vram_init(struct drm_device *dev) | |||
110 | u32 bsize = nv_rd32(dev, 0x10f20c); | 110 | u32 bsize = nv_rd32(dev, 0x10f20c); |
111 | u32 offset, length; | 111 | u32 offset, length; |
112 | bool uniform = true; | 112 | bool uniform = true; |
113 | int ret, i; | 113 | int ret, part; |
114 | 114 | ||
115 | NV_DEBUG(dev, "0x100800: 0x%08x\n", nv_rd32(dev, 0x100800)); | 115 | NV_DEBUG(dev, "0x100800: 0x%08x\n", nv_rd32(dev, 0x100800)); |
116 | NV_DEBUG(dev, "parts 0x%08x bcast_mem_amount 0x%08x\n", parts, bsize); | 116 | NV_DEBUG(dev, "parts 0x%08x bcast_mem_amount 0x%08x\n", parts, bsize); |
117 | 117 | ||
118 | /* read amount of vram attached to each memory controller */ | 118 | /* read amount of vram attached to each memory controller */ |
119 | for (i = 0; i < parts; i++) { | 119 | part = 0; |
120 | u32 psize = nv_rd32(dev, 0x11020c + (i * 0x1000)); | 120 | while (parts) { |
121 | u32 psize = nv_rd32(dev, 0x11020c + (part++ * 0x1000)); | ||
122 | if (psize == 0) | ||
123 | continue; | ||
124 | parts--; | ||
125 | |||
121 | if (psize != bsize) { | 126 | if (psize != bsize) { |
122 | if (psize < bsize) | 127 | if (psize < bsize) |
123 | bsize = psize; | 128 | bsize = psize; |
124 | uniform = false; | 129 | uniform = false; |
125 | } | 130 | } |
126 | 131 | ||
127 | NV_DEBUG(dev, "%d: mem_amount 0x%08x\n", i, psize); | 132 | NV_DEBUG(dev, "%d: mem_amount 0x%08x\n", part, psize); |
128 | |||
129 | dev_priv->vram_size += (u64)psize << 20; | 133 | dev_priv->vram_size += (u64)psize << 20; |
130 | } | 134 | } |
131 | 135 | ||
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 87921c88a95c..87631fede1f8 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1522,12 +1522,6 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc, | |||
1522 | struct drm_display_mode *mode, | 1522 | struct drm_display_mode *mode, |
1523 | struct drm_display_mode *adjusted_mode) | 1523 | struct drm_display_mode *adjusted_mode) |
1524 | { | 1524 | { |
1525 | struct drm_device *dev = crtc->dev; | ||
1526 | struct radeon_device *rdev = dev->dev_private; | ||
1527 | |||
1528 | /* adjust pm to upcoming mode change */ | ||
1529 | radeon_pm_compute_clocks(rdev); | ||
1530 | |||
1531 | if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) | 1525 | if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) |
1532 | return false; | 1526 | return false; |
1533 | return true; | 1527 | return true; |
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index a0de48542f71..6fb335a4fdda 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -283,7 +283,7 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
283 | } | 283 | } |
284 | } | 284 | } |
285 | 285 | ||
286 | DRM_ERROR("aux i2c too many retries, giving up\n"); | 286 | DRM_DEBUG_KMS("aux i2c too many retries, giving up\n"); |
287 | return -EREMOTEIO; | 287 | return -EREMOTEIO; |
288 | } | 288 | } |
289 | 289 | ||
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e4c384b9511c..1d603a3335db 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -157,6 +157,57 @@ int sumo_get_temp(struct radeon_device *rdev) | |||
157 | return actual_temp * 1000; | 157 | return actual_temp * 1000; |
158 | } | 158 | } |
159 | 159 | ||
160 | void sumo_pm_init_profile(struct radeon_device *rdev) | ||
161 | { | ||
162 | int idx; | ||
163 | |||
164 | /* default */ | ||
165 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; | ||
166 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | ||
167 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; | ||
168 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; | ||
169 | |||
170 | /* low,mid sh/mh */ | ||
171 | if (rdev->flags & RADEON_IS_MOBILITY) | ||
172 | idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); | ||
173 | else | ||
174 | idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); | ||
175 | |||
176 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx; | ||
177 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx; | ||
178 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; | ||
179 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; | ||
180 | |||
181 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx; | ||
182 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx; | ||
183 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; | ||
184 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; | ||
185 | |||
186 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx; | ||
187 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx; | ||
188 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; | ||
189 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; | ||
190 | |||
191 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx; | ||
192 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx; | ||
193 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; | ||
194 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; | ||
195 | |||
196 | /* high sh/mh */ | ||
197 | idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); | ||
198 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx; | ||
199 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx; | ||
200 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; | ||
201 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = | ||
202 | rdev->pm.power_state[idx].num_clock_modes - 1; | ||
203 | |||
204 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx; | ||
205 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx; | ||
206 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; | ||
207 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = | ||
208 | rdev->pm.power_state[idx].num_clock_modes - 1; | ||
209 | } | ||
210 | |||
160 | void evergreen_pm_misc(struct radeon_device *rdev) | 211 | void evergreen_pm_misc(struct radeon_device *rdev) |
161 | { | 212 | { |
162 | int req_ps_idx = rdev->pm.requested_power_state_index; | 213 | int req_ps_idx = rdev->pm.requested_power_state_index; |
@@ -1219,7 +1270,7 @@ void evergreen_mc_program(struct radeon_device *rdev) | |||
1219 | WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, | 1270 | WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, |
1220 | rdev->mc.vram_end >> 12); | 1271 | rdev->mc.vram_end >> 12); |
1221 | } | 1272 | } |
1222 | WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); | 1273 | WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12); |
1223 | if (rdev->flags & RADEON_IS_IGP) { | 1274 | if (rdev->flags & RADEON_IS_IGP) { |
1224 | tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF; | 1275 | tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF; |
1225 | tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24; | 1276 | tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24; |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 19afc43ad173..9cdda0b3b081 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -288,24 +288,6 @@ void r600_pm_get_dynpm_state(struct radeon_device *rdev) | |||
288 | pcie_lanes); | 288 | pcie_lanes); |
289 | } | 289 | } |
290 | 290 | ||
291 | static int r600_pm_get_type_index(struct radeon_device *rdev, | ||
292 | enum radeon_pm_state_type ps_type, | ||
293 | int instance) | ||
294 | { | ||
295 | int i; | ||
296 | int found_instance = -1; | ||
297 | |||
298 | for (i = 0; i < rdev->pm.num_power_states; i++) { | ||
299 | if (rdev->pm.power_state[i].type == ps_type) { | ||
300 | found_instance++; | ||
301 | if (found_instance == instance) | ||
302 | return i; | ||
303 | } | ||
304 | } | ||
305 | /* return default if no match */ | ||
306 | return rdev->pm.default_power_state_index; | ||
307 | } | ||
308 | |||
309 | void rs780_pm_init_profile(struct radeon_device *rdev) | 291 | void rs780_pm_init_profile(struct radeon_device *rdev) |
310 | { | 292 | { |
311 | if (rdev->pm.num_power_states == 2) { | 293 | if (rdev->pm.num_power_states == 2) { |
@@ -421,6 +403,8 @@ void rs780_pm_init_profile(struct radeon_device *rdev) | |||
421 | 403 | ||
422 | void r600_pm_init_profile(struct radeon_device *rdev) | 404 | void r600_pm_init_profile(struct radeon_device *rdev) |
423 | { | 405 | { |
406 | int idx; | ||
407 | |||
424 | if (rdev->family == CHIP_R600) { | 408 | if (rdev->family == CHIP_R600) { |
425 | /* XXX */ | 409 | /* XXX */ |
426 | /* default */ | 410 | /* default */ |
@@ -502,81 +486,43 @@ void r600_pm_init_profile(struct radeon_device *rdev) | |||
502 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; | 486 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; |
503 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2; | 487 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2; |
504 | /* low sh */ | 488 | /* low sh */ |
505 | if (rdev->flags & RADEON_IS_MOBILITY) { | 489 | if (rdev->flags & RADEON_IS_MOBILITY) |
506 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = | 490 | idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); |
507 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); | 491 | else |
508 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = | 492 | idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); |
509 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); | 493 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx; |
510 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; | 494 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx; |
511 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; | 495 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; |
512 | } else { | 496 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; |
513 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = | ||
514 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); | ||
515 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = | ||
516 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); | ||
517 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; | ||
518 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; | ||
519 | } | ||
520 | /* mid sh */ | 497 | /* mid sh */ |
521 | if (rdev->flags & RADEON_IS_MOBILITY) { | 498 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx; |
522 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = | 499 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx; |
523 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); | 500 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; |
524 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = | 501 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; |
525 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); | ||
526 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; | ||
527 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; | ||
528 | } else { | ||
529 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = | ||
530 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); | ||
531 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = | ||
532 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); | ||
533 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; | ||
534 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; | ||
535 | } | ||
536 | /* high sh */ | 502 | /* high sh */ |
537 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = | 503 | idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); |
538 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); | 504 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx; |
539 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = | 505 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx; |
540 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); | ||
541 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; | 506 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; |
542 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2; | 507 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2; |
543 | /* low mh */ | 508 | /* low mh */ |
544 | if (rdev->flags & RADEON_IS_MOBILITY) { | 509 | if (rdev->flags & RADEON_IS_MOBILITY) |
545 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = | 510 | idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); |
546 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); | 511 | else |
547 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = | 512 | idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); |
548 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); | 513 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx; |
549 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; | 514 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx; |
550 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; | 515 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; |
551 | } else { | 516 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; |
552 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = | ||
553 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); | ||
554 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = | ||
555 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); | ||
556 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; | ||
557 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; | ||
558 | } | ||
559 | /* mid mh */ | 517 | /* mid mh */ |
560 | if (rdev->flags & RADEON_IS_MOBILITY) { | 518 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx; |
561 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = | 519 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx; |
562 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); | 520 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; |
563 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = | 521 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; |
564 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); | ||
565 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; | ||
566 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; | ||
567 | } else { | ||
568 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = | ||
569 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); | ||
570 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = | ||
571 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); | ||
572 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; | ||
573 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; | ||
574 | } | ||
575 | /* high mh */ | 522 | /* high mh */ |
576 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = | 523 | idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); |
577 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); | 524 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx; |
578 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = | 525 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx; |
579 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); | ||
580 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; | 526 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; |
581 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2; | 527 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2; |
582 | } | 528 | } |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b316b301152f..fc5a1d642cb5 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -784,8 +784,7 @@ struct radeon_pm_clock_info { | |||
784 | 784 | ||
785 | struct radeon_power_state { | 785 | struct radeon_power_state { |
786 | enum radeon_pm_state_type type; | 786 | enum radeon_pm_state_type type; |
787 | /* XXX: use a define for num clock modes */ | 787 | struct radeon_pm_clock_info *clock_info; |
788 | struct radeon_pm_clock_info clock_info[8]; | ||
789 | /* number of valid clock modes in this power state */ | 788 | /* number of valid clock modes in this power state */ |
790 | int num_clock_modes; | 789 | int num_clock_modes; |
791 | struct radeon_pm_clock_info *default_clock_mode; | 790 | struct radeon_pm_clock_info *default_clock_mode; |
@@ -855,6 +854,9 @@ struct radeon_pm { | |||
855 | struct device *int_hwmon_dev; | 854 | struct device *int_hwmon_dev; |
856 | }; | 855 | }; |
857 | 856 | ||
857 | int radeon_pm_get_type_index(struct radeon_device *rdev, | ||
858 | enum radeon_pm_state_type ps_type, | ||
859 | int instance); | ||
858 | 860 | ||
859 | /* | 861 | /* |
860 | * Benchmarking | 862 | * Benchmarking |
@@ -1142,6 +1144,48 @@ struct r600_vram_scratch { | |||
1142 | u64 gpu_addr; | 1144 | u64 gpu_addr; |
1143 | }; | 1145 | }; |
1144 | 1146 | ||
1147 | |||
1148 | /* | ||
1149 | * Mutex which allows recursive locking from the same process. | ||
1150 | */ | ||
1151 | struct radeon_mutex { | ||
1152 | struct mutex mutex; | ||
1153 | struct task_struct *owner; | ||
1154 | int level; | ||
1155 | }; | ||
1156 | |||
1157 | static inline void radeon_mutex_init(struct radeon_mutex *mutex) | ||
1158 | { | ||
1159 | mutex_init(&mutex->mutex); | ||
1160 | mutex->owner = NULL; | ||
1161 | mutex->level = 0; | ||
1162 | } | ||
1163 | |||
1164 | static inline void radeon_mutex_lock(struct radeon_mutex *mutex) | ||
1165 | { | ||
1166 | if (mutex_trylock(&mutex->mutex)) { | ||
1167 | /* The mutex was unlocked before, so it's ours now */ | ||
1168 | mutex->owner = current; | ||
1169 | } else if (mutex->owner != current) { | ||
1170 | /* Another process locked the mutex, take it */ | ||
1171 | mutex_lock(&mutex->mutex); | ||
1172 | mutex->owner = current; | ||
1173 | } | ||
1174 | /* Otherwise the mutex was already locked by this process */ | ||
1175 | |||
1176 | mutex->level++; | ||
1177 | } | ||
1178 | |||
1179 | static inline void radeon_mutex_unlock(struct radeon_mutex *mutex) | ||
1180 | { | ||
1181 | if (--mutex->level > 0) | ||
1182 | return; | ||
1183 | |||
1184 | mutex->owner = NULL; | ||
1185 | mutex_unlock(&mutex->mutex); | ||
1186 | } | ||
1187 | |||
1188 | |||
1145 | /* | 1189 | /* |
1146 | * Core structure, functions and helpers. | 1190 | * Core structure, functions and helpers. |
1147 | */ | 1191 | */ |
@@ -1197,7 +1241,7 @@ struct radeon_device { | |||
1197 | struct radeon_gem gem; | 1241 | struct radeon_gem gem; |
1198 | struct radeon_pm pm; | 1242 | struct radeon_pm pm; |
1199 | uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH]; | 1243 | uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH]; |
1200 | struct mutex cs_mutex; | 1244 | struct radeon_mutex cs_mutex; |
1201 | struct radeon_wb wb; | 1245 | struct radeon_wb wb; |
1202 | struct radeon_dummy_page dummy_page; | 1246 | struct radeon_dummy_page dummy_page; |
1203 | bool gpu_lockup; | 1247 | bool gpu_lockup; |
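Note on the radeon_mutex helpers added to radeon.h above: they implement a mutex that the owning task may take recursively. The first lock acquires the underlying mutex and records the owner, nested locks from the same task only increment level, and the underlying mutex is released once level drops back to zero. A hedged usage sketch built only from the functions shown in the hunk (the call sites below are hypothetical, and the definitions from the hunk are assumed to be in scope):

	struct radeon_mutex cs_mutex;

	radeon_mutex_init(&cs_mutex);

	radeon_mutex_lock(&cs_mutex);    /* level 1: takes mutex->mutex, owner = current */
	radeon_mutex_lock(&cs_mutex);    /* level 2: same owner, no deadlock, only level++ */
	radeon_mutex_unlock(&cs_mutex);  /* level 1: mutex stays held */
	radeon_mutex_unlock(&cs_mutex);  /* level 0: owner cleared, mutex->mutex released */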
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index e2944566ffea..a2e1eae114ef 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
@@ -834,7 +834,7 @@ static struct radeon_asic sumo_asic = { | |||
834 | .pm_misc = &evergreen_pm_misc, | 834 | .pm_misc = &evergreen_pm_misc, |
835 | .pm_prepare = &evergreen_pm_prepare, | 835 | .pm_prepare = &evergreen_pm_prepare, |
836 | .pm_finish = &evergreen_pm_finish, | 836 | .pm_finish = &evergreen_pm_finish, |
837 | .pm_init_profile = &rs780_pm_init_profile, | 837 | .pm_init_profile = &sumo_pm_init_profile, |
838 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, | 838 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, |
839 | .pre_page_flip = &evergreen_pre_page_flip, | 839 | .pre_page_flip = &evergreen_pre_page_flip, |
840 | .page_flip = &evergreen_page_flip, | 840 | .page_flip = &evergreen_page_flip, |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 85f14f0337e4..59914842a729 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
@@ -413,6 +413,7 @@ extern int evergreen_cs_parse(struct radeon_cs_parser *p); | |||
413 | extern void evergreen_pm_misc(struct radeon_device *rdev); | 413 | extern void evergreen_pm_misc(struct radeon_device *rdev); |
414 | extern void evergreen_pm_prepare(struct radeon_device *rdev); | 414 | extern void evergreen_pm_prepare(struct radeon_device *rdev); |
415 | extern void evergreen_pm_finish(struct radeon_device *rdev); | 415 | extern void evergreen_pm_finish(struct radeon_device *rdev); |
416 | extern void sumo_pm_init_profile(struct radeon_device *rdev); | ||
416 | extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc); | 417 | extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc); |
417 | extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); | 418 | extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); |
418 | extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc); | 419 | extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc); |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 08d0b94332e6..fecd705a1a5f 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -85,6 +85,18 @@ static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rd | |||
85 | for (i = 0; i < num_indices; i++) { | 85 | for (i = 0; i < num_indices; i++) { |
86 | gpio = &i2c_info->asGPIO_Info[i]; | 86 | gpio = &i2c_info->asGPIO_Info[i]; |
87 | 87 | ||
88 | /* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */ | ||
89 | if ((rdev->family == CHIP_R420) || | ||
90 | (rdev->family == CHIP_R423) || | ||
91 | (rdev->family == CHIP_RV410)) { | ||
92 | if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) || | ||
93 | (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) || | ||
94 | (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) { | ||
95 | gpio->ucClkMaskShift = 0x19; | ||
96 | gpio->ucDataMaskShift = 0x18; | ||
97 | } | ||
98 | } | ||
99 | |||
88 | /* some evergreen boards have bad data for this entry */ | 100 | /* some evergreen boards have bad data for this entry */ |
89 | if (ASIC_IS_DCE4(rdev)) { | 101 | if (ASIC_IS_DCE4(rdev)) { |
90 | if ((i == 7) && | 102 | if ((i == 7) && |
@@ -1996,10 +2008,14 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev) | |||
1996 | return state_index; | 2008 | return state_index; |
1997 | /* last mode is usually default, array is low to high */ | 2009 | /* last mode is usually default, array is low to high */ |
1998 | for (i = 0; i < num_modes; i++) { | 2010 | for (i = 0; i < num_modes; i++) { |
2011 | rdev->pm.power_state[state_index].clock_info = | ||
2012 | kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL); | ||
2013 | if (!rdev->pm.power_state[state_index].clock_info) | ||
2014 | return state_index; | ||
2015 | rdev->pm.power_state[state_index].num_clock_modes = 1; | ||
1999 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; | 2016 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; |
2000 | switch (frev) { | 2017 | switch (frev) { |
2001 | case 1: | 2018 | case 1: |
2002 | rdev->pm.power_state[state_index].num_clock_modes = 1; | ||
2003 | rdev->pm.power_state[state_index].clock_info[0].mclk = | 2019 | rdev->pm.power_state[state_index].clock_info[0].mclk = |
2004 | le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock); | 2020 | le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock); |
2005 | rdev->pm.power_state[state_index].clock_info[0].sclk = | 2021 | rdev->pm.power_state[state_index].clock_info[0].sclk = |
@@ -2035,7 +2051,6 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev) | |||
2035 | state_index++; | 2051 | state_index++; |
2036 | break; | 2052 | break; |
2037 | case 2: | 2053 | case 2: |
2038 | rdev->pm.power_state[state_index].num_clock_modes = 1; | ||
2039 | rdev->pm.power_state[state_index].clock_info[0].mclk = | 2054 | rdev->pm.power_state[state_index].clock_info[0].mclk = |
2040 | le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock); | 2055 | le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock); |
2041 | rdev->pm.power_state[state_index].clock_info[0].sclk = | 2056 | rdev->pm.power_state[state_index].clock_info[0].sclk = |
@@ -2072,7 +2087,6 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev) | |||
2072 | state_index++; | 2087 | state_index++; |
2073 | break; | 2088 | break; |
2074 | case 3: | 2089 | case 3: |
2075 | rdev->pm.power_state[state_index].num_clock_modes = 1; | ||
2076 | rdev->pm.power_state[state_index].clock_info[0].mclk = | 2090 | rdev->pm.power_state[state_index].clock_info[0].mclk = |
2077 | le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock); | 2091 | le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock); |
2078 | rdev->pm.power_state[state_index].clock_info[0].sclk = | 2092 | rdev->pm.power_state[state_index].clock_info[0].sclk = |
@@ -2257,7 +2271,7 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde | |||
2257 | rdev->pm.default_power_state_index = state_index; | 2271 | rdev->pm.default_power_state_index = state_index; |
2258 | rdev->pm.power_state[state_index].default_clock_mode = | 2272 | rdev->pm.power_state[state_index].default_clock_mode = |
2259 | &rdev->pm.power_state[state_index].clock_info[mode_index - 1]; | 2273 | &rdev->pm.power_state[state_index].clock_info[mode_index - 1]; |
2260 | if (ASIC_IS_DCE5(rdev)) { | 2274 | if (ASIC_IS_DCE5(rdev) && !(rdev->flags & RADEON_IS_IGP)) { |
2261 | /* NI chips post without MC ucode, so default clocks are strobe mode only */ | 2275 | /* NI chips post without MC ucode, so default clocks are strobe mode only */ |
2262 | rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk; | 2276 | rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk; |
2263 | rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk; | 2277 | rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk; |
@@ -2377,17 +2391,31 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev) | |||
2377 | le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) + | 2391 | le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) + |
2378 | (power_state->v1.ucNonClockStateIndex * | 2392 | (power_state->v1.ucNonClockStateIndex * |
2379 | power_info->pplib.ucNonClockSize)); | 2393 | power_info->pplib.ucNonClockSize)); |
2380 | for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) { | 2394 | rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) * |
2381 | clock_info = (union pplib_clock_info *) | 2395 | ((power_info->pplib.ucStateEntrySize - 1) ? |
2382 | (mode_info->atom_context->bios + data_offset + | 2396 | (power_info->pplib.ucStateEntrySize - 1) : 1), |
2383 | le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) + | 2397 | GFP_KERNEL); |
2384 | (power_state->v1.ucClockStateIndices[j] * | 2398 | if (!rdev->pm.power_state[i].clock_info) |
2385 | power_info->pplib.ucClockInfoSize)); | 2399 | return state_index; |
2386 | valid = radeon_atombios_parse_pplib_clock_info(rdev, | 2400 | if (power_info->pplib.ucStateEntrySize - 1) { |
2387 | state_index, mode_index, | 2401 | for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) { |
2388 | clock_info); | 2402 | clock_info = (union pplib_clock_info *) |
2389 | if (valid) | 2403 | (mode_info->atom_context->bios + data_offset + |
2390 | mode_index++; | 2404 | le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) + |
2405 | (power_state->v1.ucClockStateIndices[j] * | ||
2406 | power_info->pplib.ucClockInfoSize)); | ||
2407 | valid = radeon_atombios_parse_pplib_clock_info(rdev, | ||
2408 | state_index, mode_index, | ||
2409 | clock_info); | ||
2410 | if (valid) | ||
2411 | mode_index++; | ||
2412 | } | ||
2413 | } else { | ||
2414 | rdev->pm.power_state[state_index].clock_info[0].mclk = | ||
2415 | rdev->clock.default_mclk; | ||
2416 | rdev->pm.power_state[state_index].clock_info[0].sclk = | ||
2417 | rdev->clock.default_sclk; | ||
2418 | mode_index++; | ||
2391 | } | 2419 | } |
2392 | rdev->pm.power_state[state_index].num_clock_modes = mode_index; | 2420 | rdev->pm.power_state[state_index].num_clock_modes = mode_index; |
2393 | if (mode_index) { | 2421 | if (mode_index) { |
@@ -2456,18 +2484,32 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev) | |||
2456 | non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */ | 2484 | non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */ |
2457 | non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) | 2485 | non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) |
2458 | &non_clock_info_array->nonClockInfo[non_clock_array_index]; | 2486 | &non_clock_info_array->nonClockInfo[non_clock_array_index]; |
2459 | for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { | 2487 | rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) * |
2460 | clock_array_index = power_state->v2.clockInfoIndex[j]; | 2488 | (power_state->v2.ucNumDPMLevels ? |
2461 | /* XXX this might be an inagua bug... */ | 2489 | power_state->v2.ucNumDPMLevels : 1), |
2462 | if (clock_array_index >= clock_info_array->ucNumEntries) | 2490 | GFP_KERNEL); |
2463 | continue; | 2491 | if (!rdev->pm.power_state[i].clock_info) |
2464 | clock_info = (union pplib_clock_info *) | 2492 | return state_index; |
2465 | &clock_info_array->clockInfo[clock_array_index]; | 2493 | if (power_state->v2.ucNumDPMLevels) { |
2466 | valid = radeon_atombios_parse_pplib_clock_info(rdev, | 2494 | for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { |
2467 | state_index, mode_index, | 2495 | clock_array_index = power_state->v2.clockInfoIndex[j]; |
2468 | clock_info); | 2496 | /* XXX this might be an inagua bug... */ |
2469 | if (valid) | 2497 | if (clock_array_index >= clock_info_array->ucNumEntries) |
2470 | mode_index++; | 2498 | continue; |
2499 | clock_info = (union pplib_clock_info *) | ||
2500 | &clock_info_array->clockInfo[clock_array_index]; | ||
2501 | valid = radeon_atombios_parse_pplib_clock_info(rdev, | ||
2502 | state_index, mode_index, | ||
2503 | clock_info); | ||
2504 | if (valid) | ||
2505 | mode_index++; | ||
2506 | } | ||
2507 | } else { | ||
2508 | rdev->pm.power_state[state_index].clock_info[0].mclk = | ||
2509 | rdev->clock.default_mclk; | ||
2510 | rdev->pm.power_state[state_index].clock_info[0].sclk = | ||
2511 | rdev->clock.default_sclk; | ||
2512 | mode_index++; | ||
2471 | } | 2513 | } |
2472 | rdev->pm.power_state[state_index].num_clock_modes = mode_index; | 2514 | rdev->pm.power_state[state_index].num_clock_modes = mode_index; |
2473 | if (mode_index) { | 2515 | if (mode_index) { |
@@ -2524,19 +2566,23 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
2524 | } else { | 2566 | } else { |
2525 | rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL); | 2567 | rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL); |
2526 | if (rdev->pm.power_state) { | 2568 | if (rdev->pm.power_state) { |
2527 | /* add the default mode */ | 2569 | rdev->pm.power_state[0].clock_info = |
2528 | rdev->pm.power_state[state_index].type = | 2570 | kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL); |
2529 | POWER_STATE_TYPE_DEFAULT; | 2571 | if (rdev->pm.power_state[0].clock_info) { |
2530 | rdev->pm.power_state[state_index].num_clock_modes = 1; | 2572 | /* add the default mode */ |
2531 | rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk; | 2573 | rdev->pm.power_state[state_index].type = |
2532 | rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; | 2574 | POWER_STATE_TYPE_DEFAULT; |
2533 | rdev->pm.power_state[state_index].default_clock_mode = | 2575 | rdev->pm.power_state[state_index].num_clock_modes = 1; |
2534 | &rdev->pm.power_state[state_index].clock_info[0]; | 2576 | rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk; |
2535 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; | 2577 | rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; |
2536 | rdev->pm.power_state[state_index].pcie_lanes = 16; | 2578 | rdev->pm.power_state[state_index].default_clock_mode = |
2537 | rdev->pm.default_power_state_index = state_index; | 2579 | &rdev->pm.power_state[state_index].clock_info[0]; |
2538 | rdev->pm.power_state[state_index].flags = 0; | 2580 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; |
2539 | state_index++; | 2581 | rdev->pm.power_state[state_index].pcie_lanes = 16; |
2582 | rdev->pm.default_power_state_index = state_index; | ||
2583 | rdev->pm.power_state[state_index].flags = 0; | ||
2584 | state_index++; | ||
2585 | } | ||
2540 | } | 2586 | } |
2541 | } | 2587 | } |
2542 | 2588 | ||
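With radeon_power_state now holding a pointer instead of the old fixed clock_info[8] array, every parser path allocates the per-state array itself, and the default/fallback state gets a single-entry allocation. The matching teardown is not part of this excerpt; a sketch of the pairing it implies, assuming the usual radeon_pm shutdown context:

        /* sketch only: each power state now owns its clock_info array,
         * so free those before freeing the power_state array itself */
        int i;
        for (i = 0; i < rdev->pm.num_power_states; i++)
                kfree(rdev->pm.power_state[i].clock_info);
        kfree(rdev->pm.power_state);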
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index 5cafc90de7f8..17e1a9b2d8fb 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c | |||
@@ -98,7 +98,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size, | |||
98 | struct radeon_bo *sobj = NULL; | 98 | struct radeon_bo *sobj = NULL; |
99 | uint64_t saddr, daddr; | 99 | uint64_t saddr, daddr; |
100 | int r, n; | 100 | int r, n; |
101 | unsigned int time; | 101 | int time; |
102 | 102 | ||
103 | n = RADEON_BENCHMARK_ITERATIONS; | 103 | n = RADEON_BENCHMARK_ITERATIONS; |
104 | r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj); | 104 | r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj); |
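The benchmark change from unsigned int to int for the elapsed-time variable presumably exists because the measurement helper can also return a negative error code, which an unsigned variable would silently turn into a large positive value. A hedged sketch of the pattern (the helper name is illustrative):

        /* time carries either the elapsed time or a negative errno */
        int time = example_do_move(rdev, size, saddr, daddr, n);
        if (time < 0)
                goto out_cleanup;       /* propagate the error instead of computing with it */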
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 8bf83c4b4147..81fc100be7e1 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
@@ -2563,14 +2563,17 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev) | |||
2563 | 2563 | ||
2564 | /* allocate 2 power states */ | 2564 | /* allocate 2 power states */ |
2565 | rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL); | 2565 | rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL); |
2566 | if (!rdev->pm.power_state) { | 2566 | if (rdev->pm.power_state) { |
2567 | rdev->pm.default_power_state_index = state_index; | 2567 | /* allocate 1 clock mode per state */ |
2568 | rdev->pm.num_power_states = 0; | 2568 | rdev->pm.power_state[0].clock_info = |
2569 | 2569 | kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL); | |
2570 | rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; | 2570 | rdev->pm.power_state[1].clock_info = |
2571 | rdev->pm.current_clock_mode_index = 0; | 2571 | kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL); |
2572 | return; | 2572 | if (!rdev->pm.power_state[0].clock_info || |
2573 | } | 2573 | !rdev->pm.power_state[1].clock_info) |
2574 | goto pm_failed; | ||
2575 | } else | ||
2576 | goto pm_failed; | ||
2574 | 2577 | ||
2575 | /* check for a thermal chip */ | 2578 | /* check for a thermal chip */ |
2576 | offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE); | 2579 | offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE); |
@@ -2735,6 +2738,14 @@ default_mode: | |||
2735 | 2738 | ||
2736 | rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; | 2739 | rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; |
2737 | rdev->pm.current_clock_mode_index = 0; | 2740 | rdev->pm.current_clock_mode_index = 0; |
2741 | return; | ||
2742 | |||
2743 | pm_failed: | ||
2744 | rdev->pm.default_power_state_index = state_index; | ||
2745 | rdev->pm.num_power_states = 0; | ||
2746 | |||
2747 | rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; | ||
2748 | rdev->pm.current_clock_mode_index = 0; | ||
2738 | } | 2749 | } |
2739 | 2750 | ||
2740 | void radeon_external_tmds_setup(struct drm_encoder *encoder) | 2751 | void radeon_external_tmds_setup(struct drm_encoder *encoder) |
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index fae00c0d75aa..ccaa243c1442 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
@@ -222,7 +222,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
222 | struct radeon_cs_chunk *ib_chunk; | 222 | struct radeon_cs_chunk *ib_chunk; |
223 | int r; | 223 | int r; |
224 | 224 | ||
225 | mutex_lock(&rdev->cs_mutex); | 225 | radeon_mutex_lock(&rdev->cs_mutex); |
226 | /* initialize parser */ | 226 | /* initialize parser */ |
227 | memset(&parser, 0, sizeof(struct radeon_cs_parser)); | 227 | memset(&parser, 0, sizeof(struct radeon_cs_parser)); |
228 | parser.filp = filp; | 228 | parser.filp = filp; |
@@ -233,14 +233,14 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
233 | if (r) { | 233 | if (r) { |
234 | DRM_ERROR("Failed to initialize parser !\n"); | 234 | DRM_ERROR("Failed to initialize parser !\n"); |
235 | radeon_cs_parser_fini(&parser, r); | 235 | radeon_cs_parser_fini(&parser, r); |
236 | mutex_unlock(&rdev->cs_mutex); | 236 | radeon_mutex_unlock(&rdev->cs_mutex); |
237 | return r; | 237 | return r; |
238 | } | 238 | } |
239 | r = radeon_ib_get(rdev, &parser.ib); | 239 | r = radeon_ib_get(rdev, &parser.ib); |
240 | if (r) { | 240 | if (r) { |
241 | DRM_ERROR("Failed to get ib !\n"); | 241 | DRM_ERROR("Failed to get ib !\n"); |
242 | radeon_cs_parser_fini(&parser, r); | 242 | radeon_cs_parser_fini(&parser, r); |
243 | mutex_unlock(&rdev->cs_mutex); | 243 | radeon_mutex_unlock(&rdev->cs_mutex); |
244 | return r; | 244 | return r; |
245 | } | 245 | } |
246 | r = radeon_cs_parser_relocs(&parser); | 246 | r = radeon_cs_parser_relocs(&parser); |
@@ -248,7 +248,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
248 | if (r != -ERESTARTSYS) | 248 | if (r != -ERESTARTSYS) |
249 | DRM_ERROR("Failed to parse relocation %d!\n", r); | 249 | DRM_ERROR("Failed to parse relocation %d!\n", r); |
250 | radeon_cs_parser_fini(&parser, r); | 250 | radeon_cs_parser_fini(&parser, r); |
251 | mutex_unlock(&rdev->cs_mutex); | 251 | radeon_mutex_unlock(&rdev->cs_mutex); |
252 | return r; | 252 | return r; |
253 | } | 253 | } |
254 | /* Copy the packet into the IB, the parser will read from the | 254 | /* Copy the packet into the IB, the parser will read from the |
@@ -260,14 +260,14 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
260 | if (r || parser.parser_error) { | 260 | if (r || parser.parser_error) { |
261 | DRM_ERROR("Invalid command stream !\n"); | 261 | DRM_ERROR("Invalid command stream !\n"); |
262 | radeon_cs_parser_fini(&parser, r); | 262 | radeon_cs_parser_fini(&parser, r); |
263 | mutex_unlock(&rdev->cs_mutex); | 263 | radeon_mutex_unlock(&rdev->cs_mutex); |
264 | return r; | 264 | return r; |
265 | } | 265 | } |
266 | r = radeon_cs_finish_pages(&parser); | 266 | r = radeon_cs_finish_pages(&parser); |
267 | if (r) { | 267 | if (r) { |
268 | DRM_ERROR("Invalid command stream !\n"); | 268 | DRM_ERROR("Invalid command stream !\n"); |
269 | radeon_cs_parser_fini(&parser, r); | 269 | radeon_cs_parser_fini(&parser, r); |
270 | mutex_unlock(&rdev->cs_mutex); | 270 | radeon_mutex_unlock(&rdev->cs_mutex); |
271 | return r; | 271 | return r; |
272 | } | 272 | } |
273 | r = radeon_ib_schedule(rdev, parser.ib); | 273 | r = radeon_ib_schedule(rdev, parser.ib); |
@@ -275,7 +275,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
275 | DRM_ERROR("Failed to schedule IB !\n"); | 275 | DRM_ERROR("Failed to schedule IB !\n"); |
276 | } | 276 | } |
277 | radeon_cs_parser_fini(&parser, r); | 277 | radeon_cs_parser_fini(&parser, r); |
278 | mutex_unlock(&rdev->cs_mutex); | 278 | radeon_mutex_unlock(&rdev->cs_mutex); |
279 | return r; | 279 | return r; |
280 | } | 280 | } |
281 | 281 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index c33bc914d93d..c4d00a171411 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -716,7 +716,7 @@ int radeon_device_init(struct radeon_device *rdev, | |||
716 | 716 | ||
717 | /* mutex initialization are all done here so we | 717 | /* mutex initialization are all done here so we |
718 | * can recall function without having locking issues */ | 718 | * can recall function without having locking issues */ |
719 | mutex_init(&rdev->cs_mutex); | 719 | radeon_mutex_init(&rdev->cs_mutex); |
720 | mutex_init(&rdev->ib_pool.mutex); | 720 | mutex_init(&rdev->ib_pool.mutex); |
721 | mutex_init(&rdev->cp.mutex); | 721 | mutex_init(&rdev->cp.mutex); |
722 | mutex_init(&rdev->dc_hw_i2c_mutex); | 722 | mutex_init(&rdev->dc_hw_i2c_mutex); |
@@ -955,6 +955,9 @@ int radeon_gpu_reset(struct radeon_device *rdev) | |||
955 | int r; | 955 | int r; |
956 | int resched; | 956 | int resched; |
957 | 957 | ||
958 | /* Prevent CS ioctl from interfering */ | ||
959 | radeon_mutex_lock(&rdev->cs_mutex); | ||
960 | |||
958 | radeon_save_bios_scratch_regs(rdev); | 961 | radeon_save_bios_scratch_regs(rdev); |
959 | /* block TTM */ | 962 | /* block TTM */ |
960 | resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); | 963 | resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); |
@@ -967,10 +970,15 @@ int radeon_gpu_reset(struct radeon_device *rdev) | |||
967 | radeon_restore_bios_scratch_regs(rdev); | 970 | radeon_restore_bios_scratch_regs(rdev); |
968 | drm_helper_resume_force_mode(rdev->ddev); | 971 | drm_helper_resume_force_mode(rdev->ddev); |
969 | ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); | 972 | ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); |
970 | return 0; | ||
971 | } | 973 | } |
972 | /* bad news, how to tell it to userspace ? */ | 974 | |
973 | dev_info(rdev->dev, "GPU reset failed\n"); | 975 | radeon_mutex_unlock(&rdev->cs_mutex); |
976 | |||
977 | if (r) { | ||
978 | /* bad news, how to tell it to userspace ? */ | ||
979 | dev_info(rdev->dev, "GPU reset failed\n"); | ||
980 | } | ||
981 | |||
974 | return r; | 982 | return r; |
975 | } | 983 | } |
976 | 984 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 41a5d48e657b..daadf2111040 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c | |||
@@ -991,12 +991,6 @@ static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc, | |||
991 | struct drm_display_mode *mode, | 991 | struct drm_display_mode *mode, |
992 | struct drm_display_mode *adjusted_mode) | 992 | struct drm_display_mode *adjusted_mode) |
993 | { | 993 | { |
994 | struct drm_device *dev = crtc->dev; | ||
995 | struct radeon_device *rdev = dev->dev_private; | ||
996 | |||
997 | /* adjust pm to upcoming mode change */ | ||
998 | radeon_pm_compute_clocks(rdev); | ||
999 | |||
1000 | if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) | 994 | if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) |
1001 | return false; | 995 | return false; |
1002 | return true; | 996 | return true; |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 6fabe89fa6a1..78a665bd9519 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -53,6 +53,24 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev); | |||
53 | 53 | ||
54 | #define ACPI_AC_CLASS "ac_adapter" | 54 | #define ACPI_AC_CLASS "ac_adapter" |
55 | 55 | ||
56 | int radeon_pm_get_type_index(struct radeon_device *rdev, | ||
57 | enum radeon_pm_state_type ps_type, | ||
58 | int instance) | ||
59 | { | ||
60 | int i; | ||
61 | int found_instance = -1; | ||
62 | |||
63 | for (i = 0; i < rdev->pm.num_power_states; i++) { | ||
64 | if (rdev->pm.power_state[i].type == ps_type) { | ||
65 | found_instance++; | ||
66 | if (found_instance == instance) | ||
67 | return i; | ||
68 | } | ||
69 | } | ||
70 | /* return default if no match */ | ||
71 | return rdev->pm.default_power_state_index; | ||
72 | } | ||
73 | |||
56 | #ifdef CONFIG_ACPI | 74 | #ifdef CONFIG_ACPI |
57 | static int radeon_acpi_event(struct notifier_block *nb, | 75 | static int radeon_acpi_event(struct notifier_block *nb, |
58 | unsigned long val, | 76 | unsigned long val, |
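radeon_pm_get_type_index() above scans the parsed power states, returns the index of the Nth state of the requested type (the instance argument), and falls back to the default state index when no match exists. A hedged example of how a profile setup routine might use it; POWER_STATE_TYPE_BATTERY comes from the driver's radeon_pm_state_type enum, while the profile field on the left-hand side is assumed from the existing profile tables and shown only for context:

        /* pick the first battery state, or the default state if the
         * power tables define none of that type */
        rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
                radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);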
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 03daefa73397..880e285d7578 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -105,6 +105,10 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, | |||
105 | struct vmw_dma_buffer *dmabuf = NULL; | 105 | struct vmw_dma_buffer *dmabuf = NULL; |
106 | int ret; | 106 | int ret; |
107 | 107 | ||
108 | /* A lot of the code assumes this */ | ||
109 | if (handle && (width != 64 || height != 64)) | ||
110 | return -EINVAL; | ||
111 | |||
108 | if (handle) { | 112 | if (handle) { |
109 | ret = vmw_user_surface_lookup_handle(dev_priv, tfile, | 113 | ret = vmw_user_surface_lookup_handle(dev_priv, tfile, |
110 | handle, &surface); | 114 | handle, &surface); |
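The new width/height check rejects any cursor image that is not exactly 64x64 when a handle is supplied, since the rest of the vmwgfx cursor code assumes that size. From userspace this means a libdrm caller has to stick to those dimensions; an illustrative sketch (fd, crtc_id and bo_handle are placeholders):

        #include <stdint.h>
        #include <xf86drmMode.h>

        /* vmwgfx now returns -EINVAL for anything other than 64x64 */
        static int set_cursor_64(int fd, uint32_t crtc_id, uint32_t bo_handle)
        {
                return drmModeSetCursor(fd, crtc_id, bo_handle, 64, 64);
        }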
@@ -410,8 +414,9 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv, | |||
410 | top = clips->y1; | 414 | top = clips->y1; |
411 | bottom = clips->y2; | 415 | bottom = clips->y2; |
412 | 416 | ||
413 | clips_ptr = clips; | 417 | /* skip the first clip rect */ |
414 | for (i = 1; i < num_clips; i++, clips_ptr += inc) { | 418 | for (i = 1, clips_ptr = clips + inc; |
419 | i < num_clips; i++, clips_ptr += inc) { | ||
415 | left = min_t(int, left, (int)clips_ptr->x1); | 420 | left = min_t(int, left, (int)clips_ptr->x1); |
416 | right = max_t(int, right, (int)clips_ptr->x2); | 421 | right = max_t(int, right, (int)clips_ptr->x2); |
417 | top = min_t(int, top, (int)clips_ptr->y1); | 422 | top = min_t(int, top, (int)clips_ptr->y1); |
@@ -1323,7 +1328,10 @@ int vmw_kms_close(struct vmw_private *dev_priv) | |||
1323 | * drm_encoder_cleanup which takes the lock we deadlock. | 1328 | * drm_encoder_cleanup which takes the lock we deadlock. |
1324 | */ | 1329 | */ |
1325 | drm_mode_config_cleanup(dev_priv->dev); | 1330 | drm_mode_config_cleanup(dev_priv->dev); |
1326 | vmw_kms_close_legacy_display_system(dev_priv); | 1331 | if (dev_priv->sou_priv) |
1332 | vmw_kms_close_screen_object_display(dev_priv); | ||
1333 | else | ||
1334 | vmw_kms_close_legacy_display_system(dev_priv); | ||
1327 | return 0; | 1335 | return 0; |
1328 | } | 1336 | } |
1329 | 1337 | ||