Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/ati_pcigart.c | 2
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 47
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 13
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 146
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 92
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 42
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 276
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 18
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dp.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 40
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_grctx.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_irq.c | 155
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 113
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_notifier.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_object.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_reg.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 49
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dac.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fbcon.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_tv.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fbcon.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fifo.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c | 58
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_sor.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/Kconfig | 12
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 37
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/r420.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/r520.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 98
-rw-r--r--  drivers/gpu/drm/radeon/r600_audio.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/r600_cp.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_agp.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_benchmark.c | 55
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 27
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 165
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 36
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 107
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 28
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 52
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 6
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 9
-rw-r--r--  drivers/gpu/drm/ttm/ttm_object.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 35
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 62
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 14
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 108
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 36
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 13
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 18
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 4
92 files changed, 1525 insertions(+), 849 deletions(-)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 96eddd17e050..305c59003963 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -66,6 +66,8 @@ config DRM_RADEON
 
 	  If M is selected, the module will be called radeon.
 
+source "drivers/gpu/drm/radeon/Kconfig"
+
 config DRM_I810
 	tristate "Intel I810"
 	depends on DRM && AGP && AGP_INTEL
diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c
index a1fce68e3bbe..17be051b7aa3 100644
--- a/drivers/gpu/drm/ati_pcigart.c
+++ b/drivers/gpu/drm/ati_pcigart.c
@@ -113,7 +113,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
 
 	if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) {
 		DRM_ERROR("fail to set dma mask to 0x%Lx\n",
-			  gart_info->table_mask);
+			  (unsigned long long)gart_info->table_mask);
 		ret = 1;
 		goto done;
 	}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index f665b05592f3..ab6c97330412 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -598,6 +598,50 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
 	return mode;
 }
 
+/*
+ * EDID is delightfully ambiguous about how interlaced modes are to be
+ * encoded.  Our internal representation is of frame height, but some
+ * HDTV detailed timings are encoded as field height.
+ *
+ * The format list here is from CEA, in frame size.  Technically we
+ * should be checking refresh rate too.  Whatever.
+ */
+static void
+drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
+			    struct detailed_pixel_timing *pt)
+{
+	int i;
+	static const struct {
+		int w, h;
+	} cea_interlaced[] = {
+		{ 1920, 1080 },
+		{  720,  480 },
+		{ 1440,  480 },
+		{ 2880,  480 },
+		{  720,  576 },
+		{ 1440,  576 },
+		{ 2880,  576 },
+	};
+	static const int n_sizes =
+		sizeof(cea_interlaced)/sizeof(cea_interlaced[0]);
+
+	if (!(pt->misc & DRM_EDID_PT_INTERLACED))
+		return;
+
+	for (i = 0; i < n_sizes; i++) {
+		if ((mode->hdisplay == cea_interlaced[i].w) &&
+		    (mode->vdisplay == cea_interlaced[i].h / 2)) {
+			mode->vdisplay *= 2;
+			mode->vsync_start *= 2;
+			mode->vsync_end *= 2;
+			mode->vtotal *= 2;
+			mode->vtotal |= 1;
+		}
+	}
+
+	mode->flags |= DRM_MODE_FLAG_INTERLACE;
+}
+
 /**
  * drm_mode_detailed - create a new mode from an EDID detailed timing section
  * @dev: DRM device (needed to create new mode)
@@ -680,8 +724,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
 
 	drm_mode_set_name(mode);
 
-	if (pt->misc & DRM_EDID_PT_INTERLACED)
-		mode->flags |= DRM_MODE_FLAG_INTERLACE;
+	drm_mode_do_interlace_quirk(mode, pt);
 
 	if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
 		pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index e9dbb481c469..8bf3770f294e 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -142,19 +142,6 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
 	if (IS_ERR(obj->filp))
 		goto free;
 
-	/* Basically we want to disable the OOM killer and handle ENOMEM
-	 * ourselves by sacrificing pages from cached buffers.
-	 * XXX shmem_file_[gs]et_gfp_mask()
-	 */
-	mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping,
-			     GFP_HIGHUSER |
-			     __GFP_COLD |
-			     __GFP_FS |
-			     __GFP_RECLAIMABLE |
-			     __GFP_NORETRY |
-			     __GFP_NOWARN |
-			     __GFP_NOMEMALLOC);
-
 	kref_init(&obj->refcount);
 	kref_init(&obj->handlecount);
 	obj->size = size;
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index cdec32977129..2ac074c8f5d2 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -405,7 +405,8 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
 			wasted += alignment - tmp;
 		}
 
-		if (entry->size >= size + wasted) {
+		if (entry->size >= size + wasted &&
+		    (entry->start + wasted + size) <= end) {
 			if (!best_match)
 				return entry;
 			if (entry->size < best_size) {
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 9c9998c4dceb..a894ade03093 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -290,7 +290,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
 	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
 		obj = obj_priv->obj;
 		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
-			ret = i915_gem_object_get_pages(obj);
+			ret = i915_gem_object_get_pages(obj, 0);
 			if (ret) {
 				DRM_ERROR("Failed to get pages: %d\n", ret);
 				spin_unlock(&dev_priv->mm.active_list_lock);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index e660ac07f3b2..2307f98349f7 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -735,8 +735,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 	if (cmdbuf->num_cliprects) {
 		cliprects = kcalloc(cmdbuf->num_cliprects,
 				    sizeof(struct drm_clip_rect), GFP_KERNEL);
-		if (cliprects == NULL)
+		if (cliprects == NULL) {
+			ret = -ENOMEM;
 			goto fail_batch_free;
+		}
 
 		ret = copy_from_user(cliprects, cmdbuf->cliprects,
 				     cmdbuf->num_cliprects *
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 46d88965852a..cf4cb3e9a0c2 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -120,7 +120,7 @@ const static struct intel_device_info intel_gm45_info = {
 
 const static struct intel_device_info intel_pineview_info = {
 	.is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
-	.has_pipe_cxsr = 1,
+	.need_gfx_hws = 1,
 	.has_hotplug = 1,
 };
 
@@ -174,26 +174,20 @@ const static struct pci_device_id pciidlist[] = {
 MODULE_DEVICE_TABLE(pci, pciidlist);
 #endif
 
-static int i915_suspend(struct drm_device *dev, pm_message_t state)
+static int i915_drm_freeze(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (!dev || !dev_priv) {
-		DRM_ERROR("dev: %p, dev_priv: %p\n", dev, dev_priv);
-		DRM_ERROR("DRM not initialized, aborting suspend.\n");
-		return -ENODEV;
-	}
-
-	if (state.event == PM_EVENT_PRETHAW)
-		return 0;
-
 	pci_save_state(dev->pdev);
 
 	/* If KMS is active, we do the leavevt stuff here */
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		if (i915_gem_idle(dev))
+		int error = i915_gem_idle(dev);
+		if (error) {
 			dev_err(&dev->pdev->dev,
-				"GEM idle failed, resume may fail\n");
+				"GEM idle failed, resume might fail\n");
+			return error;
+		}
 		drm_irq_uninstall(dev);
 	}
 
@@ -201,26 +195,42 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
 
 	intel_opregion_free(dev, 1);
 
+	/* Modeset on resume, not lid events */
+	dev_priv->modeset_on_lid = 0;
+
+	return 0;
+}
+
+static int i915_suspend(struct drm_device *dev, pm_message_t state)
+{
+	int error;
+
+	if (!dev || !dev->dev_private) {
+		DRM_ERROR("dev: %p\n", dev);
+		DRM_ERROR("DRM not initialized, aborting suspend.\n");
+		return -ENODEV;
+	}
+
+	if (state.event == PM_EVENT_PRETHAW)
+		return 0;
+
+	error = i915_drm_freeze(dev);
+	if (error)
+		return error;
+
 	if (state.event == PM_EVENT_SUSPEND) {
 		/* Shut down the device */
 		pci_disable_device(dev->pdev);
 		pci_set_power_state(dev->pdev, PCI_D3hot);
 	}
 
-	/* Modeset on resume, not lid events */
-	dev_priv->modeset_on_lid = 0;
-
 	return 0;
 }
 
-static int i915_resume(struct drm_device *dev)
+static int i915_drm_thaw(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret = 0;
-
-	if (pci_enable_device(dev->pdev))
-		return -1;
-	pci_set_master(dev->pdev);
+	int error = 0;
 
 	i915_restore_state(dev);
 
@@ -231,21 +241,28 @@ static int i915_resume(struct drm_device *dev)
 		mutex_lock(&dev->struct_mutex);
 		dev_priv->mm.suspended = 0;
 
-		ret = i915_gem_init_ringbuffer(dev);
-		if (ret != 0)
-			ret = -1;
+		error = i915_gem_init_ringbuffer(dev);
 		mutex_unlock(&dev->struct_mutex);
 
 		drm_irq_install(dev);
-	}
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+
 		/* Resume the modeset for every activated CRTC */
 		drm_helper_resume_force_mode(dev);
 	}
 
 	dev_priv->modeset_on_lid = 0;
 
-	return ret;
+	return error;
+}
+
+static int i915_resume(struct drm_device *dev)
+{
+	if (pci_enable_device(dev->pdev))
+		return -EIO;
+
+	pci_set_master(dev->pdev);
+
+	return i915_drm_thaw(dev);
 }
 
 /**
@@ -386,57 +403,62 @@ i915_pci_remove(struct pci_dev *pdev)
 	drm_put_dev(dev);
 }
 
-static int
-i915_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int i915_pm_suspend(struct device *dev)
 {
-	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	int error;
 
-	return i915_suspend(dev, state);
-}
+	if (!drm_dev || !drm_dev->dev_private) {
+		dev_err(dev, "DRM not initialized, aborting suspend.\n");
+		return -ENODEV;
+	}
 
-static int
-i915_pci_resume(struct pci_dev *pdev)
-{
-	struct drm_device *dev = pci_get_drvdata(pdev);
+	error = i915_drm_freeze(drm_dev);
+	if (error)
+		return error;
 
-	return i915_resume(dev);
-}
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
 
-static int
-i915_pm_suspend(struct device *dev)
-{
-	return i915_pci_suspend(to_pci_dev(dev), PMSG_SUSPEND);
+	return 0;
 }
 
-static int
-i915_pm_resume(struct device *dev)
+static int i915_pm_resume(struct device *dev)
 {
-	return i915_pci_resume(to_pci_dev(dev));
-}
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 
-static int
-i915_pm_freeze(struct device *dev)
-{
-	return i915_pci_suspend(to_pci_dev(dev), PMSG_FREEZE);
+	return i915_resume(drm_dev);
 }
 
-static int
-i915_pm_thaw(struct device *dev)
+static int i915_pm_freeze(struct device *dev)
 {
-	/* thaw during hibernate, do nothing! */
-	return 0;
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	if (!drm_dev || !drm_dev->dev_private) {
+		dev_err(dev, "DRM not initialized, aborting suspend.\n");
+		return -ENODEV;
+	}
+
+	return i915_drm_freeze(drm_dev);
 }
 
-static int
-i915_pm_poweroff(struct device *dev)
+static int i915_pm_thaw(struct device *dev)
 {
-	return i915_pci_suspend(to_pci_dev(dev), PMSG_HIBERNATE);
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	return i915_drm_thaw(drm_dev);
 }
 
-static int
-i915_pm_restore(struct device *dev)
+static int i915_pm_poweroff(struct device *dev)
 {
-	return i915_pci_resume(to_pci_dev(dev));
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	return i915_drm_freeze(drm_dev);
 }
 
 const struct dev_pm_ops i915_pm_ops = {
@@ -445,7 +467,7 @@ const struct dev_pm_ops i915_pm_ops = {
 	.freeze = i915_pm_freeze,
 	.thaw = i915_pm_thaw,
 	.poweroff = i915_pm_poweroff,
-	.restore = i915_pm_restore,
+	.restore = i915_pm_resume,
 };
 
 static struct vm_operations_struct i915_gem_vm_ops = {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2c1669488b5a..b99b6a841d95 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -493,6 +493,15 @@ typedef struct drm_i915_private {
 	struct list_head flushing_list;
 
 	/**
+	 * List of objects currently pending a GPU write flush.
+	 *
+	 * All elements on this list will belong to either the
+	 * active_list or flushing_list, last_rendering_seqno can
+	 * be used to differentiate between the two elements.
+	 */
+	struct list_head gpu_write_list;
+
+	/**
 	 * LRU list of objects which are not in the ringbuffer and
 	 * are ready to unbind, but are still in the GTT.
 	 *
@@ -592,6 +601,8 @@ struct drm_i915_gem_object {
 
 	/** This object's place on the active/flushing/inactive lists */
 	struct list_head list;
+	/** This object's place on GPU write list */
+	struct list_head gpu_write_list;
 
 	/** This object's place on the fenced object LRU */
 	struct list_head fence_list;
@@ -872,7 +883,7 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
 void i915_gem_detach_phys_object(struct drm_device *dev,
 				 struct drm_gem_object *obj);
 void i915_gem_free_all_phys_object(struct drm_device *dev);
-int i915_gem_object_get_pages(struct drm_gem_object *obj);
+int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
 void i915_gem_object_put_pages(struct drm_gem_object *obj);
 void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
 void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0c67924ca80c..ec8a0d7ffa39 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -277,7 +277,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
 
 	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret != 0)
 		goto fail_unlock;
 
@@ -321,40 +321,24 @@ fail_unlock:
 	return ret;
 }
 
-static inline gfp_t
-i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj)
-{
-	return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
-}
-
-static inline void
-i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp)
-{
-	mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
-}
-
 static int
 i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
 {
 	int ret;
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
 
 	/* If we've insufficient memory to map in the pages, attempt
 	 * to make some space by throwing out some old buffers.
 	 */
 	if (ret == -ENOMEM) {
 		struct drm_device *dev = obj->dev;
-		gfp_t gfp;
 
 		ret = i915_gem_evict_something(dev, obj->size);
 		if (ret)
 			return ret;
 
-		gfp = i915_gem_object_get_page_gfp_mask(obj);
-		i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
-		ret = i915_gem_object_get_pages(obj);
-		i915_gem_object_set_page_gfp_mask (obj, gfp);
+		ret = i915_gem_object_get_pages(obj, 0);
 	}
 
 	return ret;
@@ -790,7 +774,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
 
 	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret != 0)
 		goto fail_unlock;
 
@@ -1568,6 +1552,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 	else
 		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
 
+	BUG_ON(!list_empty(&obj_priv->gpu_write_list));
+
 	obj_priv->last_rendering_seqno = 0;
 	if (obj_priv->active) {
 		obj_priv->active = 0;
@@ -1638,7 +1624,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 		struct drm_i915_gem_object *obj_priv, *next;
 
 		list_for_each_entry_safe(obj_priv, next,
-					 &dev_priv->mm.flushing_list, list) {
+					 &dev_priv->mm.gpu_write_list,
+					 gpu_write_list) {
 			struct drm_gem_object *obj = obj_priv->obj;
 
 			if ((obj->write_domain & flush_domains) ==
@@ -1646,6 +1633,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 				uint32_t old_write_domain = obj->write_domain;
 
 				obj->write_domain = 0;
+				list_del_init(&obj_priv->gpu_write_list);
 				i915_gem_object_move_to_active(obj, seqno);
 
 				trace_i915_gem_object_change_domain(obj,
@@ -2100,8 +2088,8 @@ static int
 i915_gem_evict_everything(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	uint32_t seqno;
 	int ret;
+	uint32_t seqno;
 	bool lists_empty;
 
 	spin_lock(&dev_priv->mm.active_list_lock);
@@ -2123,6 +2111,8 @@ i915_gem_evict_everything(struct drm_device *dev)
 	if (ret)
 		return ret;
 
+	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+
 	ret = i915_gem_evict_from_inactive_list(dev);
 	if (ret)
 		return ret;
@@ -2230,7 +2220,8 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
 }
 
 int
-i915_gem_object_get_pages(struct drm_gem_object *obj)
+i915_gem_object_get_pages(struct drm_gem_object *obj,
+			  gfp_t gfpmask)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	int page_count, i;
@@ -2256,7 +2247,10 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
 	inode = obj->filp->f_path.dentry->d_inode;
 	mapping = inode->i_mapping;
 	for (i = 0; i < page_count; i++) {
-		page = read_mapping_page(mapping, i, NULL);
+		page = read_cache_page_gfp(mapping, i,
+					   mapping_gfp_mask (mapping) |
+					   __GFP_COLD |
+					   gfpmask);
 		if (IS_ERR(page)) {
 			ret = PTR_ERR(page);
 			i915_gem_object_put_pages(obj);
@@ -2579,7 +2573,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	struct drm_mm_node *free_space;
-	bool retry_alloc = false;
+	gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
 	int ret;
 
 	if (obj_priv->madv != I915_MADV_WILLNEED) {
@@ -2623,15 +2617,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 	DRM_INFO("Binding object of size %zd at 0x%08x\n",
 		 obj->size, obj_priv->gtt_offset);
 #endif
-	if (retry_alloc) {
-		i915_gem_object_set_page_gfp_mask (obj,
-						   i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY);
-	}
-	ret = i915_gem_object_get_pages(obj);
-	if (retry_alloc) {
-		i915_gem_object_set_page_gfp_mask (obj,
-						   i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY);
-	}
+	ret = i915_gem_object_get_pages(obj, gfpmask);
 	if (ret) {
 		drm_mm_put_block(obj_priv->gtt_space);
 		obj_priv->gtt_space = NULL;
@@ -2641,9 +2627,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 		ret = i915_gem_evict_something(dev, obj->size);
 		if (ret) {
 			/* now try to shrink everyone else */
-			if (! retry_alloc) {
-				retry_alloc = true;
+			if (gfpmask) {
+				gfpmask = 0;
 				goto search_free;
 			}
 
 			return ret;
@@ -2721,7 +2707,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
 	old_write_domain = obj->write_domain;
 	i915_gem_flush(dev, 0, obj->write_domain);
 	seqno = i915_add_request(dev, NULL, obj->write_domain);
-	obj->write_domain = 0;
+	BUG_ON(obj->write_domain);
 	i915_gem_object_move_to_active(obj, seqno);
 
 	trace_i915_gem_object_change_domain(obj,
@@ -3584,6 +3570,9 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
 	uint32_t reloc_count = 0, i;
 	int ret = 0;
 
+	if (relocs == NULL)
+		return 0;
+
 	for (i = 0; i < buffer_count; i++) {
 		struct drm_i915_gem_relocation_entry __user *user_relocs;
 		int unwritten;
@@ -3673,7 +3662,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct drm_gem_object *batch_obj;
 	struct drm_i915_gem_object *obj_priv;
 	struct drm_clip_rect *cliprects = NULL;
-	struct drm_i915_gem_relocation_entry *relocs;
+	struct drm_i915_gem_relocation_entry *relocs = NULL;
 	int ret = 0, ret2, i, pinned = 0;
 	uint64_t exec_offset;
 	uint32_t seqno, flush_domains, reloc_index;
@@ -3699,8 +3688,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (args->num_cliprects != 0) {
 		cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
 				    GFP_KERNEL);
-		if (cliprects == NULL)
+		if (cliprects == NULL) {
+			ret = -ENOMEM;
 			goto pre_mutex_err;
+		}
 
 		ret = copy_from_user(cliprects,
 				     (struct drm_clip_rect __user *)
@@ -3742,6 +3733,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		if (object_list[i] == NULL) {
 			DRM_ERROR("Invalid object handle %d at index %d\n",
 				   exec_list[i].handle, i);
+			/* prevent error path from reading uninitialized data */
+			args->buffer_count = i + 1;
 			ret = -EBADF;
 			goto err;
 		}
@@ -3750,6 +3743,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		if (obj_priv->in_execbuffer) {
 			DRM_ERROR("Object %p appears more than once in object list\n",
 				   object_list[i]);
+			/* prevent error path from reading uninitialized data */
+			args->buffer_count = i + 1;
 			ret = -EBADF;
 			goto err;
 		}
@@ -3863,16 +3858,23 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		i915_gem_flush(dev,
 			       dev->invalidate_domains,
 			       dev->flush_domains);
-		if (dev->flush_domains)
+		if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
 			(void)i915_add_request(dev, file_priv,
 					       dev->flush_domains);
 	}
 
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
+		struct drm_i915_gem_object *obj_priv = obj->driver_private;
 		uint32_t old_write_domain = obj->write_domain;
 
 		obj->write_domain = obj->pending_write_domain;
+		if (obj->write_domain)
+			list_move_tail(&obj_priv->gpu_write_list,
+				       &dev_priv->mm.gpu_write_list);
+		else
+			list_del_init(&obj_priv->gpu_write_list);
+
 		trace_i915_gem_object_change_domain(obj,
 						    obj->read_domains,
 						    old_write_domain);
@@ -3946,6 +3948,7 @@ err:
 
 	mutex_unlock(&dev->struct_mutex);
 
+pre_mutex_err:
 	/* Copy the updated relocations out regardless of current error
 	 * state.  Failure to update the relocs would mean that the next
 	 * time userland calls execbuf, it would do so with presumed offset
@@ -3960,7 +3963,6 @@ err:
 			ret = ret2;
 	}
 
-pre_mutex_err:
 	drm_free_large(object_list);
 	kfree(cliprects);
 
@@ -4383,6 +4385,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 	obj_priv->obj = obj;
 	obj_priv->fence_reg = I915_FENCE_REG_NONE;
 	INIT_LIST_HEAD(&obj_priv->list);
+	INIT_LIST_HEAD(&obj_priv->gpu_write_list);
 	INIT_LIST_HEAD(&obj_priv->fence_list);
 	obj_priv->madv = I915_MADV_WILLNEED;
 
@@ -4834,6 +4837,7 @@ i915_gem_load(struct drm_device *dev)
 	spin_lock_init(&dev_priv->mm.active_list_lock);
 	INIT_LIST_HEAD(&dev_priv->mm.active_list);
 	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
+	INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
 	INIT_LIST_HEAD(&dev_priv->mm.request_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
@@ -4946,7 +4950,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
 	if (!obj_priv->phys_obj)
 		return;
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret)
 		goto out;
 
@@ -5004,7 +5008,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
 	obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
 	obj_priv->phys_obj->cur_obj = obj;
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret) {
 		DRM_ERROR("failed to get page list\n");
 		goto out;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 89a071a3e6fb..a17d6bdfe63e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -309,6 +309,22 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 	if (de_iir & DE_GSE)
 		ironlake_opregion_gse_intr(dev);
 
+	if (de_iir & DE_PLANEA_FLIP_DONE) {
+		intel_prepare_page_flip(dev, 0);
+		intel_finish_page_flip(dev, 0);
+	}
+
+	if (de_iir & DE_PLANEB_FLIP_DONE) {
+		intel_prepare_page_flip(dev, 1);
+		intel_finish_page_flip(dev, 1);
+	}
+
+	if (de_iir & DE_PIPEA_VBLANK)
+		drm_handle_vblank(dev, 0);
+
+	if (de_iir & DE_PIPEB_VBLANK)
+		drm_handle_vblank(dev, 1);
+
 	/* check event from PCH */
 	if ((de_iir & DE_PCH_EVENT) &&
 	    (pch_iir & SDE_HOTPLUG_MASK)) {
@@ -844,11 +860,11 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
 	if (!(pipeconf & PIPEACONF_ENABLE))
 		return -EINVAL;
 
-	if (IS_IRONLAKE(dev))
-		return 0;
-
 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-	if (IS_I965G(dev))
+	if (IS_IRONLAKE(dev))
+		ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
+					    DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
+	else if (IS_I965G(dev))
 		i915_enable_pipestat(dev_priv, pipe,
 				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
 	else
@@ -866,13 +882,14 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
 
-	if (IS_IRONLAKE(dev))
-		return;
-
 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-	i915_disable_pipestat(dev_priv, pipe,
-			      PIPE_VBLANK_INTERRUPT_ENABLE |
-			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
+	if (IS_IRONLAKE(dev))
+		ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
+					     DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
+	else
+		i915_disable_pipestat(dev_priv, pipe,
+				      PIPE_VBLANK_INTERRUPT_ENABLE |
+				      PIPE_START_VBLANK_INTERRUPT_ENABLE);
 	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
 }
 
@@ -1015,13 +1032,14 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	/* enable kind of interrupts always enabled */
-	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT;
+	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
+			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
 	u32 render_mask = GT_USER_INTERRUPT;
 	u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
 			   SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
 
 	dev_priv->irq_mask_reg = ~display_mask;
-	dev_priv->de_irq_enable_reg = display_mask;
+	dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
 
 	/* should always can generate irq */
 	I915_WRITE(DEIIR, I915_READ(DEIIR));
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 847006c5218e..ab1bd2d3d3b6 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -338,6 +338,7 @@
 #define FBC_CTL_PERIODIC	(1<<30)
 #define FBC_CTL_INTERVAL_SHIFT	(16)
 #define FBC_CTL_UNCOMPRESSIBLE	(1<<14)
+#define FBC_C3_IDLE		(1<<13)
 #define FBC_CTL_STRIDE_SHIFT	(5)
 #define FBC_CTL_FENCENO		(1<<0)
 #define FBC_COMMAND		0x0320c
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index ddefc871edfe..79dd4026586f 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -157,6 +157,9 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
 	adpa = I915_READ(PCH_ADPA);
 
 	adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+	/* disable HPD first */
+	I915_WRITE(PCH_ADPA, adpa);
+	(void)I915_READ(PCH_ADPA);
 
 	adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
 		 ADPA_CRT_HOTPLUG_WARMUP_10MS |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 45da78ef4a92..b27202d23ebc 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -240,33 +240,86 @@ struct intel_limit {
 #define IRONLAKE_DOT_MAX         350000
 #define IRONLAKE_VCO_MIN         1760000
 #define IRONLAKE_VCO_MAX         3510000
-#define IRONLAKE_N_MIN           1
-#define IRONLAKE_N_MAX           6
-#define IRONLAKE_M_MIN           79
-#define IRONLAKE_M_MAX           127
 #define IRONLAKE_M1_MIN          12
 #define IRONLAKE_M1_MAX          22
 #define IRONLAKE_M2_MIN          5
 #define IRONLAKE_M2_MAX          9
-#define IRONLAKE_P_SDVO_DAC_MIN  5
-#define IRONLAKE_P_SDVO_DAC_MAX  80
-#define IRONLAKE_P_LVDS_MIN      28
-#define IRONLAKE_P_LVDS_MAX      112
-#define IRONLAKE_P1_MIN          1
-#define IRONLAKE_P1_MAX          8
-#define IRONLAKE_P2_SDVO_DAC_SLOW 10
-#define IRONLAKE_P2_SDVO_DAC_FAST 5
-#define IRONLAKE_P2_LVDS_SLOW    14 /* single channel */
-#define IRONLAKE_P2_LVDS_FAST    7  /* double channel */
 #define IRONLAKE_P2_DOT_LIMIT    225000 /* 225Mhz */
 
-#define IRONLAKE_P_DISPLAY_PORT_MIN	10
-#define IRONLAKE_P_DISPLAY_PORT_MAX	20
-#define IRONLAKE_P2_DISPLAY_PORT_FAST	10
-#define IRONLAKE_P2_DISPLAY_PORT_SLOW	10
-#define IRONLAKE_P2_DISPLAY_PORT_LIMIT	0
-#define IRONLAKE_P1_DISPLAY_PORT_MIN	1
-#define IRONLAKE_P1_DISPLAY_PORT_MAX	2
+/* We have parameter ranges for different type of outputs. */
+
+/* DAC & HDMI Refclk 120Mhz */
+#define IRONLAKE_DAC_N_MIN	1
+#define IRONLAKE_DAC_N_MAX	5
+#define IRONLAKE_DAC_M_MIN	79
+#define IRONLAKE_DAC_M_MAX	127
+#define IRONLAKE_DAC_P_MIN	5
+#define IRONLAKE_DAC_P_MAX	80
+#define IRONLAKE_DAC_P1_MIN	1
+#define IRONLAKE_DAC_P1_MAX	8
+#define IRONLAKE_DAC_P2_SLOW	10
+#define IRONLAKE_DAC_P2_FAST	5
+
+/* LVDS single-channel 120Mhz refclk */
+#define IRONLAKE_LVDS_S_N_MIN	1
+#define IRONLAKE_LVDS_S_N_MAX	3
+#define IRONLAKE_LVDS_S_M_MIN	79
+#define IRONLAKE_LVDS_S_M_MAX	118
+#define IRONLAKE_LVDS_S_P_MIN	28
+#define IRONLAKE_LVDS_S_P_MAX	112
+#define IRONLAKE_LVDS_S_P1_MIN	2
+#define IRONLAKE_LVDS_S_P1_MAX	8
+#define IRONLAKE_LVDS_S_P2_SLOW	14
+#define IRONLAKE_LVDS_S_P2_FAST	14
+
+/* LVDS dual-channel 120Mhz refclk */
+#define IRONLAKE_LVDS_D_N_MIN	1
+#define IRONLAKE_LVDS_D_N_MAX	3
+#define IRONLAKE_LVDS_D_M_MIN	79
+#define IRONLAKE_LVDS_D_M_MAX	127
+#define IRONLAKE_LVDS_D_P_MIN	14
+#define IRONLAKE_LVDS_D_P_MAX	56
+#define IRONLAKE_LVDS_D_P1_MIN	2
+#define IRONLAKE_LVDS_D_P1_MAX	8
+#define IRONLAKE_LVDS_D_P2_SLOW	7
+#define IRONLAKE_LVDS_D_P2_FAST	7
+
+/* LVDS single-channel 100Mhz refclk */
+#define IRONLAKE_LVDS_S_SSC_N_MIN	1
+#define IRONLAKE_LVDS_S_SSC_N_MAX	2
+#define IRONLAKE_LVDS_S_SSC_M_MIN	79
+#define IRONLAKE_LVDS_S_SSC_M_MAX	126
+#define IRONLAKE_LVDS_S_SSC_P_MIN	28
+#define IRONLAKE_LVDS_S_SSC_P_MAX	112
+#define IRONLAKE_LVDS_S_SSC_P1_MIN	2
+#define IRONLAKE_LVDS_S_SSC_P1_MAX	8
+#define IRONLAKE_LVDS_S_SSC_P2_SLOW	14
+#define IRONLAKE_LVDS_S_SSC_P2_FAST	14
+
+/* LVDS dual-channel 100Mhz refclk */
+#define IRONLAKE_LVDS_D_SSC_N_MIN	1
+#define IRONLAKE_LVDS_D_SSC_N_MAX	3
+#define IRONLAKE_LVDS_D_SSC_M_MIN	79
+#define IRONLAKE_LVDS_D_SSC_M_MAX	126
+#define IRONLAKE_LVDS_D_SSC_P_MIN	14
+#define IRONLAKE_LVDS_D_SSC_P_MAX	42
+#define IRONLAKE_LVDS_D_SSC_P1_MIN	2
+#define IRONLAKE_LVDS_D_SSC_P1_MAX	6
+#define IRONLAKE_LVDS_D_SSC_P2_SLOW	7
+#define IRONLAKE_LVDS_D_SSC_P2_FAST	7
+
+/* DisplayPort */
+#define IRONLAKE_DP_N_MIN	1
+#define IRONLAKE_DP_N_MAX	2
+#define IRONLAKE_DP_M_MIN	81
+#define IRONLAKE_DP_M_MAX	90
+#define IRONLAKE_DP_P_MIN	10
+#define IRONLAKE_DP_P_MAX	20
+#define IRONLAKE_DP_P2_FAST	10
+#define IRONLAKE_DP_P2_SLOW	10
+#define IRONLAKE_DP_P2_LIMIT	0
+#define IRONLAKE_DP_P1_MIN	1
+#define IRONLAKE_DP_P1_MAX	2
 
 static bool
 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
@@ -474,33 +527,78 @@ static const intel_limit_t intel_limits_pineview_lvds = {
 	.find_pll = intel_find_best_PLL,
 };
 
-static const intel_limit_t intel_limits_ironlake_sdvo = {
+static const intel_limit_t intel_limits_ironlake_dac = {
 	.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
 	.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
-	.n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX },
-	.m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX },
+	.n = { .min = IRONLAKE_DAC_N_MIN, .max = IRONLAKE_DAC_N_MAX },
+	.m = { .min = IRONLAKE_DAC_M_MIN, .max = IRONLAKE_DAC_M_MAX },
 	.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
 	.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
-	.p = { .min = IRONLAKE_P_SDVO_DAC_MIN, .max = IRONLAKE_P_SDVO_DAC_MAX },
-	.p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX },
+	.p = { .min = IRONLAKE_DAC_P_MIN, .max = IRONLAKE_DAC_P_MAX },
+	.p1 = { .min = IRONLAKE_DAC_P1_MIN, .max = IRONLAKE_DAC_P1_MAX },
 	.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
-		.p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW,
-		.p2_fast = IRONLAKE_P2_SDVO_DAC_FAST },
+		.p2_slow = IRONLAKE_DAC_P2_SLOW,
+		.p2_fast = IRONLAKE_DAC_P2_FAST },
 	.find_pll = intel_g4x_find_best_PLL,
 };
 
-static const intel_limit_t intel_limits_ironlake_lvds = {
+static const intel_limit_t intel_limits_ironlake_single_lvds = {
 	.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
 	.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
-	.n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX },
-	.m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX },
+	.n = { .min = IRONLAKE_LVDS_S_N_MIN, .max = IRONLAKE_LVDS_S_N_MAX },
+	.m = { .min = IRONLAKE_LVDS_S_M_MIN, .max = IRONLAKE_LVDS_S_M_MAX },
 	.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
 	.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
-	.p = { .min = IRONLAKE_P_LVDS_MIN, .max = IRONLAKE_P_LVDS_MAX },
-	.p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX },
+	.p = { .min = IRONLAKE_LVDS_S_P_MIN, .max = IRONLAKE_LVDS_S_P_MAX },
+	.p1 = { .min = IRONLAKE_LVDS_S_P1_MIN, .max = IRONLAKE_LVDS_S_P1_MAX },
 	.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
-		.p2_slow = IRONLAKE_P2_LVDS_SLOW,
-		.p2_fast = IRONLAKE_P2_LVDS_FAST },
+		.p2_slow = IRONLAKE_LVDS_S_P2_SLOW,
+		.p2_fast = IRONLAKE_LVDS_S_P2_FAST },
+	.find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_dual_lvds = {
+	.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
+	.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
+	.n = { .min = IRONLAKE_LVDS_D_N_MIN, .max = IRONLAKE_LVDS_D_N_MAX },
+	.m = { .min = IRONLAKE_LVDS_D_M_MIN, .max = IRONLAKE_LVDS_D_M_MAX },
+	.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
+	.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
+	.p = { .min = IRONLAKE_LVDS_D_P_MIN, .max = IRONLAKE_LVDS_D_P_MAX },
+	.p1 = { .min = IRONLAKE_LVDS_D_P1_MIN, .max = IRONLAKE_LVDS_D_P1_MAX },
+	.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+		.p2_slow = IRONLAKE_LVDS_D_P2_SLOW,
+		.p2_fast = IRONLAKE_LVDS_D_P2_FAST },
+	.find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
+	.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
+	.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
+	.n = { .min = IRONLAKE_LVDS_S_SSC_N_MIN, .max = IRONLAKE_LVDS_S_SSC_N_MAX },
+	.m = { .min = IRONLAKE_LVDS_S_SSC_M_MIN, .max = IRONLAKE_LVDS_S_SSC_M_MAX },
+	.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
+	.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
+	.p = { .min = IRONLAKE_LVDS_S_SSC_P_MIN, .max = IRONLAKE_LVDS_S_SSC_P_MAX },
+	.p1 = { .min = IRONLAKE_LVDS_S_SSC_P1_MIN,.max = IRONLAKE_LVDS_S_SSC_P1_MAX },
+	.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+		.p2_slow = IRONLAKE_LVDS_S_SSC_P2_SLOW,
+		.p2_fast = IRONLAKE_LVDS_S_SSC_P2_FAST },
+	.find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
+	.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
+	.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
+	.n = { .min = IRONLAKE_LVDS_D_SSC_N_MIN, .max = IRONLAKE_LVDS_D_SSC_N_MAX },
+	.m = { .min = IRONLAKE_LVDS_D_SSC_M_MIN, .max = IRONLAKE_LVDS_D_SSC_M_MAX },
+	.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
+	.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
+	.p = { .min = IRONLAKE_LVDS_D_SSC_P_MIN, .max = IRONLAKE_LVDS_D_SSC_P_MAX },
+	.p1 = { .min = IRONLAKE_LVDS_D_SSC_P1_MIN,.max = IRONLAKE_LVDS_D_SSC_P1_MAX },
+	.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+		.p2_slow = IRONLAKE_LVDS_D_SSC_P2_SLOW,
+		.p2_fast = IRONLAKE_LVDS_D_SSC_P2_FAST },
 	.find_pll = intel_g4x_find_best_PLL,
 };
 
@@ -509,34 +607,53 @@ static const intel_limit_t intel_limits_ironlake_display_port = {
 		 .max = IRONLAKE_DOT_MAX },
 	.vco = { .min = IRONLAKE_VCO_MIN,
 		 .max = IRONLAKE_VCO_MAX},
-	.n = { .min = IRONLAKE_N_MIN,
-	       .max = IRONLAKE_N_MAX },
-	.m = { .min = IRONLAKE_M_MIN,
-	       .max = IRONLAKE_M_MAX },
+	.n = { .min = IRONLAKE_DP_N_MIN,
+	       .max = IRONLAKE_DP_N_MAX },
+	.m = { .min = IRONLAKE_DP_M_MIN,
+	       .max = IRONLAKE_DP_M_MAX },
 	.m1 = { .min = IRONLAKE_M1_MIN,
 		.max = IRONLAKE_M1_MAX },
 	.m2 = { .min = IRONLAKE_M2_MIN,
 		.max = IRONLAKE_M2_MAX },
-	.p = { .min = IRONLAKE_P_DISPLAY_PORT_MIN,
-	       .max = IRONLAKE_P_DISPLAY_PORT_MAX },
-	.p1 = { .min = IRONLAKE_P1_DISPLAY_PORT_MIN,
-		.max = IRONLAKE_P1_DISPLAY_PORT_MAX},
-	.p2 = { .dot_limit = IRONLAKE_P2_DISPLAY_PORT_LIMIT,
-		.p2_slow = IRONLAKE_P2_DISPLAY_PORT_SLOW,
-		.p2_fast = IRONLAKE_P2_DISPLAY_PORT_FAST },
+	.p = { .min = IRONLAKE_DP_P_MIN,
+	       .max = IRONLAKE_DP_P_MAX },
+	.p1 = { .min = IRONLAKE_DP_P1_MIN,
+		.max = IRONLAKE_DP_P1_MAX},
+	.p2 = { .dot_limit = IRONLAKE_DP_P2_LIMIT,
+		.p2_slow = IRONLAKE_DP_P2_SLOW,
+		.p2_fast = IRONLAKE_DP_P2_FAST },
 	.find_pll = intel_find_pll_ironlake_dp,
 };
 
 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	const intel_limit_t *limit;
-	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
-		limit = &intel_limits_ironlake_lvds;
-	else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+	int refclk = 120;
+
+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+		if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100)
+			refclk = 100;
+
+		if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
+		    LVDS_CLKB_POWER_UP) {
+			/* LVDS dual channel */
+			if (refclk == 100)
+				limit = &intel_limits_ironlake_dual_lvds_100m;
+			else
+				limit = &intel_limits_ironlake_dual_lvds;
+		} else {
+			if (refclk == 100)
+				limit = &intel_limits_ironlake_single_lvds_100m;
+			else
+				limit = &intel_limits_ironlake_single_lvds;
+		}
+	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
 		   HAS_eDP)
 		limit = &intel_limits_ironlake_display_port;
 	else
-		limit = &intel_limits_ironlake_sdvo;
+		limit = &intel_limits_ironlake_dac;
 
 	return limit;
 }
@@ -914,6 +1031,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 
 	/* enable it... */
 	fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
+	if (IS_I945GM(dev))
+		fbc_ctl |= FBC_C3_IDLE; /* 945 needs special SR handling */
 	fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
 	fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
 	if (obj_priv->tiling_mode != I915_TILING_NONE)
@@ -1638,6 +1757,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
 	case DRM_MODE_DPMS_OFF:
 		DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
 
+		drm_vblank_off(dev, pipe);
 		/* Disable display plane */
 		temp = I915_READ(dspcntr_reg);
 		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
@@ -2519,6 +2639,10 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
 		sr_entries = roundup(sr_entries / cacheline_size, 1);
 		DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
 		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+	} else {
+		/* Turn off self refresh if both pipes are enabled */
+		I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+					& ~FW_BLC_SELF_EN);
 	}
 
 	DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
@@ -2562,6 +2686,10 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
2562 srwm = 1; 2686 srwm = 1;
2563 srwm &= 0x3f; 2687 srwm &= 0x3f;
2564 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); 2688 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
2689 } else {
2690 /* Turn off self refresh if both pipes are enabled */
2691 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
2692 & ~FW_BLC_SELF_EN);
2565 } 2693 }
2566 2694
2567 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", 2695 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
@@ -2630,6 +2758,10 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
2630 if (srwm < 0) 2758 if (srwm < 0)
2631 srwm = 1; 2759 srwm = 1;
2632 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f)); 2760 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
2761 } else {
2762 /* Turn off self refresh if both pipes are enabled */
2763 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
2764 & ~FW_BLC_SELF_EN);
2633 } 2765 }
2634 2766
2635 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", 2767 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
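The g4x, i965 and i9xx watermark hunks all add the same else branch: self-refresh stays enabled only while a single pipe is active, and is otherwise cleared with a read-modify-write of FW_BLC_SELF. Reduced to a sketch (the helper name is illustrative; the i9xx path additionally ORs in the srwm value when enabling):

static void example_set_self_refresh(struct drm_device *dev, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable)
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	else	/* both pipes active: clear only the enable bit */
		I915_WRITE(FW_BLC_SELF,
			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
}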
@@ -3949,7 +4081,8 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
3949struct intel_unpin_work { 4081struct intel_unpin_work {
3950 struct work_struct work; 4082 struct work_struct work;
3951 struct drm_device *dev; 4083 struct drm_device *dev;
3952 struct drm_gem_object *obj; 4084 struct drm_gem_object *old_fb_obj;
4085 struct drm_gem_object *pending_flip_obj;
3953 struct drm_pending_vblank_event *event; 4086 struct drm_pending_vblank_event *event;
3954 int pending; 4087 int pending;
3955}; 4088};
@@ -3960,8 +4093,9 @@ static void intel_unpin_work_fn(struct work_struct *__work)
3960 container_of(__work, struct intel_unpin_work, work); 4093 container_of(__work, struct intel_unpin_work, work);
3961 4094
3962 mutex_lock(&work->dev->struct_mutex); 4095 mutex_lock(&work->dev->struct_mutex);
3963 i915_gem_object_unpin(work->obj); 4096 i915_gem_object_unpin(work->old_fb_obj);
3964 drm_gem_object_unreference(work->obj); 4097 drm_gem_object_unreference(work->pending_flip_obj);
4098 drm_gem_object_unreference(work->old_fb_obj);
3965 mutex_unlock(&work->dev->struct_mutex); 4099 mutex_unlock(&work->dev->struct_mutex);
3966 kfree(work); 4100 kfree(work);
3967} 4101}
@@ -3984,6 +4118,12 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
3984 spin_lock_irqsave(&dev->event_lock, flags); 4118 spin_lock_irqsave(&dev->event_lock, flags);
3985 work = intel_crtc->unpin_work; 4119 work = intel_crtc->unpin_work;
3986 if (work == NULL || !work->pending) { 4120 if (work == NULL || !work->pending) {
4121 if (work && !work->pending) {
4122 obj_priv = work->pending_flip_obj->driver_private;
4123 DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
4124 obj_priv,
4125 atomic_read(&obj_priv->pending_flip));
4126 }
3987 spin_unlock_irqrestore(&dev->event_lock, flags); 4127 spin_unlock_irqrestore(&dev->event_lock, flags);
3988 return; 4128 return;
3989 } 4129 }
@@ -4004,8 +4144,11 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
4004 4144
4005 spin_unlock_irqrestore(&dev->event_lock, flags); 4145 spin_unlock_irqrestore(&dev->event_lock, flags);
4006 4146
4007 obj_priv = work->obj->driver_private; 4147 obj_priv = work->pending_flip_obj->driver_private;
4008 if (atomic_dec_and_test(&obj_priv->pending_flip)) 4148
4149 /* Initial scanout buffer will have a 0 pending flip count */
4150 if ((atomic_read(&obj_priv->pending_flip) == 0) ||
4151 atomic_dec_and_test(&obj_priv->pending_flip))
4009 DRM_WAKEUP(&dev_priv->pending_flip_queue); 4152 DRM_WAKEUP(&dev_priv->pending_flip_queue);
4010 schedule_work(&work->work); 4153 schedule_work(&work->work);
4011} 4154}
@@ -4018,8 +4161,11 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
4018 unsigned long flags; 4161 unsigned long flags;
4019 4162
4020 spin_lock_irqsave(&dev->event_lock, flags); 4163 spin_lock_irqsave(&dev->event_lock, flags);
4021 if (intel_crtc->unpin_work) 4164 if (intel_crtc->unpin_work) {
4022 intel_crtc->unpin_work->pending = 1; 4165 intel_crtc->unpin_work->pending = 1;
4166 } else {
4167 DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
4168 }
4023 spin_unlock_irqrestore(&dev->event_lock, flags); 4169 spin_unlock_irqrestore(&dev->event_lock, flags);
4024} 4170}
4025 4171
@@ -4035,7 +4181,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4035 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4181 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4036 struct intel_unpin_work *work; 4182 struct intel_unpin_work *work;
4037 unsigned long flags; 4183 unsigned long flags;
4038 int ret; 4184 int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
4185 int ret, pipesrc;
4039 RING_LOCALS; 4186 RING_LOCALS;
4040 4187
4041 work = kzalloc(sizeof *work, GFP_KERNEL); 4188 work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -4047,12 +4194,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4047 work->event = event; 4194 work->event = event;
4048 work->dev = crtc->dev; 4195 work->dev = crtc->dev;
4049 intel_fb = to_intel_framebuffer(crtc->fb); 4196 intel_fb = to_intel_framebuffer(crtc->fb);
4050 work->obj = intel_fb->obj; 4197 work->old_fb_obj = intel_fb->obj;
4051 INIT_WORK(&work->work, intel_unpin_work_fn); 4198 INIT_WORK(&work->work, intel_unpin_work_fn);
4052 4199
4053 /* We borrow the event spin lock for protecting unpin_work */ 4200 /* We borrow the event spin lock for protecting unpin_work */
4054 spin_lock_irqsave(&dev->event_lock, flags); 4201 spin_lock_irqsave(&dev->event_lock, flags);
4055 if (intel_crtc->unpin_work) { 4202 if (intel_crtc->unpin_work) {
4203 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
4056 spin_unlock_irqrestore(&dev->event_lock, flags); 4204 spin_unlock_irqrestore(&dev->event_lock, flags);
4057 kfree(work); 4205 kfree(work);
4058 mutex_unlock(&dev->struct_mutex); 4206 mutex_unlock(&dev->struct_mutex);
@@ -4066,19 +4214,24 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4066 4214
4067 ret = intel_pin_and_fence_fb_obj(dev, obj); 4215 ret = intel_pin_and_fence_fb_obj(dev, obj);
4068 if (ret != 0) { 4216 if (ret != 0) {
4217 DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
4218 obj->driver_private);
4069 kfree(work); 4219 kfree(work);
4220 intel_crtc->unpin_work = NULL;
4070 mutex_unlock(&dev->struct_mutex); 4221 mutex_unlock(&dev->struct_mutex);
4071 return ret; 4222 return ret;
4072 } 4223 }
4073 4224
4074 /* Reference the old fb object for the scheduled work. */ 4225 /* Reference the objects for the scheduled work. */
4075 drm_gem_object_reference(work->obj); 4226 drm_gem_object_reference(work->old_fb_obj);
4227 drm_gem_object_reference(obj);
4076 4228
4077 crtc->fb = fb; 4229 crtc->fb = fb;
4078 i915_gem_object_flush_write_domain(obj); 4230 i915_gem_object_flush_write_domain(obj);
4079 drm_vblank_get(dev, intel_crtc->pipe); 4231 drm_vblank_get(dev, intel_crtc->pipe);
4080 obj_priv = obj->driver_private; 4232 obj_priv = obj->driver_private;
4081 atomic_inc(&obj_priv->pending_flip); 4233 atomic_inc(&obj_priv->pending_flip);
4234 work->pending_flip_obj = obj;
4082 4235
4083 BEGIN_LP_RING(4); 4236 BEGIN_LP_RING(4);
4084 OUT_RING(MI_DISPLAY_FLIP | 4237 OUT_RING(MI_DISPLAY_FLIP |
@@ -4086,7 +4239,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4086 OUT_RING(fb->pitch); 4239 OUT_RING(fb->pitch);
4087 if (IS_I965G(dev)) { 4240 if (IS_I965G(dev)) {
4088 OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); 4241 OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
4089 OUT_RING((fb->width << 16) | fb->height); 4242 pipesrc = I915_READ(pipesrc_reg);
4243 OUT_RING(pipesrc & 0x0fff0fff);
4090 } else { 4244 } else {
4091 OUT_RING(obj_priv->gtt_offset); 4245 OUT_RING(obj_priv->gtt_offset);
4092 OUT_RING(MI_NOOP); 4246 OUT_RING(MI_NOOP);
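Taken together, the page-flip hunks above track two buffer objects per flip and only release them from a worker once the flip has completed. The life cycle, summarized as comments (simplified; error paths omitted):

/*
 * queue (intel_crtc_page_flip):
 *     work->old_fb_obj = current fb object;  pin + fence the new object;
 *     reference old_fb_obj and the new object;
 *     atomic_inc(&new_obj_priv->pending_flip);  work->pending_flip_obj = obj;
 *     emit MI_DISPLAY_FLIP on the ring;
 *
 * flip-pending irq (intel_prepare_page_flip):
 *     mark intel_crtc->unpin_work->pending;
 *
 * flip-complete irq (intel_finish_page_flip):
 *     send the vblank event, wake pending_flip_queue waiters,
 *     schedule_work(&work->work);
 *
 * worker (intel_unpin_work_fn):
 *     unpin old_fb_obj, drop both references, kfree(work);
 */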
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 371d753e362b..aaabbcbe5905 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -148,7 +148,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
148 148
149 mutex_lock(&dev->struct_mutex); 149 mutex_lock(&dev->struct_mutex);
150 150
151 ret = i915_gem_object_pin(fbo, PAGE_SIZE); 151 ret = i915_gem_object_pin(fbo, 64*1024);
152 if (ret) { 152 if (ret) {
153 DRM_ERROR("failed to pin fb: %d\n", ret); 153 DRM_ERROR("failed to pin fb: %d\n", ret);
154 goto out_unref; 154 goto out_unref;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index aa74e59bec61..c2e8a45780d5 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -611,7 +611,7 @@ static const struct dmi_system_id bad_lid_status[] = {
611 { 611 {
612 .ident = "Samsung SX20S", 612 .ident = "Samsung SX20S",
613 .matches = { 613 .matches = {
614 DMI_MATCH(DMI_SYS_VENDOR, "Phoenix Technologies LTD"), 614 DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"),
615 DMI_MATCH(DMI_BOARD_NAME, "SX20S"), 615 DMI_MATCH(DMI_BOARD_NAME, "SX20S"),
616 }, 616 },
617 }, 617 },
@@ -623,12 +623,26 @@ static const struct dmi_system_id bad_lid_status[] = {
623 }, 623 },
624 }, 624 },
625 { 625 {
626 .ident = "Aspire 1810T",
627 .matches = {
628 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
629 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1810T"),
630 },
631 },
632 {
626 .ident = "PC-81005", 633 .ident = "PC-81005",
627 .matches = { 634 .matches = {
628 DMI_MATCH(DMI_SYS_VENDOR, "MALATA"), 635 DMI_MATCH(DMI_SYS_VENDOR, "MALATA"),
629 DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"), 636 DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"),
630 }, 637 },
631 }, 638 },
639 {
640 .ident = "Clevo M5x0N",
641 .matches = {
642 DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
643 DMI_MATCH(DMI_BOARD_NAME, "M5x0N"),
644 },
645 },
632 { } 646 { }
633}; 647};
634 648
@@ -643,7 +657,7 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect
643{ 657{
644 enum drm_connector_status status = connector_status_connected; 658 enum drm_connector_status status = connector_status_connected;
645 659
646 if (!acpi_lid_open() && !dmi_check_system(bad_lid_status)) 660 if (!dmi_check_system(bad_lid_status) && !acpi_lid_open())
647 status = connector_status_disconnected; 661 status = connector_status_disconnected;
648 662
649 return status; 663 return status;
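Reordering the check in intel_lvds_detect() lets the DMI quirk table short-circuit the expression, so acpi_lid_open() is never consulted on machines known to report bogus lid state. Quirk entries follow the usual dmi_system_id shape; a generic example (vendor and model strings are placeholders, not a new quirk):

static const struct dmi_system_id example_bad_lid[] = {
	{
		.ident = "Example machine with a broken lid switch",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Model"),
		},
	},
	{ }	/* terminating entry */
};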
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index eaacfd0920df..82678d30ab06 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2345,6 +2345,14 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
2345 connector->connector_type = DRM_MODE_CONNECTOR_VGA; 2345 connector->connector_type = DRM_MODE_CONNECTOR_VGA;
2346 intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 2346 intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
2347 (1 << INTEL_ANALOG_CLONE_BIT); 2347 (1 << INTEL_ANALOG_CLONE_BIT);
2348 } else if (flags & SDVO_OUTPUT_CVBS0) {
2349
2350 sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0;
2351 encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
2352 connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
2353 sdvo_priv->is_tv = true;
2354 intel_output->needs_tv_clock = true;
2355 intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
2348 } else if (flags & SDVO_OUTPUT_LVDS0) { 2356 } else if (flags & SDVO_OUTPUT_LVDS0) {
2349 2357
2350 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; 2358 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 1cf488247a16..48227e744753 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -90,21 +90,21 @@ int nouveau_hybrid_setup(struct drm_device *dev)
90{ 90{
91 int result; 91 int result;
92 92
93 if (nouveau_dsm(dev, NOUVEAU_DSM_ACTIVE, NOUVEAU_DSM_ACTIVE_QUERY, 93 if (nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STATE,
94 &result)) 94 &result))
95 return -ENODEV; 95 return -ENODEV;
96 96
97 NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result); 97 NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result);
98 98
99 if (result & 0x1) { /* Stamina mode - disable the external GPU */ 99 if (result) { /* Ensure that the external GPU is enabled */
100 nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
101 nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
102 NULL);
103 } else { /* Stamina mode - disable the external GPU */
100 nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA, 104 nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA,
101 NULL); 105 NULL);
102 nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA, 106 nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA,
103 NULL); 107 NULL);
104 } else { /* Ensure that the external GPU is enabled */
105 nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
106 nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
107 NULL);
108 } 108 }
109 109
110 return 0; 110 return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index d7f8d8b4a4b8..0e9cd1d49130 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -1865,7 +1865,7 @@ init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1865 1865
1866 struct drm_nouveau_private *dev_priv = bios->dev->dev_private; 1866 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
1867 1867
1868 if (dev_priv->card_type >= NV_50) 1868 if (dev_priv->card_type >= NV_40)
1869 return 1; 1869 return 1;
1870 1870
1871 /* 1871 /*
@@ -3765,7 +3765,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3765 */ 3765 */
3766 3766
3767 struct drm_nouveau_private *dev_priv = dev->dev_private; 3767 struct drm_nouveau_private *dev_priv = dev->dev_private;
3768 struct init_exec iexec = {true, false};
3769 struct nvbios *bios = &dev_priv->VBIOS; 3768 struct nvbios *bios = &dev_priv->VBIOS;
3770 uint8_t *table = &bios->data[bios->display.script_table_ptr]; 3769 uint8_t *table = &bios->data[bios->display.script_table_ptr];
3771 uint8_t *otable = NULL; 3770 uint8_t *otable = NULL;
@@ -3845,8 +3844,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3845 } 3844 }
3846 } 3845 }
3847 3846
3848 bios->display.output = dcbent;
3849
3850 if (pxclk == 0) { 3847 if (pxclk == 0) {
3851 script = ROM16(otable[6]); 3848 script = ROM16(otable[6]);
3852 if (!script) { 3849 if (!script) {
@@ -3855,7 +3852,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3855 } 3852 }
3856 3853
3857 NV_TRACE(dev, "0x%04X: parsing output script 0\n", script); 3854 NV_TRACE(dev, "0x%04X: parsing output script 0\n", script);
3858 parse_init_table(bios, script, &iexec); 3855 nouveau_bios_run_init_table(dev, script, dcbent);
3859 } else 3856 } else
3860 if (pxclk == -1) { 3857 if (pxclk == -1) {
3861 script = ROM16(otable[8]); 3858 script = ROM16(otable[8]);
@@ -3865,7 +3862,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3865 } 3862 }
3866 3863
3867 NV_TRACE(dev, "0x%04X: parsing output script 1\n", script); 3864 NV_TRACE(dev, "0x%04X: parsing output script 1\n", script);
3868 parse_init_table(bios, script, &iexec); 3865 nouveau_bios_run_init_table(dev, script, dcbent);
3869 } else 3866 } else
3870 if (pxclk == -2) { 3867 if (pxclk == -2) {
3871 if (table[4] >= 12) 3868 if (table[4] >= 12)
@@ -3878,7 +3875,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3878 } 3875 }
3879 3876
3880 NV_TRACE(dev, "0x%04X: parsing output script 2\n", script); 3877 NV_TRACE(dev, "0x%04X: parsing output script 2\n", script);
3881 parse_init_table(bios, script, &iexec); 3878 nouveau_bios_run_init_table(dev, script, dcbent);
3882 } else 3879 } else
3883 if (pxclk > 0) { 3880 if (pxclk > 0) {
3884 script = ROM16(otable[table[4] + i*6 + 2]); 3881 script = ROM16(otable[table[4] + i*6 + 2]);
@@ -3890,7 +3887,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3890 } 3887 }
3891 3888
3892 NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script); 3889 NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script);
3893 parse_init_table(bios, script, &iexec); 3890 nouveau_bios_run_init_table(dev, script, dcbent);
3894 } else 3891 } else
3895 if (pxclk < 0) { 3892 if (pxclk < 0) {
3896 script = ROM16(otable[table[4] + i*6 + 4]); 3893 script = ROM16(otable[table[4] + i*6 + 4]);
@@ -3902,7 +3899,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3902 } 3899 }
3903 3900
3904 NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script); 3901 NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script);
3905 parse_init_table(bios, script, &iexec); 3902 nouveau_bios_run_init_table(dev, script, dcbent);
3906 } 3903 }
3907 3904
3908 return 0; 3905 return 0;
@@ -5865,9 +5862,11 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
5865 struct nvbios *bios = &dev_priv->VBIOS; 5862 struct nvbios *bios = &dev_priv->VBIOS;
5866 struct init_exec iexec = { true, false }; 5863 struct init_exec iexec = { true, false };
5867 5864
5865 mutex_lock(&bios->lock);
5868 bios->display.output = dcbent; 5866 bios->display.output = dcbent;
5869 parse_init_table(bios, table, &iexec); 5867 parse_init_table(bios, table, &iexec);
5870 bios->display.output = NULL; 5868 bios->display.output = NULL;
5869 mutex_unlock(&bios->lock);
5871} 5870}
5872 5871
5873static bool NVInitVBIOS(struct drm_device *dev) 5872static bool NVInitVBIOS(struct drm_device *dev)
@@ -5876,6 +5875,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
5876 struct nvbios *bios = &dev_priv->VBIOS; 5875 struct nvbios *bios = &dev_priv->VBIOS;
5877 5876
5878 memset(bios, 0, sizeof(struct nvbios)); 5877 memset(bios, 0, sizeof(struct nvbios));
5878 mutex_init(&bios->lock);
5879 bios->dev = dev; 5879 bios->dev = dev;
5880 5880
5881 if (!NVShadowVBIOS(dev, bios->data)) 5881 if (!NVShadowVBIOS(dev, bios->data))
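With the display scripts now routed through nouveau_bios_run_init_table(), the new bios->lock serializes every init-table parse and protects the shared bios->display.output pointer. The pattern, boiled down to its essentials (a restatement of the hunks above, not additional driver code):

	mutex_init(&bios->lock);		/* once, in NVInitVBIOS() */

	mutex_lock(&bios->lock);
	bios->display.output = dcbent;		/* state the tables read back */
	parse_init_table(bios, table, &iexec);
	bios->display.output = NULL;
	mutex_unlock(&bios->lock);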
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 058e98c76d89..fd94bd6dc264 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -205,6 +205,8 @@ struct nvbios {
205 struct drm_device *dev; 205 struct drm_device *dev;
206 struct nouveau_bios_info pub; 206 struct nouveau_bios_info pub;
207 207
208 struct mutex lock;
209
208 uint8_t data[NV_PROM_SIZE]; 210 uint8_t data[NV_PROM_SIZE];
209 unsigned int length; 211 unsigned int length;
210 bool execute; 212 bool execute;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index db0ed4c13f98..028719fddf76 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -65,8 +65,10 @@ nouveau_bo_fixup_align(struct drm_device *dev,
65 65
66 /* 66 /*
67 * Some of the tile_flags have a periodic structure of N*4096 bytes, 67 * Some of the tile_flags have a periodic structure of N*4096 bytes,
 68 * align to that as well as the page size. Overallocate memory to 68 * align to that as well as the page size. Align the size to the
69 * avoid corruption of other buffer objects. 69 * appropriate boundaries. This does imply that sizes are rounded up
70 * 3-7 pages, so be aware of this and do not waste memory by allocating
71 * many small buffers.
70 */ 72 */
71 if (dev_priv->card_type == NV_50) { 73 if (dev_priv->card_type == NV_50) {
72 uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15; 74 uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15;
@@ -77,22 +79,20 @@ nouveau_bo_fixup_align(struct drm_device *dev,
77 case 0x2800: 79 case 0x2800:
78 case 0x4800: 80 case 0x4800:
79 case 0x7a00: 81 case 0x7a00:
80 *size = roundup(*size, block_size);
81 if (is_power_of_2(block_size)) { 82 if (is_power_of_2(block_size)) {
82 *size += 3 * block_size;
83 for (i = 1; i < 10; i++) { 83 for (i = 1; i < 10; i++) {
84 *align = 12 * i * block_size; 84 *align = 12 * i * block_size;
85 if (!(*align % 65536)) 85 if (!(*align % 65536))
86 break; 86 break;
87 } 87 }
88 } else { 88 } else {
89 *size += 6 * block_size;
90 for (i = 1; i < 10; i++) { 89 for (i = 1; i < 10; i++) {
91 *align = 8 * i * block_size; 90 *align = 8 * i * block_size;
92 if (!(*align % 65536)) 91 if (!(*align % 65536))
93 break; 92 break;
94 } 93 }
95 } 94 }
95 *size = roundup(*size, *align);
96 break; 96 break;
97 default: 97 default:
98 break; 98 break;
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 343d718a9667..2281f99da7fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -278,12 +278,11 @@ nouveau_channel_free(struct nouveau_channel *chan)
278 /* Ensure the channel is no longer active on the GPU */ 278 /* Ensure the channel is no longer active on the GPU */
279 pfifo->reassign(dev, false); 279 pfifo->reassign(dev, false);
280 280
281 if (pgraph->channel(dev) == chan) { 281 pgraph->fifo_access(dev, false);
282 pgraph->fifo_access(dev, false); 282 if (pgraph->channel(dev) == chan)
283 pgraph->unload_context(dev); 283 pgraph->unload_context(dev);
284 pgraph->fifo_access(dev, true);
285 }
286 pgraph->destroy_context(chan); 284 pgraph->destroy_context(chan);
285 pgraph->fifo_access(dev, true);
287 286
288 if (pfifo->channel_id(dev) == chan->id) { 287 if (pfifo->channel_id(dev) == chan->id) {
289 pfifo->disable(dev); 288 pfifo->disable(dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 7e6d673f3a23..d2f63353ea97 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -88,13 +88,14 @@ nouveau_connector_destroy(struct drm_connector *drm_connector)
88{ 88{
89 struct nouveau_connector *nv_connector = 89 struct nouveau_connector *nv_connector =
90 nouveau_connector(drm_connector); 90 nouveau_connector(drm_connector);
91 struct drm_device *dev = nv_connector->base.dev; 91 struct drm_device *dev;
92
93 NV_DEBUG_KMS(dev, "\n");
94 92
95 if (!nv_connector) 93 if (!nv_connector)
96 return; 94 return;
97 95
96 dev = nv_connector->base.dev;
97 NV_DEBUG_KMS(dev, "\n");
98
98 kfree(nv_connector->edid); 99 kfree(nv_connector->edid);
99 drm_sysfs_connector_remove(drm_connector); 100 drm_sysfs_connector_remove(drm_connector);
100 drm_connector_cleanup(drm_connector); 101 drm_connector_cleanup(drm_connector);
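This hunk, and the matching ones in nouveau_sgdma.c, nv50_crtc.c and nouveau_object.c further down, all fix the same shape of bug: a local was initialized by dereferencing the pointer before the NULL check that guards it. In generic terms (obj and its member are placeholders):

	/* broken: obj is dereferenced before the guard */
	struct foo *priv = obj->private;
	if (!obj)
		return;

	/* fixed: guard first, dereference after */
	if (!obj)
		return;
	priv = obj->private;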
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index dd4937224220..f954ad93e81f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -502,12 +502,12 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
502 break; 502 break;
503 } 503 }
504 504
505 if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
506 ret = -EREMOTEIO;
507 goto out;
508 }
509
510 if (cmd & 1) { 505 if (cmd & 1) {
506 if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
507 ret = -EREMOTEIO;
508 goto out;
509 }
510
511 for (i = 0; i < 4; i++) { 511 for (i = 0; i < 4; i++) {
512 data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i)); 512 data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i));
513 NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]); 513 NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 343ab7f17ccc..da3b93b84502 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -56,7 +56,7 @@ int nouveau_vram_pushbuf;
56module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400); 56module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
57 57
58MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM"); 58MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM");
59int nouveau_vram_notify; 59int nouveau_vram_notify = 1;
60module_param_named(vram_notify, nouveau_vram_notify, int, 0400); 60module_param_named(vram_notify, nouveau_vram_notify, int, 0400);
61 61
62MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)"); 62MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)");
@@ -75,6 +75,14 @@ MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
75int nouveau_ignorelid = 0; 75int nouveau_ignorelid = 0;
76module_param_named(ignorelid, nouveau_ignorelid, int, 0400); 76module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
77 77
 78MODULE_PARM_DESC(noaccel, "Disable all acceleration");
79int nouveau_noaccel = 0;
80module_param_named(noaccel, nouveau_noaccel, int, 0400);
81
 82MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
83int nouveau_nofbaccel = 0;
84module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
85
78MODULE_PARM_DESC(tv_norm, "Default TV norm.\n" 86MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
79 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n" 87 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
80 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n" 88 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
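Both new options use module_param_named() with 0400 permissions, so they can only be set at load time, e.g. "modprobe nouveau noaccel=1" or "nouveau.nofbaccel=1" on the kernel command line when the driver is built in. As the later hunks show, noaccel skips PGRAPH/PFIFO setup entirely (nouveau_state.c), while nofbaccel only drops the accelerated fbcon paths (nouveau_fbcon.c).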
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 6b9690418bc7..1c15ef37b71c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -583,6 +583,7 @@ struct drm_nouveau_private {
583 uint64_t vm_end; 583 uint64_t vm_end;
584 struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; 584 struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
585 int vm_vram_pt_nr; 585 int vm_vram_pt_nr;
586 uint64_t vram_sys_base;
586 587
587 /* the mtrr covering the FB */ 588 /* the mtrr covering the FB */
588 int fb_mtrr; 589 int fb_mtrr;
@@ -678,6 +679,8 @@ extern int nouveau_reg_debug;
678extern char *nouveau_vbios; 679extern char *nouveau_vbios;
679extern int nouveau_ctxfw; 680extern int nouveau_ctxfw;
680extern int nouveau_ignorelid; 681extern int nouveau_ignorelid;
682extern int nouveau_nofbaccel;
683extern int nouveau_noaccel;
681 684
682/* nouveau_state.c */ 685/* nouveau_state.c */
683extern void nouveau_preclose(struct drm_device *dev, struct drm_file *); 686extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 0b05c869e0e7..ea879a2efef3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -107,6 +107,34 @@ static struct fb_ops nouveau_fbcon_ops = {
107 .fb_setcmap = drm_fb_helper_setcmap, 107 .fb_setcmap = drm_fb_helper_setcmap,
108}; 108};
109 109
110static struct fb_ops nv04_fbcon_ops = {
111 .owner = THIS_MODULE,
112 .fb_check_var = drm_fb_helper_check_var,
113 .fb_set_par = drm_fb_helper_set_par,
114 .fb_setcolreg = drm_fb_helper_setcolreg,
115 .fb_fillrect = nv04_fbcon_fillrect,
116 .fb_copyarea = nv04_fbcon_copyarea,
117 .fb_imageblit = nv04_fbcon_imageblit,
118 .fb_sync = nouveau_fbcon_sync,
119 .fb_pan_display = drm_fb_helper_pan_display,
120 .fb_blank = drm_fb_helper_blank,
121 .fb_setcmap = drm_fb_helper_setcmap,
122};
123
124static struct fb_ops nv50_fbcon_ops = {
125 .owner = THIS_MODULE,
126 .fb_check_var = drm_fb_helper_check_var,
127 .fb_set_par = drm_fb_helper_set_par,
128 .fb_setcolreg = drm_fb_helper_setcolreg,
129 .fb_fillrect = nv50_fbcon_fillrect,
130 .fb_copyarea = nv50_fbcon_copyarea,
131 .fb_imageblit = nv50_fbcon_imageblit,
132 .fb_sync = nouveau_fbcon_sync,
133 .fb_pan_display = drm_fb_helper_pan_display,
134 .fb_blank = drm_fb_helper_blank,
135 .fb_setcmap = drm_fb_helper_setcmap,
136};
137
110static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, 138static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
111 u16 blue, int regno) 139 u16 blue, int regno)
112{ 140{
@@ -267,8 +295,12 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
267 dev_priv->fbdev_info = info; 295 dev_priv->fbdev_info = info;
268 296
269 strcpy(info->fix.id, "nouveaufb"); 297 strcpy(info->fix.id, "nouveaufb");
270 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | 298 if (nouveau_nofbaccel)
271 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT; 299 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED;
300 else
301 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
302 FBINFO_HWACCEL_FILLRECT |
303 FBINFO_HWACCEL_IMAGEBLIT;
272 info->fbops = &nouveau_fbcon_ops; 304 info->fbops = &nouveau_fbcon_ops;
273 info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset - 305 info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset -
274 dev_priv->vm_vram_base; 306 dev_priv->vm_vram_base;
@@ -316,13 +348,15 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
316 par->nouveau_fb = nouveau_fb; 348 par->nouveau_fb = nouveau_fb;
317 par->dev = dev; 349 par->dev = dev;
318 350
319 if (dev_priv->channel) { 351 if (dev_priv->channel && !nouveau_nofbaccel) {
320 switch (dev_priv->card_type) { 352 switch (dev_priv->card_type) {
321 case NV_50: 353 case NV_50:
322 nv50_fbcon_accel_init(info); 354 nv50_fbcon_accel_init(info);
355 info->fbops = &nv50_fbcon_ops;
323 break; 356 break;
324 default: 357 default:
325 nv04_fbcon_accel_init(info); 358 nv04_fbcon_accel_init(info);
359 info->fbops = &nv04_fbcon_ops;
326 break; 360 break;
327 }; 361 };
328 } 362 }
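Instead of patching individual fb_ops members after the framebuffer is registered (the assignments removed from nv04_fbcon.c and nv50_fbcon.c below), the driver now carries one complete fb_ops table per generation and switches the whole info->fbops pointer, which also keeps the nofbaccel case on the default, unaccelerated table. The selection reduces to roughly:

	if (nouveau_nofbaccel || !dev_priv->channel)
		info->fbops = &nouveau_fbcon_ops;	/* unaccelerated helpers */
	else if (dev_priv->card_type == NV_50)
		info->fbops = &nv50_fbcon_ops;
	else
		info->fbops = &nv04_fbcon_ops;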
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index 462e0b87b4bd..f9c34e1a8c11 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -40,7 +40,13 @@ int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb);
40void nouveau_fbcon_restore(void); 40void nouveau_fbcon_restore(void);
41void nouveau_fbcon_zfill(struct drm_device *dev); 41void nouveau_fbcon_zfill(struct drm_device *dev);
42 42
43void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
44void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
45void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
43int nv04_fbcon_accel_init(struct fb_info *info); 46int nv04_fbcon_accel_init(struct fb_info *info);
47void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
48void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
49void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
44int nv50_fbcon_accel_init(struct fb_info *info); 50int nv50_fbcon_accel_init(struct fb_info *info);
45 51
46void nouveau_fbcon_gpu_lockup(struct fb_info *info); 52void nouveau_fbcon_gpu_lockup(struct fb_info *info);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 6ac804b0c9f9..70cc30803e3b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -925,7 +925,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
925 } 925 }
926 926
927 if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) { 927 if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
928 spin_lock(&nvbo->bo.lock);
928 ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait); 929 ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
930 spin_unlock(&nvbo->bo.lock);
929 } else { 931 } else {
930 ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait); 932 ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
931 if (ret == 0) 933 if (ret == 0)
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c
index 419f4c2b3b89..c7ebec696747 100644
--- a/drivers/gpu/drm/nouveau/nouveau_grctx.c
+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.c
@@ -97,8 +97,8 @@ nouveau_grctx_prog_load(struct drm_device *dev)
97 } 97 }
98 98
99 pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL); 99 pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
100 if (!pgraph->ctxprog) { 100 if (!pgraph->ctxvals) {
101 NV_ERROR(dev, "OOM copying ctxprog\n"); 101 NV_ERROR(dev, "OOM copying ctxvals\n");
102 release_firmware(fw); 102 release_firmware(fw);
103 nouveau_grctx_fini(dev); 103 nouveau_grctx_fini(dev);
104 return -ENOMEM; 104 return -ENOMEM;
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 3b9bad66162a..447f9f69d6b1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -211,6 +211,20 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
211 get + 4); 211 get + 4);
212 } 212 }
213 213
214 if (status & NV_PFIFO_INTR_SEMAPHORE) {
215 uint32_t sem;
216
217 status &= ~NV_PFIFO_INTR_SEMAPHORE;
218 nv_wr32(dev, NV03_PFIFO_INTR_0,
219 NV_PFIFO_INTR_SEMAPHORE);
220
221 sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
222 nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
223
224 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
225 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
226 }
227
214 if (status) { 228 if (status) {
215 NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n", 229 NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
216 status, chid); 230 status, chid);
@@ -566,86 +580,99 @@ nouveau_pgraph_irq_handler(struct drm_device *dev)
566static void 580static void
567nv50_pgraph_irq_handler(struct drm_device *dev) 581nv50_pgraph_irq_handler(struct drm_device *dev)
568{ 582{
569 uint32_t status, nsource; 583 uint32_t status;
570 584
571 status = nv_rd32(dev, NV03_PGRAPH_INTR); 585 while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
572 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); 586 uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
573 587
574 if (status & 0x00000001) { 588 if (status & 0x00000001) {
575 nouveau_pgraph_intr_notify(dev, nsource); 589 nouveau_pgraph_intr_notify(dev, nsource);
576 status &= ~0x00000001; 590 status &= ~0x00000001;
577 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001); 591 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
578 } 592 }
579 593
580 if (status & 0x00000010) { 594 if (status & 0x00000010) {
581 nouveau_pgraph_intr_error(dev, nsource | 595 nouveau_pgraph_intr_error(dev, nsource |
582 NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD); 596 NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
583 597
584 status &= ~0x00000010; 598 status &= ~0x00000010;
585 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010); 599 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
586 } 600 }
587 601
588 if (status & 0x00001000) { 602 if (status & 0x00001000) {
589 nv_wr32(dev, 0x400500, 0x00000000); 603 nv_wr32(dev, 0x400500, 0x00000000);
590 nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH); 604 nv_wr32(dev, NV03_PGRAPH_INTR,
591 nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev, 605 NV_PGRAPH_INTR_CONTEXT_SWITCH);
592 NV40_PGRAPH_INTR_EN) & ~NV_PGRAPH_INTR_CONTEXT_SWITCH); 606 nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
593 nv_wr32(dev, 0x400500, 0x00010001); 607 NV40_PGRAPH_INTR_EN) &
608 ~NV_PGRAPH_INTR_CONTEXT_SWITCH);
609 nv_wr32(dev, 0x400500, 0x00010001);
594 610
595 nv50_graph_context_switch(dev); 611 nv50_graph_context_switch(dev);
596 612
597 status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; 613 status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
598 } 614 }
599 615
600 if (status & 0x00100000) { 616 if (status & 0x00100000) {
601 nouveau_pgraph_intr_error(dev, nsource | 617 nouveau_pgraph_intr_error(dev, nsource |
602 NV03_PGRAPH_NSOURCE_DATA_ERROR); 618 NV03_PGRAPH_NSOURCE_DATA_ERROR);
603 619
604 status &= ~0x00100000; 620 status &= ~0x00100000;
605 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000); 621 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
606 } 622 }
607 623
608 if (status & 0x00200000) { 624 if (status & 0x00200000) {
609 int r; 625 int r;
610 626
611 nouveau_pgraph_intr_error(dev, nsource | 627 nouveau_pgraph_intr_error(dev, nsource |
612 NV03_PGRAPH_NSOURCE_PROTECTION_ERROR); 628 NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
613 629
614 NV_ERROR(dev, "magic set 1:\n"); 630 NV_ERROR(dev, "magic set 1:\n");
615 for (r = 0x408900; r <= 0x408910; r += 4) 631 for (r = 0x408900; r <= 0x408910; r += 4)
616 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); 632 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
617 nv_wr32(dev, 0x408900, nv_rd32(dev, 0x408904) | 0xc0000000); 633 nv_rd32(dev, r));
618 for (r = 0x408e08; r <= 0x408e24; r += 4) 634 nv_wr32(dev, 0x408900,
619 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); 635 nv_rd32(dev, 0x408904) | 0xc0000000);
620 nv_wr32(dev, 0x408e08, nv_rd32(dev, 0x408e08) | 0xc0000000); 636 for (r = 0x408e08; r <= 0x408e24; r += 4)
621 637 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
622 NV_ERROR(dev, "magic set 2:\n"); 638 nv_rd32(dev, r));
623 for (r = 0x409900; r <= 0x409910; r += 4) 639 nv_wr32(dev, 0x408e08,
624 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); 640 nv_rd32(dev, 0x408e08) | 0xc0000000);
625 nv_wr32(dev, 0x409900, nv_rd32(dev, 0x409904) | 0xc0000000); 641
626 for (r = 0x409e08; r <= 0x409e24; r += 4) 642 NV_ERROR(dev, "magic set 2:\n");
627 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); 643 for (r = 0x409900; r <= 0x409910; r += 4)
628 nv_wr32(dev, 0x409e08, nv_rd32(dev, 0x409e08) | 0xc0000000); 644 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
629 645 nv_rd32(dev, r));
630 status &= ~0x00200000; 646 nv_wr32(dev, 0x409900,
631 nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource); 647 nv_rd32(dev, 0x409904) | 0xc0000000);
632 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000); 648 for (r = 0x409e08; r <= 0x409e24; r += 4)
633 } 649 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
650 nv_rd32(dev, r));
651 nv_wr32(dev, 0x409e08,
652 nv_rd32(dev, 0x409e08) | 0xc0000000);
653
654 status &= ~0x00200000;
655 nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
656 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
657 }
634 658
635 if (status) { 659 if (status) {
636 NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status); 660 NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
637 nv_wr32(dev, NV03_PGRAPH_INTR, status); 661 status);
638 } 662 nv_wr32(dev, NV03_PGRAPH_INTR, status);
663 }
639 664
640 { 665 {
641 const int isb = (1 << 16) | (1 << 0); 666 const int isb = (1 << 16) | (1 << 0);
642 667
643 if ((nv_rd32(dev, 0x400500) & isb) != isb) 668 if ((nv_rd32(dev, 0x400500) & isb) != isb)
644 nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | isb); 669 nv_wr32(dev, 0x400500,
645 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); 670 nv_rd32(dev, 0x400500) | isb);
671 }
646 } 672 }
647 673
648 nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); 674 nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
675 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
649} 676}
650 677
651static void 678static void
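The rewritten nv50 handler re-reads NV03_PGRAPH_INTR in a loop, so interrupts raised while earlier bits are being serviced are handled before the final PMC acknowledgement; every recognized bit is both cleared from the local status and acked by writing it back to the interrupt register. The skeleton, with a placeholder bit and handler:

	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		if (status & EXAMPLE_INTR_BIT) {	/* placeholder bit */
			handle_example_intr(dev);	/* placeholder handler */
			status &= ~EXAMPLE_INTR_BIT;
			nv_wr32(dev, NV03_PGRAPH_INTR, EXAMPLE_INTR_BIT);
		}

		if (status) {	/* anything unrecognized: log it and ack it anyway */
			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
			nv_wr32(dev, NV03_PGRAPH_INTR, status);
		}
	}
	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);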
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 8f3a12f614ed..2dc09dbd817d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -285,53 +285,50 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
285 uint32_t flags, uint64_t phys) 285 uint32_t flags, uint64_t phys)
286{ 286{
287 struct drm_nouveau_private *dev_priv = dev->dev_private; 287 struct drm_nouveau_private *dev_priv = dev->dev_private;
288 struct nouveau_gpuobj **pgt; 288 struct nouveau_gpuobj *pgt;
289 unsigned psz, pfl, pages; 289 unsigned block;
290 290 int i;
291 if (virt >= dev_priv->vm_gart_base &&
292 (virt + size) < (dev_priv->vm_gart_base + dev_priv->vm_gart_size)) {
293 psz = 12;
294 pgt = &dev_priv->gart_info.sg_ctxdma;
295 pfl = 0x21;
296 virt -= dev_priv->vm_gart_base;
297 } else
298 if (virt >= dev_priv->vm_vram_base &&
299 (virt + size) < (dev_priv->vm_vram_base + dev_priv->vm_vram_size)) {
300 psz = 16;
301 pgt = dev_priv->vm_vram_pt;
302 pfl = 0x01;
303 virt -= dev_priv->vm_vram_base;
304 } else {
305 NV_ERROR(dev, "Invalid address: 0x%16llx-0x%16llx\n",
306 virt, virt + size - 1);
307 return -EINVAL;
308 }
309 291
310 pages = size >> psz; 292 virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1;
293 size = (size >> 16) << 1;
294
295 phys |= ((uint64_t)flags << 32);
296 phys |= 1;
297 if (dev_priv->vram_sys_base) {
298 phys += dev_priv->vram_sys_base;
299 phys |= 0x30;
300 }
311 301
312 dev_priv->engine.instmem.prepare_access(dev, true); 302 dev_priv->engine.instmem.prepare_access(dev, true);
313 if (flags & 0x80000000) { 303 while (size) {
314 while (pages--) { 304 unsigned offset_h = upper_32_bits(phys);
315 struct nouveau_gpuobj *pt = pgt[virt >> 29]; 305 unsigned offset_l = lower_32_bits(phys);
316 unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1; 306 unsigned pte, end;
307
308 for (i = 7; i >= 0; i--) {
309 block = 1 << (i + 1);
310 if (size >= block && !(virt & (block - 1)))
311 break;
312 }
313 offset_l |= (i << 7);
317 314
318 nv_wo32(dev, pt, pte++, 0x00000000); 315 phys += block << 15;
319 nv_wo32(dev, pt, pte++, 0x00000000); 316 size -= block;
320 317
321 virt += (1 << psz); 318 while (block) {
322 } 319 pgt = dev_priv->vm_vram_pt[virt >> 14];
323 } else { 320 pte = virt & 0x3ffe;
324 while (pages--) {
325 struct nouveau_gpuobj *pt = pgt[virt >> 29];
326 unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
327 unsigned offset_h = upper_32_bits(phys) & 0xff;
328 unsigned offset_l = lower_32_bits(phys);
329 321
330 nv_wo32(dev, pt, pte++, offset_l | pfl); 322 end = pte + block;
331 nv_wo32(dev, pt, pte++, offset_h | flags); 323 if (end > 16384)
324 end = 16384;
325 block -= (end - pte);
326 virt += (end - pte);
332 327
333 phys += (1 << psz); 328 while (pte < end) {
334 virt += (1 << psz); 329 nv_wo32(dev, pgt, pte++, offset_l);
330 nv_wo32(dev, pgt, pte++, offset_h);
331 }
335 } 332 }
336 } 333 }
337 dev_priv->engine.instmem.finish_access(dev); 334 dev_priv->engine.instmem.finish_access(dev);
@@ -356,7 +353,41 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
356void 353void
357nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size) 354nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
358{ 355{
359 nv50_mem_vm_bind_linear(dev, virt, size, 0x80000000, 0); 356 struct drm_nouveau_private *dev_priv = dev->dev_private;
357 struct nouveau_gpuobj *pgt;
358 unsigned pages, pte, end;
359
360 virt -= dev_priv->vm_vram_base;
361 pages = (size >> 16) << 1;
362
363 dev_priv->engine.instmem.prepare_access(dev, true);
364 while (pages) {
365 pgt = dev_priv->vm_vram_pt[virt >> 29];
366 pte = (virt & 0x1ffe0000ULL) >> 15;
367
368 end = pte + pages;
369 if (end > 16384)
370 end = 16384;
371 pages -= (end - pte);
372 virt += (end - pte) << 15;
373
374 while (pte < end)
375 nv_wo32(dev, pgt, pte++, 0);
376 }
377 dev_priv->engine.instmem.finish_access(dev);
378
379 nv_wr32(dev, 0x100c80, 0x00050001);
380 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
381 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
382 NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
383 return;
384 }
385
386 nv_wr32(dev, 0x100c80, 0x00000001);
387 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
388 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
389 NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
390 }
360} 391}
361 392
362/* 393/*
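Both the rewritten bind and the new unbind walk the page tables in dev_priv->vm_vram_pt[] two 32-bit words at a time, one pair per 64 KiB page: the low word carries the low physical-address bits plus the valid and block-size bits, the high word the upper address bits and the tile flags. Writing a single entry, reduced from the loop above (a simplification; the block-size encoding ORed into the low word is omitted):

	uint64_t entry = phys | ((uint64_t)flags << 32) | 1;	/* bit 0: present */

	nv_wo32(dev, pgt, pte++, lower_32_bits(entry));
	nv_wo32(dev, pgt, pte++, upper_32_bits(entry));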
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 6c66a34b6345..d99dc087f9b1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -34,15 +34,20 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
34{ 34{
35 struct drm_device *dev = chan->dev; 35 struct drm_device *dev = chan->dev;
36 struct nouveau_bo *ntfy = NULL; 36 struct nouveau_bo *ntfy = NULL;
37 uint32_t flags;
37 int ret; 38 int ret;
38 39
39 ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, nouveau_vram_notify ? 40 if (nouveau_vram_notify)
40 TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT, 41 flags = TTM_PL_FLAG_VRAM;
42 else
43 flags = TTM_PL_FLAG_TT;
44
45 ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags,
41 0, 0x0000, false, true, &ntfy); 46 0, 0x0000, false, true, &ntfy);
42 if (ret) 47 if (ret)
43 return ret; 48 return ret;
44 49
45 ret = nouveau_bo_pin(ntfy, TTM_PL_FLAG_VRAM); 50 ret = nouveau_bo_pin(ntfy, flags);
46 if (ret) 51 if (ret)
47 goto out_err; 52 goto out_err;
48 53
@@ -128,6 +133,8 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
128 target = NV_DMA_TARGET_PCI; 133 target = NV_DMA_TARGET_PCI;
129 } else { 134 } else {
130 target = NV_DMA_TARGET_AGP; 135 target = NV_DMA_TARGET_AGP;
136 if (dev_priv->card_type >= NV_50)
137 offset += dev_priv->vm_gart_base;
131 } 138 }
132 } else { 139 } else {
133 NV_ERROR(dev, "Bad DMA target, mem_type %d!\n", 140 NV_ERROR(dev, "Bad DMA target, mem_type %d!\n",
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 6c2cf81716df..e7c100ba63a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -885,11 +885,12 @@ int
885nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, 885nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
886 struct nouveau_gpuobj **gpuobj_ret) 886 struct nouveau_gpuobj **gpuobj_ret)
887{ 887{
888 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 888 struct drm_nouveau_private *dev_priv;
889 struct nouveau_gpuobj *gpuobj; 889 struct nouveau_gpuobj *gpuobj;
890 890
891 if (!chan || !gpuobj_ret || *gpuobj_ret != NULL) 891 if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
892 return -EINVAL; 892 return -EINVAL;
893 dev_priv = chan->dev->dev_private;
893 894
894 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); 895 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
895 if (!gpuobj) 896 if (!gpuobj)
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index 251f1b3b38b9..aa9b310e41be 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -99,6 +99,7 @@
99 * the card will hang early on in the X init process. 99 * the card will hang early on in the X init process.
100 */ 100 */
101# define NV_PMC_ENABLE_UNK13 (1<<13) 101# define NV_PMC_ENABLE_UNK13 (1<<13)
102#define NV40_PMC_GRAPH_UNITS 0x00001540
102#define NV40_PMC_BACKLIGHT 0x000015f0 103#define NV40_PMC_BACKLIGHT 0x000015f0
103# define NV40_PMC_BACKLIGHT_MASK 0x001f0000 104# define NV40_PMC_BACKLIGHT_MASK 0x001f0000
104#define NV40_PMC_1700 0x00001700 105#define NV40_PMC_1700 0x00001700
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 4c7f1e403e80..ed1590577b6c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -54,11 +54,12 @@ static void
54nouveau_sgdma_clear(struct ttm_backend *be) 54nouveau_sgdma_clear(struct ttm_backend *be)
55{ 55{
56 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; 56 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
57 struct drm_device *dev = nvbe->dev; 57 struct drm_device *dev;
58
59 NV_DEBUG(nvbe->dev, "\n");
60 58
61 if (nvbe && nvbe->pages) { 59 if (nvbe && nvbe->pages) {
60 dev = nvbe->dev;
61 NV_DEBUG(dev, "\n");
62
62 if (nvbe->bound) 63 if (nvbe->bound)
63 be->func->unbind(be); 64 be->func->unbind(be);
64 65
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index f2d0187ba152..a4851af5b05e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -310,6 +310,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
310static unsigned int 310static unsigned int
311nouveau_vga_set_decode(void *priv, bool state) 311nouveau_vga_set_decode(void *priv, bool state)
312{ 312{
313 struct drm_device *dev = priv;
314 struct drm_nouveau_private *dev_priv = dev->dev_private;
315
316 if (dev_priv->chipset >= 0x40)
317 nv_wr32(dev, 0x88054, state);
318 else
319 nv_wr32(dev, 0x1854, state);
320
313 if (state) 321 if (state)
314 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | 322 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
315 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 323 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
@@ -427,15 +435,19 @@ nouveau_card_init(struct drm_device *dev)
427 if (ret) 435 if (ret)
428 goto out_timer; 436 goto out_timer;
429 437
430 /* PGRAPH */ 438 if (nouveau_noaccel)
431 ret = engine->graph.init(dev); 439 engine->graph.accel_blocked = true;
432 if (ret) 440 else {
433 goto out_fb; 441 /* PGRAPH */
442 ret = engine->graph.init(dev);
443 if (ret)
444 goto out_fb;
434 445
435 /* PFIFO */ 446 /* PFIFO */
436 ret = engine->fifo.init(dev); 447 ret = engine->fifo.init(dev);
437 if (ret) 448 if (ret)
438 goto out_graph; 449 goto out_graph;
450 }
439 451
 440 /* this calls irq_preinstall, registers the irq handler and 452 /* this calls irq_preinstall, registers the irq handler and
 441 * calls irq_postinstall 453 * calls irq_postinstall
@@ -479,9 +491,11 @@ nouveau_card_init(struct drm_device *dev)
479out_irq: 491out_irq:
480 drm_irq_uninstall(dev); 492 drm_irq_uninstall(dev);
481out_fifo: 493out_fifo:
482 engine->fifo.takedown(dev); 494 if (!nouveau_noaccel)
495 engine->fifo.takedown(dev);
483out_graph: 496out_graph:
484 engine->graph.takedown(dev); 497 if (!nouveau_noaccel)
498 engine->graph.takedown(dev);
485out_fb: 499out_fb:
486 engine->fb.takedown(dev); 500 engine->fb.takedown(dev);
487out_timer: 501out_timer:
@@ -518,8 +532,10 @@ static void nouveau_card_takedown(struct drm_device *dev)
518 dev_priv->channel = NULL; 532 dev_priv->channel = NULL;
519 } 533 }
520 534
521 engine->fifo.takedown(dev); 535 if (!nouveau_noaccel) {
522 engine->graph.takedown(dev); 536 engine->fifo.takedown(dev);
537 engine->graph.takedown(dev);
538 }
523 engine->fb.takedown(dev); 539 engine->fb.takedown(dev);
524 engine->timer.takedown(dev); 540 engine->timer.takedown(dev);
525 engine->mc.takedown(dev); 541 engine->mc.takedown(dev);
@@ -817,6 +833,15 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
817 case NOUVEAU_GETPARAM_VM_VRAM_BASE: 833 case NOUVEAU_GETPARAM_VM_VRAM_BASE:
818 getparam->value = dev_priv->vm_vram_base; 834 getparam->value = dev_priv->vm_vram_base;
819 break; 835 break;
836 case NOUVEAU_GETPARAM_GRAPH_UNITS:
837 /* NV40 and NV50 versions are quite different, but register
838 * address is the same. User is supposed to know the card
839 * family anyway... */
840 if (dev_priv->chipset >= 0x40) {
841 getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS);
842 break;
843 }
844 /* FALLTHRU */
820 default: 845 default:
821 NV_ERROR(dev, "unknown parameter %lld\n", getparam->param); 846 NV_ERROR(dev, "unknown parameter %lld\n", getparam->param);
822 return -EINVAL; 847 return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index d0e038d28948..1d73b15d70da 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -119,7 +119,7 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
119 struct drm_connector *connector) 119 struct drm_connector *connector)
120{ 120{
121 struct drm_device *dev = encoder->dev; 121 struct drm_device *dev = encoder->dev;
122 uint8_t saved_seq1, saved_pi, saved_rpc1; 122 uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode;
123 uint8_t saved_palette0[3], saved_palette_mask; 123 uint8_t saved_palette0[3], saved_palette_mask;
124 uint32_t saved_rtest_ctrl, saved_rgen_ctrl; 124 uint32_t saved_rtest_ctrl, saved_rgen_ctrl;
125 int i; 125 int i;
@@ -135,6 +135,9 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
135 /* only implemented for head A for now */ 135 /* only implemented for head A for now */
136 NVSetOwner(dev, 0); 136 NVSetOwner(dev, 0);
137 137
138 saved_cr_mode = NVReadVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX);
139 NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode | 0x80);
140
138 saved_seq1 = NVReadVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX); 141 saved_seq1 = NVReadVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX);
139 NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1 & ~0x20); 142 NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1 & ~0x20);
140 143
@@ -203,6 +206,7 @@ out:
203 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi); 206 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi);
204 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1); 207 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1);
205 NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1); 208 NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1);
209 NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode);
206 210
207 if (blue == 0x18) { 211 if (blue == 0x18) {
208 NV_INFO(dev, "Load detected on head A\n"); 212 NV_INFO(dev, "Load detected on head A\n");
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index d910873c1368..fd01caabd5c3 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -27,7 +27,7 @@
27#include "nouveau_dma.h" 27#include "nouveau_dma.h"
28#include "nouveau_fbcon.h" 28#include "nouveau_fbcon.h"
29 29
30static void 30void
31nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) 31nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
32{ 32{
33 struct nouveau_fbcon_par *par = info->par; 33 struct nouveau_fbcon_par *par = info->par;
@@ -54,7 +54,7 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
54 FIRE_RING(chan); 54 FIRE_RING(chan);
55} 55}
56 56
57static void 57void
58nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) 58nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
59{ 59{
60 struct nouveau_fbcon_par *par = info->par; 60 struct nouveau_fbcon_par *par = info->par;
@@ -88,7 +88,7 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
88 FIRE_RING(chan); 88 FIRE_RING(chan);
89} 89}
90 90
91static void 91void
92nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) 92nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
93{ 93{
94 struct nouveau_fbcon_par *par = info->par; 94 struct nouveau_fbcon_par *par = info->par;
@@ -307,9 +307,6 @@ nv04_fbcon_accel_init(struct fb_info *info)
307 307
308 FIRE_RING(chan); 308 FIRE_RING(chan);
309 309
310 info->fbops->fb_fillrect = nv04_fbcon_fillrect;
311 info->fbops->fb_copyarea = nv04_fbcon_copyarea;
312 info->fbops->fb_imageblit = nv04_fbcon_imageblit;
313 return 0; 310 return 0;
314} 311}
315 312
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 58b917c3341b..21ac6e49b6ee 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -579,6 +579,8 @@ static void nv17_tv_restore(struct drm_encoder *encoder)
579 nouveau_encoder(encoder)->restore.output); 579 nouveau_encoder(encoder)->restore.output);
580 580
581 nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state); 581 nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state);
582
583 nouveau_encoder(encoder)->last_dpms = NV_DPMS_CLEARED;
582} 584}
583 585
584static int nv17_tv_create_resources(struct drm_encoder *encoder, 586static int nv17_tv_create_resources(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 40b7360841f8..d1a651e3400c 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -298,14 +298,17 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
298static void 298static void
299nv50_crtc_destroy(struct drm_crtc *crtc) 299nv50_crtc_destroy(struct drm_crtc *crtc)
300{ 300{
301 struct drm_device *dev = crtc->dev; 301 struct drm_device *dev;
302 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 302 struct nouveau_crtc *nv_crtc;
303
304 NV_DEBUG_KMS(dev, "\n");
305 303
306 if (!crtc) 304 if (!crtc)
307 return; 305 return;
308 306
307 dev = crtc->dev;
308 nv_crtc = nouveau_crtc(crtc);
309
310 NV_DEBUG_KMS(dev, "\n");
311
309 drm_crtc_cleanup(&nv_crtc->base); 312 drm_crtc_cleanup(&nv_crtc->base);
310 313
311 nv50_cursor_fini(nv_crtc); 314 nv50_cursor_fini(nv_crtc);
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index e4f279ee61cf..0f57cdf7ccb2 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -3,7 +3,7 @@
3#include "nouveau_dma.h" 3#include "nouveau_dma.h"
4#include "nouveau_fbcon.h" 4#include "nouveau_fbcon.h"
5 5
6static void 6void
7nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) 7nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
8{ 8{
9 struct nouveau_fbcon_par *par = info->par; 9 struct nouveau_fbcon_par *par = info->par;
@@ -46,7 +46,7 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
46 FIRE_RING(chan); 46 FIRE_RING(chan);
47} 47}
48 48
49static void 49void
50nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) 50nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
51{ 51{
52 struct nouveau_fbcon_par *par = info->par; 52 struct nouveau_fbcon_par *par = info->par;
@@ -81,7 +81,7 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
81 FIRE_RING(chan); 81 FIRE_RING(chan);
82} 82}
83 83
84static void 84void
85nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) 85nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
86{ 86{
87 struct nouveau_fbcon_par *par = info->par; 87 struct nouveau_fbcon_par *par = info->par;
@@ -262,9 +262,6 @@ nv50_fbcon_accel_init(struct fb_info *info)
262 OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys + 262 OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys +
263 dev_priv->vm_vram_base); 263 dev_priv->vm_vram_base);
264 264
265 info->fbops->fb_fillrect = nv50_fbcon_fillrect;
266 info->fbops->fb_copyarea = nv50_fbcon_copyarea;
267 info->fbops->fb_imageblit = nv50_fbcon_imageblit;
268 return 0; 265 return 0;
269} 266}
270 267
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 32b244bcb482..204a79ff10f4 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -317,17 +317,20 @@ void
317nv50_fifo_destroy_context(struct nouveau_channel *chan) 317nv50_fifo_destroy_context(struct nouveau_channel *chan)
318{ 318{
319 struct drm_device *dev = chan->dev; 319 struct drm_device *dev = chan->dev;
320 struct nouveau_gpuobj_ref *ramfc = chan->ramfc;
320 321
321 NV_DEBUG(dev, "ch%d\n", chan->id); 322 NV_DEBUG(dev, "ch%d\n", chan->id);
322 323
323 nouveau_gpuobj_ref_del(dev, &chan->ramfc); 324 /* This will ensure the channel is seen as disabled. */
324 nouveau_gpuobj_ref_del(dev, &chan->cache); 325 chan->ramfc = NULL;
325
326 nv50_fifo_channel_disable(dev, chan->id, false); 326 nv50_fifo_channel_disable(dev, chan->id, false);
327 327
328 /* Dummy channel, also used on ch 127 */ 328 /* Dummy channel, also used on ch 127 */
329 if (chan->id == 0) 329 if (chan->id == 0)
330 nv50_fifo_channel_disable(dev, 127, false); 330 nv50_fifo_channel_disable(dev, 127, false);
331
332 nouveau_gpuobj_ref_del(dev, &ramfc);
333 nouveau_gpuobj_ref_del(dev, &chan->cache);
331} 334}
332 335
333int 336int
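
The reordered nv50_fifo teardown keeps a local copy of chan->ramfc, clears the shared field first so the rest of the driver sees the channel as disabled, and only afterwards disables the FIFO and releases the object through the saved reference. A compact sketch of that "publish the disabled state before tearing down" ordering, with invented names:

#include <stdio.h>
#include <stdlib.h>

struct channel {
	int id;
	void *ramfc;	/* non-NULL means "channel is live" */
};

static void destroy_context(struct channel *chan)
{
	void *ramfc = chan->ramfc;	/* keep a private reference */

	/* Clear the shared pointer first: anyone inspecting the channel
	 * from now on sees it as disabled. */
	chan->ramfc = NULL;

	printf("disabling fifo channel %d\n", chan->id);

	free(ramfc);			/* release via the saved copy */
}

int main(void)
{
	struct channel chan = { 3, malloc(64) };

	destroy_context(&chan);
	return 0;
}
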
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 20319e59d368..6d504801b514 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -165,6 +165,12 @@ nv50_graph_channel(struct drm_device *dev)
165 uint32_t inst; 165 uint32_t inst;
166 int i; 166 int i;
167 167
168 /* Be sure we're not in the middle of a context switch or bad things
169 * will happen, such as unloading the wrong pgraph context.
170 */
171 if (!nv_wait(0x400300, 0x00000001, 0x00000000))
172 NV_ERROR(dev, "Ctxprog is still running\n");
173
168 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR); 174 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
169 if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED)) 175 if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
170 return NULL; 176 return NULL;
@@ -275,7 +281,7 @@ nv50_graph_load_context(struct nouveau_channel *chan)
275int 281int
276nv50_graph_unload_context(struct drm_device *dev) 282nv50_graph_unload_context(struct drm_device *dev)
277{ 283{
278 uint32_t inst, fifo = nv_rd32(dev, 0x400500); 284 uint32_t inst;
279 285
280 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR); 286 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
281 if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED)) 287 if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
@@ -283,12 +289,10 @@ nv50_graph_unload_context(struct drm_device *dev)
283 inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE; 289 inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;
284 290
285 nouveau_wait_for_idle(dev); 291 nouveau_wait_for_idle(dev);
286 nv_wr32(dev, 0x400500, fifo & ~1);
287 nv_wr32(dev, 0x400784, inst); 292 nv_wr32(dev, 0x400784, inst);
288 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20); 293 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
289 nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01); 294 nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
290 nouveau_wait_for_idle(dev); 295 nouveau_wait_for_idle(dev);
291 nv_wr32(dev, 0x400500, fifo);
292 296
293 nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst); 297 nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
294 return 0; 298 return 0;
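
nv50_graph_channel() now polls PGRAPH register 0x400300 until bit 0 clears before trusting CTXCTL_CUR, so it never samples the current-channel pointer in the middle of a context switch. A self-contained sketch of a bounded poll loop in the spirit of nv_wait(), with a stubbed register read standing in for the MMIO access:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for an MMIO read; a real driver reads the hardware register. */
static uint32_t reg_read(uint32_t reg)
{
	static int calls;

	return (reg == 0x400300 && calls++ < 3) ? 0x1 : 0x0;
}

/* Poll until (reg & mask) == val or the iteration budget runs out. */
static bool wait_reg(uint32_t reg, uint32_t mask, uint32_t val, int tries)
{
	while (tries--) {
		if ((reg_read(reg) & mask) == val)
			return true;
	}
	return false;
}

int main(void)
{
	if (!wait_reg(0x400300, 0x00000001, 0x00000000, 1000))
		fprintf(stderr, "ctxprog still running\n");
	else
		puts("context switcher idle, safe to read CTXCTL_CUR");
	return 0;
}
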
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 94400f777e7f..f0dc4e36ef05 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -76,6 +76,11 @@ nv50_instmem_init(struct drm_device *dev)
76 for (i = 0x1700; i <= 0x1710; i += 4) 76 for (i = 0x1700; i <= 0x1710; i += 4)
77 priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i); 77 priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);
78 78
79 if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac)
80 dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12;
81 else
82 dev_priv->vram_sys_base = 0;
83
79 /* Reserve the last MiB of VRAM, we should probably try to avoid 84 /* Reserve the last MiB of VRAM, we should probably try to avoid
80 * setting up the below tables over the top of the VBIOS image at 85 * setting up the below tables over the top of the VBIOS image at
81 * some point. 86 * some point.
@@ -172,16 +177,28 @@ nv50_instmem_init(struct drm_device *dev)
172 * We map the entire fake channel into the start of the PRAMIN BAR 177 * We map the entire fake channel into the start of the PRAMIN BAR
173 */ 178 */
174 ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000, 179 ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000,
175 0, &priv->pramin_pt); 180 0, &priv->pramin_pt);
176 if (ret) 181 if (ret)
177 return ret; 182 return ret;
178 183
179 for (i = 0, v = c_offset; i < pt_size; i += 8, v += 0x1000) { 184 v = c_offset | 1;
180 if (v < (c_offset + c_size)) 185 if (dev_priv->vram_sys_base) {
181 BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1); 186 v += dev_priv->vram_sys_base;
182 else 187 v |= 0x30;
183 BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009); 188 }
189
190 i = 0;
191 while (v < dev_priv->vram_sys_base + c_offset + c_size) {
192 BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v);
193 BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
194 v += 0x1000;
195 i += 8;
196 }
197
198 while (i < pt_size) {
199 BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000000);
184 BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000); 200 BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
201 i += 8;
185 } 202 }
186 203
187 BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63); 204 BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63);
@@ -416,7 +433,9 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
416{ 433{
417 struct drm_nouveau_private *dev_priv = dev->dev_private; 434 struct drm_nouveau_private *dev_priv = dev->dev_private;
418 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; 435 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
419 uint32_t pte, pte_end, vram; 436 struct nouveau_gpuobj *pramin_pt = priv->pramin_pt->gpuobj;
437 uint32_t pte, pte_end;
438 uint64_t vram;
420 439
421 if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound) 440 if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
422 return -EINVAL; 441 return -EINVAL;
@@ -424,20 +443,24 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
424 NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n", 443 NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n",
425 gpuobj->im_pramin->start, gpuobj->im_pramin->size); 444 gpuobj->im_pramin->start, gpuobj->im_pramin->size);
426 445
427 pte = (gpuobj->im_pramin->start >> 12) << 3; 446 pte = (gpuobj->im_pramin->start >> 12) << 1;
428 pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte; 447 pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
429 vram = gpuobj->im_backing_start; 448 vram = gpuobj->im_backing_start;
430 449
431 NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n", 450 NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n",
432 gpuobj->im_pramin->start, pte, pte_end); 451 gpuobj->im_pramin->start, pte, pte_end);
433 NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start); 452 NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start);
434 453
454 vram |= 1;
455 if (dev_priv->vram_sys_base) {
456 vram += dev_priv->vram_sys_base;
457 vram |= 0x30;
458 }
459
435 dev_priv->engine.instmem.prepare_access(dev, true); 460 dev_priv->engine.instmem.prepare_access(dev, true);
436 while (pte < pte_end) { 461 while (pte < pte_end) {
437 nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, vram | 1); 462 nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram));
438 nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000); 463 nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram));
439
440 pte += 8;
441 vram += NV50_INSTMEM_PAGE_SIZE; 464 vram += NV50_INSTMEM_PAGE_SIZE;
442 } 465 }
443 dev_priv->engine.instmem.finish_access(dev); 466 dev_priv->engine.instmem.finish_access(dev);
@@ -470,14 +493,13 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
470 if (gpuobj->im_bound == 0) 493 if (gpuobj->im_bound == 0)
471 return -EINVAL; 494 return -EINVAL;
472 495
473 pte = (gpuobj->im_pramin->start >> 12) << 3; 496 pte = (gpuobj->im_pramin->start >> 12) << 1;
474 pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte; 497 pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
475 498
476 dev_priv->engine.instmem.prepare_access(dev, true); 499 dev_priv->engine.instmem.prepare_access(dev, true);
477 while (pte < pte_end) { 500 while (pte < pte_end) {
478 nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, 0x00000009); 501 nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
479 nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000); 502 nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
480 pte += 8;
481 } 503 }
482 dev_priv->engine.instmem.finish_access(dev); 504 dev_priv->engine.instmem.finish_access(dev);
483 505
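
The instmem hunks switch the PRAMIN page table to full 64-bit entries: the pte index now counts 32-bit words (shift by 1 instead of byte offsets divided by 4), each 4 KiB page gets a low word (address plus present/flag bits) followed by a high word, and the IGP chipsets (0xaa/0xac) add a vram_sys_base offset and the extra 0x30 flag because their memory is carved out of system RAM. A standalone sketch of filling such a table, with made-up constants and a plain array in place of nv_wo32():

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	0x1000
#define PTE_PRESENT	0x1
#define PTE_SYSTEM	0x30	/* extra flags when backed by system RAM */

/* Write one 64-bit PTE as two consecutive 32-bit words. */
static void write_pte(uint32_t *pt, unsigned pte, uint64_t addr, int system)
{
	uint64_t entry = addr | PTE_PRESENT | (system ? PTE_SYSTEM : 0);

	pt[pte + 0] = (uint32_t)(entry & 0xffffffff);	/* low word  */
	pt[pte + 1] = (uint32_t)(entry >> 32);		/* high word */
}

int main(void)
{
	uint32_t pt[8] = { 0 };
	uint64_t addr = 0x100000000ULL;	/* above 4 GiB to show the split */
	unsigned pte = 0;		/* index in 32-bit words */
	unsigned i;

	for (i = 0; i < 4; i++, addr += PAGE_SIZE, pte += 2)
		write_pte(pt, pte, addr, 0);

	for (i = 0; i < 8; i += 2)
		printf("pte %u: hi=0x%08" PRIx32 " lo=0x%08" PRIx32 "\n",
		       i / 2, pt[i + 1], pt[i]);
	return 0;
}
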
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index ecf1936b8224..c2fff543b06f 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -101,6 +101,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
101 struct nouveau_encoder *nvenc = nouveau_encoder(enc); 101 struct nouveau_encoder *nvenc = nouveau_encoder(enc);
102 102
103 if (nvenc == nv_encoder || 103 if (nvenc == nv_encoder ||
104 nvenc->disconnect != nv50_sor_disconnect ||
104 nvenc->dcb->or != nv_encoder->dcb->or) 105 nvenc->dcb->or != nv_encoder->dcb->or)
105 continue; 106 continue;
106 107
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index 5982321be4d5..1c02d23f6fcc 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -1,10 +1,14 @@
1config DRM_RADEON_KMS 1config DRM_RADEON_KMS
2 bool "Enable modesetting on radeon by default" 2 bool "Enable modesetting on radeon by default - NEW DRIVER"
3 depends on DRM_RADEON 3 depends on DRM_RADEON
4 help 4 help
5 Choose this option if you want kernel modesetting enabled by default, 5 Choose this option if you want kernel modesetting enabled by default.
6 and you have a new enough userspace to support this. Running old 6
7 userspaces with this enabled will cause pain. 7 This is a completely new driver. It's only part of the existing drm
8 for compatibility reasons. It requires an entirely different graphics
9 stack above it and works very differently from the old drm stack.
10 i.e. don't enable this unless you know what you are doing it may
11 cause issues or bugs compared to the previous userspace driver stack.
8 12
9 When kernel modesetting is enabled the IOCTL of radeon/drm 13 When kernel modesetting is enabled the IOCTL of radeon/drm
10 driver are considered as invalid and an error message is printed 14 driver are considered as invalid and an error message is printed
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index e3b44562d265..7f152f66f196 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -24,6 +24,7 @@
24 24
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/sched.h> 26#include <linux/sched.h>
27#include <asm/unaligned.h>
27 28
28#define ATOM_DEBUG 29#define ATOM_DEBUG
29 30
@@ -212,7 +213,9 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
212 case ATOM_ARG_PS: 213 case ATOM_ARG_PS:
213 idx = U8(*ptr); 214 idx = U8(*ptr);
214 (*ptr)++; 215 (*ptr)++;
215 val = le32_to_cpu(ctx->ps[idx]); 216 /* get_unaligned_le32 avoids unaligned accesses from atombios
217 * tables, noticed on a DEC Alpha. */
218 val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
216 if (print) 219 if (print)
217 DEBUG("PS[0x%02X,0x%04X]", idx, val); 220 DEBUG("PS[0x%02X,0x%04X]", idx, val);
218 break; 221 break;
@@ -640,7 +643,7 @@ static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
640 uint8_t count = U8((*ptr)++); 643 uint8_t count = U8((*ptr)++);
641 SDEBUG(" count: %d\n", count); 644 SDEBUG(" count: %d\n", count);
642 if (arg == ATOM_UNIT_MICROSEC) 645 if (arg == ATOM_UNIT_MICROSEC)
643 schedule_timeout_uninterruptible(usecs_to_jiffies(count)); 646 udelay(count);
644 else 647 else
645 schedule_timeout_uninterruptible(msecs_to_jiffies(count)); 648 schedule_timeout_uninterruptible(msecs_to_jiffies(count));
646} 649}
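
The atom interpreter change replaces a direct le32_to_cpu() load of a parameter-space slot with get_unaligned_le32(), because AtomBIOS tables give no alignment guarantees and a straight 32-bit load faults on strict-alignment machines such as Alpha; the microsecond delay also becomes a busy udelay(), since sleeping for usecs_to_jiffies(count) rounds tiny delays up to at least a whole jiffy. A portable userspace sketch of the safe unaligned little-endian read (memcpy is the usual idiom; the helper name is mine):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Read a little-endian u32 from an arbitrarily aligned byte pointer. */
static uint32_t read_le32_unaligned(const void *p)
{
	uint8_t b[4];

	memcpy(b, p, sizeof(b));	/* avoids an unaligned 32-bit load */
	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
	/* A table where the value of interest starts at odd offset 1. */
	uint8_t table[] = { 0xff, 0x78, 0x56, 0x34, 0x12 };

	printf("0x%08x\n", read_le32_unaligned(table + 1)); /* 0x12345678 */
	return 0;
}
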
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 3eb0ca5b3d73..99915a682d59 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -332,11 +332,13 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
332 PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args; 332 PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args;
333 int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction); 333 int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
334 unsigned char *base; 334 unsigned char *base;
335 int retry_count = 0;
335 336
336 memset(&args, 0, sizeof(args)); 337 memset(&args, 0, sizeof(args));
337 338
338 base = (unsigned char *)rdev->mode_info.atom_context->scratch; 339 base = (unsigned char *)rdev->mode_info.atom_context->scratch;
339 340
341retry:
340 memcpy(base, req_bytes, num_bytes); 342 memcpy(base, req_bytes, num_bytes);
341 343
342 args.lpAuxRequest = 0; 344 args.lpAuxRequest = 0;
@@ -347,10 +349,12 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
347 349
348 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 350 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
349 351
350 if (args.ucReplyStatus) { 352 if (args.ucReplyStatus && !args.ucDataOutLen) {
351 DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x\n", 353 if (args.ucReplyStatus == 0x20 && retry_count++ < 10)
354 goto retry;
355 DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n",
352 req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3], 356 req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
353 chan->rec.i2c_id, args.ucReplyStatus); 357 chan->rec.i2c_id, args.ucReplyStatus, retry_count);
354 return false; 358 return false;
355 } 359 }
356 360
@@ -468,7 +472,7 @@ void radeon_dp_set_link_config(struct drm_connector *connector,
468 struct radeon_connector *radeon_connector; 472 struct radeon_connector *radeon_connector;
469 struct radeon_connector_atom_dig *dig_connector; 473 struct radeon_connector_atom_dig *dig_connector;
470 474
471 if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) || 475 if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
472 (connector->connector_type != DRM_MODE_CONNECTOR_eDP)) 476 (connector->connector_type != DRM_MODE_CONNECTOR_eDP))
473 return; 477 return;
474 478
@@ -583,7 +587,7 @@ void dp_link_train(struct drm_encoder *encoder,
583 u8 train_set[4]; 587 u8 train_set[4];
584 int i; 588 int i;
585 589
586 if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) || 590 if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
587 (connector->connector_type != DRM_MODE_CONNECTOR_eDP)) 591 (connector->connector_type != DRM_MODE_CONNECTOR_eDP))
588 return; 592 return;
589 593
@@ -596,21 +600,14 @@ void dp_link_train(struct drm_encoder *encoder,
596 return; 600 return;
597 dig_connector = radeon_connector->con_priv; 601 dig_connector = radeon_connector->con_priv;
598 602
599 if (ASIC_IS_DCE32(rdev)) { 603 if (dig->dig_encoder)
600 if (dig->dig_block) 604 enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
601 enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER; 605 else
602 else 606 enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
603 enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER; 607 if (dig_connector->linkb)
604 if (dig_connector->linkb) 608 enc_id |= ATOM_DP_CONFIG_LINK_B;
605 enc_id |= ATOM_DP_CONFIG_LINK_B; 609 else
606 else 610 enc_id |= ATOM_DP_CONFIG_LINK_A;
607 enc_id |= ATOM_DP_CONFIG_LINK_A;
608 } else {
609 if (dig_connector->linkb)
610 enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER | ATOM_DP_CONFIG_LINK_B;
611 else
612 enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER | ATOM_DP_CONFIG_LINK_A;
613 }
614 611
615 memset(link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); 612 memset(link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
616 if (dig_connector->dp_clock == 270000) 613 if (dig_connector->dp_clock == 270000)
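
radeon_process_aux_ch() now retries the whole AUX transaction up to ten times when the reply status is 0x20 and no data came back, instead of giving up on the first deferral; the connector-type guards also change from || to &&, so the early return fires only when the connector is neither DisplayPort nor eDP (the old || test was always true). A small sketch of the bounded retry-on-defer pattern (status codes and the retry limit are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define STATUS_OK	0x00
#define STATUS_DEFER	0x20	/* sink asked us to try again */
#define MAX_RETRIES	10

/* Fake transaction: defers twice, then succeeds. */
static int do_aux_transaction(void)
{
	static int attempts;

	return (attempts++ < 2) ? STATUS_DEFER : STATUS_OK;
}

static bool aux_transfer(void)
{
	int retry, status = STATUS_DEFER;

	for (retry = 0; retry <= MAX_RETRIES; retry++) {
		status = do_aux_transaction();
		if (status != STATUS_DEFER)
			break;
	}
	if (status != STATUS_OK) {
		fprintf(stderr, "aux failed (0x%02x) after %d retries\n",
			status, retry);
		return false;
	}
	printf("aux ok after %d retries\n", retry);
	return true;
}

int main(void)
{
	return aux_transfer() ? 0 : 1;
}
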
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 11c9a3fe6810..c0d4650cdb79 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -354,11 +354,17 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
354 return RREG32(RADEON_CRTC2_CRNT_FRAME); 354 return RREG32(RADEON_CRTC2_CRNT_FRAME);
355} 355}
356 356
357/* Who ever call radeon_fence_emit should call ring_lock and ask
358 * for enough space (today caller are ib schedule and buffer move) */
357void r100_fence_ring_emit(struct radeon_device *rdev, 359void r100_fence_ring_emit(struct radeon_device *rdev,
358 struct radeon_fence *fence) 360 struct radeon_fence *fence)
359{ 361{
360 /* Who ever call radeon_fence_emit should call ring_lock and ask 362 /* We have to make sure that caches are flushed before
361 * for enough space (today caller are ib schedule and buffer move) */ 363 * CPU might read something from VRAM. */
364 radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
365 radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL);
366 radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
367 radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
362 /* Wait until IDLE & CLEAN */ 368 /* Wait until IDLE & CLEAN */
363 radeon_ring_write(rdev, PACKET0(0x1720, 0)); 369 radeon_ring_write(rdev, PACKET0(0x1720, 0));
364 radeon_ring_write(rdev, (1 << 16) | (1 << 17)); 370 radeon_ring_write(rdev, (1 << 16) | (1 << 17));
@@ -3369,7 +3375,6 @@ int r100_suspend(struct radeon_device *rdev)
3369 3375
3370void r100_fini(struct radeon_device *rdev) 3376void r100_fini(struct radeon_device *rdev)
3371{ 3377{
3372 r100_suspend(rdev);
3373 r100_cp_fini(rdev); 3378 r100_cp_fini(rdev);
3374 r100_wb_fini(rdev); 3379 r100_wb_fini(rdev);
3375 r100_ib_fini(rdev); 3380 r100_ib_fini(rdev);
@@ -3481,13 +3486,12 @@ int r100_init(struct radeon_device *rdev)
3481 if (r) { 3486 if (r) {
3482 /* Somethings want wront with the accel init stop accel */ 3487 /* Somethings want wront with the accel init stop accel */
3483 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 3488 dev_err(rdev->dev, "Disabling GPU acceleration\n");
3484 r100_suspend(rdev);
3485 r100_cp_fini(rdev); 3489 r100_cp_fini(rdev);
3486 r100_wb_fini(rdev); 3490 r100_wb_fini(rdev);
3487 r100_ib_fini(rdev); 3491 r100_ib_fini(rdev);
3492 radeon_irq_kms_fini(rdev);
3488 if (rdev->flags & RADEON_IS_PCI) 3493 if (rdev->flags & RADEON_IS_PCI)
3489 r100_pci_gart_fini(rdev); 3494 r100_pci_gart_fini(rdev);
3490 radeon_irq_kms_fini(rdev);
3491 rdev->accel_working = false; 3495 rdev->accel_working = false;
3492 } 3496 }
3493 return 0; 3497 return 0;
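
r100_fence_ring_emit() now flushes the RB3D destination and depth caches before the wait-for-idle and the fence write, so the rendered data is actually in VRAM by the time the CPU observes the fence; the error path and r100_fini() also drop the extra suspend call and tear blocks down in a consistent order. A schematic of the emit ordering only, writing symbolic packets into a plain array instead of the real ring (the command values are placeholders):

#include <stdint.h>
#include <stdio.h>

enum cmd { FLUSH_DST_CACHE, FLUSH_Z_CACHE, WAIT_IDLE, WRITE_FENCE };

static const char *name[] = {
	"flush destination cache", "flush depth cache",
	"wait until idle & clean", "write fence sequence",
};

static void ring_write(uint32_t *ring, int *wptr, enum cmd c)
{
	ring[(*wptr)++] = c;
}

int main(void)
{
	uint32_t ring[8];
	int wptr = 0, i;

	/* Order matters: caches must be flushed and the engine idle
	 * before the fence value becomes visible to the CPU. */
	ring_write(ring, &wptr, FLUSH_DST_CACHE);
	ring_write(ring, &wptr, FLUSH_Z_CACHE);
	ring_write(ring, &wptr, WAIT_IDLE);
	ring_write(ring, &wptr, WRITE_FENCE);

	for (i = 0; i < wptr; i++)
		printf("%d: %s\n", i, name[ring[i]]);
	return 0;
}
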
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 0051d11b907c..43b55a030b4d 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -506,11 +506,14 @@ void r300_vram_info(struct radeon_device *rdev)
506 506
507 /* DDR for all card after R300 & IGP */ 507 /* DDR for all card after R300 & IGP */
508 rdev->mc.vram_is_ddr = true; 508 rdev->mc.vram_is_ddr = true;
509
509 tmp = RREG32(RADEON_MEM_CNTL); 510 tmp = RREG32(RADEON_MEM_CNTL);
510 if (tmp & R300_MEM_NUM_CHANNELS_MASK) { 511 tmp &= R300_MEM_NUM_CHANNELS_MASK;
511 rdev->mc.vram_width = 128; 512 switch (tmp) {
512 } else { 513 case 0: rdev->mc.vram_width = 64; break;
513 rdev->mc.vram_width = 64; 514 case 1: rdev->mc.vram_width = 128; break;
515 case 2: rdev->mc.vram_width = 256; break;
516 default: rdev->mc.vram_width = 128; break;
514 } 517 }
515 518
516 r100_vram_init_sizes(rdev); 519 r100_vram_init_sizes(rdev);
@@ -1327,7 +1330,6 @@ int r300_suspend(struct radeon_device *rdev)
1327 1330
1328void r300_fini(struct radeon_device *rdev) 1331void r300_fini(struct radeon_device *rdev)
1329{ 1332{
1330 r300_suspend(rdev);
1331 r100_cp_fini(rdev); 1333 r100_cp_fini(rdev);
1332 r100_wb_fini(rdev); 1334 r100_wb_fini(rdev);
1333 r100_ib_fini(rdev); 1335 r100_ib_fini(rdev);
@@ -1418,15 +1420,15 @@ int r300_init(struct radeon_device *rdev)
1418 if (r) { 1420 if (r) {
1419 /* Somethings want wront with the accel init stop accel */ 1421 /* Somethings want wront with the accel init stop accel */
1420 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 1422 dev_err(rdev->dev, "Disabling GPU acceleration\n");
1421 r300_suspend(rdev);
1422 r100_cp_fini(rdev); 1423 r100_cp_fini(rdev);
1423 r100_wb_fini(rdev); 1424 r100_wb_fini(rdev);
1424 r100_ib_fini(rdev); 1425 r100_ib_fini(rdev);
1426 radeon_irq_kms_fini(rdev);
1425 if (rdev->flags & RADEON_IS_PCIE) 1427 if (rdev->flags & RADEON_IS_PCIE)
1426 rv370_pcie_gart_fini(rdev); 1428 rv370_pcie_gart_fini(rdev);
1427 if (rdev->flags & RADEON_IS_PCI) 1429 if (rdev->flags & RADEON_IS_PCI)
1428 r100_pci_gart_fini(rdev); 1430 r100_pci_gart_fini(rdev);
1429 radeon_irq_kms_fini(rdev); 1431 radeon_agp_fini(rdev);
1430 rdev->accel_working = false; 1432 rdev->accel_working = false;
1431 } 1433 }
1432 return 0; 1434 return 0;
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 4526faaacca8..d9373246c97f 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -389,16 +389,15 @@ int r420_init(struct radeon_device *rdev)
389 if (r) { 389 if (r) {
390 /* Somethings want wront with the accel init stop accel */ 390 /* Somethings want wront with the accel init stop accel */
391 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 391 dev_err(rdev->dev, "Disabling GPU acceleration\n");
392 r420_suspend(rdev);
393 r100_cp_fini(rdev); 392 r100_cp_fini(rdev);
394 r100_wb_fini(rdev); 393 r100_wb_fini(rdev);
395 r100_ib_fini(rdev); 394 r100_ib_fini(rdev);
395 radeon_irq_kms_fini(rdev);
396 if (rdev->flags & RADEON_IS_PCIE) 396 if (rdev->flags & RADEON_IS_PCIE)
397 rv370_pcie_gart_fini(rdev); 397 rv370_pcie_gart_fini(rdev);
398 if (rdev->flags & RADEON_IS_PCI) 398 if (rdev->flags & RADEON_IS_PCI)
399 r100_pci_gart_fini(rdev); 399 r100_pci_gart_fini(rdev);
400 radeon_agp_fini(rdev); 400 radeon_agp_fini(rdev);
401 radeon_irq_kms_fini(rdev);
402 rdev->accel_working = false; 401 rdev->accel_working = false;
403 } 402 }
404 return 0; 403 return 0;
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 9a189072f2b9..ddf5731eba0d 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -294,13 +294,12 @@ int r520_init(struct radeon_device *rdev)
294 if (r) { 294 if (r) {
295 /* Somethings want wront with the accel init stop accel */ 295 /* Somethings want wront with the accel init stop accel */
296 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 296 dev_err(rdev->dev, "Disabling GPU acceleration\n");
297 rv515_suspend(rdev);
298 r100_cp_fini(rdev); 297 r100_cp_fini(rdev);
299 r100_wb_fini(rdev); 298 r100_wb_fini(rdev);
300 r100_ib_fini(rdev); 299 r100_ib_fini(rdev);
300 radeon_irq_kms_fini(rdev);
301 rv370_pcie_gart_fini(rdev); 301 rv370_pcie_gart_fini(rdev);
302 radeon_agp_fini(rdev); 302 radeon_agp_fini(rdev);
303 radeon_irq_kms_fini(rdev);
304 rdev->accel_working = false; 303 rdev->accel_working = false;
305 } 304 }
306 return 0; 305 return 0;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index da9aa3c31bcf..2ffcf5a03551 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1654,6 +1654,12 @@ void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
1654 rdev->cp.align_mask = 16 - 1; 1654 rdev->cp.align_mask = 16 - 1;
1655} 1655}
1656 1656
1657void r600_cp_fini(struct radeon_device *rdev)
1658{
1659 r600_cp_stop(rdev);
1660 radeon_ring_fini(rdev);
1661}
1662
1657 1663
1658/* 1664/*
1659 * GPU scratch registers helpers function. 1665 * GPU scratch registers helpers function.
@@ -1788,23 +1794,24 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
1788 radeon_ring_write(rdev, RB_INT_STAT); 1794 radeon_ring_write(rdev, RB_INT_STAT);
1789} 1795}
1790 1796
1791int r600_copy_dma(struct radeon_device *rdev,
1792 uint64_t src_offset,
1793 uint64_t dst_offset,
1794 unsigned num_pages,
1795 struct radeon_fence *fence)
1796{
1797 /* FIXME: implement */
1798 return 0;
1799}
1800
1801int r600_copy_blit(struct radeon_device *rdev, 1797int r600_copy_blit(struct radeon_device *rdev,
1802 uint64_t src_offset, uint64_t dst_offset, 1798 uint64_t src_offset, uint64_t dst_offset,
1803 unsigned num_pages, struct radeon_fence *fence) 1799 unsigned num_pages, struct radeon_fence *fence)
1804{ 1800{
1805 r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE); 1801 int r;
1802
1803 mutex_lock(&rdev->r600_blit.mutex);
1804 rdev->r600_blit.vb_ib = NULL;
1805 r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
1806 if (r) {
1807 if (rdev->r600_blit.vb_ib)
1808 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
1809 mutex_unlock(&rdev->r600_blit.mutex);
1810 return r;
1811 }
1806 r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE); 1812 r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
1807 r600_blit_done_copy(rdev, fence); 1813 r600_blit_done_copy(rdev, fence);
1814 mutex_unlock(&rdev->r600_blit.mutex);
1808 return 0; 1815 return 0;
1809} 1816}
1810 1817
@@ -1860,26 +1867,25 @@ int r600_startup(struct radeon_device *rdev)
1860 return r; 1867 return r;
1861 } 1868 }
1862 r600_gpu_init(rdev); 1869 r600_gpu_init(rdev);
1863 1870 r = r600_blit_init(rdev);
1864 if (!rdev->r600_blit.shader_obj) { 1871 if (r) {
1865 r = r600_blit_init(rdev); 1872 r600_blit_fini(rdev);
1873 rdev->asic->copy = NULL;
1874 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
1875 }
1876 /* pin copy shader into vram */
1877 if (rdev->r600_blit.shader_obj) {
1878 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
1879 if (unlikely(r != 0))
1880 return r;
1881 r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
1882 &rdev->r600_blit.shader_gpu_addr);
1883 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
1866 if (r) { 1884 if (r) {
1867 DRM_ERROR("radeon: failed blitter (%d).\n", r); 1885 dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
1868 return r; 1886 return r;
1869 } 1887 }
1870 } 1888 }
1871
1872 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
1873 if (unlikely(r != 0))
1874 return r;
1875 r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
1876 &rdev->r600_blit.shader_gpu_addr);
1877 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
1878 if (r) {
1879 dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
1880 return r;
1881 }
1882
1883 /* Enable IRQ */ 1889 /* Enable IRQ */
1884 r = r600_irq_init(rdev); 1890 r = r600_irq_init(rdev);
1885 if (r) { 1891 if (r) {
@@ -1944,6 +1950,13 @@ int r600_resume(struct radeon_device *rdev)
1944 DRM_ERROR("radeon: failled testing IB (%d).\n", r); 1950 DRM_ERROR("radeon: failled testing IB (%d).\n", r);
1945 return r; 1951 return r;
1946 } 1952 }
1953
1954 r = r600_audio_init(rdev);
1955 if (r) {
1956 DRM_ERROR("radeon: audio resume failed\n");
1957 return r;
1958 }
1959
1947 return r; 1960 return r;
1948} 1961}
1949 1962
@@ -1951,6 +1964,7 @@ int r600_suspend(struct radeon_device *rdev)
1951{ 1964{
1952 int r; 1965 int r;
1953 1966
1967 r600_audio_fini(rdev);
1954 /* FIXME: we should wait for ring to be empty */ 1968 /* FIXME: we should wait for ring to be empty */
1955 r600_cp_stop(rdev); 1969 r600_cp_stop(rdev);
1956 rdev->cp.ready = false; 1970 rdev->cp.ready = false;
@@ -2055,9 +2069,11 @@ int r600_init(struct radeon_device *rdev)
2055 rdev->accel_working = true; 2069 rdev->accel_working = true;
2056 r = r600_startup(rdev); 2070 r = r600_startup(rdev);
2057 if (r) { 2071 if (r) {
2058 r600_suspend(rdev); 2072 dev_err(rdev->dev, "disabling GPU acceleration\n");
2073 r600_cp_fini(rdev);
2059 r600_wb_fini(rdev); 2074 r600_wb_fini(rdev);
2060 radeon_ring_fini(rdev); 2075 r600_irq_fini(rdev);
2076 radeon_irq_kms_fini(rdev);
2061 r600_pcie_gart_fini(rdev); 2077 r600_pcie_gart_fini(rdev);
2062 rdev->accel_working = false; 2078 rdev->accel_working = false;
2063 } 2079 }
@@ -2083,20 +2099,17 @@ int r600_init(struct radeon_device *rdev)
2083 2099
2084void r600_fini(struct radeon_device *rdev) 2100void r600_fini(struct radeon_device *rdev)
2085{ 2101{
2086 /* Suspend operations */
2087 r600_suspend(rdev);
2088
2089 r600_audio_fini(rdev); 2102 r600_audio_fini(rdev);
2090 r600_blit_fini(rdev); 2103 r600_blit_fini(rdev);
2104 r600_cp_fini(rdev);
2105 r600_wb_fini(rdev);
2091 r600_irq_fini(rdev); 2106 r600_irq_fini(rdev);
2092 radeon_irq_kms_fini(rdev); 2107 radeon_irq_kms_fini(rdev);
2093 radeon_ring_fini(rdev);
2094 r600_wb_fini(rdev);
2095 r600_pcie_gart_fini(rdev); 2108 r600_pcie_gart_fini(rdev);
2109 radeon_agp_fini(rdev);
2096 radeon_gem_fini(rdev); 2110 radeon_gem_fini(rdev);
2097 radeon_fence_driver_fini(rdev); 2111 radeon_fence_driver_fini(rdev);
2098 radeon_clocks_fini(rdev); 2112 radeon_clocks_fini(rdev);
2099 radeon_agp_fini(rdev);
2100 radeon_bo_fini(rdev); 2113 radeon_bo_fini(rdev);
2101 radeon_atombios_fini(rdev); 2114 radeon_atombios_fini(rdev);
2102 kfree(rdev->bios); 2115 kfree(rdev->bios);
@@ -2900,3 +2913,18 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev)
2900 return 0; 2913 return 0;
2901#endif 2914#endif
2902} 2915}
2916
2917/**
2918 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
2919 * rdev: radeon device structure
2920 * bo: buffer object struct which userspace is waiting for idle
2921 *
2922 * Some R6XX/R7XX doesn't seems to take into account HDP flush performed
2923 * through ring buffer, this leads to corruption in rendering, see
2924 * http://bugzilla.kernel.org/show_bug.cgi?id=15186 to avoid this we
2925 * directly perform HDP flush by writing register through MMIO.
2926 */
2927void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
2928{
2929 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
2930}
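
r600_copy_blit() now serializes blit users with the new r600_blit.mutex and propagates failures from r600_blit_prepare_copy(), freeing any vertex-buffer IB it already grabbed, instead of pressing on after a WARN_ON; r600_ioctl_wait_idle() does its HDP flush by a direct MMIO write because, per the comment, some R6xx/R7xx parts ignore the ring-based flush. A pthread-based sketch of the "lock, try to prepare, clean up and unlock on error" shape (all names here are stand-ins):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t blit_lock = PTHREAD_MUTEX_INITIALIZER;
static void *vb_ib;	/* scratch buffer grabbed by prepare_copy() */

static int prepare_copy(int fail)
{
	vb_ib = malloc(64);
	return fail ? -ENOMEM : 0;	/* simulate a mid-prepare failure */
}

static int copy_blit(int fail)
{
	int r;

	pthread_mutex_lock(&blit_lock);
	vb_ib = NULL;
	r = prepare_copy(fail);
	if (r) {
		free(vb_ib);	/* release whatever prepare() acquired */
		vb_ib = NULL;
		pthread_mutex_unlock(&blit_lock);
		return r;
	}
	puts("copy + done_copy");
	free(vb_ib);
	vb_ib = NULL;
	pthread_mutex_unlock(&blit_lock);
	return 0;
}

int main(void)
{
	printf("ok path: %d\n", copy_blit(0));
	printf("error path: %d\n", copy_blit(1));
	return 0;
}
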
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 99e2c3891a7d..0dcb6904c4ff 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -35,7 +35,7 @@
35 */ 35 */
36static int r600_audio_chipset_supported(struct radeon_device *rdev) 36static int r600_audio_chipset_supported(struct radeon_device *rdev)
37{ 37{
38 return rdev->family >= CHIP_R600 38 return (rdev->family >= CHIP_R600 && rdev->family < CHIP_RV710)
39 || rdev->family == CHIP_RS600 39 || rdev->family == CHIP_RS600
40 || rdev->family == CHIP_RS690 40 || rdev->family == CHIP_RS690
41 || rdev->family == CHIP_RS740; 41 || rdev->family == CHIP_RS740;
@@ -261,7 +261,6 @@ void r600_audio_fini(struct radeon_device *rdev)
261 if (!r600_audio_chipset_supported(rdev)) 261 if (!r600_audio_chipset_supported(rdev))
262 return; 262 return;
263 263
264 WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
265
266 del_timer(&rdev->audio_timer); 264 del_timer(&rdev->audio_timer);
265 WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
267} 266}
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 2bedce477a97..446b765ac72a 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -449,6 +449,7 @@ int r600_blit_init(struct radeon_device *rdev)
449 u32 packet2s[16]; 449 u32 packet2s[16];
450 int num_packet2s = 0; 450 int num_packet2s = 0;
451 451
452 mutex_init(&rdev->r600_blit.mutex);
452 rdev->r600_blit.state_offset = 0; 453 rdev->r600_blit.state_offset = 0;
453 454
454 if (rdev->family >= CHIP_RV770) 455 if (rdev->family >= CHIP_RV770)
@@ -542,9 +543,6 @@ int r600_vb_ib_get(struct radeon_device *rdev)
542void r600_vb_ib_put(struct radeon_device *rdev) 543void r600_vb_ib_put(struct radeon_device *rdev)
543{ 544{
544 radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence); 545 radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
545 mutex_lock(&rdev->ib_pool.mutex);
546 list_add_tail(&rdev->r600_blit.vb_ib->list, &rdev->ib_pool.scheduled_ibs);
547 mutex_unlock(&rdev->ib_pool.mutex);
548 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); 546 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
549} 547}
550 548
@@ -557,7 +555,8 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
557 int dwords_per_loop = 76, num_loops; 555 int dwords_per_loop = 76, num_loops;
558 556
559 r = r600_vb_ib_get(rdev); 557 r = r600_vb_ib_get(rdev);
560 WARN_ON(r); 558 if (r)
559 return r;
561 560
562 /* set_render_target emits 2 extra dwords on rv6xx */ 561 /* set_render_target emits 2 extra dwords on rv6xx */
563 if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) 562 if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770)
@@ -583,7 +582,8 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
583 ring_size += 5; /* done copy */ 582 ring_size += 5; /* done copy */
584 ring_size += 7; /* fence emit for done copy */ 583 ring_size += 7; /* fence emit for done copy */
585 r = radeon_ring_lock(rdev, ring_size); 584 r = radeon_ring_lock(rdev, ring_size);
586 WARN_ON(r); 585 if (r)
586 return r;
587 587
588 set_default_state(rdev); /* 14 */ 588 set_default_state(rdev); /* 14 */
589 set_shaders(rdev); /* 26 */ 589 set_shaders(rdev); /* 26 */
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 6d5a711c2e91..75bcf35a0931 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -1428,9 +1428,12 @@ static void r700_gfx_init(struct drm_device *dev,
1428 1428
1429 gb_tiling_config |= R600_BANK_SWAPS(1); 1429 gb_tiling_config |= R600_BANK_SWAPS(1);
1430 1430
1431 backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes, 1431 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740)
1432 dev_priv->r600_max_backends, 1432 backend_map = 0x28;
1433 (0xff << dev_priv->r600_max_backends) & 0xff); 1433 else
1434 backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
1435 dev_priv->r600_max_backends,
1436 (0xff << dev_priv->r600_max_backends) & 0xff);
1434 gb_tiling_config |= R600_BACKEND_MAP(backend_map); 1437 gb_tiling_config |= R600_BACKEND_MAP(backend_map);
1435 1438
1436 cc_gc_shader_pipe_config = 1439 cc_gc_shader_pipe_config =
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index f7df1a7e4413..c0356bb193e5 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -96,6 +96,7 @@ extern int radeon_audio;
96 * symbol; 96 * symbol;
97 */ 97 */
98#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */ 98#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
99/* RADEON_IB_POOL_SIZE must be a power of 2 */
99#define RADEON_IB_POOL_SIZE 16 100#define RADEON_IB_POOL_SIZE 16
100#define RADEON_DEBUGFS_MAX_NUM_FILES 32 101#define RADEON_DEBUGFS_MAX_NUM_FILES 32
101#define RADEONFB_CONN_LIMIT 4 102#define RADEONFB_CONN_LIMIT 4
@@ -363,11 +364,12 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
363 */ 364 */
364struct radeon_ib { 365struct radeon_ib {
365 struct list_head list; 366 struct list_head list;
366 unsigned long idx; 367 unsigned idx;
367 uint64_t gpu_addr; 368 uint64_t gpu_addr;
368 struct radeon_fence *fence; 369 struct radeon_fence *fence;
369 uint32_t *ptr; 370 uint32_t *ptr;
370 uint32_t length_dw; 371 uint32_t length_dw;
372 bool free;
371}; 373};
372 374
373/* 375/*
@@ -377,10 +379,9 @@ struct radeon_ib {
377struct radeon_ib_pool { 379struct radeon_ib_pool {
378 struct mutex mutex; 380 struct mutex mutex;
379 struct radeon_bo *robj; 381 struct radeon_bo *robj;
380 struct list_head scheduled_ibs;
381 struct radeon_ib ibs[RADEON_IB_POOL_SIZE]; 382 struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
382 bool ready; 383 bool ready;
383 DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE); 384 unsigned head_id;
384}; 385};
385 386
386struct radeon_cp { 387struct radeon_cp {
@@ -416,6 +417,7 @@ struct r600_ih {
416}; 417};
417 418
418struct r600_blit { 419struct r600_blit {
420 struct mutex mutex;
419 struct radeon_bo *shader_obj; 421 struct radeon_bo *shader_obj;
420 u64 shader_gpu_addr; 422 u64 shader_gpu_addr;
421 u32 vs_offset, ps_offset; 423 u32 vs_offset, ps_offset;
@@ -660,6 +662,13 @@ struct radeon_asic {
660 void (*hpd_fini)(struct radeon_device *rdev); 662 void (*hpd_fini)(struct radeon_device *rdev);
661 bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd); 663 bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
662 void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd); 664 void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
665 /* ioctl hw specific callback. Some hw might want to perform special
666 * operation on specific ioctl. For instance on wait idle some hw
667 * might want to perform and HDP flush through MMIO as it seems that
668 * some R6XX/R7XX hw doesn't take HDP flush into account if programmed
669 * through ring.
670 */
671 void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
663}; 672};
664 673
665/* 674/*
@@ -1142,6 +1151,7 @@ extern bool r600_card_posted(struct radeon_device *rdev);
1142extern void r600_cp_stop(struct radeon_device *rdev); 1151extern void r600_cp_stop(struct radeon_device *rdev);
1143extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); 1152extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
1144extern int r600_cp_resume(struct radeon_device *rdev); 1153extern int r600_cp_resume(struct radeon_device *rdev);
1154extern void r600_cp_fini(struct radeon_device *rdev);
1145extern int r600_count_pipe_bits(uint32_t val); 1155extern int r600_count_pipe_bits(uint32_t val);
1146extern int r600_gart_clear_page(struct radeon_device *rdev, int i); 1156extern int r600_gart_clear_page(struct radeon_device *rdev, int i);
1147extern int r600_mc_wait_for_idle(struct radeon_device *rdev); 1157extern int r600_mc_wait_for_idle(struct radeon_device *rdev);
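
The IB pool bookkeeping moves from a DECLARE_BITMAP allocator plus a scheduled_ibs list to a simple circular scan: each radeon_ib carries a free flag, the pool keeps a head_id, and the new comment insists RADEON_IB_POOL_SIZE stay a power of two so the next-slot computation can be a cheap mask. A standalone sketch of that allocation scheme (the structure names are abbreviations, not the driver's):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define POOL_SIZE 16	/* must be a power of two */

struct ib {
	unsigned idx;
	bool free;
};

static struct ib pool[POOL_SIZE];
static unsigned head;

static struct ib *ib_get(void)
{
	unsigned n;

	for (n = 0; n < POOL_SIZE; n++) {
		unsigned i = (head + n) & (POOL_SIZE - 1);	/* wrap by mask */

		if (pool[i].free) {
			pool[i].free = false;
			pool[i].idx = i;
			head = (i + 1) & (POOL_SIZE - 1);
			return &pool[i];
		}
	}
	return NULL;	/* pool exhausted */
}

static void ib_free(struct ib *ib)
{
	ib->free = true;
}

int main(void)
{
	struct ib *a, *b;
	unsigned i;

	for (i = 0; i < POOL_SIZE; i++)
		pool[i].free = true;

	a = ib_get();
	b = ib_get();
	printf("got %u and %u\n", a->idx, b->idx);
	ib_free(a);
	ib_free(b);
	return 0;
}
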
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index c9ad7f5cc1ac..c0681a5556dc 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -133,13 +133,6 @@ int radeon_agp_init(struct radeon_device *rdev)
133 bool is_v3; 133 bool is_v3;
134 int ret; 134 int ret;
135 135
136 if (rdev->ddev->agp->agp_info.aper_size < 32) {
137 dev_warn(rdev->dev, "AGP aperture to small (%dM) "
138 "need at least 32M, disabling AGP\n",
139 rdev->ddev->agp->agp_info.aper_size);
140 return -EINVAL;
141 }
142
143 /* Acquire AGP. */ 136 /* Acquire AGP. */
144 if (!rdev->ddev->agp->acquired) { 137 if (!rdev->ddev->agp->acquired) {
145 ret = drm_agp_acquire(rdev->ddev); 138 ret = drm_agp_acquire(rdev->ddev);
@@ -151,9 +144,19 @@ int radeon_agp_init(struct radeon_device *rdev)
151 144
152 ret = drm_agp_info(rdev->ddev, &info); 145 ret = drm_agp_info(rdev->ddev, &info);
153 if (ret) { 146 if (ret) {
147 drm_agp_release(rdev->ddev);
154 DRM_ERROR("Unable to get AGP info: %d\n", ret); 148 DRM_ERROR("Unable to get AGP info: %d\n", ret);
155 return ret; 149 return ret;
156 } 150 }
151
152 if (rdev->ddev->agp->agp_info.aper_size < 32) {
153 drm_agp_release(rdev->ddev);
154 dev_warn(rdev->dev, "AGP aperture too small (%zuM) "
155 "need at least 32M, disabling AGP\n",
156 rdev->ddev->agp->agp_info.aper_size);
157 return -EINVAL;
158 }
159
157 mode.mode = info.mode; 160 mode.mode = info.mode;
158 agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode; 161 agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
159 is_v3 = !!(agp_status & RADEON_AGPv3_MODE); 162 is_v3 = !!(agp_status & RADEON_AGPv3_MODE);
@@ -228,6 +231,7 @@ int radeon_agp_init(struct radeon_device *rdev)
228 ret = drm_agp_enable(rdev->ddev, mode); 231 ret = drm_agp_enable(rdev->ddev, mode);
229 if (ret) { 232 if (ret) {
230 DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode); 233 DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
234 drm_agp_release(rdev->ddev);
231 return ret; 235 return ret;
232 } 236 }
233 237
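
The AGP init changes move the aperture-size check after drm_agp_info() succeeds and pair the earlier drm_agp_acquire() with drm_agp_release() on every error exit, so a failed bring-up no longer leaves the bridge acquired (the "to small" typo and the format specifier are fixed along the way). A tiny sketch of that acquire/release pairing on error paths (the function names are placeholders):

#include <stdbool.h>
#include <stdio.h>

static bool acquired;

static int agp_acquire(void) { acquired = true; return 0; }
static void agp_release(void) { acquired = false; }
static int agp_info(int fail) { return fail ? -1 : 0; }

static int agp_init(int fail_info)
{
	if (agp_acquire())
		return -1;

	if (agp_info(fail_info)) {
		agp_release();	/* undo the acquire on every error exit */
		return -1;
	}
	/* ... validate aperture size, enable AGP ... */
	return 0;
}

int main(void)
{
	agp_init(1);
	printf("still acquired after failure? %s\n", acquired ? "yes" : "no");
	return 0;
}
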
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index f2fbd2e4e9df..05ee1aeac3fd 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -117,6 +117,7 @@ static struct radeon_asic r100_asic = {
117 .hpd_fini = &r100_hpd_fini, 117 .hpd_fini = &r100_hpd_fini,
118 .hpd_sense = &r100_hpd_sense, 118 .hpd_sense = &r100_hpd_sense,
119 .hpd_set_polarity = &r100_hpd_set_polarity, 119 .hpd_set_polarity = &r100_hpd_set_polarity,
120 .ioctl_wait_idle = NULL,
120}; 121};
121 122
122 123
@@ -176,6 +177,7 @@ static struct radeon_asic r300_asic = {
176 .hpd_fini = &r100_hpd_fini, 177 .hpd_fini = &r100_hpd_fini,
177 .hpd_sense = &r100_hpd_sense, 178 .hpd_sense = &r100_hpd_sense,
178 .hpd_set_polarity = &r100_hpd_set_polarity, 179 .hpd_set_polarity = &r100_hpd_set_polarity,
180 .ioctl_wait_idle = NULL,
179}; 181};
180 182
181/* 183/*
@@ -219,6 +221,7 @@ static struct radeon_asic r420_asic = {
219 .hpd_fini = &r100_hpd_fini, 221 .hpd_fini = &r100_hpd_fini,
220 .hpd_sense = &r100_hpd_sense, 222 .hpd_sense = &r100_hpd_sense,
221 .hpd_set_polarity = &r100_hpd_set_polarity, 223 .hpd_set_polarity = &r100_hpd_set_polarity,
224 .ioctl_wait_idle = NULL,
222}; 225};
223 226
224 227
@@ -267,6 +270,7 @@ static struct radeon_asic rs400_asic = {
267 .hpd_fini = &r100_hpd_fini, 270 .hpd_fini = &r100_hpd_fini,
268 .hpd_sense = &r100_hpd_sense, 271 .hpd_sense = &r100_hpd_sense,
269 .hpd_set_polarity = &r100_hpd_set_polarity, 272 .hpd_set_polarity = &r100_hpd_set_polarity,
273 .ioctl_wait_idle = NULL,
270}; 274};
271 275
272 276
@@ -323,6 +327,7 @@ static struct radeon_asic rs600_asic = {
323 .hpd_fini = &rs600_hpd_fini, 327 .hpd_fini = &rs600_hpd_fini,
324 .hpd_sense = &rs600_hpd_sense, 328 .hpd_sense = &rs600_hpd_sense,
325 .hpd_set_polarity = &rs600_hpd_set_polarity, 329 .hpd_set_polarity = &rs600_hpd_set_polarity,
330 .ioctl_wait_idle = NULL,
326}; 331};
327 332
328 333
@@ -370,6 +375,7 @@ static struct radeon_asic rs690_asic = {
370 .hpd_fini = &rs600_hpd_fini, 375 .hpd_fini = &rs600_hpd_fini,
371 .hpd_sense = &rs600_hpd_sense, 376 .hpd_sense = &rs600_hpd_sense,
372 .hpd_set_polarity = &rs600_hpd_set_polarity, 377 .hpd_set_polarity = &rs600_hpd_set_polarity,
378 .ioctl_wait_idle = NULL,
373}; 379};
374 380
375 381
@@ -421,6 +427,7 @@ static struct radeon_asic rv515_asic = {
421 .hpd_fini = &rs600_hpd_fini, 427 .hpd_fini = &rs600_hpd_fini,
422 .hpd_sense = &rs600_hpd_sense, 428 .hpd_sense = &rs600_hpd_sense,
423 .hpd_set_polarity = &rs600_hpd_set_polarity, 429 .hpd_set_polarity = &rs600_hpd_set_polarity,
430 .ioctl_wait_idle = NULL,
424}; 431};
425 432
426 433
@@ -463,6 +470,7 @@ static struct radeon_asic r520_asic = {
463 .hpd_fini = &rs600_hpd_fini, 470 .hpd_fini = &rs600_hpd_fini,
464 .hpd_sense = &rs600_hpd_sense, 471 .hpd_sense = &rs600_hpd_sense,
465 .hpd_set_polarity = &rs600_hpd_set_polarity, 472 .hpd_set_polarity = &rs600_hpd_set_polarity,
473 .ioctl_wait_idle = NULL,
466}; 474};
467 475
468/* 476/*
@@ -504,6 +512,7 @@ void r600_hpd_fini(struct radeon_device *rdev);
504bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); 512bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
505void r600_hpd_set_polarity(struct radeon_device *rdev, 513void r600_hpd_set_polarity(struct radeon_device *rdev,
506 enum radeon_hpd_id hpd); 514 enum radeon_hpd_id hpd);
515extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
507 516
508static struct radeon_asic r600_asic = { 517static struct radeon_asic r600_asic = {
509 .init = &r600_init, 518 .init = &r600_init,
@@ -538,6 +547,7 @@ static struct radeon_asic r600_asic = {
538 .hpd_fini = &r600_hpd_fini, 547 .hpd_fini = &r600_hpd_fini,
539 .hpd_sense = &r600_hpd_sense, 548 .hpd_sense = &r600_hpd_sense,
540 .hpd_set_polarity = &r600_hpd_set_polarity, 549 .hpd_set_polarity = &r600_hpd_set_polarity,
550 .ioctl_wait_idle = r600_ioctl_wait_idle,
541}; 551};
542 552
543/* 553/*
@@ -582,6 +592,7 @@ static struct radeon_asic rv770_asic = {
582 .hpd_fini = &r600_hpd_fini, 592 .hpd_fini = &r600_hpd_fini,
583 .hpd_sense = &r600_hpd_sense, 593 .hpd_sense = &r600_hpd_sense,
584 .hpd_set_polarity = &r600_hpd_set_polarity, 594 .hpd_set_polarity = &r600_hpd_set_polarity,
595 .ioctl_wait_idle = r600_ioctl_wait_idle,
585}; 596};
586 597
587#endif 598#endif
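
Each radeon_asic ops table grows an optional ioctl_wait_idle hook: the pre-R600 ASICs leave it NULL while r600/rv770 point it at r600_ioctl_wait_idle, so the wait-idle ioctl can perform the MMIO HDP flush only on the parts that need it. A minimal sketch of an optional per-chip callback that callers NULL-check before invoking (names are invented):

#include <stdio.h>

struct chip_ops {
	const char *name;
	void (*wait_idle_quirk)(void);	/* optional, may be NULL */
};

static void r600_quirk(void) { puts("MMIO HDP flush"); }

static const struct chip_ops r100 = { "r100", NULL };
static const struct chip_ops r600 = { "r600", r600_quirk };

static void wait_idle(const struct chip_ops *ops)
{
	printf("%s: waiting for idle\n", ops->name);
	if (ops->wait_idle_quirk)	/* only run where the hook exists */
		ops->wait_idle_quirk();
}

int main(void)
{
	wait_idle(&r100);
	wait_idle(&r600);
	return 0;
}
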
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index fa82ca74324e..4d8831548a5f 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -206,6 +206,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
206 *connector_type = DRM_MODE_CONNECTOR_DVID; 206 *connector_type = DRM_MODE_CONNECTOR_DVID;
207 } 207 }
208 208
209 /* Asrock RS600 board lists the DVI port as HDMI */
210 if ((dev->pdev->device == 0x7941) &&
211 (dev->pdev->subsystem_vendor == 0x1849) &&
212 (dev->pdev->subsystem_device == 0x7941)) {
213 if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
214 (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
215 *connector_type = DRM_MODE_CONNECTOR_DVID;
216 }
217
209 /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */ 218 /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
210 if ((dev->pdev->device == 0x7941) && 219 if ((dev->pdev->device == 0x7941) &&
211 (dev->pdev->subsystem_vendor == 0x147b) && 220 (dev->pdev->subsystem_vendor == 0x147b) &&
@@ -287,6 +296,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
287 *connector_type = DRM_MODE_CONNECTOR_DVID; 296 *connector_type = DRM_MODE_CONNECTOR_DVID;
288 } 297 }
289 298
299 /* XFX Pine Group device rv730 reports no VGA DDC lines
300 * even though they are wired up to record 0x93
301 */
302 if ((dev->pdev->device == 0x9498) &&
303 (dev->pdev->subsystem_vendor == 0x1682) &&
304 (dev->pdev->subsystem_device == 0x2452)) {
305 struct radeon_device *rdev = dev->dev_private;
306 *i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
307 }
290 return true; 308 return true;
291} 309}
292 310
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 4ddfd4b5bc51..7932dc4d6b90 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -65,31 +65,42 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
65 if (r) { 65 if (r) {
66 goto out_cleanup; 66 goto out_cleanup;
67 } 67 }
68 start_jiffies = jiffies; 68
69 for (i = 0; i < n; i++) { 69 /* r100 doesn't have dma engine so skip the test */
70 r = radeon_fence_create(rdev, &fence); 70 if (rdev->asic->copy_dma) {
71 if (r) { 71
72 goto out_cleanup; 72 start_jiffies = jiffies;
73 for (i = 0; i < n; i++) {
74 r = radeon_fence_create(rdev, &fence);
75 if (r) {
76 goto out_cleanup;
77 }
78
79 r = radeon_copy_dma(rdev, saddr, daddr,
80 size / RADEON_GPU_PAGE_SIZE, fence);
81
82 if (r) {
83 goto out_cleanup;
84 }
85 r = radeon_fence_wait(fence, false);
86 if (r) {
87 goto out_cleanup;
88 }
89 radeon_fence_unref(&fence);
73 } 90 }
74 r = radeon_copy_dma(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence); 91 end_jiffies = jiffies;
75 if (r) { 92 time = end_jiffies - start_jiffies;
76 goto out_cleanup; 93 time = jiffies_to_msecs(time);
94 if (time > 0) {
95 i = ((n * size) >> 10) / time;
96 printk(KERN_INFO "radeon: dma %u bo moves of %ukb from"
97 " %d to %d in %lums (%ukb/ms %ukb/s %uM/s)\n",
98 n, size >> 10,
99 sdomain, ddomain, time,
100 i, i * 1000, (i * 1000) / 1024);
77 } 101 }
78 r = radeon_fence_wait(fence, false);
79 if (r) {
80 goto out_cleanup;
81 }
82 radeon_fence_unref(&fence);
83 }
84 end_jiffies = jiffies;
85 time = end_jiffies - start_jiffies;
86 time = jiffies_to_msecs(time);
87 if (time > 0) {
88 i = ((n * size) >> 10) / time;
89 printk(KERN_INFO "radeon: dma %u bo moves of %ukb from %d to %d"
90 " in %lums (%ukb/ms %ukb/s %uM/s)\n", n, size >> 10,
91 sdomain, ddomain, time, i, i * 1000, (i * 1000) / 1024);
92 } 102 }
103
93 start_jiffies = jiffies; 104 start_jiffies = jiffies;
94 for (i = 0; i < n; i++) { 105 for (i = 0; i < n; i++) {
95 r = radeon_fence_create(rdev, &fence); 106 r = radeon_fence_create(rdev, &fence);
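
The benchmark hunk skips the DMA pass entirely when the ASIC has no copy_dma hook (the r100-class chips) and only prints a rate when the elapsed time is non-zero. The printed figures are the same arithmetic as before: total kilobytes moved divided by elapsed milliseconds, then scaled to KB/s and MB/s. A small sketch of that conversion, mirroring the jiffies_to_msecs() flow with plain integers:

#include <stdio.h>

int main(void)
{
	unsigned n = 1024;			/* number of buffer moves */
	unsigned size = 1024 * 1024;		/* bytes per move */
	unsigned long time_ms = 250;		/* elapsed time, already in ms */

	if (time_ms > 0) {
		unsigned kb_per_ms = ((n * size) >> 10) / time_ms;

		printf("%u moves of %ukb in %lums (%ukb/ms %ukb/s %uM/s)\n",
		       n, size >> 10, time_ms,
		       kb_per_ms, kb_per_ms * 1000, (kb_per_ms * 1000) / 1024);
	}
	return 0;
}
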
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 579c8920e081..e7b19440102e 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -971,8 +971,7 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
971 lvds->native_mode.vdisplay); 971 lvds->native_mode.vdisplay);
972 972
973 lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c); 973 lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c);
974 if (lvds->panel_vcc_delay > 2000 || lvds->panel_vcc_delay < 0) 974 lvds->panel_vcc_delay = min_t(u16, lvds->panel_vcc_delay, 2000);
975 lvds->panel_vcc_delay = 2000;
976 975
977 lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24); 976 lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24);
978 lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf; 977 lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 55266416fa47..65f81942f399 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -580,16 +580,18 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
580 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 580 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
581 struct drm_encoder *encoder; 581 struct drm_encoder *encoder;
582 struct drm_encoder_helper_funcs *encoder_funcs; 582 struct drm_encoder_helper_funcs *encoder_funcs;
583 bool dret; 583 bool dret = false;
584 enum drm_connector_status ret = connector_status_disconnected; 584 enum drm_connector_status ret = connector_status_disconnected;
585 585
586 encoder = radeon_best_single_encoder(connector); 586 encoder = radeon_best_single_encoder(connector);
587 if (!encoder) 587 if (!encoder)
588 ret = connector_status_disconnected; 588 ret = connector_status_disconnected;
589 589
590 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); 590 if (radeon_connector->ddc_bus) {
591 dret = radeon_ddc_probe(radeon_connector); 591 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
592 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); 592 dret = radeon_ddc_probe(radeon_connector);
593 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
594 }
593 if (dret) { 595 if (dret) {
594 if (radeon_connector->edid) { 596 if (radeon_connector->edid) {
595 kfree(radeon_connector->edid); 597 kfree(radeon_connector->edid);
@@ -740,11 +742,13 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
740 struct drm_mode_object *obj; 742 struct drm_mode_object *obj;
741 int i; 743 int i;
742 enum drm_connector_status ret = connector_status_disconnected; 744 enum drm_connector_status ret = connector_status_disconnected;
743 bool dret; 745 bool dret = false;
744 746
745 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); 747 if (radeon_connector->ddc_bus) {
746 dret = radeon_ddc_probe(radeon_connector); 748 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
747 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); 749 dret = radeon_ddc_probe(radeon_connector);
750 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
751 }
748 if (dret) { 752 if (dret) {
749 if (radeon_connector->edid) { 753 if (radeon_connector->edid) {
750 kfree(radeon_connector->edid); 754 kfree(radeon_connector->edid);
@@ -776,7 +780,7 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
776 * connected and the DVI port disconnected. If the edid doesn't 780 * connected and the DVI port disconnected. If the edid doesn't
777 * say HDMI, vice versa. 781 * say HDMI, vice versa.
778 */ 782 */
779 if (radeon_connector->shared_ddc && connector_status_connected) { 783 if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
780 struct drm_device *dev = connector->dev; 784 struct drm_device *dev = connector->dev;
781 struct drm_connector *list_connector; 785 struct drm_connector *list_connector;
782 struct radeon_connector *list_radeon_connector; 786 struct radeon_connector *list_radeon_connector;
@@ -1056,8 +1060,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1056 return; 1060 return;
1057 } 1061 }
1058 if (radeon_connector->ddc_bus && i2c_bus->valid) { 1062 if (radeon_connector->ddc_bus && i2c_bus->valid) {
1059 if (memcmp(&radeon_connector->ddc_bus->rec, i2c_bus, 1063 if (radeon_connector->ddc_bus->rec.i2c_id == i2c_bus->i2c_id) {
1060 sizeof(struct radeon_i2c_bus_rec)) == 0) {
1061 radeon_connector->shared_ddc = true; 1064 radeon_connector->shared_ddc = true;
1062 shared_ddc = true; 1065 shared_ddc = true;
1063 } 1066 }
@@ -1343,7 +1346,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
1343 radeon_connector->dac_load_detect = false; 1346 radeon_connector->dac_load_detect = false;
1344 drm_connector_attach_property(&radeon_connector->base, 1347 drm_connector_attach_property(&radeon_connector->base,
1345 rdev->mode_info.load_detect_property, 1348 rdev->mode_info.load_detect_property,
1346 1); 1349 radeon_connector->dac_load_detect);
1347 drm_connector_attach_property(&radeon_connector->base, 1350 drm_connector_attach_property(&radeon_connector->base,
1348 rdev->mode_info.tv_std_property, 1351 rdev->mode_info.tv_std_property,
1349 radeon_combios_get_tv_info(rdev)); 1352 radeon_combios_get_tv_info(rdev));
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 1496cb8658ef..e9d085021c1f 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -86,7 +86,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
86 &p->validated); 86 &p->validated);
87 } 87 }
88 } 88 }
89 return radeon_bo_list_validate(&p->validated, p->ib->fence); 89 return radeon_bo_list_validate(&p->validated);
90} 90}
91 91
92int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) 92int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
@@ -189,12 +189,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
189{ 189{
190 unsigned i; 190 unsigned i;
191 191
192 if (error) { 192 if (!error && parser->ib) {
193 radeon_bo_list_unvalidate(&parser->validated, 193 radeon_bo_list_fence(&parser->validated, parser->ib->fence);
194 parser->ib->fence);
195 } else {
196 radeon_bo_list_unreserve(&parser->validated);
197 } 194 }
195 radeon_bo_list_unreserve(&parser->validated);
198 for (i = 0; i < parser->nrelocs; i++) { 196 for (i = 0; i < parser->nrelocs; i++) {
199 if (parser->relocs[i].gobj) { 197 if (parser->relocs[i].gobj) {
200 mutex_lock(&parser->rdev->ddev->struct_mutex); 198 mutex_lock(&parser->rdev->ddev->struct_mutex);
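The radeon_cs changes above split fencing out of validation: buffers are fenced only after a successful parse, and reservations are always released. A rough standalone sketch of that ordering, assuming simplified stand-in types and functions rather than the real driver API:

#include <stdbool.h>
#include <stdio.h>

struct submission { bool validated, fenced, reserved; };

static int  list_validate(struct submission *s)  { s->reserved = s->validated = true; return 0; }
static void list_fence(struct submission *s)     { s->fenced = true; }
static void list_unreserve(struct submission *s) { s->reserved = false; }
static int  run_parser(struct submission *s)     { (void)s; return 0; /* may fail */ }

static int cs_submit(struct submission *s)
{
	int error = list_validate(s);   /* reserve + place buffers, no fence yet */
	if (!error)
		error = run_parser(s);
	if (!error)
		list_fence(s);          /* only a successful submission gets fenced */
	list_unreserve(s);              /* reservations are always dropped */
	return error;
}

int main(void)
{
	struct submission s = { 0 };
	int ret = cs_submit(&s);
	printf("ret=%d fenced=%d reserved=%d\n", ret, s.fenced, s.reserved);   /* 0 1 0 */
	return 0;
}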
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 6a92f994cc26..7e17a362b54b 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -278,7 +278,7 @@ static void radeon_print_display_setup(struct drm_device *dev)
278 DRM_INFO(" %s\n", connector_names[connector->connector_type]); 278 DRM_INFO(" %s\n", connector_names[connector->connector_type]);
279 if (radeon_connector->hpd.hpd != RADEON_HPD_NONE) 279 if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
280 DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]); 280 DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]);
281 if (radeon_connector->ddc_bus) 281 if (radeon_connector->ddc_bus) {
282 DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", 282 DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
283 radeon_connector->ddc_bus->rec.mask_clk_reg, 283 radeon_connector->ddc_bus->rec.mask_clk_reg,
284 radeon_connector->ddc_bus->rec.mask_data_reg, 284 radeon_connector->ddc_bus->rec.mask_data_reg,
@@ -288,6 +288,15 @@ static void radeon_print_display_setup(struct drm_device *dev)
288 radeon_connector->ddc_bus->rec.en_data_reg, 288 radeon_connector->ddc_bus->rec.en_data_reg,
289 radeon_connector->ddc_bus->rec.y_clk_reg, 289 radeon_connector->ddc_bus->rec.y_clk_reg,
290 radeon_connector->ddc_bus->rec.y_data_reg); 290 radeon_connector->ddc_bus->rec.y_data_reg);
291 } else {
292 if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
293 connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
294 connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
295 connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
296 connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
297 connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
298 DRM_INFO(" DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
299 }
291 DRM_INFO(" Encoders:\n"); 300 DRM_INFO(" Encoders:\n");
292 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 301 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
293 radeon_encoder = to_radeon_encoder(encoder); 302 radeon_encoder = to_radeon_encoder(encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index e13785282a82..c57ad606504d 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -106,9 +106,10 @@
106 * 1.29- R500 3D cmd buffer support 106 * 1.29- R500 3D cmd buffer support
107 * 1.30- Add support for occlusion queries 107 * 1.30- Add support for occlusion queries
108 * 1.31- Add support for num Z pipes from GET_PARAM 108 * 1.31- Add support for num Z pipes from GET_PARAM
109 * 1.32- fixes for rv740 setup
109 */ 110 */
110#define DRIVER_MAJOR 1 111#define DRIVER_MAJOR 1
111#define DRIVER_MINOR 31 112#define DRIVER_MINOR 32
112#define DRIVER_PATCHLEVEL 0 113#define DRIVER_PATCHLEVEL 0
113 114
114enum radeon_cp_microcode_version { 115enum radeon_cp_microcode_version {
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 82eb551970b9..3c91724457ca 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -156,6 +156,26 @@ radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t
156 return ret; 156 return ret;
157} 157}
158 158
159static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
160{
161 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
162 switch (radeon_encoder->encoder_id) {
163 case ENCODER_OBJECT_ID_INTERNAL_LVDS:
164 case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
165 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
166 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
167 case ENCODER_OBJECT_ID_INTERNAL_DVO1:
168 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
169 case ENCODER_OBJECT_ID_INTERNAL_DDI:
170 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
171 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
172 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
173 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
174 return true;
175 default:
176 return false;
177 }
178}
159void 179void
160radeon_link_encoder_connector(struct drm_device *dev) 180radeon_link_encoder_connector(struct drm_device *dev)
161{ 181{
@@ -202,7 +222,7 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
202 222
203 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 223 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
204 radeon_connector = to_radeon_connector(connector); 224 radeon_connector = to_radeon_connector(connector);
205 if (radeon_encoder->devices & radeon_connector->devices) 225 if (radeon_encoder->active_device & radeon_connector->devices)
206 return connector; 226 return connector;
207 } 227 }
208 return NULL; 228 return NULL;
@@ -676,31 +696,11 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
676 696
677 memset(&args, 0, sizeof(args)); 697 memset(&args, 0, sizeof(args));
678 698
679 if (ASIC_IS_DCE32(rdev)) { 699 if (dig->dig_encoder)
680 if (dig->dig_block) 700 index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
681 index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl); 701 else
682 else 702 index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
683 index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl); 703 num = dig->dig_encoder + 1;
684 num = dig->dig_block + 1;
685 } else {
686 switch (radeon_encoder->encoder_id) {
687 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
688 /* XXX doesn't really matter which dig encoder we pick as long as it's
689 * not already in use
690 */
691 if (dig_connector->linkb)
692 index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
693 else
694 index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
695 num = 1;
696 break;
697 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
698 /* Only dig2 encoder can drive LVTMA */
699 index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
700 num = 2;
701 break;
702 }
703 }
704 704
705 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); 705 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
706 706
@@ -822,7 +822,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
822 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); 822 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
823 } 823 }
824 if (ASIC_IS_DCE32(rdev)) { 824 if (ASIC_IS_DCE32(rdev)) {
825 if (dig->dig_block) 825 if (dig->dig_encoder == 1)
826 args.v2.acConfig.ucEncoderSel = 1; 826 args.v2.acConfig.ucEncoderSel = 1;
827 if (dig_connector->linkb) 827 if (dig_connector->linkb)
828 args.v2.acConfig.ucLinkSel = 1; 828 args.v2.acConfig.ucLinkSel = 1;
@@ -849,17 +849,16 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
849 args.v2.acConfig.fCoherentMode = 1; 849 args.v2.acConfig.fCoherentMode = 1;
850 } 850 }
851 } else { 851 } else {
852
852 args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; 853 args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
853 854
855 if (dig->dig_encoder)
856 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
857 else
858 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
859
854 switch (radeon_encoder->encoder_id) { 860 switch (radeon_encoder->encoder_id) {
855 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 861 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
856 /* XXX doesn't really matter which dig encoder we pick as long as it's
857 * not already in use
858 */
859 if (dig_connector->linkb)
860 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
861 else
862 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
863 if (rdev->flags & RADEON_IS_IGP) { 862 if (rdev->flags & RADEON_IS_IGP) {
864 if (radeon_encoder->pixel_clock > 165000) { 863 if (radeon_encoder->pixel_clock > 165000) {
865 if (dig_connector->igp_lane_info & 0x3) 864 if (dig_connector->igp_lane_info & 0x3)
@@ -878,10 +877,6 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
878 } 877 }
879 } 878 }
880 break; 879 break;
881 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
882 /* Only dig2 encoder can drive LVTMA */
883 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
884 break;
885 } 880 }
886 881
887 if (radeon_encoder->pixel_clock > 165000) 882 if (radeon_encoder->pixel_clock > 165000)
@@ -1046,6 +1041,7 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
1046 union crtc_sourc_param args; 1041 union crtc_sourc_param args;
1047 int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source); 1042 int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
1048 uint8_t frev, crev; 1043 uint8_t frev, crev;
1044 struct radeon_encoder_atom_dig *dig;
1049 1045
1050 memset(&args, 0, sizeof(args)); 1046 memset(&args, 0, sizeof(args));
1051 1047
@@ -1109,40 +1105,16 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
1109 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 1105 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
1110 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 1106 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1111 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 1107 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1112 if (ASIC_IS_DCE32(rdev)) { 1108 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
1113 if (radeon_crtc->crtc_id) 1109 dig = radeon_encoder->enc_priv;
1114 args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; 1110 if (dig->dig_encoder)
1115 else 1111 args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
1116 args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID; 1112 else
1117 } else { 1113 args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
1118 struct drm_connector *connector;
1119 struct radeon_connector *radeon_connector;
1120 struct radeon_connector_atom_dig *dig_connector;
1121
1122 connector = radeon_get_connector_for_encoder(encoder);
1123 if (!connector)
1124 return;
1125 radeon_connector = to_radeon_connector(connector);
1126 if (!radeon_connector->con_priv)
1127 return;
1128 dig_connector = radeon_connector->con_priv;
1129
1130 /* XXX doesn't really matter which dig encoder we pick as long as it's
1131 * not already in use
1132 */
1133 if (dig_connector->linkb)
1134 args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
1135 else
1136 args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
1137 }
1138 break; 1114 break;
1139 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: 1115 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
1140 args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID; 1116 args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
1141 break; 1117 break;
1142 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
1143 /* Only dig2 encoder can drive LVTMA */
1144 args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
1145 break;
1146 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: 1118 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
1147 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) 1119 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
1148 args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID; 1120 args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
@@ -1202,6 +1174,47 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
1202 } 1174 }
1203} 1175}
1204 1176
1177static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
1178{
1179 struct drm_device *dev = encoder->dev;
1180 struct radeon_device *rdev = dev->dev_private;
1181 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
1182 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1183 struct drm_encoder *test_encoder;
1184 struct radeon_encoder_atom_dig *dig;
1185 uint32_t dig_enc_in_use = 0;
 1186 /* on DCE32 an encoder can drive any block, so just use the crtc id */
1187 if (ASIC_IS_DCE32(rdev)) {
1188 return radeon_crtc->crtc_id;
1189 }
1190
1191 /* on DCE3 - LVTMA can only be driven by DIGB */
1192 list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
1193 struct radeon_encoder *radeon_test_encoder;
1194
1195 if (encoder == test_encoder)
1196 continue;
1197
1198 if (!radeon_encoder_is_digital(test_encoder))
1199 continue;
1200
1201 radeon_test_encoder = to_radeon_encoder(test_encoder);
1202 dig = radeon_test_encoder->enc_priv;
1203
1204 if (dig->dig_encoder >= 0)
1205 dig_enc_in_use |= (1 << dig->dig_encoder);
1206 }
1207
1208 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA) {
1209 if (dig_enc_in_use & 0x2)
1210 DRM_ERROR("LVDS required digital encoder 2 but it was in use - stealing\n");
1211 return 1;
1212 }
1213 if (!(dig_enc_in_use & 1))
1214 return 0;
1215 return 1;
1216}
1217
1205static void 1218static void
1206radeon_atom_encoder_mode_set(struct drm_encoder *encoder, 1219radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1207 struct drm_display_mode *mode, 1220 struct drm_display_mode *mode,
@@ -1214,12 +1227,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1214 1227
1215 if (radeon_encoder->active_device & 1228 if (radeon_encoder->active_device &
1216 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) { 1229 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
1217 if (radeon_encoder->enc_priv) { 1230 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
1218 struct radeon_encoder_atom_dig *dig; 1231 if (dig)
1219 1232 dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
1220 dig = radeon_encoder->enc_priv;
1221 dig->dig_block = radeon_crtc->crtc_id;
1222 }
1223 } 1233 }
1224 radeon_encoder->pixel_clock = adjusted_mode->clock; 1234 radeon_encoder->pixel_clock = adjusted_mode->clock;
1225 1235
@@ -1379,7 +1389,13 @@ static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
1379static void radeon_atom_encoder_disable(struct drm_encoder *encoder) 1389static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
1380{ 1390{
1381 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1391 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1392 struct radeon_encoder_atom_dig *dig;
1382 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); 1393 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
1394
1395 if (radeon_encoder_is_digital(encoder)) {
1396 dig = radeon_encoder->enc_priv;
1397 dig->dig_encoder = -1;
1398 }
1383 radeon_encoder->active_device = 0; 1399 radeon_encoder->active_device = 0;
1384} 1400}
1385 1401
@@ -1436,6 +1452,7 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
1436 1452
1437 /* coherent mode by default */ 1453 /* coherent mode by default */
1438 dig->coherent_mode = true; 1454 dig->coherent_mode = true;
1455 dig->dig_encoder = -1;
1439 1456
1440 return dig; 1457 return dig;
1441} 1458}
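The new radeon_atom_pick_dig_encoder above builds a bitmask of DIG blocks claimed by other digital encoders and then picks a free one, with LVTMA pinned to DIG2 on DCE3. A small self-contained sketch of just the selection step (the encoder-list walk, the DCE32 shortcut and locking are omitted; names are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* in_use: bit 0 = DIG1 claimed, bit 1 = DIG2 claimed */
static int pick_dig_encoder(unsigned in_use, bool lvtma)
{
	if (lvtma) {
		/* on DCE3 only the second DIG block can drive LVTMA */
		if (in_use & 0x2)
			fprintf(stderr, "DIG2 required but already in use - stealing\n");
		return 1;
	}
	return (in_use & 0x1) ? 1 : 0;   /* prefer DIG1 when it is free */
}

int main(void)
{
	printf("%d\n", pick_dig_encoder(0x0, false));   /* 0: DIG1 free */
	printf("%d\n", pick_dig_encoder(0x1, false));   /* 1: DIG1 busy */
	printf("%d\n", pick_dig_encoder(0x0, true));    /* 1: LVTMA always DIG2 */
	return 0;
}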
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 3ba213d1b06c..d71e346e9ab5 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -248,7 +248,7 @@ int radeonfb_create(struct drm_device *dev,
248 if (ret) 248 if (ret)
249 goto out_unref; 249 goto out_unref;
250 250
251 memset_io(fbptr, 0xff, aligned_size); 251 memset_io(fbptr, 0x0, aligned_size);
252 252
253 strcpy(info->fix.id, "radeondrmfb"); 253 strcpy(info->fix.id, "radeondrmfb");
254 254
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 0e1325e18534..db8e9a355a01 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -308,6 +308,9 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
308 } 308 }
309 robj = gobj->driver_private; 309 robj = gobj->driver_private;
310 r = radeon_bo_wait(robj, NULL, false); 310 r = radeon_bo_wait(robj, NULL, false);
311 /* callback hw specific functions if any */
312 if (robj->rdev->asic->ioctl_wait_idle)
313 robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
311 mutex_lock(&dev->struct_mutex); 314 mutex_lock(&dev->struct_mutex);
312 drm_gem_object_unreference(gobj); 315 drm_gem_object_unreference(gobj);
313 mutex_unlock(&dev->struct_mutex); 316 mutex_unlock(&dev->struct_mutex);
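The radeon_gem hunk above adds an optional per-ASIC hook that runs after the generic wait. A tiny sketch of that dispatch pattern (the struct and function names here are invented for illustration, not the driver's types):

#include <stdio.h>

struct device;

struct asic_ops {
	void (*ioctl_wait_idle)(struct device *dev);   /* NULL when no extra flush is needed */
};

static void chip_specific_flush(struct device *dev) { (void)dev; puts("extra flush"); }

static void wait_idle(struct device *dev, const struct asic_ops *ops)
{
	/* ... generic buffer wait would happen here ... */
	if (ops->ioctl_wait_idle)           /* call the hw-specific hook only if present */
		ops->ioctl_wait_idle(dev);
}

int main(void)
{
	struct asic_ops with_hook = { .ioctl_wait_idle = chip_specific_flush };
	struct asic_ops without   = { 0 };
	wait_idle(NULL, &with_hook);
	wait_idle(NULL, &without);
	return 0;
}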
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 96b851f92f4c..e81b2aeb6a8f 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -299,7 +299,7 @@ struct radeon_atom_ss {
299struct radeon_encoder_atom_dig { 299struct radeon_encoder_atom_dig {
300 /* atom dig */ 300 /* atom dig */
301 bool coherent_mode; 301 bool coherent_mode;
302 int dig_block; 302 int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */
303 /* atom lvds */ 303 /* atom lvds */
304 uint32_t lvds_misc; 304 uint32_t lvds_misc;
305 uint16_t panel_pwr_delay; 305 uint16_t panel_pwr_delay;
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d72a71bff218..f1da370928eb 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -306,11 +306,10 @@ void radeon_bo_list_unreserve(struct list_head *head)
306 } 306 }
307} 307}
308 308
309int radeon_bo_list_validate(struct list_head *head, void *fence) 309int radeon_bo_list_validate(struct list_head *head)
310{ 310{
311 struct radeon_bo_list *lobj; 311 struct radeon_bo_list *lobj;
312 struct radeon_bo *bo; 312 struct radeon_bo *bo;
313 struct radeon_fence *old_fence = NULL;
314 int r; 313 int r;
315 314
316 r = radeon_bo_list_reserve(head); 315 r = radeon_bo_list_reserve(head);
@@ -334,32 +333,27 @@ int radeon_bo_list_validate(struct list_head *head, void *fence)
334 } 333 }
335 lobj->gpu_offset = radeon_bo_gpu_offset(bo); 334 lobj->gpu_offset = radeon_bo_gpu_offset(bo);
336 lobj->tiling_flags = bo->tiling_flags; 335 lobj->tiling_flags = bo->tiling_flags;
337 if (fence) {
338 old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
339 bo->tbo.sync_obj = radeon_fence_ref(fence);
340 bo->tbo.sync_obj_arg = NULL;
341 }
342 if (old_fence) {
343 radeon_fence_unref(&old_fence);
344 }
345 } 336 }
346 return 0; 337 return 0;
347} 338}
348 339
349void radeon_bo_list_unvalidate(struct list_head *head, void *fence) 340void radeon_bo_list_fence(struct list_head *head, void *fence)
350{ 341{
351 struct radeon_bo_list *lobj; 342 struct radeon_bo_list *lobj;
352 struct radeon_fence *old_fence; 343 struct radeon_bo *bo;
353 344 struct radeon_fence *old_fence = NULL;
354 if (fence) 345
355 list_for_each_entry(lobj, head, list) { 346 list_for_each_entry(lobj, head, list) {
356 old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj); 347 bo = lobj->bo;
357 if (old_fence == fence) { 348 spin_lock(&bo->tbo.lock);
358 lobj->bo->tbo.sync_obj = NULL; 349 old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
359 radeon_fence_unref(&old_fence); 350 bo->tbo.sync_obj = radeon_fence_ref(fence);
360 } 351 bo->tbo.sync_obj_arg = NULL;
352 spin_unlock(&bo->tbo.lock);
353 if (old_fence) {
354 radeon_fence_unref(&old_fence);
361 } 355 }
362 radeon_bo_list_unreserve(head); 356 }
363} 357}
364 358
365int radeon_bo_fbdev_mmap(struct radeon_bo *bo, 359int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
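radeon_bo_list_fence above swaps each buffer's sync object for a reference on the new fence and releases the reference held on the old one. A compact userspace sketch of that reference-counted swap (the per-bo spinlock reduced to a comment; all names are stand-ins):

#include <stdio.h>
#include <stdlib.h>

struct fence { int refcount; };

static struct fence *fence_ref(struct fence *f) { f->refcount++; return f; }

static void fence_unref(struct fence **f)
{
	if (*f && --(*f)->refcount == 0)
		free(*f);
	*f = NULL;
}

struct bo { struct fence *sync_obj; };

static void bo_attach_fence(struct bo *bo, struct fence *fence)
{
	/* per-bo lock held in the driver while the pointer is swapped */
	struct fence *old = bo->sync_obj;
	bo->sync_obj = fence_ref(fence);
	/* the displaced fence is released after the swap */
	if (old)
		fence_unref(&old);
}

int main(void)
{
	struct fence *f = calloc(1, sizeof(*f));
	struct bo b = { 0 };
	if (!f)
		return 1;
	f->refcount = 1;                                       /* caller's reference */
	bo_attach_fence(&b, f);
	printf("refcount after attach: %d\n", f->refcount);   /* 2 */
	fence_unref(&b.sync_obj);
	fence_unref(&f);
	return 0;
}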
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index a02f18011ad1..7ab43de1e244 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -156,8 +156,8 @@ extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
156 struct list_head *head); 156 struct list_head *head);
157extern int radeon_bo_list_reserve(struct list_head *head); 157extern int radeon_bo_list_reserve(struct list_head *head);
158extern void radeon_bo_list_unreserve(struct list_head *head); 158extern void radeon_bo_list_unreserve(struct list_head *head);
159extern int radeon_bo_list_validate(struct list_head *head, void *fence); 159extern int radeon_bo_list_validate(struct list_head *head);
160extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence); 160extern void radeon_bo_list_fence(struct list_head *head, void *fence);
161extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo, 161extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
162 struct vm_area_struct *vma); 162 struct vm_area_struct *vma);
163extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo, 163extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 4d12b2d17b4d..6579eb4c1f28 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -41,68 +41,55 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
41{ 41{
42 struct radeon_fence *fence; 42 struct radeon_fence *fence;
43 struct radeon_ib *nib; 43 struct radeon_ib *nib;
44 unsigned long i; 44 int r = 0, i, c;
45 int r = 0;
46 45
47 *ib = NULL; 46 *ib = NULL;
48 r = radeon_fence_create(rdev, &fence); 47 r = radeon_fence_create(rdev, &fence);
49 if (r) { 48 if (r) {
50 DRM_ERROR("failed to create fence for new IB\n"); 49 dev_err(rdev->dev, "failed to create fence for new IB\n");
51 return r; 50 return r;
52 } 51 }
53 mutex_lock(&rdev->ib_pool.mutex); 52 mutex_lock(&rdev->ib_pool.mutex);
54 i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE); 53 for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
55 if (i < RADEON_IB_POOL_SIZE) { 54 i &= (RADEON_IB_POOL_SIZE - 1);
56 set_bit(i, rdev->ib_pool.alloc_bm); 55 if (rdev->ib_pool.ibs[i].free) {
57 rdev->ib_pool.ibs[i].length_dw = 0; 56 nib = &rdev->ib_pool.ibs[i];
58 *ib = &rdev->ib_pool.ibs[i]; 57 break;
59 mutex_unlock(&rdev->ib_pool.mutex); 58 }
60 goto out;
61 } 59 }
62 if (list_empty(&rdev->ib_pool.scheduled_ibs)) { 60 if (nib == NULL) {
63 /* we go do nothings here */ 61 /* This should never happen, it means we allocated all
62 * IB and haven't scheduled one yet, return EBUSY to
63 * userspace hoping that on ioctl recall we get better
64 * luck
65 */
66 dev_err(rdev->dev, "no free indirect buffer !\n");
64 mutex_unlock(&rdev->ib_pool.mutex); 67 mutex_unlock(&rdev->ib_pool.mutex);
65 DRM_ERROR("all IB allocated none scheduled.\n"); 68 radeon_fence_unref(&fence);
66 r = -EINVAL; 69 return -EBUSY;
67 goto out;
68 } 70 }
69 /* get the first ib on the scheduled list */ 71 rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
70 nib = list_entry(rdev->ib_pool.scheduled_ibs.next, 72 nib->free = false;
71 struct radeon_ib, list); 73 if (nib->fence) {
72 if (nib->fence == NULL) {
73 /* we go do nothings here */
74 mutex_unlock(&rdev->ib_pool.mutex); 74 mutex_unlock(&rdev->ib_pool.mutex);
75 DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx); 75 r = radeon_fence_wait(nib->fence, false);
76 r = -EINVAL; 76 if (r) {
77 goto out; 77 dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
78 } 78 nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
79 mutex_unlock(&rdev->ib_pool.mutex); 79 mutex_lock(&rdev->ib_pool.mutex);
80 80 nib->free = true;
81 r = radeon_fence_wait(nib->fence, false); 81 mutex_unlock(&rdev->ib_pool.mutex);
82 if (r) { 82 radeon_fence_unref(&fence);
83 DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx, 83 return r;
84 (unsigned long)nib->gpu_addr, nib->length_dw); 84 }
85 DRM_ERROR("radeon: GPU lockup detected, fail to get a IB\n"); 85 mutex_lock(&rdev->ib_pool.mutex);
86 goto out;
87 } 86 }
88 radeon_fence_unref(&nib->fence); 87 radeon_fence_unref(&nib->fence);
89 88 nib->fence = fence;
90 nib->length_dw = 0; 89 nib->length_dw = 0;
91
92 /* scheduled list is accessed here */
93 mutex_lock(&rdev->ib_pool.mutex);
94 list_del(&nib->list);
95 INIT_LIST_HEAD(&nib->list);
96 mutex_unlock(&rdev->ib_pool.mutex); 90 mutex_unlock(&rdev->ib_pool.mutex);
97
98 *ib = nib; 91 *ib = nib;
99out: 92 return 0;
100 if (r) {
101 radeon_fence_unref(&fence);
102 } else {
103 (*ib)->fence = fence;
104 }
105 return r;
106} 93}
107 94
108void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib) 95void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
@@ -113,19 +100,10 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
113 if (tmp == NULL) { 100 if (tmp == NULL) {
114 return; 101 return;
115 } 102 }
116 mutex_lock(&rdev->ib_pool.mutex); 103 if (!tmp->fence->emited)
117 if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) {
118 /* IB is scheduled & not signaled don't do anythings */
119 mutex_unlock(&rdev->ib_pool.mutex);
120 return;
121 }
122 list_del(&tmp->list);
123 INIT_LIST_HEAD(&tmp->list);
124 if (tmp->fence)
125 radeon_fence_unref(&tmp->fence); 104 radeon_fence_unref(&tmp->fence);
126 105 mutex_lock(&rdev->ib_pool.mutex);
127 tmp->length_dw = 0; 106 tmp->free = true;
128 clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
129 mutex_unlock(&rdev->ib_pool.mutex); 107 mutex_unlock(&rdev->ib_pool.mutex);
130} 108}
131 109
@@ -135,7 +113,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
135 113
136 if (!ib->length_dw || !rdev->cp.ready) { 114 if (!ib->length_dw || !rdev->cp.ready) {
137 /* TODO: Nothings in the ib we should report. */ 115 /* TODO: Nothings in the ib we should report. */
138 DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx); 116 DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
139 return -EINVAL; 117 return -EINVAL;
140 } 118 }
141 119
@@ -148,7 +126,8 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
148 radeon_ring_ib_execute(rdev, ib); 126 radeon_ring_ib_execute(rdev, ib);
149 radeon_fence_emit(rdev, ib->fence); 127 radeon_fence_emit(rdev, ib->fence);
150 mutex_lock(&rdev->ib_pool.mutex); 128 mutex_lock(&rdev->ib_pool.mutex);
151 list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs); 129 /* once scheduled IB is considered free and protected by the fence */
130 ib->free = true;
152 mutex_unlock(&rdev->ib_pool.mutex); 131 mutex_unlock(&rdev->ib_pool.mutex);
153 radeon_ring_unlock_commit(rdev); 132 radeon_ring_unlock_commit(rdev);
154 return 0; 133 return 0;
@@ -164,7 +143,6 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
164 if (rdev->ib_pool.robj) 143 if (rdev->ib_pool.robj)
165 return 0; 144 return 0;
166 /* Allocate 1M object buffer */ 145 /* Allocate 1M object buffer */
167 INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
168 r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024, 146 r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
169 true, RADEON_GEM_DOMAIN_GTT, 147 true, RADEON_GEM_DOMAIN_GTT,
170 &rdev->ib_pool.robj); 148 &rdev->ib_pool.robj);
@@ -195,9 +173,9 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
195 rdev->ib_pool.ibs[i].ptr = ptr + offset; 173 rdev->ib_pool.ibs[i].ptr = ptr + offset;
196 rdev->ib_pool.ibs[i].idx = i; 174 rdev->ib_pool.ibs[i].idx = i;
197 rdev->ib_pool.ibs[i].length_dw = 0; 175 rdev->ib_pool.ibs[i].length_dw = 0;
198 INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list); 176 rdev->ib_pool.ibs[i].free = true;
199 } 177 }
200 bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE); 178 rdev->ib_pool.head_id = 0;
201 rdev->ib_pool.ready = true; 179 rdev->ib_pool.ready = true;
202 DRM_INFO("radeon: ib pool ready.\n"); 180 DRM_INFO("radeon: ib pool ready.\n");
203 if (radeon_debugfs_ib_init(rdev)) { 181 if (radeon_debugfs_ib_init(rdev)) {
@@ -214,7 +192,6 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
214 return; 192 return;
215 } 193 }
216 mutex_lock(&rdev->ib_pool.mutex); 194 mutex_lock(&rdev->ib_pool.mutex);
217 bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
218 if (rdev->ib_pool.robj) { 195 if (rdev->ib_pool.robj) {
219 r = radeon_bo_reserve(rdev->ib_pool.robj, false); 196 r = radeon_bo_reserve(rdev->ib_pool.robj, false);
220 if (likely(r == 0)) { 197 if (likely(r == 0)) {
@@ -363,7 +340,7 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
363 if (ib == NULL) { 340 if (ib == NULL) {
364 return 0; 341 return 0;
365 } 342 }
366 seq_printf(m, "IB %04lu\n", ib->idx); 343 seq_printf(m, "IB %04u\n", ib->idx);
367 seq_printf(m, "IB fence %p\n", ib->fence); 344 seq_printf(m, "IB fence %p\n", ib->fence);
368 seq_printf(m, "IB size %05u dwords\n", ib->length_dw); 345 seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
369 for (i = 0; i < ib->length_dw; i++) { 346 for (i = 0; i < ib->length_dw; i++) {
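The radeon_ring rework above drops the allocation bitmap and scheduled list in favour of a per-IB free flag and a rotating head_id, so allocations cycle through the pool and a still-busy IB is waited on via its fence. A self-contained sketch of the circular scan (mutexes and fence waits omitted; POOL_SIZE is an arbitrary power of two here):

#include <stdbool.h>
#include <stdio.h>

#define POOL_SIZE 16   /* power of two, like RADEON_IB_POOL_SIZE */

struct ib { unsigned idx; bool free; };

static struct ib pool[POOL_SIZE];
static unsigned head_id;

static struct ib *ib_get(void)
{
	/* scan circularly from head_id and take the first free slot */
	for (unsigned c = 0, i = head_id; c < POOL_SIZE; c++, i++) {
		i &= POOL_SIZE - 1;
		if (pool[i].free) {
			pool[i].free = false;
			head_id = (i + 1) & (POOL_SIZE - 1);   /* rotate past it */
			return &pool[i];
		}
	}
	return NULL;   /* every IB in flight: the driver returns -EBUSY */
}

int main(void)
{
	for (unsigned i = 0; i < POOL_SIZE; i++)
		pool[i] = (struct ib){ .idx = i, .free = true };
	struct ib *a = ib_get();
	struct ib *b = ib_get();
	printf("got %u then %u\n", a->idx, b->idx);   /* 0 then 1 */
	a->free = true;                               /* "scheduled": fence now protects it */
	printf("next: %u\n", ib_get()->idx);          /* 2 - rotation, not immediate reuse */
	return 0;
}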
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 9f5418983e2a..287fcebfb4e6 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -223,15 +223,31 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
223 return 0; 223 return 0;
224} 224}
225 225
226int rs400_mc_wait_for_idle(struct radeon_device *rdev)
227{
228 unsigned i;
229 uint32_t tmp;
230
231 for (i = 0; i < rdev->usec_timeout; i++) {
232 /* read MC_STATUS */
233 tmp = RREG32(0x0150);
234 if (tmp & (1 << 2)) {
235 return 0;
236 }
237 DRM_UDELAY(1);
238 }
239 return -1;
240}
241
226void rs400_gpu_init(struct radeon_device *rdev) 242void rs400_gpu_init(struct radeon_device *rdev)
227{ 243{
228 /* FIXME: HDP same place on rs400 ? */ 244 /* FIXME: HDP same place on rs400 ? */
229 r100_hdp_reset(rdev); 245 r100_hdp_reset(rdev);
230 /* FIXME: is this correct ? */ 246 /* FIXME: is this correct ? */
231 r420_pipes_init(rdev); 247 r420_pipes_init(rdev);
232 if (r300_mc_wait_for_idle(rdev)) { 248 if (rs400_mc_wait_for_idle(rdev)) {
233 printk(KERN_WARNING "Failed to wait MC idle while " 249 printk(KERN_WARNING "rs400: Failed to wait MC idle while "
234 "programming pipes. Bad things might happen.\n"); 250 "programming pipes. Bad things might happen. %08x\n", RREG32(0x150));
235 } 251 }
236} 252}
237 253
@@ -370,8 +386,8 @@ void rs400_mc_program(struct radeon_device *rdev)
370 r100_mc_stop(rdev, &save); 386 r100_mc_stop(rdev, &save);
371 387
372 /* Wait for mc idle */ 388 /* Wait for mc idle */
373 if (r300_mc_wait_for_idle(rdev)) 389 if (rs400_mc_wait_for_idle(rdev))
374 dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); 390 dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n");
375 WREG32(R_000148_MC_FB_LOCATION, 391 WREG32(R_000148_MC_FB_LOCATION,
376 S_000148_MC_FB_START(rdev->mc.vram_start >> 16) | 392 S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
377 S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); 393 S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
@@ -448,7 +464,6 @@ int rs400_suspend(struct radeon_device *rdev)
448 464
449void rs400_fini(struct radeon_device *rdev) 465void rs400_fini(struct radeon_device *rdev)
450{ 466{
451 rs400_suspend(rdev);
452 r100_cp_fini(rdev); 467 r100_cp_fini(rdev);
453 r100_wb_fini(rdev); 468 r100_wb_fini(rdev);
454 r100_ib_fini(rdev); 469 r100_ib_fini(rdev);
@@ -527,7 +542,6 @@ int rs400_init(struct radeon_device *rdev)
527 if (r) { 542 if (r) {
528 /* Somethings want wront with the accel init stop accel */ 543 /* Somethings want wront with the accel init stop accel */
529 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 544 dev_err(rdev->dev, "Disabling GPU acceleration\n");
530 rs400_suspend(rdev);
531 r100_cp_fini(rdev); 545 r100_cp_fini(rdev);
532 r100_wb_fini(rdev); 546 r100_wb_fini(rdev);
533 r100_ib_fini(rdev); 547 r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index d5255751e7b3..c3818562a13e 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -610,7 +610,6 @@ int rs600_suspend(struct radeon_device *rdev)
610 610
611void rs600_fini(struct radeon_device *rdev) 611void rs600_fini(struct radeon_device *rdev)
612{ 612{
613 rs600_suspend(rdev);
614 r100_cp_fini(rdev); 613 r100_cp_fini(rdev);
615 r100_wb_fini(rdev); 614 r100_wb_fini(rdev);
616 r100_ib_fini(rdev); 615 r100_ib_fini(rdev);
@@ -689,7 +688,6 @@ int rs600_init(struct radeon_device *rdev)
689 if (r) { 688 if (r) {
690 /* Somethings want wront with the accel init stop accel */ 689 /* Somethings want wront with the accel init stop accel */
691 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 690 dev_err(rdev->dev, "Disabling GPU acceleration\n");
692 rs600_suspend(rdev);
693 r100_cp_fini(rdev); 691 r100_cp_fini(rdev);
694 r100_wb_fini(rdev); 692 r100_wb_fini(rdev);
695 r100_ib_fini(rdev); 693 r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index cd31da913771..06e2771aee5a 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -676,7 +676,6 @@ int rs690_suspend(struct radeon_device *rdev)
676 676
677void rs690_fini(struct radeon_device *rdev) 677void rs690_fini(struct radeon_device *rdev)
678{ 678{
679 rs690_suspend(rdev);
680 r100_cp_fini(rdev); 679 r100_cp_fini(rdev);
681 r100_wb_fini(rdev); 680 r100_wb_fini(rdev);
682 r100_ib_fini(rdev); 681 r100_ib_fini(rdev);
@@ -756,7 +755,6 @@ int rs690_init(struct radeon_device *rdev)
756 if (r) { 755 if (r) {
757 /* Somethings want wront with the accel init stop accel */ 756 /* Somethings want wront with the accel init stop accel */
758 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 757 dev_err(rdev->dev, "Disabling GPU acceleration\n");
759 rs690_suspend(rdev);
760 r100_cp_fini(rdev); 758 r100_cp_fini(rdev);
761 r100_wb_fini(rdev); 759 r100_wb_fini(rdev);
762 r100_ib_fini(rdev); 760 r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 62756717b044..0e1e6b8632b8 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -537,7 +537,6 @@ void rv515_set_safe_registers(struct radeon_device *rdev)
537 537
538void rv515_fini(struct radeon_device *rdev) 538void rv515_fini(struct radeon_device *rdev)
539{ 539{
540 rv515_suspend(rdev);
541 r100_cp_fini(rdev); 540 r100_cp_fini(rdev);
542 r100_wb_fini(rdev); 541 r100_wb_fini(rdev);
543 r100_ib_fini(rdev); 542 r100_ib_fini(rdev);
@@ -615,13 +614,12 @@ int rv515_init(struct radeon_device *rdev)
615 if (r) { 614 if (r) {
616 /* Somethings want wront with the accel init stop accel */ 615 /* Somethings want wront with the accel init stop accel */
617 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 616 dev_err(rdev->dev, "Disabling GPU acceleration\n");
618 rv515_suspend(rdev);
619 r100_cp_fini(rdev); 617 r100_cp_fini(rdev);
620 r100_wb_fini(rdev); 618 r100_wb_fini(rdev);
621 r100_ib_fini(rdev); 619 r100_ib_fini(rdev);
620 radeon_irq_kms_fini(rdev);
622 rv370_pcie_gart_fini(rdev); 621 rv370_pcie_gart_fini(rdev);
623 radeon_agp_fini(rdev); 622 radeon_agp_fini(rdev);
624 radeon_irq_kms_fini(rdev);
625 rdev->accel_working = false; 623 rdev->accel_working = false;
626 } 624 }
627 return 0; 625 return 0;
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 55f6ffc4e58b..03021674d097 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -549,9 +549,12 @@ static void rv770_gpu_init(struct radeon_device *rdev)
549 549
550 gb_tiling_config |= BANK_SWAPS(1); 550 gb_tiling_config |= BANK_SWAPS(1);
551 551
552 backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes, 552 if (rdev->family == CHIP_RV740)
553 rdev->config.rv770.max_backends, 553 backend_map = 0x28;
554 (0xff << rdev->config.rv770.max_backends) & 0xff); 554 else
555 backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
556 rdev->config.rv770.max_backends,
557 (0xff << rdev->config.rv770.max_backends) & 0xff);
555 gb_tiling_config |= BACKEND_MAP(backend_map); 558 gb_tiling_config |= BACKEND_MAP(backend_map);
556 559
557 cc_gc_shader_pipe_config = 560 cc_gc_shader_pipe_config =
@@ -887,26 +890,25 @@ static int rv770_startup(struct radeon_device *rdev)
887 return r; 890 return r;
888 } 891 }
889 rv770_gpu_init(rdev); 892 rv770_gpu_init(rdev);
890 893 r = r600_blit_init(rdev);
891 if (!rdev->r600_blit.shader_obj) { 894 if (r) {
892 r = r600_blit_init(rdev); 895 r600_blit_fini(rdev);
896 rdev->asic->copy = NULL;
897 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
898 }
899 /* pin copy shader into vram */
900 if (rdev->r600_blit.shader_obj) {
901 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
902 if (unlikely(r != 0))
903 return r;
904 r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
905 &rdev->r600_blit.shader_gpu_addr);
906 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
893 if (r) { 907 if (r) {
894 DRM_ERROR("radeon: failed blitter (%d).\n", r); 908 DRM_ERROR("failed to pin blit object %d\n", r);
895 return r; 909 return r;
896 } 910 }
897 } 911 }
898
899 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
900 if (unlikely(r != 0))
901 return r;
902 r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
903 &rdev->r600_blit.shader_gpu_addr);
904 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
905 if (r) {
906 DRM_ERROR("failed to pin blit object %d\n", r);
907 return r;
908 }
909
910 /* Enable IRQ */ 912 /* Enable IRQ */
911 r = r600_irq_init(rdev); 913 r = r600_irq_init(rdev);
912 if (r) { 914 if (r) {
@@ -1066,9 +1068,11 @@ int rv770_init(struct radeon_device *rdev)
1066 rdev->accel_working = true; 1068 rdev->accel_working = true;
1067 r = rv770_startup(rdev); 1069 r = rv770_startup(rdev);
1068 if (r) { 1070 if (r) {
1069 rv770_suspend(rdev); 1071 dev_err(rdev->dev, "disabling GPU acceleration\n");
1072 r600_cp_fini(rdev);
1070 r600_wb_fini(rdev); 1073 r600_wb_fini(rdev);
1071 radeon_ring_fini(rdev); 1074 r600_irq_fini(rdev);
1075 radeon_irq_kms_fini(rdev);
1072 rv770_pcie_gart_fini(rdev); 1076 rv770_pcie_gart_fini(rdev);
1073 rdev->accel_working = false; 1077 rdev->accel_working = false;
1074 } 1078 }
@@ -1090,13 +1094,11 @@ int rv770_init(struct radeon_device *rdev)
1090 1094
1091void rv770_fini(struct radeon_device *rdev) 1095void rv770_fini(struct radeon_device *rdev)
1092{ 1096{
1093 rv770_suspend(rdev);
1094
1095 r600_blit_fini(rdev); 1097 r600_blit_fini(rdev);
1098 r600_cp_fini(rdev);
1099 r600_wb_fini(rdev);
1096 r600_irq_fini(rdev); 1100 r600_irq_fini(rdev);
1097 radeon_irq_kms_fini(rdev); 1101 radeon_irq_kms_fini(rdev);
1098 radeon_ring_fini(rdev);
1099 r600_wb_fini(rdev);
1100 rv770_pcie_gart_fini(rdev); 1102 rv770_pcie_gart_fini(rdev);
1101 radeon_gem_fini(rdev); 1103 radeon_gem_fini(rdev);
1102 radeon_fence_driver_fini(rdev); 1104 radeon_fence_driver_fini(rdev);
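rv770_startup above no longer aborts when the blit engine fails to initialise; it clears the accelerated copy hook, warns, and falls back to CPU copies. A small sketch of that graceful-degradation pattern (the gpu struct and functions are invented for illustration):

#include <stdio.h>

struct gpu {
	int (*copy)(struct gpu *g);   /* accelerated path, may stay unset */
};

static int blit_copy(struct gpu *g) { (void)g; return 0; }
static int blit_init(void)          { return -22; /* pretend shader upload failed */ }

static void startup(struct gpu *g)
{
	int r = blit_init();
	if (r) {
		g->copy = NULL;   /* keep going, just without the blitter */
		fprintf(stderr, "failed blitter (%d) falling back to memcpy\n", r);
	} else {
		g->copy = blit_copy;
	}
}

int main(void)
{
	struct gpu g = { 0 };
	startup(&g);
	printf("accelerated copy %savailable\n", g.copy ? "" : "not ");
	return 0;
}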
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 1a3e909b7bba..c7320ce4567d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1020,6 +1020,12 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
1020 struct ttm_mem_reg *mem) 1020 struct ttm_mem_reg *mem)
1021{ 1021{
1022 int i; 1022 int i;
1023 struct drm_mm_node *node = mem->mm_node;
1024
1025 if (node && placement->lpfn != 0 &&
1026 (node->start < placement->fpfn ||
1027 node->start + node->size > placement->lpfn))
1028 return -1;
1023 1029
1024 for (i = 0; i < placement->num_placement; i++) { 1030 for (i = 0; i < placement->num_placement; i++) {
1025 if ((placement->placement[i] & mem->placement & 1031 if ((placement->placement[i] & mem->placement &
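The ttm_bo_mem_compat change above also rejects a placement when the buffer's existing allocation lies outside the requested page range. A minimal sketch of that range check (lpfn == 0 meaning "no upper bound", as in the code above; the types are simplified stand-ins):

#include <stdbool.h>
#include <stdio.h>

struct node      { unsigned long start, size; };   /* current allocation, in pages */
struct placement { unsigned long fpfn, lpfn; };    /* allowed page range */

static bool range_compatible(const struct node *n, const struct placement *p)
{
	if (p->lpfn == 0)
		return true;   /* no restriction requested */
	return n->start >= p->fpfn && n->start + n->size <= p->lpfn;
}

int main(void)
{
	struct node n = { .start = 300, .size = 50 };
	struct placement lowmem = { .fpfn = 0, .lpfn = 256 };
	struct placement any    = { 0 };
	printf("%d %d\n", range_compatible(&n, &lowmem), range_compatible(&n, &any));   /* 0 1 */
	return 0;
}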
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 2ecf7d0c64f6..5ca37a58a98c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -53,7 +53,6 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
53{ 53{
54 struct ttm_tt *ttm = bo->ttm; 54 struct ttm_tt *ttm = bo->ttm;
55 struct ttm_mem_reg *old_mem = &bo->mem; 55 struct ttm_mem_reg *old_mem = &bo->mem;
56 uint32_t save_flags = old_mem->placement;
57 int ret; 56 int ret;
58 57
59 if (old_mem->mem_type != TTM_PL_SYSTEM) { 58 if (old_mem->mem_type != TTM_PL_SYSTEM) {
@@ -62,7 +61,6 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
62 ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM, 61 ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
63 TTM_PL_MASK_MEM); 62 TTM_PL_MASK_MEM);
64 old_mem->mem_type = TTM_PL_SYSTEM; 63 old_mem->mem_type = TTM_PL_SYSTEM;
65 save_flags = old_mem->placement;
66 } 64 }
67 65
68 ret = ttm_tt_set_placement_caching(ttm, new_mem->placement); 66 ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
@@ -77,7 +75,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
77 75
78 *old_mem = *new_mem; 76 *old_mem = *new_mem;
79 new_mem->mm_node = NULL; 77 new_mem->mm_node = NULL;
80 ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE); 78
81 return 0; 79 return 0;
82} 80}
83EXPORT_SYMBOL(ttm_bo_move_ttm); 81EXPORT_SYMBOL(ttm_bo_move_ttm);
@@ -219,7 +217,6 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
219 void *old_iomap; 217 void *old_iomap;
220 void *new_iomap; 218 void *new_iomap;
221 int ret; 219 int ret;
222 uint32_t save_flags = old_mem->placement;
223 unsigned long i; 220 unsigned long i;
224 unsigned long page; 221 unsigned long page;
225 unsigned long add = 0; 222 unsigned long add = 0;
@@ -270,7 +267,6 @@ out2:
270 267
271 *old_mem = *new_mem; 268 *old_mem = *new_mem;
272 new_mem->mm_node = NULL; 269 new_mem->mm_node = NULL;
273 ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
274 270
275 if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) { 271 if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
276 ttm_tt_unbind(ttm); 272 ttm_tt_unbind(ttm);
@@ -537,7 +533,6 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
537 struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; 533 struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
538 struct ttm_mem_reg *old_mem = &bo->mem; 534 struct ttm_mem_reg *old_mem = &bo->mem;
539 int ret; 535 int ret;
540 uint32_t save_flags = old_mem->placement;
541 struct ttm_buffer_object *ghost_obj; 536 struct ttm_buffer_object *ghost_obj;
542 void *tmp_obj = NULL; 537 void *tmp_obj = NULL;
543 538
@@ -598,7 +593,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
598 593
599 *old_mem = *new_mem; 594 *old_mem = *new_mem;
600 new_mem->mm_node = NULL; 595 new_mem->mm_node = NULL;
601 ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE); 596
602 return 0; 597 return 0;
603} 598}
604EXPORT_SYMBOL(ttm_bo_move_accel_cleanup); 599EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 1099abac824b..75e9d6f86ba4 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -109,8 +109,8 @@ struct ttm_ref_object {
109 struct drm_hash_item hash; 109 struct drm_hash_item hash;
110 struct list_head head; 110 struct list_head head;
111 struct kref kref; 111 struct kref kref;
112 struct ttm_base_object *obj;
113 enum ttm_ref_type ref_type; 112 enum ttm_ref_type ref_type;
113 struct ttm_base_object *obj;
114 struct ttm_object_file *tfile; 114 struct ttm_object_file *tfile;
115}; 115};
116 116
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 9c2b1cc5dba5..3d47a2c12322 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -196,23 +196,34 @@ EXPORT_SYMBOL(ttm_tt_populate);
196 196
197#ifdef CONFIG_X86 197#ifdef CONFIG_X86
198static inline int ttm_tt_set_page_caching(struct page *p, 198static inline int ttm_tt_set_page_caching(struct page *p,
199 enum ttm_caching_state c_state) 199 enum ttm_caching_state c_old,
200 enum ttm_caching_state c_new)
200{ 201{
202 int ret = 0;
203
201 if (PageHighMem(p)) 204 if (PageHighMem(p))
202 return 0; 205 return 0;
203 206
204 switch (c_state) { 207 if (c_old != tt_cached) {
205 case tt_cached: 208 /* p isn't in the default caching state, set it to
206 return set_pages_wb(p, 1); 209 * writeback first to free its current memtype. */
207 case tt_wc: 210
208 return set_memory_wc((unsigned long) page_address(p), 1); 211 ret = set_pages_wb(p, 1);
209 default: 212 if (ret)
210 return set_pages_uc(p, 1); 213 return ret;
211 } 214 }
215
216 if (c_new == tt_wc)
217 ret = set_memory_wc((unsigned long) page_address(p), 1);
218 else if (c_new == tt_uncached)
219 ret = set_pages_uc(p, 1);
220
221 return ret;
212} 222}
213#else /* CONFIG_X86 */ 223#else /* CONFIG_X86 */
214static inline int ttm_tt_set_page_caching(struct page *p, 224static inline int ttm_tt_set_page_caching(struct page *p,
215 enum ttm_caching_state c_state) 225 enum ttm_caching_state c_old,
226 enum ttm_caching_state c_new)
216{ 227{
217 return 0; 228 return 0;
218} 229}
@@ -245,7 +256,9 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
245 for (i = 0; i < ttm->num_pages; ++i) { 256 for (i = 0; i < ttm->num_pages; ++i) {
246 cur_page = ttm->pages[i]; 257 cur_page = ttm->pages[i];
247 if (likely(cur_page != NULL)) { 258 if (likely(cur_page != NULL)) {
248 ret = ttm_tt_set_page_caching(cur_page, c_state); 259 ret = ttm_tt_set_page_caching(cur_page,
260 ttm->caching_state,
261 c_state);
249 if (unlikely(ret != 0)) 262 if (unlikely(ret != 0))
250 goto out_err; 263 goto out_err;
251 } 264 }
@@ -259,7 +272,7 @@ out_err:
259 for (j = 0; j < i; ++j) { 272 for (j = 0; j < i; ++j) {
260 cur_page = ttm->pages[j]; 273 cur_page = ttm->pages[j];
261 if (likely(cur_page != NULL)) { 274 if (likely(cur_page != NULL)) {
262 (void)ttm_tt_set_page_caching(cur_page, 275 (void)ttm_tt_set_page_caching(cur_page, c_state,
263 ttm->caching_state); 276 ttm->caching_state);
264 } 277 }
265 } 278 }
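ttm_tt_set_page_caching now takes both the old and the new caching state: a page that is not currently in the default write-back state is first returned to write-back and only then switched to the requested attribute. A sketch of that two-step transition with the attribute setters stubbed out:

#include <stdio.h>

enum caching { TT_CACHED, TT_WC, TT_UNCACHED };

static int set_wb(void) { puts("  -> write-back");     return 0; }
static int set_wc(void) { puts("  -> write-combined"); return 0; }
static int set_uc(void) { puts("  -> uncached");       return 0; }

static int set_page_caching(enum caching old, enum caching new_state)
{
	int ret = 0;

	if (old != TT_CACHED) {
		/* leave the current non-default memtype before picking a new one */
		ret = set_wb();
		if (ret)
			return ret;
	}
	if (new_state == TT_WC)
		ret = set_wc();
	else if (new_state == TT_UNCACHED)
		ret = set_uc();
	return ret;
}

int main(void)
{
	puts("wc -> uncached:");
	return set_page_caching(TT_WC, TT_UNCACHED);
}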
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index dedd121d8fe7..0c9c0811f42d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -209,6 +209,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
209{ 209{
210 struct vmw_private *dev_priv; 210 struct vmw_private *dev_priv;
211 int ret; 211 int ret;
212 uint32_t svga_id;
212 213
213 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); 214 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
214 if (unlikely(dev_priv == NULL)) { 215 if (unlikely(dev_priv == NULL)) {
@@ -239,6 +240,16 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
239 dev_priv->mmio_start = pci_resource_start(dev->pdev, 2); 240 dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
240 241
241 mutex_lock(&dev_priv->hw_mutex); 242 mutex_lock(&dev_priv->hw_mutex);
243
244 vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
245 svga_id = vmw_read(dev_priv, SVGA_REG_ID);
246 if (svga_id != SVGA_ID_2) {
247 ret = -ENOSYS;
248 DRM_ERROR("Unsuported SVGA ID 0x%x\n", svga_id);
249 mutex_unlock(&dev_priv->hw_mutex);
250 goto out_err0;
251 }
252
242 dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES); 253 dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
243 254
244 if (dev_priv->capabilities & SVGA_CAP_GMR) { 255 if (dev_priv->capabilities & SVGA_CAP_GMR) {
@@ -337,26 +348,25 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
337 */ 348 */
338 349
339 DRM_INFO("It appears like vesafb is loaded. " 350 DRM_INFO("It appears like vesafb is loaded. "
340 "Ignore above error if any. Entering stealth mode.\n"); 351 "Ignore above error if any.\n");
341 ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe"); 352 ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
342 if (unlikely(ret != 0)) { 353 if (unlikely(ret != 0)) {
343 DRM_ERROR("Failed reserving the SVGA MMIO resource.\n"); 354 DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
344 goto out_no_device; 355 goto out_no_device;
345 } 356 }
346 vmw_kms_init(dev_priv);
347 vmw_overlay_init(dev_priv);
348 } else {
349 ret = vmw_request_device(dev_priv);
350 if (unlikely(ret != 0))
351 goto out_no_device;
352 vmw_kms_init(dev_priv);
353 vmw_overlay_init(dev_priv);
354 vmw_fb_init(dev_priv);
355 } 357 }
358 ret = vmw_request_device(dev_priv);
359 if (unlikely(ret != 0))
360 goto out_no_device;
361 vmw_kms_init(dev_priv);
362 vmw_overlay_init(dev_priv);
363 vmw_fb_init(dev_priv);
356 364
357 dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; 365 dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
358 register_pm_notifier(&dev_priv->pm_nb); 366 register_pm_notifier(&dev_priv->pm_nb);
359 367
368 DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? "Have 3D\n" : "No 3D\n");
369
360 return 0; 370 return 0;
361 371
362out_no_device: 372out_no_device:
@@ -393,17 +403,15 @@ static int vmw_driver_unload(struct drm_device *dev)
393 403
394 unregister_pm_notifier(&dev_priv->pm_nb); 404 unregister_pm_notifier(&dev_priv->pm_nb);
395 405
396 if (!dev_priv->stealth) { 406 vmw_fb_close(dev_priv);
397 vmw_fb_close(dev_priv); 407 vmw_kms_close(dev_priv);
398 vmw_kms_close(dev_priv); 408 vmw_overlay_close(dev_priv);
399 vmw_overlay_close(dev_priv); 409 vmw_release_device(dev_priv);
400 vmw_release_device(dev_priv); 410 if (dev_priv->stealth)
401 pci_release_regions(dev->pdev);
402 } else {
403 vmw_kms_close(dev_priv);
404 vmw_overlay_close(dev_priv);
405 pci_release_region(dev->pdev, 2); 411 pci_release_region(dev->pdev, 2);
406 } 412 else
413 pci_release_regions(dev->pdev);
414
407 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) 415 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
408 drm_irq_uninstall(dev_priv->dev); 416 drm_irq_uninstall(dev_priv->dev);
409 if (dev->devname == vmw_devname) 417 if (dev->devname == vmw_devname)
@@ -572,11 +580,6 @@ static int vmw_master_set(struct drm_device *dev,
572 int ret = 0; 580 int ret = 0;
573 581
574 DRM_INFO("Master set.\n"); 582 DRM_INFO("Master set.\n");
575 if (dev_priv->stealth) {
576 ret = vmw_request_device(dev_priv);
577 if (unlikely(ret != 0))
578 return ret;
579 }
580 583
581 if (active) { 584 if (active) {
582 BUG_ON(active != &dev_priv->fbdev_master); 585 BUG_ON(active != &dev_priv->fbdev_master);
@@ -636,18 +639,11 @@ static void vmw_master_drop(struct drm_device *dev,
636 639
637 ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); 640 ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
638 641
639 if (dev_priv->stealth) {
640 ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
641 if (unlikely(ret != 0))
642 DRM_ERROR("Unable to clean VRAM on master drop.\n");
643 vmw_release_device(dev_priv);
644 }
645 dev_priv->active_master = &dev_priv->fbdev_master; 642 dev_priv->active_master = &dev_priv->fbdev_master;
646 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); 643 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
647 ttm_vt_unlock(&dev_priv->fbdev_master.lock); 644 ttm_vt_unlock(&dev_priv->fbdev_master.lock);
648 645
649 if (!dev_priv->stealth) 646 vmw_fb_on(dev_priv);
650 vmw_fb_on(dev_priv);
651} 647}
652 648
653 649
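vmw_driver_load now starts with an SVGA version handshake: write the ID the driver needs, read the register back, and refuse to load if the virtual hardware did not accept it. A standalone sketch with a fake register (the numeric ID value below is an illustrative stand-in; the real constant comes from the SVGA headers):

#include <stdint.h>
#include <stdio.h>

#define WANTED_SVGA_ID 0x90000002u   /* stand-in for SVGA_ID_2 */

static uint32_t id_reg;
static void     reg_write(uint32_t v) { id_reg = (v == WANTED_SVGA_ID) ? v : 0; }
static uint32_t reg_read(void)        { return id_reg; }

static int negotiate_svga_id(void)
{
	reg_write(WANTED_SVGA_ID);            /* propose the protocol version we need */
	if (reg_read() != WANTED_SVGA_ID) {   /* device did not acknowledge it */
		fprintf(stderr, "Unsupported SVGA ID 0x%x\n", reg_read());
		return -1;                    /* -ENOSYS in the driver */
	}
	return 0;
}

int main(void)
{
	printf("handshake: %d\n", negotiate_svga_id());   /* 0 */
	return 0;
}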
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 50529a7f06fb..356dc935ec13 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -39,10 +39,10 @@
39#include "ttm/ttm_execbuf_util.h" 39#include "ttm/ttm_execbuf_util.h"
40#include "ttm/ttm_module.h" 40#include "ttm/ttm_module.h"
41 41
42#define VMWGFX_DRIVER_DATE "20090724" 42#define VMWGFX_DRIVER_DATE "20100209"
43#define VMWGFX_DRIVER_MAJOR 0 43#define VMWGFX_DRIVER_MAJOR 1
44#define VMWGFX_DRIVER_MINOR 1 44#define VMWGFX_DRIVER_MINOR 0
45#define VMWGFX_DRIVER_PATCHLEVEL 2 45#define VMWGFX_DRIVER_PATCHLEVEL 0
46#define VMWGFX_FILE_PAGE_OFFSET 0x00100000 46#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
47#define VMWGFX_FIFO_STATIC_SIZE (1024*1024) 47#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
48#define VMWGFX_MAX_RELOCATIONS 2048 48#define VMWGFX_MAX_RELOCATIONS 2048
@@ -96,6 +96,8 @@ struct vmw_surface {
96 struct drm_vmw_size *sizes; 96 struct drm_vmw_size *sizes;
97 uint32_t num_sizes; 97 uint32_t num_sizes;
98 98
99 bool scanout;
100
99 /* TODO so far just a extra pointer */ 101 /* TODO so far just a extra pointer */
100 struct vmw_cursor_snooper snooper; 102 struct vmw_cursor_snooper snooper;
101}; 103};
@@ -111,6 +113,7 @@ struct vmw_fifo_state {
111 unsigned long static_buffer_size; 113 unsigned long static_buffer_size;
112 bool using_bounce_buffer; 114 bool using_bounce_buffer;
113 uint32_t capabilities; 115 uint32_t capabilities;
116 struct mutex fifo_mutex;
114 struct rw_semaphore rwsem; 117 struct rw_semaphore rwsem;
115}; 118};
116 119
@@ -211,7 +214,7 @@ struct vmw_private {
211 * Fencing and IRQs. 214 * Fencing and IRQs.
212 */ 215 */
213 216
214 uint32_t fence_seq; 217 atomic_t fence_seq;
215 wait_queue_head_t fence_queue; 218 wait_queue_head_t fence_queue;
216 wait_queue_head_t fifo_queue; 219 wait_queue_head_t fifo_queue;
217 atomic_t fence_queue_waiters; 220 atomic_t fence_queue_waiters;
@@ -389,6 +392,7 @@ extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
389 uint32_t *sequence); 392 uint32_t *sequence);
390extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason); 393extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
391extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma); 394extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma);
395extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
392 396
393/** 397/**
394 * TTM glue - vmwgfx_ttm_glue.c 398 * TTM glue - vmwgfx_ttm_glue.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index d69caf92ffe7..0897359b3e4e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -182,25 +182,19 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
182 return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid); 182 return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
183} 183}
184 184
185static int vmw_cmd_dma(struct vmw_private *dev_priv, 185static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
186 struct vmw_sw_context *sw_context, 186 struct vmw_sw_context *sw_context,
187 SVGA3dCmdHeader *header) 187 SVGAGuestPtr *ptr,
188 struct vmw_dma_buffer **vmw_bo_p)
188{ 189{
189 uint32_t handle;
190 struct vmw_dma_buffer *vmw_bo = NULL; 190 struct vmw_dma_buffer *vmw_bo = NULL;
191 struct ttm_buffer_object *bo; 191 struct ttm_buffer_object *bo;
192 struct vmw_surface *srf = NULL; 192 uint32_t handle = ptr->gmrId;
193 struct vmw_dma_cmd {
194 SVGA3dCmdHeader header;
195 SVGA3dCmdSurfaceDMA dma;
196 } *cmd;
197 struct vmw_relocation *reloc; 193 struct vmw_relocation *reloc;
198 int ret;
199 uint32_t cur_validate_node; 194 uint32_t cur_validate_node;
200 struct ttm_validate_buffer *val_buf; 195 struct ttm_validate_buffer *val_buf;
196 int ret;
201 197
202 cmd = container_of(header, struct vmw_dma_cmd, header);
203 handle = cmd->dma.guest.ptr.gmrId;
204 ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); 198 ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
205 if (unlikely(ret != 0)) { 199 if (unlikely(ret != 0)) {
206 DRM_ERROR("Could not find or use GMR region.\n"); 200 DRM_ERROR("Could not find or use GMR region.\n");
@@ -209,14 +203,14 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
209 bo = &vmw_bo->base; 203 bo = &vmw_bo->base;
210 204
211 if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { 205 if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
212 DRM_ERROR("Max number of DMA commands per submission" 206 DRM_ERROR("Max number relocations per submission"
213 " exceeded\n"); 207 " exceeded\n");
214 ret = -EINVAL; 208 ret = -EINVAL;
215 goto out_no_reloc; 209 goto out_no_reloc;
216 } 210 }
217 211
218 reloc = &sw_context->relocs[sw_context->cur_reloc++]; 212 reloc = &sw_context->relocs[sw_context->cur_reloc++];
219 reloc->location = &cmd->dma.guest.ptr; 213 reloc->location = ptr;
220 214
221 cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf); 215 cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
222 if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) { 216 if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
@@ -234,7 +228,89 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
                 list_add_tail(&val_buf->head, &sw_context->validate_nodes);
                 ++sw_context->cur_val_buf;
         }
+        *vmw_bo_p = vmw_bo;
+        return 0;
+
+out_no_reloc:
+        vmw_dmabuf_unreference(&vmw_bo);
+        vmw_bo_p = NULL;
+        return ret;
+}
+
+static int vmw_cmd_end_query(struct vmw_private *dev_priv,
+                             struct vmw_sw_context *sw_context,
+                             SVGA3dCmdHeader *header)
+{
+        struct vmw_dma_buffer *vmw_bo;
+        struct vmw_query_cmd {
+                SVGA3dCmdHeader header;
+                SVGA3dCmdEndQuery q;
+        } *cmd;
+        int ret;
+
+        cmd = container_of(header, struct vmw_query_cmd, header);
+        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+        if (unlikely(ret != 0))
+                return ret;
+
+        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+                                      &cmd->q.guestResult,
+                                      &vmw_bo);
+        if (unlikely(ret != 0))
+                return ret;
+
+        vmw_dmabuf_unreference(&vmw_bo);
+        return 0;
+}
 
+static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
+                              struct vmw_sw_context *sw_context,
+                              SVGA3dCmdHeader *header)
+{
+        struct vmw_dma_buffer *vmw_bo;
+        struct vmw_query_cmd {
+                SVGA3dCmdHeader header;
+                SVGA3dCmdWaitForQuery q;
+        } *cmd;
+        int ret;
+
+        cmd = container_of(header, struct vmw_query_cmd, header);
+        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+        if (unlikely(ret != 0))
+                return ret;
+
+        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+                                      &cmd->q.guestResult,
+                                      &vmw_bo);
+        if (unlikely(ret != 0))
+                return ret;
+
+        vmw_dmabuf_unreference(&vmw_bo);
+        return 0;
+}
+
+
+static int vmw_cmd_dma(struct vmw_private *dev_priv,
+                       struct vmw_sw_context *sw_context,
+                       SVGA3dCmdHeader *header)
+{
+        struct vmw_dma_buffer *vmw_bo = NULL;
+        struct ttm_buffer_object *bo;
+        struct vmw_surface *srf = NULL;
+        struct vmw_dma_cmd {
+                SVGA3dCmdHeader header;
+                SVGA3dCmdSurfaceDMA dma;
+        } *cmd;
+        int ret;
+
+        cmd = container_of(header, struct vmw_dma_cmd, header);
+        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+                                      &cmd->dma.guest.ptr,
+                                      &vmw_bo);
+        if (unlikely(ret != 0))
+                return ret;
+
+        bo = &vmw_bo->base;
         ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
                                              cmd->dma.host.sid, &srf);
         if (ret) {
@@ -379,8 +455,8 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
         VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
         VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
         VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
-        VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_cid_check),
-        VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_cid_check),
+        VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
+        VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
         VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
         VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
                     &vmw_cmd_blt_surf_screen_check)
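
The hunks above factor the GMR-handle lookup, relocation recording and validation-list bookkeeping out of vmw_cmd_dma so that the new END_QUERY/WAIT_FOR_QUERY verifiers can reuse it. A minimal, compilable userspace sketch of that pattern follows; the types and helpers (verifier_ctx, lookup_buffer, translate_guest_ptr) are hypothetical stand-ins, not the driver's own API.

/* Toy model of the shared guest-pointer translation step: resolve the
 * handle to a buffer, remember where in the command stream the real
 * address must be patched (a relocation), and record the buffer for
 * later validation. */
#include <stdint.h>
#include <stdio.h>

#define MAX_RELOCS 64

struct guest_ptr { uint32_t handle; uint32_t offset; };
struct reloc     { struct guest_ptr *location; int buffer_id; };

struct verifier_ctx {
        struct reloc relocs[MAX_RELOCS];
        unsigned int num_relocs;
};

/* Stand-in for the driver's handle -> buffer object lookup. */
static int lookup_buffer(uint32_t handle, int *buffer_id)
{
        if (handle == 0)
                return -1;              /* unknown handle */
        *buffer_id = (int)handle;       /* pretend the handle is the id */
        return 0;
}

/* Shared helper used by every command that carries a guest pointer. */
static int translate_guest_ptr(struct verifier_ctx *ctx,
                               struct guest_ptr *ptr, int *buffer_id)
{
        int ret = lookup_buffer(ptr->handle, buffer_id);

        if (ret != 0)
                return ret;
        if (ctx->num_relocs >= MAX_RELOCS)
                return -1;              /* relocation table full */

        ctx->relocs[ctx->num_relocs].location = ptr;
        ctx->relocs[ctx->num_relocs].buffer_id = *buffer_id;
        ctx->num_relocs++;
        return 0;
}

int main(void)
{
        struct verifier_ctx ctx = { .num_relocs = 0 };
        struct guest_ptr dma_src = { .handle = 3, .offset = 0x1000 };
        int id;

        if (translate_guest_ptr(&ctx, &dma_src, &id) == 0)
                printf("recorded reloc %u for buffer %d\n", ctx.num_relocs, id);
        return 0;
}
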
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 4f4f6432be8b..a93367041cdc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -559,6 +559,9 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
         info->pixmap.scan_align = 1;
 #endif
 
+        info->aperture_base = vmw_priv->vram_start;
+        info->aperture_size = vmw_priv->vram_size;
+
         /*
          * Dirty & Deferred IO
          */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index f7d5f70b52dd..39d43a01d846 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -29,6 +29,25 @@
 #include "drmP.h"
 #include "ttm/ttm_placement.h"
 
+bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
+{
+        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+        uint32_t fifo_min, hwversion;
+
+        fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN);
+        if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
+                return false;
+
+        hwversion = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
+        if (hwversion == 0)
+                return false;
+
+        if (hwversion < SVGA3D_HWVERSION_WS65_B1)
+                return false;
+
+        return true;
+}
+
 int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 {
         __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
@@ -55,6 +74,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
         fifo->reserved_size = 0;
         fifo->using_bounce_buffer = false;
 
+        mutex_init(&fifo->fifo_mutex);
         init_rwsem(&fifo->rwsem);
 
         /*
@@ -98,7 +118,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
                  (unsigned int) min,
                  (unsigned int) fifo->capabilities);
 
-        dev_priv->fence_seq = dev_priv->last_read_sequence;
+        atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
         iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
 
         return vmw_fifo_send_fence(dev_priv, &dummy);
@@ -264,7 +284,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
         uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
         int ret;
 
-        down_write(&fifo_state->rwsem);
+        mutex_lock(&fifo_state->fifo_mutex);
         max = ioread32(fifo_mem + SVGA_FIFO_MAX);
         min = ioread32(fifo_mem + SVGA_FIFO_MIN);
         next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
@@ -332,7 +352,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
         }
 out_err:
         fifo_state->reserved_size = 0;
-        up_write(&fifo_state->rwsem);
+        mutex_unlock(&fifo_state->fifo_mutex);
         return NULL;
 }
 
@@ -407,6 +427,7 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
 
         }
 
+        down_write(&fifo_state->rwsem);
         if (fifo_state->using_bounce_buffer || reserveable) {
                 next_cmd += bytes;
                 if (next_cmd >= max)
@@ -418,8 +439,9 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
         if (reserveable)
                 iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
         mb();
-        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
         up_write(&fifo_state->rwsem);
+        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+        mutex_unlock(&fifo_state->fifo_mutex);
 }
 
 int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
@@ -432,9 +454,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
 
         fm = vmw_fifo_reserve(dev_priv, bytes);
         if (unlikely(fm == NULL)) {
-                down_write(&fifo_state->rwsem);
-                *sequence = dev_priv->fence_seq;
-                up_write(&fifo_state->rwsem);
+                *sequence = atomic_read(&dev_priv->fence_seq);
                 ret = -ENOMEM;
                 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
                                         false, 3*HZ);
@@ -442,7 +462,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
         }
 
         do {
-                *sequence = dev_priv->fence_seq++;
+                *sequence = atomic_add_return(1, &dev_priv->fence_seq);
         } while (*sequence == 0);
 
         if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
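
Converting fence_seq to an atomic_t above lets vmw_fifo_send_fence hand out sequence numbers without taking the FIFO rwsem, and the do/while keeps 0 out of circulation since 0 is treated as "no fence". A compilable userspace analogue of that allocation loop, using C11 atomics instead of the kernel's atomic_t (all names here are made up):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Global sequence counter, analogous to dev_priv->fence_seq. */
static atomic_uint fence_seq;

/* Hand out the next fence sequence number. The increment is atomic, so no
 * external lock is needed, and 0 is skipped because it is reserved to mean
 * "no fence". */
static uint32_t next_fence_seq(void)
{
        uint32_t seq;

        do {
                seq = atomic_fetch_add(&fence_seq, 1) + 1;
        } while (seq == 0);

        return seq;
}

int main(void)
{
        atomic_init(&fence_seq, 0xfffffffeu);   /* start near the wrap point */

        for (int i = 0; i < 4; i++)
                printf("fence %u\n", next_fence_seq());
        return 0;
}
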
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 5fa6a4ed238a..1c7a316454d8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -43,11 +43,17 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
                 param->value = vmw_overlay_num_free_overlays(dev_priv);
                 break;
         case DRM_VMW_PARAM_3D:
-                param->value = dev_priv->capabilities & SVGA_CAP_3D ? 1 : 0;
+                param->value = vmw_fifo_have_3d(dev_priv) ? 1 : 0;
                 break;
         case DRM_VMW_PARAM_FIFO_OFFSET:
                 param->value = dev_priv->mmio_start;
                 break;
+        case DRM_VMW_PARAM_HW_CAPS:
+                param->value = dev_priv->capabilities;
+                break;
+        case DRM_VMW_PARAM_FIFO_CAPS:
+                param->value = dev_priv->fifo.capabilities;
+                break;
         default:
                 DRM_ERROR("Illegal vmwgfx get param request: %d\n",
                           param->param);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index d40086fc8647..4d7cb5393860 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -85,19 +85,12 @@ bool vmw_fence_signaled(struct vmw_private *dev_priv,
                 return true;
 
         /**
-         * Below is to signal stale fences that have wrapped.
-         * First, block fence submission.
-         */
-
-        down_read(&fifo_state->rwsem);
-
-        /**
          * Then check if the sequence is higher than what we've actually
          * emitted. Then the fence is stale and signaled.
          */
 
-        ret = ((dev_priv->fence_seq - sequence) > VMW_FENCE_WRAP);
-        up_read(&fifo_state->rwsem);
+        ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
+               > VMW_FENCE_WRAP);
 
         return ret;
 }
@@ -127,7 +120,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 
         if (fifo_idle)
                 down_read(&fifo_state->rwsem);
-        signal_seq = dev_priv->fence_seq;
+        signal_seq = atomic_read(&dev_priv->fence_seq);
         ret = 0;
 
         for (;;) {
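
With the rwsem dropped from vmw_fence_signaled above, stale fences are detected purely by unsigned wrap-around arithmetic: if the current sequence minus the fence's sequence exceeds VMW_FENCE_WRAP, the fence was emitted so long ago that it must already have signaled. A small self-contained illustration of that test; the FENCE_WRAP value below is a hypothetical stand-in for the driver's constant:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FENCE_WRAP (1u << 24)   /* hypothetical wrap window */

/* Unsigned subtraction is defined modulo 2^32, so the test also works after
 * the sequence counter wraps past the fence value. */
static bool fence_stale(uint32_t current_seq, uint32_t fence)
{
        return (current_seq - fence) > FENCE_WRAP;
}

int main(void)
{
        printf("%d\n", fence_stale(100, 90));          /* 0: recent fence    */
        printf("%d\n", fence_stale(5, 0xfffffff0u));   /* 0: counter wrapped */
        printf("%d\n", fence_stale(0x80000000u, 10));  /* 1: ancient fence   */
        return 0;
}
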
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 686692de209a..31f9afed0a63 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -707,6 +707,9 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
         if (ret)
                 goto try_dmabuf;
 
+        if (!surface->scanout)
+                goto err_not_scanout;
+
         ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
                                               mode_cmd->width, mode_cmd->height);
 
@@ -740,6 +743,13 @@ try_dmabuf:
         }
 
         return &vfb->base;
+
+err_not_scanout:
+        DRM_ERROR("surface not marked as scanout\n");
+        /* vmw_user_surface_lookup takes one ref */
+        vmw_surface_unreference(&surface);
+
+        return NULL;
 }
 
 static int vmw_kms_fb_changed(struct drm_device *dev)
@@ -759,10 +769,10 @@ int vmw_kms_init(struct vmw_private *dev_priv)
 
         drm_mode_config_init(dev);
         dev->mode_config.funcs = &vmw_kms_funcs;
-        dev->mode_config.min_width = 640;
-        dev->mode_config.min_height = 480;
-        dev->mode_config.max_width = 2048;
-        dev->mode_config.max_height = 2048;
+        dev->mode_config.min_width = 1;
+        dev->mode_config.min_height = 1;
+        dev->mode_config.max_width = dev_priv->fb_max_width;
+        dev->mode_config.max_height = dev_priv->fb_max_height;
 
         ret = vmw_kms_init_legacy_display_system(dev_priv);
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index e01db120efff..f8fbbc67a406 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -574,6 +574,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 
         srf->flags = req->flags;
         srf->format = req->format;
+        srf->scanout = req->scanout;
         memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
         srf->num_sizes = 0;
         for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
@@ -599,8 +600,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
         if (unlikely(ret != 0))
                 goto out_err1;
 
-
-        if (srf->flags & (1 << 9) &&
+        if (srf->scanout &&
             srf->num_sizes == 1 &&
             srf->sizes[0].width == 64 &&
             srf->sizes[0].height == 64 &&