Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r-- drivers/gpu/drm/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/ati_pcigart.c | 2
-rw-r--r-- drivers/gpu/drm/drm_edid.c | 3
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 2
-rw-r--r-- drivers/gpu/drm/drm_gem.c | 13
-rw-r--r-- drivers/gpu/drm/drm_mm.c | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_dma.c | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 170
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 13
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 92
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 42
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_crt.c | 3
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 276
-rw-r--r-- drivers/gpu/drm/i915/intel_fb.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_lvds.c | 18
-rw-r--r-- drivers/gpu/drm/i915/intel_sdvo.c | 8
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_acpi.c | 12
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bios.c | 206
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bios.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bo.c | 12
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_channel.c | 7
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_connector.c | 36
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_dma.c | 76
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_dp.c | 8
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drv.c | 14
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drv.h | 5
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fbcon.c | 40
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fbcon.h | 6
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_gem.c | 22
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_grctx.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_irq.c | 162
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_mem.c | 15
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_notifier.c | 13
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_object.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_reg.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_sgdma.c | 7
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_state.c | 50
-rw-r--r-- drivers/gpu/drm/nouveau/nv04_fbcon.c | 9
-rw-r--r-- drivers/gpu/drm/nouveau/nv04_instmem.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_crtc.c | 33
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_fbcon.c | 9
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_fifo.c | 11
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_graph.c | 13
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_sor.c | 14
-rw-r--r-- drivers/gpu/drm/radeon/Kconfig | 12
-rw-r--r-- drivers/gpu/drm/radeon/atom.c | 107
-rw-r--r-- drivers/gpu/drm/radeon/atom.h | 1
-rw-r--r-- drivers/gpu/drm/radeon/atombios_crtc.c | 259
-rw-r--r-- drivers/gpu/drm/radeon/atombios_dp.c | 37
-rw-r--r-- drivers/gpu/drm/radeon/r100.c | 19
-rw-r--r-- drivers/gpu/drm/radeon/r200.c | 7
-rw-r--r-- drivers/gpu/drm/radeon/r300.c | 16
-rw-r--r-- drivers/gpu/drm/radeon/r420.c | 7
-rw-r--r-- drivers/gpu/drm/radeon/r520.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/r600.c | 180
-rw-r--r-- drivers/gpu/drm/radeon/r600_audio.c | 5
-rw-r--r-- drivers/gpu/drm/radeon/r600_blit_kms.c | 21
-rw-r--r-- drivers/gpu/drm/radeon/r600_cs.c | 83
-rw-r--r-- drivers/gpu/drm/radeon/r600d.h | 25
-rw-r--r-- drivers/gpu/drm/radeon/radeon.h | 20
-rw-r--r-- drivers/gpu/drm/radeon/radeon_agp.c | 11
-rw-r--r-- drivers/gpu/drm/radeon/radeon_asic.h | 11
-rw-r--r-- drivers/gpu/drm/radeon/radeon_atombios.c | 9
-rw-r--r-- drivers/gpu/drm/radeon/radeon_benchmark.c | 55
-rw-r--r-- drivers/gpu/drm/radeon/radeon_clocks.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_combios.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/radeon_connectors.c | 22
-rw-r--r-- drivers/gpu/drm/radeon/radeon_cs.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/radeon_device.c | 1
-rw-r--r-- drivers/gpu/drm/radeon/radeon_display.c | 56
-rw-r--r-- drivers/gpu/drm/radeon/radeon_encoders.c | 165
-rw-r--r-- drivers/gpu/drm/radeon/radeon_fb.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_gem.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 77
-rw-r--r-- drivers/gpu/drm/radeon/radeon_mode.h | 30
-rw-r--r-- drivers/gpu/drm/radeon/radeon_object.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/reg_srcs/r200 | 2
-rw-r--r-- drivers/gpu/drm/radeon/rs400.c | 28
-rw-r--r-- drivers/gpu/drm/radeon/rs600.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/rs690.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/rv515.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/rv770.c | 76
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo.c | 75
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo_util.c | 9
-rw-r--r-- drivers/gpu/drm/ttm/ttm_lock.c | 2
-rw-r--r-- drivers/gpu/drm/ttm/ttm_object.c | 2
-rw-r--r-- drivers/gpu/drm/ttm/ttm_tt.c | 23
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 25
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 76
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 18
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 19
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 8
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 37
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 8
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 13
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 30
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 9
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 64
100 files changed, 2104 insertions(+), 1133 deletions(-)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 96eddd17e050..305c59003963 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -66,6 +66,8 @@ config DRM_RADEON
 
 	  If M is selected, the module will be called radeon.
 
+source "drivers/gpu/drm/radeon/Kconfig"
+
 config DRM_I810
 	tristate "Intel I810"
 	depends on DRM && AGP && AGP_INTEL
diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c
index a1fce68e3bbe..17be051b7aa3 100644
--- a/drivers/gpu/drm/ati_pcigart.c
+++ b/drivers/gpu/drm/ati_pcigart.c
@@ -113,7 +113,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
 
 	if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) {
 		DRM_ERROR("fail to set dma mask to 0x%Lx\n",
-			  gart_info->table_mask);
+			  (unsigned long long)gart_info->table_mask);
 		ret = 1;
 		goto done;
 	}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index defcaf108460..f665b05592f3 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -633,8 +633,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
 		return NULL;
 	}
 	if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
-		printk(KERN_WARNING "integrated sync not supported\n");
-		return NULL;
+		printk(KERN_WARNING "composite sync not supported\n");
 	}
 
 	/* it is incorrect if hsync/vsync width is zero */
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 1c2b7d44ec05..0f9e90552dc4 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -389,7 +389,7 @@ int drm_fb_helper_blank(int blank, struct fb_info *info)
 		break;
 	/* Display: Off; HSync: On, VSync: On */
 	case FB_BLANK_NORMAL:
-		drm_fb_helper_off(info, DRM_MODE_DPMS_ON);
+		drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
 		break;
 	/* Display: Off; HSync: Off, VSync: On */
 	case FB_BLANK_HSYNC_SUSPEND:
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index e9dbb481c469..8bf3770f294e 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -142,19 +142,6 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
 	if (IS_ERR(obj->filp))
 		goto free;
 
-	/* Basically we want to disable the OOM killer and handle ENOMEM
-	 * ourselves by sacrificing pages from cached buffers.
-	 * XXX shmem_file_[gs]et_gfp_mask()
-	 */
-	mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping,
-			     GFP_HIGHUSER |
-			     __GFP_COLD |
-			     __GFP_FS |
-			     __GFP_RECLAIMABLE |
-			     __GFP_NORETRY |
-			     __GFP_NOWARN |
-			     __GFP_NOMEMALLOC);
-
 	kref_init(&obj->refcount);
 	kref_init(&obj->handlecount);
 	obj->size = size;
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index cdec32977129..2ac074c8f5d2 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -405,7 +405,8 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
 			wasted += alignment - tmp;
 	}
 
-	if (entry->size >= size + wasted) {
+	if (entry->size >= size + wasted &&
+	    (entry->start + wasted + size) <= end) {
 		if (!best_match)
 			return entry;
 		if (entry->size < best_size) {
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 9c9998c4dceb..a894ade03093 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -290,7 +290,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
 	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
 		obj = obj_priv->obj;
 		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
-			ret = i915_gem_object_get_pages(obj);
+			ret = i915_gem_object_get_pages(obj, 0);
 			if (ret) {
 				DRM_ERROR("Failed to get pages: %d\n", ret);
 				spin_unlock(&dev_priv->mm.active_list_lock);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index e660ac07f3b2..2307f98349f7 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -735,8 +735,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 	if (cmdbuf->num_cliprects) {
 		cliprects = kcalloc(cmdbuf->num_cliprects,
 				    sizeof(struct drm_clip_rect), GFP_KERNEL);
-		if (cliprects == NULL)
+		if (cliprects == NULL) {
+			ret = -ENOMEM;
 			goto fail_batch_free;
+		}
 
 		ret = copy_from_user(cliprects, cmdbuf->cliprects,
 				     cmdbuf->num_cliprects *
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 46d88965852a..79beffcf5936 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -120,7 +120,7 @@ const static struct intel_device_info intel_gm45_info = {
 
 const static struct intel_device_info intel_pineview_info = {
 	.is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
-	.has_pipe_cxsr = 1,
+	.need_gfx_hws = 1,
 	.has_hotplug = 1,
 };
 
@@ -174,78 +174,100 @@ const static struct pci_device_id pciidlist[] = {
 MODULE_DEVICE_TABLE(pci, pciidlist);
 #endif
 
-static int i915_suspend(struct drm_device *dev, pm_message_t state)
+static int i915_drm_freeze(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (!dev || !dev_priv) {
-		DRM_ERROR("dev: %p, dev_priv: %p\n", dev, dev_priv);
-		DRM_ERROR("DRM not initialized, aborting suspend.\n");
-		return -ENODEV;
-	}
-
-	if (state.event == PM_EVENT_PRETHAW)
-		return 0;
-
 	pci_save_state(dev->pdev);
 
 	/* If KMS is active, we do the leavevt stuff here */
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		if (i915_gem_idle(dev))
+		int error = i915_gem_idle(dev);
+		if (error) {
 			dev_err(&dev->pdev->dev,
-				"GEM idle failed, resume may fail\n");
+				"GEM idle failed, resume might fail\n");
+			return error;
+		}
 		drm_irq_uninstall(dev);
 	}
 
 	i915_save_state(dev);
 
+	return 0;
+}
+
+static void i915_drm_suspend(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
 	intel_opregion_free(dev, 1);
 
+	/* Modeset on resume, not lid events */
+	dev_priv->modeset_on_lid = 0;
+}
+
+static int i915_suspend(struct drm_device *dev, pm_message_t state)
+{
+	int error;
+
+	if (!dev || !dev->dev_private) {
+		DRM_ERROR("dev: %p\n", dev);
+		DRM_ERROR("DRM not initialized, aborting suspend.\n");
+		return -ENODEV;
+	}
+
+	if (state.event == PM_EVENT_PRETHAW)
+		return 0;
+
+	error = i915_drm_freeze(dev);
+	if (error)
+		return error;
+
+	i915_drm_suspend(dev);
+
 	if (state.event == PM_EVENT_SUSPEND) {
 		/* Shut down the device */
 		pci_disable_device(dev->pdev);
 		pci_set_power_state(dev->pdev, PCI_D3hot);
 	}
 
-	/* Modeset on resume, not lid events */
-	dev_priv->modeset_on_lid = 0;
-
 	return 0;
 }
 
-static int i915_resume(struct drm_device *dev)
+static int i915_drm_thaw(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret = 0;
-
-	if (pci_enable_device(dev->pdev))
-		return -1;
-	pci_set_master(dev->pdev);
-
-	i915_restore_state(dev);
-
-	intel_opregion_init(dev, 1);
+	int error = 0;
 
 	/* KMS EnterVT equivalent */
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		mutex_lock(&dev->struct_mutex);
 		dev_priv->mm.suspended = 0;
 
-		ret = i915_gem_init_ringbuffer(dev);
-		if (ret != 0)
-			ret = -1;
+		error = i915_gem_init_ringbuffer(dev);
 		mutex_unlock(&dev->struct_mutex);
 
 		drm_irq_install(dev);
-	}
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+
 		/* Resume the modeset for every activated CRTC */
 		drm_helper_resume_force_mode(dev);
 	}
 
 	dev_priv->modeset_on_lid = 0;
 
-	return ret;
+	return error;
+}
+
+static int i915_resume(struct drm_device *dev)
+{
+	if (pci_enable_device(dev->pdev))
+		return -EIO;
+
+	pci_set_master(dev->pdev);
+
+	i915_restore_state(dev);
+
+	intel_opregion_init(dev, 1);
+
+	return i915_drm_thaw(dev);
 }
 
 /**
@@ -386,57 +408,69 @@ i915_pci_remove(struct pci_dev *pdev)
 	drm_put_dev(dev);
 }
 
-static int
-i915_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int i915_pm_suspend(struct device *dev)
 {
-	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	int error;
 
-	return i915_suspend(dev, state);
-}
+	if (!drm_dev || !drm_dev->dev_private) {
+		dev_err(dev, "DRM not initialized, aborting suspend.\n");
+		return -ENODEV;
+	}
 
-static int
-i915_pci_resume(struct pci_dev *pdev)
-{
-	struct drm_device *dev = pci_get_drvdata(pdev);
+	error = i915_drm_freeze(drm_dev);
+	if (error)
+		return error;
 
-	return i915_resume(dev);
-}
+	i915_drm_suspend(drm_dev);
 
-static int
-i915_pm_suspend(struct device *dev)
-{
-	return i915_pci_suspend(to_pci_dev(dev), PMSG_SUSPEND);
-}
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
 
-static int
-i915_pm_resume(struct device *dev)
-{
-	return i915_pci_resume(to_pci_dev(dev));
+	return 0;
 }
 
-static int
-i915_pm_freeze(struct device *dev)
+static int i915_pm_resume(struct device *dev)
 {
-	return i915_pci_suspend(to_pci_dev(dev), PMSG_FREEZE);
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	return i915_resume(drm_dev);
 }
 
-static int
-i915_pm_thaw(struct device *dev)
+static int i915_pm_freeze(struct device *dev)
 {
-	/* thaw during hibernate, do nothing! */
-	return 0;
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	if (!drm_dev || !drm_dev->dev_private) {
+		dev_err(dev, "DRM not initialized, aborting suspend.\n");
+		return -ENODEV;
+	}
+
+	return i915_drm_freeze(drm_dev);
 }
 
-static int
-i915_pm_poweroff(struct device *dev)
+static int i915_pm_thaw(struct device *dev)
 {
-	return i915_pci_suspend(to_pci_dev(dev), PMSG_HIBERNATE);
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	return i915_drm_thaw(drm_dev);
 }
 
-static int
-i915_pm_restore(struct device *dev)
+static int i915_pm_poweroff(struct device *dev)
 {
-	return i915_pci_resume(to_pci_dev(dev));
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	int error;
+
+	error = i915_drm_freeze(drm_dev);
+	if (!error)
+		i915_drm_suspend(drm_dev);
+
+	return error;
 }
 
 const struct dev_pm_ops i915_pm_ops = {
@@ -445,7 +479,7 @@ const struct dev_pm_ops i915_pm_ops = {
 	.freeze = i915_pm_freeze,
 	.thaw = i915_pm_thaw,
 	.poweroff = i915_pm_poweroff,
-	.restore = i915_pm_restore,
+	.restore = i915_pm_resume,
 };
 
 static struct vm_operations_struct i915_gem_vm_ops = {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2c1669488b5a..b99b6a841d95 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -493,6 +493,15 @@ typedef struct drm_i915_private {
 	struct list_head flushing_list;
 
 	/**
+	 * List of objects currently pending a GPU write flush.
+	 *
+	 * All elements on this list will belong to either the
+	 * active_list or flushing_list, last_rendering_seqno can
+	 * be used to differentiate between the two elements.
+	 */
+	struct list_head gpu_write_list;
+
+	/**
 	 * LRU list of objects which are not in the ringbuffer and
 	 * are ready to unbind, but are still in the GTT.
 	 *
@@ -592,6 +601,8 @@ struct drm_i915_gem_object {
 
 	/** This object's place on the active/flushing/inactive lists */
 	struct list_head list;
+	/** This object's place on GPU write list */
+	struct list_head gpu_write_list;
 
 	/** This object's place on the fenced object LRU */
 	struct list_head fence_list;
@@ -872,7 +883,7 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
 void i915_gem_detach_phys_object(struct drm_device *dev,
 				 struct drm_gem_object *obj);
 void i915_gem_free_all_phys_object(struct drm_device *dev);
-int i915_gem_object_get_pages(struct drm_gem_object *obj);
+int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
 void i915_gem_object_put_pages(struct drm_gem_object *obj);
 void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
 void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0c67924ca80c..ec8a0d7ffa39 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -277,7 +277,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
 
 	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret != 0)
 		goto fail_unlock;
 
@@ -321,40 +321,24 @@ fail_unlock:
 	return ret;
 }
 
-static inline gfp_t
-i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj)
-{
-	return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
-}
-
-static inline void
-i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp)
-{
-	mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
-}
-
 static int
 i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
 {
 	int ret;
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
 
 	/* If we've insufficient memory to map in the pages, attempt
 	 * to make some space by throwing out some old buffers.
 	 */
 	if (ret == -ENOMEM) {
 		struct drm_device *dev = obj->dev;
-		gfp_t gfp;
 
 		ret = i915_gem_evict_something(dev, obj->size);
 		if (ret)
 			return ret;
 
-		gfp = i915_gem_object_get_page_gfp_mask(obj);
-		i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
-		ret = i915_gem_object_get_pages(obj);
-		i915_gem_object_set_page_gfp_mask (obj, gfp);
+		ret = i915_gem_object_get_pages(obj, 0);
 	}
 
 	return ret;
@@ -790,7 +774,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
 
 	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret != 0)
 		goto fail_unlock;
 
@@ -1568,6 +1552,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 	else
 		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
 
+	BUG_ON(!list_empty(&obj_priv->gpu_write_list));
+
 	obj_priv->last_rendering_seqno = 0;
 	if (obj_priv->active) {
 		obj_priv->active = 0;
@@ -1638,7 +1624,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 	struct drm_i915_gem_object *obj_priv, *next;
 
 	list_for_each_entry_safe(obj_priv, next,
-				 &dev_priv->mm.flushing_list, list) {
+				 &dev_priv->mm.gpu_write_list,
+				 gpu_write_list) {
 		struct drm_gem_object *obj = obj_priv->obj;
 
 		if ((obj->write_domain & flush_domains) ==
@@ -1646,6 +1633,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 			uint32_t old_write_domain = obj->write_domain;
 
 			obj->write_domain = 0;
+			list_del_init(&obj_priv->gpu_write_list);
 			i915_gem_object_move_to_active(obj, seqno);
 
 			trace_i915_gem_object_change_domain(obj,
@@ -2100,8 +2088,8 @@ static int
 i915_gem_evict_everything(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	uint32_t seqno;
 	int ret;
+	uint32_t seqno;
 	bool lists_empty;
 
 	spin_lock(&dev_priv->mm.active_list_lock);
@@ -2123,6 +2111,8 @@ i915_gem_evict_everything(struct drm_device *dev)
 	if (ret)
 		return ret;
 
+	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+
 	ret = i915_gem_evict_from_inactive_list(dev);
 	if (ret)
 		return ret;
@@ -2230,7 +2220,8 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
 }
 
 int
-i915_gem_object_get_pages(struct drm_gem_object *obj)
+i915_gem_object_get_pages(struct drm_gem_object *obj,
+			  gfp_t gfpmask)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	int page_count, i;
@@ -2256,7 +2247,10 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
 	inode = obj->filp->f_path.dentry->d_inode;
 	mapping = inode->i_mapping;
 	for (i = 0; i < page_count; i++) {
-		page = read_mapping_page(mapping, i, NULL);
+		page = read_cache_page_gfp(mapping, i,
+					   mapping_gfp_mask (mapping) |
+					   __GFP_COLD |
+					   gfpmask);
 		if (IS_ERR(page)) {
 			ret = PTR_ERR(page);
 			i915_gem_object_put_pages(obj);
@@ -2579,7 +2573,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	struct drm_mm_node *free_space;
-	bool retry_alloc = false;
+	gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
 	int ret;
 
 	if (obj_priv->madv != I915_MADV_WILLNEED) {
@@ -2623,15 +2617,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 	DRM_INFO("Binding object of size %zd at 0x%08x\n",
 		 obj->size, obj_priv->gtt_offset);
 #endif
-	if (retry_alloc) {
-		i915_gem_object_set_page_gfp_mask (obj,
-						   i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY);
-	}
-	ret = i915_gem_object_get_pages(obj);
-	if (retry_alloc) {
-		i915_gem_object_set_page_gfp_mask (obj,
-						   i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY);
-	}
+	ret = i915_gem_object_get_pages(obj, gfpmask);
 	if (ret) {
 		drm_mm_put_block(obj_priv->gtt_space);
 		obj_priv->gtt_space = NULL;
@@ -2641,9 +2627,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 		ret = i915_gem_evict_something(dev, obj->size);
 		if (ret) {
 			/* now try to shrink everyone else */
-			if (! retry_alloc) {
-				retry_alloc = true;
+			if (gfpmask) {
+				gfpmask = 0;
 				goto search_free;
 			}
 
 			return ret;
@@ -2721,7 +2707,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
 	old_write_domain = obj->write_domain;
 	i915_gem_flush(dev, 0, obj->write_domain);
 	seqno = i915_add_request(dev, NULL, obj->write_domain);
-	obj->write_domain = 0;
+	BUG_ON(obj->write_domain);
 	i915_gem_object_move_to_active(obj, seqno);
 
 	trace_i915_gem_object_change_domain(obj,
@@ -3584,6 +3570,9 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
 	uint32_t reloc_count = 0, i;
 	int ret = 0;
 
+	if (relocs == NULL)
+		return 0;
+
 	for (i = 0; i < buffer_count; i++) {
 		struct drm_i915_gem_relocation_entry __user *user_relocs;
 		int unwritten;
@@ -3673,7 +3662,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct drm_gem_object *batch_obj;
 	struct drm_i915_gem_object *obj_priv;
 	struct drm_clip_rect *cliprects = NULL;
-	struct drm_i915_gem_relocation_entry *relocs;
+	struct drm_i915_gem_relocation_entry *relocs = NULL;
 	int ret = 0, ret2, i, pinned = 0;
 	uint64_t exec_offset;
 	uint32_t seqno, flush_domains, reloc_index;
@@ -3699,8 +3688,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (args->num_cliprects != 0) {
 		cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
 				    GFP_KERNEL);
-		if (cliprects == NULL)
+		if (cliprects == NULL) {
+			ret = -ENOMEM;
 			goto pre_mutex_err;
+		}
 
 		ret = copy_from_user(cliprects,
 				     (struct drm_clip_rect __user *)
@@ -3742,6 +3733,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		if (object_list[i] == NULL) {
 			DRM_ERROR("Invalid object handle %d at index %d\n",
 				   exec_list[i].handle, i);
+			/* prevent error path from reading uninitialized data */
+			args->buffer_count = i + 1;
 			ret = -EBADF;
 			goto err;
 		}
@@ -3750,6 +3743,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		if (obj_priv->in_execbuffer) {
 			DRM_ERROR("Object %p appears more than once in object list\n",
 				   object_list[i]);
+			/* prevent error path from reading uninitialized data */
+			args->buffer_count = i + 1;
 			ret = -EBADF;
 			goto err;
 		}
@@ -3863,16 +3858,23 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		i915_gem_flush(dev,
 			       dev->invalidate_domains,
 			       dev->flush_domains);
-		if (dev->flush_domains)
+		if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
 			(void)i915_add_request(dev, file_priv,
 					       dev->flush_domains);
 	}
 
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
+		struct drm_i915_gem_object *obj_priv = obj->driver_private;
 		uint32_t old_write_domain = obj->write_domain;
 
 		obj->write_domain = obj->pending_write_domain;
+		if (obj->write_domain)
+			list_move_tail(&obj_priv->gpu_write_list,
+				       &dev_priv->mm.gpu_write_list);
+		else
+			list_del_init(&obj_priv->gpu_write_list);
+
 		trace_i915_gem_object_change_domain(obj,
 						    obj->read_domains,
 						    old_write_domain);
@@ -3946,6 +3948,7 @@ err:
 
 	mutex_unlock(&dev->struct_mutex);
 
+pre_mutex_err:
 	/* Copy the updated relocations out regardless of current error
 	 * state. Failure to update the relocs would mean that the next
 	 * time userland calls execbuf, it would do so with presumed offset
@@ -3960,7 +3963,6 @@ err:
 		ret = ret2;
 	}
 
-pre_mutex_err:
 	drm_free_large(object_list);
 	kfree(cliprects);
 
@@ -4383,6 +4385,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 	obj_priv->obj = obj;
 	obj_priv->fence_reg = I915_FENCE_REG_NONE;
 	INIT_LIST_HEAD(&obj_priv->list);
+	INIT_LIST_HEAD(&obj_priv->gpu_write_list);
 	INIT_LIST_HEAD(&obj_priv->fence_list);
 	obj_priv->madv = I915_MADV_WILLNEED;
 
@@ -4834,6 +4837,7 @@ i915_gem_load(struct drm_device *dev)
 	spin_lock_init(&dev_priv->mm.active_list_lock);
 	INIT_LIST_HEAD(&dev_priv->mm.active_list);
 	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
+	INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
 	INIT_LIST_HEAD(&dev_priv->mm.request_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
@@ -4946,7 +4950,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
 	if (!obj_priv->phys_obj)
 		return;
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret)
 		goto out;
 
@@ -5004,7 +5008,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
 	obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
 	obj_priv->phys_obj->cur_obj = obj;
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret) {
 		DRM_ERROR("failed to get page list\n");
 		goto out;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 89a071a3e6fb..a17d6bdfe63e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -309,6 +309,22 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 	if (de_iir & DE_GSE)
 		ironlake_opregion_gse_intr(dev);
 
+	if (de_iir & DE_PLANEA_FLIP_DONE) {
+		intel_prepare_page_flip(dev, 0);
+		intel_finish_page_flip(dev, 0);
+	}
+
+	if (de_iir & DE_PLANEB_FLIP_DONE) {
+		intel_prepare_page_flip(dev, 1);
+		intel_finish_page_flip(dev, 1);
+	}
+
+	if (de_iir & DE_PIPEA_VBLANK)
+		drm_handle_vblank(dev, 0);
+
+	if (de_iir & DE_PIPEB_VBLANK)
+		drm_handle_vblank(dev, 1);
+
 	/* check event from PCH */
 	if ((de_iir & DE_PCH_EVENT) &&
 	    (pch_iir & SDE_HOTPLUG_MASK)) {
@@ -844,11 +860,11 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
 	if (!(pipeconf & PIPEACONF_ENABLE))
 		return -EINVAL;
 
-	if (IS_IRONLAKE(dev))
-		return 0;
-
 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-	if (IS_I965G(dev))
+	if (IS_IRONLAKE(dev))
+		ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
+					    DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
+	else if (IS_I965G(dev))
 		i915_enable_pipestat(dev_priv, pipe,
 				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
 	else
@@ -866,13 +882,14 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
 
-	if (IS_IRONLAKE(dev))
-		return;
-
 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-	i915_disable_pipestat(dev_priv, pipe,
-			      PIPE_VBLANK_INTERRUPT_ENABLE |
-			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
+	if (IS_IRONLAKE(dev))
+		ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
+					     DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
+	else
+		i915_disable_pipestat(dev_priv, pipe,
+				      PIPE_VBLANK_INTERRUPT_ENABLE |
+				      PIPE_START_VBLANK_INTERRUPT_ENABLE);
 	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
 }
 
@@ -1015,13 +1032,14 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	/* enable kind of interrupts always enabled */
-	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT;
+	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
+			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
 	u32 render_mask = GT_USER_INTERRUPT;
 	u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
 			   SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
 
 	dev_priv->irq_mask_reg = ~display_mask;
-	dev_priv->de_irq_enable_reg = display_mask;
+	dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
 
 	/* should always can generate irq */
 	I915_WRITE(DEIIR, I915_READ(DEIIR));
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 847006c5218e..ab1bd2d3d3b6 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -338,6 +338,7 @@
 #define   FBC_CTL_PERIODIC	(1<<30)
 #define   FBC_CTL_INTERVAL_SHIFT (16)
 #define   FBC_CTL_UNCOMPRESSIBLE (1<<14)
+#define   FBC_C3_IDLE		(1<<13)
 #define   FBC_CTL_STRIDE_SHIFT	(5)
 #define   FBC_CTL_FENCENO	(1<<0)
 #define FBC_COMMAND		0x0320c
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index ddefc871edfe..79dd4026586f 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -157,6 +157,9 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
 	adpa = I915_READ(PCH_ADPA);
 
 	adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+	/* disable HPD first */
+	I915_WRITE(PCH_ADPA, adpa);
+	(void)I915_READ(PCH_ADPA);
 
 	adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
 		 ADPA_CRT_HOTPLUG_WARMUP_10MS |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 45da78ef4a92..b27202d23ebc 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -240,33 +240,86 @@ struct intel_limit {
 #define IRONLAKE_DOT_MAX	350000
 #define IRONLAKE_VCO_MIN	1760000
 #define IRONLAKE_VCO_MAX	3510000
-#define IRONLAKE_N_MIN		1
-#define IRONLAKE_N_MAX		6
-#define IRONLAKE_M_MIN		79
-#define IRONLAKE_M_MAX		127
 #define IRONLAKE_M1_MIN		12
 #define IRONLAKE_M1_MAX		22
 #define IRONLAKE_M2_MIN		5
 #define IRONLAKE_M2_MAX		9
-#define IRONLAKE_P_SDVO_DAC_MIN	5
-#define IRONLAKE_P_SDVO_DAC_MAX	80
-#define IRONLAKE_P_LVDS_MIN	28
-#define IRONLAKE_P_LVDS_MAX	112
-#define IRONLAKE_P1_MIN		1
-#define IRONLAKE_P1_MAX		8
-#define IRONLAKE_P2_SDVO_DAC_SLOW 10
-#define IRONLAKE_P2_SDVO_DAC_FAST 5
-#define IRONLAKE_P2_LVDS_SLOW	14 /* single channel */
-#define IRONLAKE_P2_LVDS_FAST	7  /* double channel */
 #define IRONLAKE_P2_DOT_LIMIT	225000 /* 225Mhz */
 
-#define IRONLAKE_P_DISPLAY_PORT_MIN	10
-#define IRONLAKE_P_DISPLAY_PORT_MAX	20
-#define IRONLAKE_P2_DISPLAY_PORT_FAST	10
-#define IRONLAKE_P2_DISPLAY_PORT_SLOW	10
-#define IRONLAKE_P2_DISPLAY_PORT_LIMIT	0
-#define IRONLAKE_P1_DISPLAY_PORT_MIN	1
-#define IRONLAKE_P1_DISPLAY_PORT_MAX	2
+/* We have parameter ranges for different type of outputs. */
+
+/* DAC & HDMI Refclk 120Mhz */
+#define IRONLAKE_DAC_N_MIN	1
+#define IRONLAKE_DAC_N_MAX	5
+#define IRONLAKE_DAC_M_MIN	79
+#define IRONLAKE_DAC_M_MAX	127
+#define IRONLAKE_DAC_P_MIN	5
+#define IRONLAKE_DAC_P_MAX	80
+#define IRONLAKE_DAC_P1_MIN	1
+#define IRONLAKE_DAC_P1_MAX	8
+#define IRONLAKE_DAC_P2_SLOW	10
+#define IRONLAKE_DAC_P2_FAST	5
+
+/* LVDS single-channel 120Mhz refclk */
+#define IRONLAKE_LVDS_S_N_MIN	1
+#define IRONLAKE_LVDS_S_N_MAX	3
+#define IRONLAKE_LVDS_S_M_MIN	79
+#define IRONLAKE_LVDS_S_M_MAX	118
+#define IRONLAKE_LVDS_S_P_MIN	28
+#define IRONLAKE_LVDS_S_P_MAX	112
+#define IRONLAKE_LVDS_S_P1_MIN	2
+#define IRONLAKE_LVDS_S_P1_MAX	8
+#define IRONLAKE_LVDS_S_P2_SLOW	14
+#define IRONLAKE_LVDS_S_P2_FAST	14
+
+/* LVDS dual-channel 120Mhz refclk */
+#define IRONLAKE_LVDS_D_N_MIN	1
+#define IRONLAKE_LVDS_D_N_MAX	3
+#define IRONLAKE_LVDS_D_M_MIN	79
+#define IRONLAKE_LVDS_D_M_MAX	127
+#define IRONLAKE_LVDS_D_P_MIN	14
+#define IRONLAKE_LVDS_D_P_MAX	56
+#define IRONLAKE_LVDS_D_P1_MIN	2
+#define IRONLAKE_LVDS_D_P1_MAX	8
+#define IRONLAKE_LVDS_D_P2_SLOW	7
+#define IRONLAKE_LVDS_D_P2_FAST	7
+
+/* LVDS single-channel 100Mhz refclk */
+#define IRONLAKE_LVDS_S_SSC_N_MIN	1
+#define IRONLAKE_LVDS_S_SSC_N_MAX	2
+#define IRONLAKE_LVDS_S_SSC_M_MIN	79
+#define IRONLAKE_LVDS_S_SSC_M_MAX	126
+#define IRONLAKE_LVDS_S_SSC_P_MIN	28
+#define IRONLAKE_LVDS_S_SSC_P_MAX	112
+#define IRONLAKE_LVDS_S_SSC_P1_MIN	2
+#define IRONLAKE_LVDS_S_SSC_P1_MAX	8
+#define IRONLAKE_LVDS_S_SSC_P2_SLOW	14
+#define IRONLAKE_LVDS_S_SSC_P2_FAST	14
+
+/* LVDS dual-channel 100Mhz refclk */
+#define IRONLAKE_LVDS_D_SSC_N_MIN	1
+#define IRONLAKE_LVDS_D_SSC_N_MAX	3
+#define IRONLAKE_LVDS_D_SSC_M_MIN	79
+#define IRONLAKE_LVDS_D_SSC_M_MAX	126
+#define IRONLAKE_LVDS_D_SSC_P_MIN	14
+#define IRONLAKE_LVDS_D_SSC_P_MAX	42
+#define IRONLAKE_LVDS_D_SSC_P1_MIN	2
+#define IRONLAKE_LVDS_D_SSC_P1_MAX	6
+#define IRONLAKE_LVDS_D_SSC_P2_SLOW	7
+#define IRONLAKE_LVDS_D_SSC_P2_FAST	7
+
+/* DisplayPort */
+#define IRONLAKE_DP_N_MIN	1
+#define IRONLAKE_DP_N_MAX	2
+#define IRONLAKE_DP_M_MIN	81
+#define IRONLAKE_DP_M_MAX	90
+#define IRONLAKE_DP_P_MIN	10
+#define IRONLAKE_DP_P_MAX	20
+#define IRONLAKE_DP_P2_FAST	10
+#define IRONLAKE_DP_P2_SLOW	10
+#define IRONLAKE_DP_P2_LIMIT	0
+#define IRONLAKE_DP_P1_MIN	1
+#define IRONLAKE_DP_P1_MAX	2
 
 static bool
 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
@@ -474,33 +527,78 @@ static const intel_limit_t intel_limits_pineview_lvds = {
 	.find_pll = intel_find_best_PLL,
 };
 
-static const intel_limit_t intel_limits_ironlake_sdvo = {
+static const intel_limit_t intel_limits_ironlake_dac = {
 	.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
 	.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
-	.n   = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX },
-	.m   = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX },
+	.n   = { .min = IRONLAKE_DAC_N_MIN, .max = IRONLAKE_DAC_N_MAX },
+	.m   = { .min = IRONLAKE_DAC_M_MIN, .max = IRONLAKE_DAC_M_MAX },
 	.m1  = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
 	.m2  = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
-	.p   = { .min = IRONLAKE_P_SDVO_DAC_MIN, .max = IRONLAKE_P_SDVO_DAC_MAX },
-	.p1  = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX },
+	.p   = { .min = IRONLAKE_DAC_P_MIN, .max = IRONLAKE_DAC_P_MAX },
+	.p1  = { .min = IRONLAKE_DAC_P1_MIN, .max = IRONLAKE_DAC_P1_MAX },
 	.p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
-		 .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW,
-		 .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST },
+		 .p2_slow = IRONLAKE_DAC_P2_SLOW,
+		 .p2_fast = IRONLAKE_DAC_P2_FAST },
 	.find_pll = intel_g4x_find_best_PLL,
 };
 
-static const intel_limit_t intel_limits_ironlake_lvds = {
+static const intel_limit_t intel_limits_ironlake_single_lvds = {
 	.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
 	.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
-	.n   = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX },
-	.m   = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX },
+	.n   = { .min = IRONLAKE_LVDS_S_N_MIN, .max = IRONLAKE_LVDS_S_N_MAX },
+	.m   = { .min = IRONLAKE_LVDS_S_M_MIN, .max = IRONLAKE_LVDS_S_M_MAX },
 	.m1  = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
 	.m2  = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
-	.p   = { .min = IRONLAKE_P_LVDS_MIN, .max = IRONLAKE_P_LVDS_MAX },
-	.p1  = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX },
+	.p   = { .min = IRONLAKE_LVDS_S_P_MIN, .max = IRONLAKE_LVDS_S_P_MAX },
+	.p1  = { .min = IRONLAKE_LVDS_S_P1_MIN, .max = IRONLAKE_LVDS_S_P1_MAX },
 	.p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
-		 .p2_slow = IRONLAKE_P2_LVDS_SLOW,
-		 .p2_fast = IRONLAKE_P2_LVDS_FAST },
+		 .p2_slow = IRONLAKE_LVDS_S_P2_SLOW,
+		 .p2_fast = IRONLAKE_LVDS_S_P2_FAST },
+	.find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_dual_lvds = {
+	.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
+	.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
+	.n   = { .min = IRONLAKE_LVDS_D_N_MIN, .max = IRONLAKE_LVDS_D_N_MAX },
+	.m   = { .min = IRONLAKE_LVDS_D_M_MIN, .max = IRONLAKE_LVDS_D_M_MAX },
+	.m1  = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
+	.m2  = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
+	.p   = { .min = IRONLAKE_LVDS_D_P_MIN, .max = IRONLAKE_LVDS_D_P_MAX },
+	.p1  = { .min = IRONLAKE_LVDS_D_P1_MIN, .max = IRONLAKE_LVDS_D_P1_MAX },
+	.p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+		 .p2_slow = IRONLAKE_LVDS_D_P2_SLOW,
+		 .p2_fast = IRONLAKE_LVDS_D_P2_FAST },
+	.find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
+	.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
+	.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
+	.n   = { .min = IRONLAKE_LVDS_S_SSC_N_MIN, .max = IRONLAKE_LVDS_S_SSC_N_MAX },
+	.m   = { .min = IRONLAKE_LVDS_S_SSC_M_MIN, .max = IRONLAKE_LVDS_S_SSC_M_MAX },
+	.m1  = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
+	.m2  = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
+	.p   = { .min = IRONLAKE_LVDS_S_SSC_P_MIN, .max = IRONLAKE_LVDS_S_SSC_P_MAX },
+	.p1  = { .min = IRONLAKE_LVDS_S_SSC_P1_MIN,.max = IRONLAKE_LVDS_S_SSC_P1_MAX },
+	.p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+		 .p2_slow = IRONLAKE_LVDS_S_SSC_P2_SLOW,
+		 .p2_fast = IRONLAKE_LVDS_S_SSC_P2_FAST },
+	.find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
+	.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
+	.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
+	.n   = { .min = IRONLAKE_LVDS_D_SSC_N_MIN, .max = IRONLAKE_LVDS_D_SSC_N_MAX },
+	.m   = { .min = IRONLAKE_LVDS_D_SSC_M_MIN, .max = IRONLAKE_LVDS_D_SSC_M_MAX },
+	.m1  = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
+	.m2  = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
+	.p   = { .min = IRONLAKE_LVDS_D_SSC_P_MIN, .max = IRONLAKE_LVDS_D_SSC_P_MAX },
+	.p1  = { .min = IRONLAKE_LVDS_D_SSC_P1_MIN,.max = IRONLAKE_LVDS_D_SSC_P1_MAX },
+	.p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+		 .p2_slow = IRONLAKE_LVDS_D_SSC_P2_SLOW,
+		 .p2_fast = IRONLAKE_LVDS_D_SSC_P2_FAST },
 	.find_pll = intel_g4x_find_best_PLL,
 };
 
@@ -509,34 +607,53 @@ static const intel_limit_t intel_limits_ironlake_display_port = {
 		 .max = IRONLAKE_DOT_MAX },
 	.vco = { .min = IRONLAKE_VCO_MIN,
 		 .max = IRONLAKE_VCO_MAX},
-	.n   = { .min = IRONLAKE_N_MIN,
-		 .max = IRONLAKE_N_MAX },
-	.m   = { .min = IRONLAKE_M_MIN,
-		 .max = IRONLAKE_M_MAX },
+	.n   = { .min = IRONLAKE_DP_N_MIN,
+		 .max = IRONLAKE_DP_N_MAX },
+	.m   = { .min = IRONLAKE_DP_M_MIN,
+		 .max = IRONLAKE_DP_M_MAX },
 	.m1  = { .min = IRONLAKE_M1_MIN,
 		 .max = IRONLAKE_M1_MAX },
 	.m2  = { .min = IRONLAKE_M2_MIN,
 		 .max = IRONLAKE_M2_MAX },
-	.p   = { .min = IRONLAKE_P_DISPLAY_PORT_MIN,
-		 .max = IRONLAKE_P_DISPLAY_PORT_MAX },
-	.p1  = { .min = IRONLAKE_P1_DISPLAY_PORT_MIN,
-		 .max = IRONLAKE_P1_DISPLAY_PORT_MAX},
-	.p2  = { .dot_limit = IRONLAKE_P2_DISPLAY_PORT_LIMIT,
-		 .p2_slow = IRONLAKE_P2_DISPLAY_PORT_SLOW,
-		 .p2_fast = IRONLAKE_P2_DISPLAY_PORT_FAST },
+	.p   = { .min = IRONLAKE_DP_P_MIN,
+		 .max = IRONLAKE_DP_P_MAX },
+	.p1  = { .min = IRONLAKE_DP_P1_MIN,
+		 .max = IRONLAKE_DP_P1_MAX},
+	.p2  = { .dot_limit = IRONLAKE_DP_P2_LIMIT,
+		 .p2_slow = IRONLAKE_DP_P2_SLOW,
+		 .p2_fast = IRONLAKE_DP_P2_FAST },
 	.find_pll = intel_find_pll_ironlake_dp,
 };
 
 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	const intel_limit_t *limit;
-	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
-		limit = &intel_limits_ironlake_lvds;
-	else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+	int refclk = 120;
+
+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+		if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100)
+			refclk = 100;
+
+		if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
+		    LVDS_CLKB_POWER_UP) {
+			/* LVDS dual channel */
+			if (refclk == 100)
+				limit = &intel_limits_ironlake_dual_lvds_100m;
+			else
+				limit = &intel_limits_ironlake_dual_lvds;
+		} else {
+			if (refclk == 100)
+				limit = &intel_limits_ironlake_single_lvds_100m;
+			else
+				limit = &intel_limits_ironlake_single_lvds;
+		}
+	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
 		   HAS_eDP)
 		limit = &intel_limits_ironlake_display_port;
 	else
-		limit = &intel_limits_ironlake_sdvo;
+		limit = &intel_limits_ironlake_dac;
 
 	return limit;
 }
@@ -914,6 +1031,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 
 	/* enable it... */
 	fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
+	if (IS_I945GM(dev))
+		fbc_ctl |= FBC_C3_IDLE; /* 945 needs special SR handling */
 	fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
 	fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
 	if (obj_priv->tiling_mode != I915_TILING_NONE)
@@ -1638,6 +1757,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
 	case DRM_MODE_DPMS_OFF:
 		DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
 
+		drm_vblank_off(dev, pipe);
 		/* Disable display plane */
 		temp = I915_READ(dspcntr_reg);
 		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
@@ -2519,6 +2639,10 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
 		sr_entries = roundup(sr_entries / cacheline_size, 1);
 		DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
 		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+	} else {
+		/* Turn off self refresh if both pipes are enabled */
+		I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+			   & ~FW_BLC_SELF_EN);
 	}
 
 	DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
@@ -2562,6 +2686,10 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
2562 srwm = 1; 2686 srwm = 1;
2563 srwm &= 0x3f; 2687 srwm &= 0x3f;
2564 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); 2688 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
2689 } else {
2690 /* Turn off self refresh if both pipes are enabled */
2691 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
2692 & ~FW_BLC_SELF_EN);
2565 } 2693 }
2566 2694
2567 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", 2695 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
@@ -2630,6 +2758,10 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
2630 if (srwm < 0) 2758 if (srwm < 0)
2631 srwm = 1; 2759 srwm = 1;
2632 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f)); 2760 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
2761 } else {
2762 /* Turn off self refresh if both pipes are enabled */
2763 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
2764 & ~FW_BLC_SELF_EN);
2633 } 2765 }
2634 2766
2635 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", 2767 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
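
All three watermark hunks add the same else branch: when more than one pipe is active, self-refresh is explicitly turned off with a read-modify-write that clears FW_BLC_SELF_EN. A toy model of that pattern against a fake register array (the register index and bit position are placeholders, not the real i915 register map):

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_regs[1];           /* stand-in for MMIO space */
#define FW_BLC_SELF     0               /* hypothetical register index */
#define FW_BLC_SELF_EN  (1u << 15)      /* hypothetical enable bit */

static uint32_t reg_read(int r)              { return fake_regs[r]; }
static void     reg_write(int r, uint32_t v) { fake_regs[r] = v; }

static void update_self_refresh(int active_pipes)
{
	if (active_pipes == 1)
		/* single pipe: safe to let the FIFO self-refresh */
		reg_write(FW_BLC_SELF, reg_read(FW_BLC_SELF) | FW_BLC_SELF_EN);
	else
		/* both pipes enabled: turn self refresh off again */
		reg_write(FW_BLC_SELF,
			  reg_read(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
}

int main(void)
{
	update_self_refresh(1);
	printf("one pipe:  0x%08x\n", reg_read(FW_BLC_SELF));
	update_self_refresh(2);
	printf("two pipes: 0x%08x\n", reg_read(FW_BLC_SELF));
	return 0;
}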
@@ -3949,7 +4081,8 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
 struct intel_unpin_work {
 	struct work_struct work;
 	struct drm_device *dev;
-	struct drm_gem_object *obj;
+	struct drm_gem_object *old_fb_obj;
+	struct drm_gem_object *pending_flip_obj;
 	struct drm_pending_vblank_event *event;
 	int pending;
 };
@@ -3960,8 +4093,9 @@ static void intel_unpin_work_fn(struct work_struct *__work)
 		container_of(__work, struct intel_unpin_work, work);
 
 	mutex_lock(&work->dev->struct_mutex);
-	i915_gem_object_unpin(work->obj);
-	drm_gem_object_unreference(work->obj);
+	i915_gem_object_unpin(work->old_fb_obj);
+	drm_gem_object_unreference(work->pending_flip_obj);
+	drm_gem_object_unreference(work->old_fb_obj);
 	mutex_unlock(&work->dev->struct_mutex);
 	kfree(work);
 }
@@ -3984,6 +4118,12 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
 	spin_lock_irqsave(&dev->event_lock, flags);
 	work = intel_crtc->unpin_work;
 	if (work == NULL || !work->pending) {
+		if (work && !work->pending) {
+			obj_priv = work->pending_flip_obj->driver_private;
+			DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
+					 obj_priv,
+					 atomic_read(&obj_priv->pending_flip));
+		}
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 		return;
 	}
@@ -4004,8 +4144,11 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
 
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
-	obj_priv = work->obj->driver_private;
-	if (atomic_dec_and_test(&obj_priv->pending_flip))
+	obj_priv = work->pending_flip_obj->driver_private;
+
+	/* Initial scanout buffer will have a 0 pending flip count */
+	if ((atomic_read(&obj_priv->pending_flip) == 0) ||
+	    atomic_dec_and_test(&obj_priv->pending_flip))
 		DRM_WAKEUP(&dev_priv->pending_flip_queue);
 	schedule_work(&work->work);
 }
@@ -4018,8 +4161,11 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev->event_lock, flags);
-	if (intel_crtc->unpin_work)
+	if (intel_crtc->unpin_work) {
 		intel_crtc->unpin_work->pending = 1;
+	} else {
+		DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
+	}
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
@@ -4035,7 +4181,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_unpin_work *work;
 	unsigned long flags;
-	int ret;
+	int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
+	int ret, pipesrc;
 	RING_LOCALS;
 
 	work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -4047,12 +4194,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	work->event = event;
 	work->dev = crtc->dev;
 	intel_fb = to_intel_framebuffer(crtc->fb);
-	work->obj = intel_fb->obj;
+	work->old_fb_obj = intel_fb->obj;
 	INIT_WORK(&work->work, intel_unpin_work_fn);
 
 	/* We borrow the event spin lock for protecting unpin_work */
 	spin_lock_irqsave(&dev->event_lock, flags);
 	if (intel_crtc->unpin_work) {
+		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 		kfree(work);
 		mutex_unlock(&dev->struct_mutex);
@@ -4066,19 +4214,24 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 	ret = intel_pin_and_fence_fb_obj(dev, obj);
 	if (ret != 0) {
+		DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
+				 obj->driver_private);
 		kfree(work);
+		intel_crtc->unpin_work = NULL;
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
 	}
 
-	/* Reference the old fb object for the scheduled work. */
-	drm_gem_object_reference(work->obj);
+	/* Reference the objects for the scheduled work. */
+	drm_gem_object_reference(work->old_fb_obj);
+	drm_gem_object_reference(obj);
 
 	crtc->fb = fb;
 	i915_gem_object_flush_write_domain(obj);
 	drm_vblank_get(dev, intel_crtc->pipe);
 	obj_priv = obj->driver_private;
 	atomic_inc(&obj_priv->pending_flip);
+	work->pending_flip_obj = obj;
 
 	BEGIN_LP_RING(4);
 	OUT_RING(MI_DISPLAY_FLIP |
@@ -4086,7 +4239,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	OUT_RING(fb->pitch);
 	if (IS_I965G(dev)) {
 		OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
-		OUT_RING((fb->width << 16) | fb->height);
+		pipesrc = I915_READ(pipesrc_reg);
+		OUT_RING(pipesrc & 0x0fff0fff);
 	} else {
 		OUT_RING(obj_priv->gtt_offset);
 		OUT_RING(MI_NOOP);
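
Taken together, the page-flip hunks change the unpin work item to hold two buffer objects: the old framebuffer, which is unpinned and released once the flip completes, and the new one, which must stay referenced while its pending_flip count is nonzero. A compressed userspace sketch of that lifecycle; the names mirror the diff, but the refcounting, the structs, and the main() harness are all stand-ins:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct gem_object { int refcount; int pending_flip; const char *name; };

static void ref(struct gem_object *o)   { o->refcount++; }
static void unref(struct gem_object *o) { assert(--o->refcount >= 0); }

struct unpin_work {
	struct gem_object *old_fb_obj;       /* buffer being flipped away from */
	struct gem_object *pending_flip_obj; /* buffer being flipped to */
};

/* queue side: take one reference on each object, as the hunks do */
static struct unpin_work *queue_flip(struct gem_object *old_fb,
				     struct gem_object *new_fb)
{
	struct unpin_work *work = calloc(1, sizeof(*work));

	work->old_fb_obj = old_fb;
	work->pending_flip_obj = new_fb;
	ref(old_fb);
	ref(new_fb);
	new_fb->pending_flip++;
	return work;
}

/* completion side: drop both references once the vblank handler ran */
static void finish_flip(struct unpin_work *work)
{
	work->pending_flip_obj->pending_flip--;
	unref(work->old_fb_obj);        /* i915 also unpins it here */
	unref(work->pending_flip_obj);
	free(work);
}

int main(void)
{
	struct gem_object a = { 1, 0, "old fb" }, b = { 1, 0, "new fb" };
	struct unpin_work *w = queue_flip(&a, &b);

	printf("queued:   old=%d new=%d pending=%d\n",
	       a.refcount, b.refcount, b.pending_flip);
	finish_flip(w);
	printf("finished: old=%d new=%d pending=%d\n",
	       a.refcount, b.refcount, b.pending_flip);
	return 0;
}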
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 371d753e362b..aaabbcbe5905 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -148,7 +148,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
 
 	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_object_pin(fbo, PAGE_SIZE);
+	ret = i915_gem_object_pin(fbo, 64*1024);
 	if (ret) {
 		DRM_ERROR("failed to pin fb: %d\n", ret);
 		goto out_unref;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index aa74e59bec61..c2e8a45780d5 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -611,7 +611,7 @@ static const struct dmi_system_id bad_lid_status[] = {
 	{
 		.ident = "Samsung SX20S",
 		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Phoenix Technologies LTD"),
+			DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"),
 			DMI_MATCH(DMI_BOARD_NAME, "SX20S"),
 		},
 	},
@@ -623,12 +623,26 @@ static const struct dmi_system_id bad_lid_status[] = {
 		},
 	},
 	{
+		.ident = "Aspire 1810T",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1810T"),
+		},
+	},
+	{
 		.ident = "PC-81005",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "MALATA"),
 			DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"),
 		},
 	},
+	{
+		.ident = "Clevo M5x0N",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
+			DMI_MATCH(DMI_BOARD_NAME, "M5x0N"),
+		},
+	},
 	{ }
 };
 
@@ -643,7 +657,7 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect
 {
 	enum drm_connector_status status = connector_status_connected;
 
-	if (!acpi_lid_open() && !dmi_check_system(bad_lid_status))
+	if (!dmi_check_system(bad_lid_status) && !acpi_lid_open())
 		status = connector_status_disconnected;
 
 	return status;
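
The bad_lid_status[] additions rely on the kernel's DMI matching to override a lying ACPI lid switch, and the reordered check in intel_lvds_detect() consults the quirk table before asking ACPI at all. A toy equivalent of that table-driven match, using a simplified quirk struct rather than the real dmi_system_id API:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* simplified stand-in for struct dmi_system_id / DMI_MATCH */
struct quirk { const char *ident, *vendor, *board; };

static const struct quirk bad_lid[] = {
	{ "Samsung SX20S", "Samsung Electronics", "SX20S" },
	{ "Clevo M5x0N",   "CLEVO Co.",           "M5x0N" },
	{ 0 }                       /* sentinel ends the table */
};

static bool check_quirks(const struct quirk *q,
			 const char *vendor, const char *board)
{
	for (; q->ident; q++)
		if (!strcmp(q->vendor, vendor) && !strcmp(q->board, board))
			return true;
	return false;
}

int main(void)
{
	/* as in the reordered check: trust the quirk table before the lid */
	bool quirky = check_quirks(bad_lid, "CLEVO Co.", "M5x0N");
	printf("lid status %s\n", quirky ? "ignored" : "honoured");
	return 0;
}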
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index eaacfd0920df..82678d30ab06 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2345,6 +2345,14 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
 		connector->connector_type = DRM_MODE_CONNECTOR_VGA;
 		intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
 					   (1 << INTEL_ANALOG_CLONE_BIT);
+	} else if (flags & SDVO_OUTPUT_CVBS0) {
+
+		sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0;
+		encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+		connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+		sdvo_priv->is_tv = true;
+		intel_output->needs_tv_clock = true;
+		intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
 	} else if (flags & SDVO_OUTPUT_LVDS0) {
 
 		sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 1cf488247a16..48227e744753 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -90,21 +90,21 @@ int nouveau_hybrid_setup(struct drm_device *dev)
 {
 	int result;
 
-	if (nouveau_dsm(dev, NOUVEAU_DSM_ACTIVE, NOUVEAU_DSM_ACTIVE_QUERY,
+	if (nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STATE,
 								&result))
 		return -ENODEV;
 
 	NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result);
 
-	if (result & 0x1) {	/* Stamina mode - disable the external GPU */
+	if (result) { /* Ensure that the external GPU is enabled */
+		nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
+		nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
+								NULL);
+	} else { /* Stamina mode - disable the external GPU */
 		nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA,
 								NULL);
 		nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA,
 								NULL);
-	} else { /* Ensure that the external GPU is enabled */
-		nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
-		nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
-								NULL);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index ba143972769f..2cd0fad17dac 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -310,63 +310,22 @@ valid_reg(struct nvbios *bios, uint32_t reg)
 	struct drm_device *dev = bios->dev;
 
 	/* C51 has misaligned regs on purpose. Marvellous */
-	if (reg & 0x2 || (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51)) {
-		NV_ERROR(dev, "========== misaligned reg 0x%08X ==========\n",
-			 reg);
-		return 0;
-	}
-	/*
-	 * Warn on C51 regs that have not been verified accessible in
-	 * mmiotracing
-	 */
+	if (reg & 0x2 ||
+	    (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51))
+		NV_ERROR(dev, "======= misaligned reg 0x%08X =======\n", reg);
+
+	/* warn on C51 regs that haven't been verified accessible in tracing */
 	if (reg & 0x1 && dev_priv->VBIOS.pub.chip_version == 0x51 &&
 	    reg != 0x130d && reg != 0x1311 && reg != 0x60081d)
 		NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n",
 			reg);
 
-	/* Trust the init scripts on G80 */
-	if (dev_priv->card_type >= NV_50)
-		return 1;
-
-	#define WITHIN(x, y, z) ((x >= y) && (x < y + z))
-	if (WITHIN(reg, NV_PMC_OFFSET, NV_PMC_SIZE))
-		return 1;
-	if (WITHIN(reg, NV_PBUS_OFFSET, NV_PBUS_SIZE))
-		return 1;
-	if (WITHIN(reg, NV_PFIFO_OFFSET, NV_PFIFO_SIZE))
-		return 1;
-	if (dev_priv->VBIOS.pub.chip_version >= 0x30 &&
-	    (WITHIN(reg, 0x4000, 0x600) || reg == 0x00004600))
-		return 1;
-	if (dev_priv->VBIOS.pub.chip_version >= 0x40 &&
-						WITHIN(reg, 0xc000, 0x48))
-		return 1;
-	if (dev_priv->VBIOS.pub.chip_version >= 0x17 && reg == 0x0000d204)
-		return 1;
-	if (dev_priv->VBIOS.pub.chip_version >= 0x40) {
-		if (reg == 0x00011014 || reg == 0x00020328)
-			return 1;
-		if (WITHIN(reg, 0x88000, NV_PBUS_SIZE)) /* new PBUS */
-			return 1;
+	if (reg >= (8*1024*1024)) {
+		NV_ERROR(dev, "=== reg 0x%08x out of mapped bounds ===\n", reg);
+		return 0;
 	}
-	if (WITHIN(reg, NV_PFB_OFFSET, NV_PFB_SIZE))
-		return 1;
-	if (WITHIN(reg, NV_PEXTDEV_OFFSET, NV_PEXTDEV_SIZE))
-		return 1;
-	if (WITHIN(reg, NV_PCRTC0_OFFSET, NV_PCRTC0_SIZE * 2))
-		return 1;
-	if (WITHIN(reg, NV_PRAMDAC0_OFFSET, NV_PRAMDAC0_SIZE * 2))
-		return 1;
-	if (dev_priv->VBIOS.pub.chip_version >= 0x17 && reg == 0x0070fff0)
-		return 1;
-	if (dev_priv->VBIOS.pub.chip_version == 0x51 &&
-				WITHIN(reg, NV_PRAMIN_OFFSET, NV_PRAMIN_SIZE))
-		return 1;
-	#undef WITHIN
 
-	NV_ERROR(dev, "========== unknown reg 0x%08X ==========\n", reg);
-
-	return 0;
+	return 1;
 }
 
 static bool
@@ -1906,7 +1865,7 @@ init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 
 	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
 
-	if (dev_priv->card_type >= NV_50)
+	if (dev_priv->card_type >= NV_40)
 		return 1;
 
 	/*
@@ -3196,16 +3155,25 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entr
 	}
 #ifdef __powerpc__
 	/* Powerbook specific quirks */
-	if (script == LVDS_RESET && ((dev->pci_device & 0xffff) == 0x0179 || (dev->pci_device & 0xffff) == 0x0329))
-		nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
-	if ((dev->pci_device & 0xffff) == 0x0179 || (dev->pci_device & 0xffff) == 0x0189 || (dev->pci_device & 0xffff) == 0x0329) {
-		if (script == LVDS_PANEL_ON) {
-			bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) | (1 << 31));
-			bios_wr32(bios, NV_PCRTC_GPIO_EXT, bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1);
-		}
-		if (script == LVDS_PANEL_OFF) {
-			bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) & ~(1 << 31));
-			bios_wr32(bios, NV_PCRTC_GPIO_EXT, bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3);
+	if ((dev->pci_device & 0xffff) == 0x0179 ||
+	    (dev->pci_device & 0xffff) == 0x0189 ||
+	    (dev->pci_device & 0xffff) == 0x0329) {
+		if (script == LVDS_RESET) {
+			nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
+
+		} else if (script == LVDS_PANEL_ON) {
+			bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
+				  bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
+				  | (1 << 31));
+			bios_wr32(bios, NV_PCRTC_GPIO_EXT,
+				  bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1);
+
+		} else if (script == LVDS_PANEL_OFF) {
+			bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
+				  bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
+				  & ~(1 << 31));
+			bios_wr32(bios, NV_PCRTC_GPIO_EXT,
+				  bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3);
 		}
 	}
 #endif
@@ -3797,7 +3765,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
 	 */
 
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct init_exec iexec = {true, false};
 	struct nvbios *bios = &dev_priv->VBIOS;
 	uint8_t *table = &bios->data[bios->display.script_table_ptr];
 	uint8_t *otable = NULL;
@@ -3877,8 +3844,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
 		}
 	}
 
-	bios->display.output = dcbent;
-
 	if (pxclk == 0) {
 		script = ROM16(otable[6]);
 		if (!script) {
@@ -3887,7 +3852,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
 		}
 
 		NV_TRACE(dev, "0x%04X: parsing output script 0\n", script);
-		parse_init_table(bios, script, &iexec);
+		nouveau_bios_run_init_table(dev, script, dcbent);
 	} else
 	if (pxclk == -1) {
 		script = ROM16(otable[8]);
@@ -3897,7 +3862,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
 		}
 
 		NV_TRACE(dev, "0x%04X: parsing output script 1\n", script);
-		parse_init_table(bios, script, &iexec);
+		nouveau_bios_run_init_table(dev, script, dcbent);
 	} else
 	if (pxclk == -2) {
 		if (table[4] >= 12)
@@ -3910,7 +3875,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
 		}
 
 		NV_TRACE(dev, "0x%04X: parsing output script 2\n", script);
-		parse_init_table(bios, script, &iexec);
+		nouveau_bios_run_init_table(dev, script, dcbent);
 	} else
 	if (pxclk > 0) {
 		script = ROM16(otable[table[4] + i*6 + 2]);
@@ -3922,7 +3887,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
 		}
 
 		NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script);
-		parse_init_table(bios, script, &iexec);
+		nouveau_bios_run_init_table(dev, script, dcbent);
 	} else
 	if (pxclk < 0) {
 		script = ROM16(otable[table[4] + i*6 + 4]);
@@ -3934,7 +3899,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
 		}
 
 		NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script);
-		parse_init_table(bios, script, &iexec);
+		nouveau_bios_run_init_table(dev, script, dcbent);
 	}
 
 	return 0;
@@ -5434,52 +5399,49 @@ static bool
 parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb,
 		  uint32_t conn, uint32_t conf, struct dcb_entry *entry)
 {
-	if (conn != 0xf0003f00 && conn != 0xf2247f10 && conn != 0xf2204001 &&
-	    conn != 0xf2204301 && conn != 0xf2204311 && conn != 0xf2208001 &&
-	    conn != 0xf2244001 && conn != 0xf2244301 && conn != 0xf2244311 &&
-	    conn != 0xf4204011 && conn != 0xf4208011 && conn != 0xf4248011 &&
-	    conn != 0xf2045ff2 && conn != 0xf2045f14 && conn != 0xf207df14 &&
-	    conn != 0xf2205004 && conn != 0xf2209004) {
-		NV_ERROR(dev, "Unknown DCB 1.5 entry, please report\n");
-
-		/* cause output setting to fail for !TV, so message is seen */
-		if ((conn & 0xf) != 0x1)
-			dcb->entries = 0;
-
-		return false;
-	}
-	/* most of the below is a "best guess" atm */
-	entry->type = conn & 0xf;
-	if (entry->type == 2)
-		/* another way of specifying straps based lvds... */
-		entry->type = OUTPUT_LVDS;
-	if (entry->type == 4) { /* digital */
-		if (conn & 0x10)
-			entry->type = OUTPUT_LVDS;
-		else
-			entry->type = OUTPUT_TMDS;
+	switch (conn & 0x0000000f) {
+	case 0:
+		entry->type = OUTPUT_ANALOG;
+		break;
+	case 1:
+		entry->type = OUTPUT_TV;
+		break;
+	case 2:
+	case 3:
+		entry->type = OUTPUT_LVDS;
+		break;
+	case 4:
+		switch ((conn & 0x000000f0) >> 4) {
+		case 0:
+			entry->type = OUTPUT_TMDS;
+			break;
+		case 1:
+			entry->type = OUTPUT_LVDS;
+			break;
+		default:
+			NV_ERROR(dev, "Unknown DCB subtype 4/%d\n",
+				 (conn & 0x000000f0) >> 4);
+			return false;
+		}
+		break;
+	default:
+		NV_ERROR(dev, "Unknown DCB type %d\n", conn & 0x0000000f);
+		return false;
 	}
-	/* what's in bits 5-13? could be some encoder maker thing, in tv case */
-	entry->i2c_index = (conn >> 14) & 0xf;
-	/* raw heads field is in range 0-1, so move to 1-2 */
-	entry->heads = ((conn >> 18) & 0x7) + 1;
-	entry->location = (conn >> 21) & 0xf;
-	/* unused: entry->bus = (conn >> 25) & 0x7; */
-	/* set or to be same as heads -- hopefully safe enough */
-	entry->or = entry->heads;
+
+	entry->i2c_index = (conn & 0x0003c000) >> 14;
+	entry->heads = ((conn & 0x001c0000) >> 18) + 1;
+	entry->or = entry->heads; /* same as heads, hopefully safe enough */
+	entry->location = (conn & 0x01e00000) >> 21;
+	entry->bus = (conn & 0x0e000000) >> 25;
 	entry->duallink_possible = false;
 
 	switch (entry->type) {
 	case OUTPUT_ANALOG:
 		entry->crtconf.maxfreq = (conf & 0xffff) * 10;
 		break;
-	case OUTPUT_LVDS:
-		/*
-		 * This is probably buried in conn's unknown bits.
-		 * This will upset EDID-ful models, if they exist
-		 */
-		entry->lvdsconf.use_straps_for_mode = true;
-		entry->lvdsconf.use_power_scripts = true;
+	case OUTPUT_TV:
+		entry->tvconf.has_component_output = false;
 		break;
 	case OUTPUT_TMDS:
 		/*
@@ -5488,8 +5450,12 @@ parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb,
 	 */
 		fabricate_vga_output(dcb, entry->i2c_index, entry->heads);
 		break;
-	case OUTPUT_TV:
-		entry->tvconf.has_component_output = false;
+	case OUTPUT_LVDS:
+		if ((conn & 0x00003f00) != 0x10)
+			entry->lvdsconf.use_straps_for_mode = true;
+		entry->lvdsconf.use_power_scripts = true;
+		break;
+	default:
 		break;
 	}
 
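
The new parse_dcb15_entry() treats the DCB 1.5 connection word as packed bitfields instead of whitelisting known values. A small sketch that decodes those fields exactly as the switch above does; the enum and struct are local inventions, while the masks and shifts come straight from the hunk:

#include <stdint.h>
#include <stdio.h>

enum out_type { ANALOG, TV, LVDS, TMDS, UNKNOWN };

struct dcb15 { enum out_type type; unsigned i2c, heads, location, bus; };

static struct dcb15 decode(uint32_t conn)
{
	struct dcb15 e = { UNKNOWN, 0, 0, 0, 0 };

	switch (conn & 0x0000000f) {
	case 0:          e.type = ANALOG; break;
	case 1:          e.type = TV;     break;
	case 2: case 3:  e.type = LVDS;   break;
	case 4:                          /* digital: subtype picks the flavour */
		switch ((conn & 0x000000f0) >> 4) {
		case 0: e.type = TMDS; break;
		case 1: e.type = LVDS; break;
		}
		break;
	}
	e.i2c      = (conn & 0x0003c000) >> 14;
	e.heads    = ((conn & 0x001c0000) >> 18) + 1;  /* raw 0-1 -> 1-2 */
	e.location = (conn & 0x01e00000) >> 21;
	e.bus      = (conn & 0x0e000000) >> 25;
	return e;
}

int main(void)
{
	struct dcb15 e = decode(0xf2204301);  /* one of the once-whitelisted words */
	printf("type=%d i2c=%u heads=%u loc=%u bus=%u\n",
	       e.type, e.i2c, e.heads, e.location, e.bus);
	return 0;
}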
@@ -5564,11 +5530,13 @@ void merge_like_dcb_entries(struct drm_device *dev, struct parsed_dcb *dcb)
 	dcb->entries = newentries;
 }
 
-static int parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
+static int
+parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct bios_parsed_dcb *bdcb = &bios->bdcb;
 	struct parsed_dcb *dcb;
-	uint16_t dcbptr, i2ctabptr = 0;
+	uint16_t dcbptr = 0, i2ctabptr = 0;
 	uint8_t *dcbtable;
 	uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES;
 	bool configblock = true;
@@ -5579,16 +5547,18 @@ static int parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool two
 	dcb->entries = 0;
 
 	/* get the offset from 0x36 */
-	dcbptr = ROM16(bios->data[0x36]);
+	if (dev_priv->card_type > NV_04) {
+		dcbptr = ROM16(bios->data[0x36]);
+		if (dcbptr == 0x0000)
+			NV_WARN(dev, "No output data (DCB) found in BIOS\n");
+	}
 
+	/* this situation likely means a really old card, pre DCB */
 	if (dcbptr == 0x0) {
-		NV_WARN(dev, "No output data (DCB) found in BIOS, "
-			     "assuming a CRT output exists\n");
-		/* this situation likely means a really old card, pre DCB */
+		NV_INFO(dev, "Assuming a CRT output exists\n");
 		fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
 
-		if (nv04_tv_identify(dev,
-				     bios->legacy.i2c_indices.tv) >= 0)
+		if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
 			fabricate_tv_output(dcb, twoHeads);
 
 		return 0;
@@ -5891,10 +5861,13 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nvbios *bios = &dev_priv->VBIOS;
 	struct init_exec iexec = { true, false };
+	unsigned long flags;
 
+	spin_lock_irqsave(&bios->lock, flags);
 	bios->display.output = dcbent;
 	parse_init_table(bios, table, &iexec);
 	bios->display.output = NULL;
+	spin_unlock_irqrestore(&bios->lock, flags);
 }
 
 static bool NVInitVBIOS(struct drm_device *dev)
@@ -5903,6 +5876,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
 	struct nvbios *bios = &dev_priv->VBIOS;
 
 	memset(bios, 0, sizeof(struct nvbios));
+	spin_lock_init(&bios->lock);
 	bios->dev = dev;
 
 	if (!NVShadowVBIOS(dev, bios->data))
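
The new bios->lock exists so that concurrent nouveau_bios_run_init_table() callers cannot overwrite bios->display.output while another script is still executing. A userspace analogue of that discipline, with a pthread mutex standing in for the spinlock and printf for the script run (all names are hypothetical):

#include <pthread.h>
#include <stdio.h>

struct parser {
	pthread_mutex_t lock;    /* stands in for spinlock_t */
	const char *output;      /* shared state: bios->display.output */
};

static void run_init_table(struct parser *p, const char *dcbent)
{
	pthread_mutex_lock(&p->lock);
	p->output = dcbent;      /* visible to the script helpers */
	printf("running script for %s\n", p->output);
	p->output = NULL;
	pthread_mutex_unlock(&p->lock);
}

static void *worker(void *arg)
{
	run_init_table(arg, "SOR-1");
	return NULL;
}

int main(void)   /* build with -lpthread */
{
	struct parser p = { PTHREAD_MUTEX_INITIALIZER, NULL };
	pthread_t t;

	pthread_create(&t, NULL, worker, &p);
	run_init_table(&p, "DAC-0");
	pthread_join(t, NULL);
	return 0;
}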
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 058e98c76d89..68446fd4146b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -205,6 +205,8 @@ struct nvbios {
 	struct drm_device *dev;
 	struct nouveau_bios_info pub;
 
+	spinlock_t lock;
+
 	uint8_t data[NV_PROM_SIZE];
 	unsigned int length;
 	bool execute;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index e342a418d434..028719fddf76 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -65,8 +65,10 @@ nouveau_bo_fixup_align(struct drm_device *dev,
 
 	/*
 	 * Some of the tile_flags have a periodic structure of N*4096 bytes,
-	 * align to to that as well as the page size. Overallocate memory to
-	 * avoid corruption of other buffer objects.
+	 * align to to that as well as the page size. Align the size to the
+	 * appropriate boundaries. This does imply that sizes are rounded up
+	 * 3-7 pages, so be aware of this and do not waste memory by allocating
+	 * many small buffers.
 	 */
 	if (dev_priv->card_type == NV_50) {
 		uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15;
@@ -77,22 +79,20 @@ nouveau_bo_fixup_align(struct drm_device *dev,
 		case 0x2800:
 		case 0x4800:
 		case 0x7a00:
-			*size = roundup(*size, block_size);
 			if (is_power_of_2(block_size)) {
-				*size += 3 * block_size;
 				for (i = 1; i < 10; i++) {
 					*align = 12 * i * block_size;
 					if (!(*align % 65536))
 						break;
 				}
 			} else {
-				*size += 6 * block_size;
 				for (i = 1; i < 10; i++) {
 					*align = 8 * i * block_size;
 					if (!(*align % 65536))
 						break;
 				}
 			}
+			*size = roundup(*size, *align);
 			break;
 		default:
 			break;
@@ -469,6 +469,8 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
 
 	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
 					evict, no_wait, new_mem);
+	if (nvbo->channel && nvbo->channel != chan)
+		ret = nouveau_fence_wait(fence, NULL, false, false);
 	nouveau_fence_unref((void *)&fence);
 	return ret;
 }
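
The nouveau_bo_fixup_align() hunk stops padding the allocation and instead rounds the size up to the chosen alignment: the loop finds the first multiple of 12*block_size (or 8*block_size for non-power-of-two block sizes) that is also 64 KiB aligned. The same arithmetic extracted into a runnable sketch; fixup_align() is a stand-in, not the driver function:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_power_of_2(uint32_t v) { return v && !(v & (v - 1)); }

static void fixup_align(uint32_t block_size, uint32_t *size, uint32_t *align)
{
	int i;

	if (is_power_of_2(block_size)) {
		for (i = 1; i < 10; i++) {
			*align = 12 * i * block_size;
			if (!(*align % 65536))   /* need 64KiB alignment too */
				break;
		}
	} else {
		for (i = 1; i < 10; i++) {
			*align = 8 * i * block_size;
			if (!(*align % 65536))
				break;
		}
	}
	/* round the size up to the alignment, like roundup() in the hunk */
	*size = (*size + *align - 1) / *align * *align;
}

int main(void)
{
	uint32_t size = 300000, align = 0;

	fixup_align(16384, &size, &align);  /* 512 MiB card: fb_amount >> 15 */
	printf("align=%u size=%u\n", align, size);
	return 0;
}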
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 343d718a9667..2281f99da7fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -278,12 +278,11 @@ nouveau_channel_free(struct nouveau_channel *chan)
 	/* Ensure the channel is no longer active on the GPU */
 	pfifo->reassign(dev, false);
 
-	if (pgraph->channel(dev) == chan) {
-		pgraph->fifo_access(dev, false);
+	pgraph->fifo_access(dev, false);
+	if (pgraph->channel(dev) == chan)
 		pgraph->unload_context(dev);
-		pgraph->fifo_access(dev, true);
-	}
 	pgraph->destroy_context(chan);
+	pgraph->fifo_access(dev, true);
 
 	if (pfifo->channel_id(dev) == chan->id) {
 		pfifo->disable(dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 5a10deb8bdbd..d2f63353ea97 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -24,9 +24,12 @@
  *
  */
 
+#include <acpi/button.h>
+
 #include "drmP.h"
 #include "drm_edid.h"
 #include "drm_crtc_helper.h"
+
 #include "nouveau_reg.h"
 #include "nouveau_drv.h"
 #include "nouveau_encoder.h"
@@ -83,14 +86,17 @@ nouveau_encoder_connector_get(struct nouveau_encoder *encoder)
 static void
 nouveau_connector_destroy(struct drm_connector *drm_connector)
 {
-	struct nouveau_connector *connector = nouveau_connector(drm_connector);
-	struct drm_device *dev = connector->base.dev;
+	struct nouveau_connector *nv_connector =
+		nouveau_connector(drm_connector);
+	struct drm_device *dev;
 
-	NV_DEBUG_KMS(dev, "\n");
-
-	if (!connector)
+	if (!nv_connector)
 		return;
 
+	dev = nv_connector->base.dev;
+	NV_DEBUG_KMS(dev, "\n");
+
+	kfree(nv_connector->edid);
 	drm_sysfs_connector_remove(drm_connector);
 	drm_connector_cleanup(drm_connector);
 	kfree(drm_connector);
@@ -233,10 +239,21 @@ nouveau_connector_detect(struct drm_connector *connector)
 	if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
 		nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
 	if (nv_encoder && nv_connector->native_mode) {
+#ifdef CONFIG_ACPI
+		if (!nouveau_ignorelid && !acpi_lid_open())
+			return connector_status_disconnected;
+#endif
 		nouveau_connector_set_encoder(connector, nv_encoder);
 		return connector_status_connected;
 	}
 
+	/* Cleanup the previous EDID block. */
+	if (nv_connector->edid) {
+		drm_mode_connector_update_edid_property(connector, NULL);
+		kfree(nv_connector->edid);
+		nv_connector->edid = NULL;
+	}
+
 	i2c = nouveau_connector_ddc_detect(connector, &nv_encoder);
 	if (i2c) {
 		nouveau_connector_ddc_prepare(connector, &flags);
@@ -247,7 +264,7 @@ nouveau_connector_detect(struct drm_connector *connector)
 		if (!nv_connector->edid) {
 			NV_ERROR(dev, "DDC responded, but no EDID for %s\n",
 				 drm_get_connector_name(connector));
-			return connector_status_disconnected;
+			goto detect_analog;
 		}
 
 		if (nv_encoder->dcb->type == OUTPUT_DP &&
@@ -281,6 +298,7 @@ nouveau_connector_detect(struct drm_connector *connector)
 		return connector_status_connected;
 	}
 
+detect_analog:
 	nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
 	if (!nv_encoder)
 		nv_encoder = find_encoder_by_type(connector, OUTPUT_TV);
@@ -687,8 +705,12 @@ nouveau_connector_create_lvds(struct drm_device *dev,
 	 */
 	if (!nv_connector->edid && !nv_connector->native_mode &&
 	    !dev_priv->VBIOS.pub.fp_no_ddc) {
-		nv_connector->edid =
+		struct edid *edid =
 			(struct edid *)nouveau_bios_embedded_edid(dev);
+		if (edid) {
+			nv_connector->edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
+			*(nv_connector->edid) = *edid;
+		}
 	}
 
 	if (!nv_connector->edid)
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 7afbe8b40d51..50d9e67745af 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -126,47 +126,52 @@ OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
 	chan->dma.cur += nr_dwords;
 }
 
-static inline bool
-READ_GET(struct nouveau_channel *chan, uint32_t *get)
+/* Fetch and adjust GPU GET pointer
+ *
+ * Returns:
+ *   value >= 0, the adjusted GET pointer
+ *   -EINVAL if GET pointer currently outside main push buffer
+ *   -EBUSY if timeout exceeded
+ */
+static inline int
+READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout)
 {
 	uint32_t val;
 
 	val = nvchan_rd32(chan, chan->user_get);
-	if (val < chan->pushbuf_base ||
-	    val > chan->pushbuf_base + (chan->dma.max << 2)) {
-		/* meaningless to dma_wait() except to know whether the
-		 * GPU has stalled or not
-		 */
-		*get = val;
-		return false;
+
+	/* reset counter as long as GET is still advancing, this is
+	 * to avoid misdetecting a GPU lockup if the GPU happens to
+	 * just be processing an operation that takes a long time
+	 */
+	if (val != *prev_get) {
+		*prev_get = val;
+		*timeout = 0;
+	}
+
+	if ((++*timeout & 0xff) == 0) {
+		DRM_UDELAY(1);
+		if (*timeout > 100000)
+			return -EBUSY;
 	}
 
-	*get = (val - chan->pushbuf_base) >> 2;
-	return true;
+	if (val < chan->pushbuf_base ||
+	    val > chan->pushbuf_base + (chan->dma.max << 2))
+		return -EINVAL;
+
+	return (val - chan->pushbuf_base) >> 2;
 }
 
 int
 nouveau_dma_wait(struct nouveau_channel *chan, int size)
 {
-	uint32_t get, prev_get = 0, cnt = 0;
-	bool get_valid;
+	uint32_t prev_get = 0, cnt = 0;
+	int get;
 
 	while (chan->dma.free < size) {
-		/* reset counter as long as GET is still advancing, this is
-		 * to avoid misdetecting a GPU lockup if the GPU happens to
-		 * just be processing an operation that takes a long time
-		 */
-		get_valid = READ_GET(chan, &get);
-		if (get != prev_get) {
-			prev_get = get;
-			cnt = 0;
-		}
-
-		if ((++cnt & 0xff) == 0) {
-			DRM_UDELAY(1);
-			if (cnt > 100000)
-				return -EBUSY;
-		}
+		get = READ_GET(chan, &prev_get, &cnt);
+		if (unlikely(get == -EBUSY))
+			return -EBUSY;
 
 		/* loop until we have a usable GET pointer. the value
 		 * we read from the GPU may be outside the main ring if
@@ -177,7 +182,7 @@ nouveau_dma_wait(struct nouveau_channel *chan, int size)
 		 * from the SKIPS area, so the code below doesn't have to deal
 		 * with some fun corner cases.
 		 */
-		if (!get_valid || get < NOUVEAU_DMA_SKIPS)
+		if (unlikely(get == -EINVAL) || get < NOUVEAU_DMA_SKIPS)
 			continue;
 
 		if (get <= chan->dma.cur) {
@@ -203,6 +208,19 @@ nouveau_dma_wait(struct nouveau_channel *chan, int size)
 			 * after processing the currently pending commands.
 			 */
 			OUT_RING(chan, chan->pushbuf_base | 0x20000000);
+
+			/* wait for GET to depart from the skips area.
+			 * prevents writing GET==PUT and causing a race
+			 * condition that causes us to think the GPU is
+			 * idle when it's not.
+			 */
+			do {
+				get = READ_GET(chan, &prev_get, &cnt);
+				if (unlikely(get == -EBUSY))
+					return -EBUSY;
+				if (unlikely(get == -EINVAL))
+					continue;
+			} while (get <= NOUVEAU_DMA_SKIPS);
 			WRITE_PUT(NOUVEAU_DMA_SKIPS);
 
 			/* we're now submitting commands at the start of
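
The reworked READ_GET() folds the stall counter into the fetch itself and reports its two failure modes as negative errno values, so nouveau_dma_wait() only has to test for -EBUSY and -EINVAL. A self-contained sketch of that contract with the hardware read faked and the delay omitted (constants and names are illustrative, not the driver's):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define PUSHBUF_BASE 0x1000u
#define PUSHBUF_MAX  0x2000u

static uint32_t fake_hw_get(void)
{
	static uint32_t val = PUSHBUF_BASE;
	return val += 4;                    /* pretend GET keeps advancing */
}

static int read_get(uint32_t *prev_get, uint32_t *timeout)
{
	uint32_t val = fake_hw_get();

	if (val != *prev_get) {             /* still advancing: not hung */
		*prev_get = val;
		*timeout = 0;
	}

	if ((++*timeout & 0xff) == 0 && *timeout > 100000)
		return -EBUSY;              /* GET stuck for too long */

	if (val < PUSHBUF_BASE || val > PUSHBUF_BASE + PUSHBUF_MAX)
		return -EINVAL;             /* outside the main ring */

	return (val - PUSHBUF_BASE) >> 2;   /* ring offset in dwords */
}

int main(void)
{
	uint32_t prev = 0, cnt = 0;
	int get = read_get(&prev, &cnt);

	printf("get=%d\n", get);
	return 0;
}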
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 9e2926c48579..f954ad93e81f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -490,7 +490,8 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
 	if (!nv_wait(NV50_AUXCH_CTRL(index), 0x00010000, 0x00000000)) {
 		NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n",
 			 nv_rd32(dev, NV50_AUXCH_CTRL(index)));
-		return -EBUSY;
+		ret = -EBUSY;
+		goto out;
 	}
 
 	udelay(400);
@@ -502,6 +503,11 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
 	}
 
 	if (cmd & 1) {
+		if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
+			ret = -EREMOTEIO;
+			goto out;
+		}
+
 		for (i = 0; i < 4; i++) {
 			data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i));
 			NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 06eb993e0883..da3b93b84502 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -56,7 +56,7 @@ int nouveau_vram_pushbuf;
 module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
 
 MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM");
-int nouveau_vram_notify;
+int nouveau_vram_notify = 1;
 module_param_named(vram_notify, nouveau_vram_notify, int, 0400);
 
 MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)");
@@ -71,6 +71,18 @@ MODULE_PARM_DESC(uscript_tmds, "TMDS output script table ID (>=GeForce 8)");
 int nouveau_uscript_tmds = -1;
 module_param_named(uscript_tmds, nouveau_uscript_tmds, int, 0400);
 
+MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
+int nouveau_ignorelid = 0;
+module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
+
+MODULE_PARM_DESC(noaccel, "Disable all acceleration");
+int nouveau_noaccel = 0;
+module_param_named(noaccel, nouveau_noaccel, int, 0400);
+
+MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
+int nouveau_nofbaccel = 0;
+module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
+
 MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
 		 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
 		 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 026419fe8791..5445cefdd03e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -509,6 +509,8 @@ struct drm_nouveau_private {
 	void __iomem *ramin;
 	uint32_t ramin_size;
 
+	struct nouveau_bo *vga_ram;
+
 	struct workqueue_struct *wq;
 	struct work_struct irq_work;
 
@@ -675,6 +677,9 @@ extern char *nouveau_tv_norm;
 extern int nouveau_reg_debug;
 extern char *nouveau_vbios;
 extern int nouveau_ctxfw;
+extern int nouveau_ignorelid;
+extern int nouveau_nofbaccel;
+extern int nouveau_noaccel;
 
 /* nouveau_state.c */
 extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 0b05c869e0e7..ea879a2efef3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -107,6 +107,34 @@ static struct fb_ops nouveau_fbcon_ops = {
 	.fb_setcmap = drm_fb_helper_setcmap,
 };
 
+static struct fb_ops nv04_fbcon_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = drm_fb_helper_check_var,
+	.fb_set_par = drm_fb_helper_set_par,
+	.fb_setcolreg = drm_fb_helper_setcolreg,
+	.fb_fillrect = nv04_fbcon_fillrect,
+	.fb_copyarea = nv04_fbcon_copyarea,
+	.fb_imageblit = nv04_fbcon_imageblit,
+	.fb_sync = nouveau_fbcon_sync,
+	.fb_pan_display = drm_fb_helper_pan_display,
+	.fb_blank = drm_fb_helper_blank,
+	.fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static struct fb_ops nv50_fbcon_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = drm_fb_helper_check_var,
+	.fb_set_par = drm_fb_helper_set_par,
+	.fb_setcolreg = drm_fb_helper_setcolreg,
+	.fb_fillrect = nv50_fbcon_fillrect,
+	.fb_copyarea = nv50_fbcon_copyarea,
+	.fb_imageblit = nv50_fbcon_imageblit,
+	.fb_sync = nouveau_fbcon_sync,
+	.fb_pan_display = drm_fb_helper_pan_display,
+	.fb_blank = drm_fb_helper_blank,
+	.fb_setcmap = drm_fb_helper_setcmap,
+};
+
 static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
 				    u16 blue, int regno)
 {
@@ -267,8 +295,12 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
 	dev_priv->fbdev_info = info;
 
 	strcpy(info->fix.id, "nouveaufb");
-	info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
-		      FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT;
+	if (nouveau_nofbaccel)
+		info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED;
+	else
+		info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
+			      FBINFO_HWACCEL_FILLRECT |
+			      FBINFO_HWACCEL_IMAGEBLIT;
 	info->fbops = &nouveau_fbcon_ops;
 	info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset -
 			       dev_priv->vm_vram_base;
@@ -316,13 +348,15 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
 	par->nouveau_fb = nouveau_fb;
 	par->dev = dev;
 
-	if (dev_priv->channel) {
+	if (dev_priv->channel && !nouveau_nofbaccel) {
 		switch (dev_priv->card_type) {
 		case NV_50:
 			nv50_fbcon_accel_init(info);
+			info->fbops = &nv50_fbcon_ops;
 			break;
 		default:
 			nv04_fbcon_accel_init(info);
+			info->fbops = &nv04_fbcon_ops;
 			break;
 		};
 	}
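
Rather than one fb_ops shared by every chip, the fbcon code now installs a per-generation vtable once acceleration is initialised, or leaves the software helpers in place when nofbaccel is set. The same shape in miniature, with invented ops structs standing in for fb_ops:

#include <stdio.h>

struct ops { void (*fillrect)(const char *); };

static void sw_fill(const char *w)   { printf("cfb fill %s\n", w); }
static void nv04_fill(const char *w) { printf("nv04 fill %s\n", w); }
static void nv50_fill(const char *w) { printf("nv50 fill %s\n", w); }

static const struct ops sw_ops   = { sw_fill };
static const struct ops nv04_ops = { nv04_fill };
static const struct ops nv50_ops = { nv50_fill };

static const struct ops *pick_ops(int card_type, int nofbaccel)
{
	if (nofbaccel)
		return &sw_ops;               /* HWACCEL_DISABLED path */
	return card_type >= 50 ? &nv50_ops : &nv04_ops;
}

int main(void)
{
	pick_ops(50, 0)->fillrect("console");
	pick_ops(50, 1)->fillrect("console");
	return 0;
}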
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index 462e0b87b4bd..f9c34e1a8c11 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -40,7 +40,13 @@ int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb);
 void nouveau_fbcon_restore(void);
 void nouveau_fbcon_zfill(struct drm_device *dev);
 
+void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
 int nv04_fbcon_accel_init(struct fb_info *info);
+void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
 int nv50_fbcon_accel_init(struct fb_info *info);
 
 void nouveau_fbcon_gpu_lockup(struct fb_info *info);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 2009db2426c3..70cc30803e3b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -321,6 +321,7 @@ retry:
 		else {
 			NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
 				 b->valid_domains);
+			list_add_tail(&nvbo->entry, &op->both_list);
 			validate_fini(op, NULL);
 			return -EINVAL;
 		}
@@ -466,13 +467,14 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
 static int
 nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo,
 				struct drm_nouveau_gem_pushbuf_bo *bo,
-				int nr_relocs, uint64_t ptr_relocs,
-				int nr_dwords, int first_dword,
+				unsigned nr_relocs, uint64_t ptr_relocs,
+				unsigned nr_dwords, unsigned first_dword,
 				uint32_t *pushbuf, bool is_iomem)
 {
 	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
 	struct drm_device *dev = chan->dev;
-	int ret = 0, i;
+	int ret = 0;
+	unsigned i;
 
 	reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc));
 	if (IS_ERR(reloc))
@@ -667,6 +669,18 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
 	}
 	pbbo = nouveau_gem_object(gem);
 
+	if ((req->offset & 3) || req->nr_dwords < 2 ||
+	    (unsigned long)req->offset > (unsigned long)pbbo->bo.mem.size ||
+	    (unsigned long)req->nr_dwords >
+	     ((unsigned long)(pbbo->bo.mem.size - req->offset ) >> 2)) {
+		NV_ERROR(dev, "pb call misaligned or out of bounds: "
+			      "%d + %d * 4 > %ld\n",
+			 req->offset, req->nr_dwords, pbbo->bo.mem.size);
+		ret = -EINVAL;
+		drm_gem_object_unreference(gem);
+		goto out;
+	}
+
 	ret = ttm_bo_reserve(&pbbo->bo, false, false, true,
 			     chan->fence.sequence);
 	if (ret) {
@@ -911,7 +925,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 	}
 
 	if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
+		spin_lock(&nvbo->bo.lock);
 		ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
+		spin_unlock(&nvbo->bo.lock);
 	} else {
 		ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
 		if (ret == 0)
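
The new pushbuf-call check rejects a misaligned offset, a call too short to hold the jump, and any offset/nr_dwords pair that would run past the buffer, ordering the comparisons so the subtraction cannot wrap. That check extracted into a standalone predicate (pb_call_ok() is a local name):

#include <stdint.h>
#include <stdio.h>

static int pb_call_ok(uint64_t offset, uint64_t nr_dwords, uint64_t mem_size)
{
	if (offset & 3)                       /* dword alignment */
		return 0;
	if (nr_dwords < 2)                    /* room for the jump */
		return 0;
	if (offset > mem_size)                /* keeps the subtraction safe */
		return 0;
	if (nr_dwords > (mem_size - offset) >> 2)
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", pb_call_ok(0x100, 16, 0x1000));         /* fits: 1 */
	printf("%d\n", pb_call_ok(0xff8, 16, 0x1000));         /* overruns: 0 */
	printf("%d\n", pb_call_ok(UINT64_MAX - 3, 4, 0x1000)); /* 0, no wrap */
	return 0;
}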
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c
index 419f4c2b3b89..c7ebec696747 100644
--- a/drivers/gpu/drm/nouveau/nouveau_grctx.c
+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.c
@@ -97,8 +97,8 @@ nouveau_grctx_prog_load(struct drm_device *dev)
 	}
 
 	pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
-	if (!pgraph->ctxprog) {
-		NV_ERROR(dev, "OOM copying ctxprog\n");
+	if (!pgraph->ctxvals) {
+		NV_ERROR(dev, "OOM copying ctxvals\n");
 		release_firmware(fw);
 		nouveau_grctx_fini(dev);
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 919a619ca7fa..447f9f69d6b1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -211,6 +211,20 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
 				get + 4);
 		}
 
+		if (status & NV_PFIFO_INTR_SEMAPHORE) {
+			uint32_t sem;
+
+			status &= ~NV_PFIFO_INTR_SEMAPHORE;
+			nv_wr32(dev, NV03_PFIFO_INTR_0,
+				NV_PFIFO_INTR_SEMAPHORE);
+
+			sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
+			nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
+
+			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
+			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+		}
+
 		if (status) {
 			NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
 				status, chid);
@@ -483,6 +497,13 @@ nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
 	if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
 		if (nouveau_pgraph_intr_swmthd(dev, &trap))
 			unhandled = 1;
+	} else if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
+		uint32_t v = nv_rd32(dev, 0x402000);
+		nv_wr32(dev, 0x402000, v);
+
+		/* dump the error anyway for now: it's useful for
+		   Gallium development */
+		unhandled = 1;
 	} else {
 		unhandled = 1;
 	}
@@ -559,86 +580,99 @@ nouveau_pgraph_irq_handler(struct drm_device *dev)
 static void
 nv50_pgraph_irq_handler(struct drm_device *dev)
 {
-	uint32_t status, nsource;
+	uint32_t status;
 
-	status = nv_rd32(dev, NV03_PGRAPH_INTR);
-	nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+		uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
 
-	if (status & 0x00000001) {
-		nouveau_pgraph_intr_notify(dev, nsource);
-		status &= ~0x00000001;
-		nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
-	}
+		if (status & 0x00000001) {
+			nouveau_pgraph_intr_notify(dev, nsource);
+			status &= ~0x00000001;
+			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
+		}
 
-	if (status & 0x00000010) {
-		nouveau_pgraph_intr_error(dev, nsource |
-					  NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
+		if (status & 0x00000010) {
+			nouveau_pgraph_intr_error(dev, nsource |
+					NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
 
-		status &= ~0x00000010;
-		nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
-	}
+			status &= ~0x00000010;
+			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
+		}
 
-	if (status & 0x00001000) {
-		nv_wr32(dev, 0x400500, 0x00000000);
-		nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
-		nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
-			NV40_PGRAPH_INTR_EN) & ~NV_PGRAPH_INTR_CONTEXT_SWITCH);
-		nv_wr32(dev, 0x400500, 0x00010001);
+		if (status & 0x00001000) {
+			nv_wr32(dev, 0x400500, 0x00000000);
+			nv_wr32(dev, NV03_PGRAPH_INTR,
+				NV_PGRAPH_INTR_CONTEXT_SWITCH);
+			nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
+				NV40_PGRAPH_INTR_EN) &
+				~NV_PGRAPH_INTR_CONTEXT_SWITCH);
+			nv_wr32(dev, 0x400500, 0x00010001);
 
-		nv50_graph_context_switch(dev);
+			nv50_graph_context_switch(dev);
 
-		status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
-	}
+			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+		}
 
-	if (status & 0x00100000) {
-		nouveau_pgraph_intr_error(dev, nsource |
-					  NV03_PGRAPH_NSOURCE_DATA_ERROR);
+		if (status & 0x00100000) {
+			nouveau_pgraph_intr_error(dev, nsource |
+					NV03_PGRAPH_NSOURCE_DATA_ERROR);
 
-		status &= ~0x00100000;
-		nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
-	}
+			status &= ~0x00100000;
+			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
+		}
 
-	if (status & 0x00200000) {
-		int r;
+		if (status & 0x00200000) {
+			int r;
 
-		nouveau_pgraph_intr_error(dev, nsource |
-					  NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
+			nouveau_pgraph_intr_error(dev, nsource |
+					NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
 
-		NV_ERROR(dev, "magic set 1:\n");
-		for (r = 0x408900; r <= 0x408910; r += 4)
-			NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
-		nv_wr32(dev, 0x408900, nv_rd32(dev, 0x408904) | 0xc0000000);
-		for (r = 0x408e08; r <= 0x408e24; r += 4)
-			NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
-		nv_wr32(dev, 0x408e08, nv_rd32(dev, 0x408e08) | 0xc0000000);
-
-		NV_ERROR(dev, "magic set 2:\n");
-		for (r = 0x409900; r <= 0x409910; r += 4)
-			NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
-		nv_wr32(dev, 0x409900, nv_rd32(dev, 0x409904) | 0xc0000000);
-		for (r = 0x409e08; r <= 0x409e24; r += 4)
-			NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
-		nv_wr32(dev, 0x409e08, nv_rd32(dev, 0x409e08) | 0xc0000000);
-
-		status &= ~0x00200000;
-		nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
-		nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
-	}
+			NV_ERROR(dev, "magic set 1:\n");
+			for (r = 0x408900; r <= 0x408910; r += 4)
+				NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+					nv_rd32(dev, r));
+			nv_wr32(dev, 0x408900,
+				nv_rd32(dev, 0x408904) | 0xc0000000);
+			for (r = 0x408e08; r <= 0x408e24; r += 4)
+				NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+					nv_rd32(dev, r));
+			nv_wr32(dev, 0x408e08,
+				nv_rd32(dev, 0x408e08) | 0xc0000000);
+
+			NV_ERROR(dev, "magic set 2:\n");
+			for (r = 0x409900; r <= 0x409910; r += 4)
+				NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+					nv_rd32(dev, r));
+			nv_wr32(dev, 0x409900,
+				nv_rd32(dev, 0x409904) | 0xc0000000);
+			for (r = 0x409e08; r <= 0x409e24; r += 4)
+				NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+					nv_rd32(dev, r));
+			nv_wr32(dev, 0x409e08,
+				nv_rd32(dev, 0x409e08) | 0xc0000000);
+
+			status &= ~0x00200000;
+			nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
+			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
+		}
 
-	if (status) {
-		NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
+		if (status) {
+			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
630 nv_wr32(dev, NV03_PGRAPH_INTR, status); 661 status);
631 } 662 nv_wr32(dev, NV03_PGRAPH_INTR, status);
663 }
632 664
633 { 665 {
634 const int isb = (1 << 16) | (1 << 0); 666 const int isb = (1 << 16) | (1 << 0);
635 667
636 if ((nv_rd32(dev, 0x400500) & isb) != isb) 668 if ((nv_rd32(dev, 0x400500) & isb) != isb)
637 nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | isb); 669 nv_wr32(dev, 0x400500,
638 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); 670 nv_rd32(dev, 0x400500) | isb);
671 }
639 } 672 }
640 673
641 nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); 674 nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
675 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
642} 676}
643 677
644static void 678static void
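The rewrite above turns nv50_pgraph_irq_handler from a one-shot pass into a drain loop: NV03_PGRAPH_INTR is re-read on every iteration, so interrupt bits raised while earlier ones were being serviced are no longer lost, and the 0x400824 write is now issued once on the way out. The generic pattern, as a minimal sketch (handle_bits() is a hypothetical stand-in for the per-bit servicing shown above):

	uint32_t status;

	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		handle_bits(dev, &status);  /* service and ack the known bits */
		if (status)                 /* whatever remains is unexpected */
			nv_wr32(dev, NV03_PGRAPH_INTR, status);
	}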
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index fb9bdd6edf1f..8f3a12f614ed 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -383,9 +383,8 @@ void nouveau_mem_close(struct drm_device *dev)
383{ 383{
384 struct drm_nouveau_private *dev_priv = dev->dev_private; 384 struct drm_nouveau_private *dev_priv = dev->dev_private;
385 385
386 if (dev_priv->ttm.bdev.man[TTM_PL_PRIV0].has_type) 386 nouveau_bo_unpin(dev_priv->vga_ram);
387 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_PRIV0); 387 nouveau_bo_ref(NULL, &dev_priv->vga_ram);
388 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
389 388
390 ttm_bo_device_release(&dev_priv->ttm.bdev); 389 ttm_bo_device_release(&dev_priv->ttm.bdev);
391 390
@@ -622,6 +621,15 @@ nouveau_mem_init(struct drm_device *dev)
622 return ret; 621 return ret;
623 } 622 }
624 623
624 ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM,
625 0, 0, true, true, &dev_priv->vga_ram);
626 if (ret == 0)
627 ret = nouveau_bo_pin(dev_priv->vga_ram, TTM_PL_FLAG_VRAM);
628 if (ret) {
629 NV_WARN(dev, "failed to reserve VGA memory\n");
630 nouveau_bo_ref(NULL, &dev_priv->vga_ram);
631 }
632
625 /* GART */ 633 /* GART */
626#if !defined(__powerpc__) && !defined(__ia64__) 634#if !defined(__powerpc__) && !defined(__ia64__)
627 if (drm_device_is_agp(dev) && dev->agp) { 635 if (drm_device_is_agp(dev) && dev->agp) {
@@ -653,6 +661,7 @@ nouveau_mem_init(struct drm_device *dev)
653 dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1), 661 dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
654 drm_get_resource_len(dev, 1), 662 drm_get_resource_len(dev, 1),
655 DRM_MTRR_WC); 663 DRM_MTRR_WC);
664
656 return 0; 665 return 0;
657} 666}
658 667
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 6c66a34b6345..d99dc087f9b1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -34,15 +34,20 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
34{ 34{
35 struct drm_device *dev = chan->dev; 35 struct drm_device *dev = chan->dev;
36 struct nouveau_bo *ntfy = NULL; 36 struct nouveau_bo *ntfy = NULL;
37 uint32_t flags;
37 int ret; 38 int ret;
38 39
39 ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, nouveau_vram_notify ? 40 if (nouveau_vram_notify)
40 TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT, 41 flags = TTM_PL_FLAG_VRAM;
42 else
43 flags = TTM_PL_FLAG_TT;
44
45 ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags,
41 0, 0x0000, false, true, &ntfy); 46 0, 0x0000, false, true, &ntfy);
42 if (ret) 47 if (ret)
43 return ret; 48 return ret;
44 49
45 ret = nouveau_bo_pin(ntfy, TTM_PL_FLAG_VRAM); 50 ret = nouveau_bo_pin(ntfy, flags);
46 if (ret) 51 if (ret)
47 goto out_err; 52 goto out_err;
48 53
@@ -128,6 +133,8 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
128 target = NV_DMA_TARGET_PCI; 133 target = NV_DMA_TARGET_PCI;
129 } else { 134 } else {
130 target = NV_DMA_TARGET_AGP; 135 target = NV_DMA_TARGET_AGP;
136 if (dev_priv->card_type >= NV_50)
137 offset += dev_priv->vm_gart_base;
131 } 138 }
132 } else { 139 } else {
133 NV_ERROR(dev, "Bad DMA target, mem_type %d!\n", 140 NV_ERROR(dev, "Bad DMA target, mem_type %d!\n",
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 6c2cf81716df..e7c100ba63a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -885,11 +885,12 @@ int
885nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, 885nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
886 struct nouveau_gpuobj **gpuobj_ret) 886 struct nouveau_gpuobj **gpuobj_ret)
887{ 887{
888 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 888 struct drm_nouveau_private *dev_priv;
889 struct nouveau_gpuobj *gpuobj; 889 struct nouveau_gpuobj *gpuobj;
890 890
891 if (!chan || !gpuobj_ret || *gpuobj_ret != NULL) 891 if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
892 return -EINVAL; 892 return -EINVAL;
893 dev_priv = chan->dev->dev_private;
893 894
894 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); 895 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
895 if (!gpuobj) 896 if (!gpuobj)
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index 251f1b3b38b9..aa9b310e41be 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -99,6 +99,7 @@
99 * the card will hang early on in the X init process. 99 * the card will hang early on in the X init process.
100 */ 100 */
101# define NV_PMC_ENABLE_UNK13 (1<<13) 101# define NV_PMC_ENABLE_UNK13 (1<<13)
102#define NV40_PMC_GRAPH_UNITS 0x00001540
102#define NV40_PMC_BACKLIGHT 0x000015f0 103#define NV40_PMC_BACKLIGHT 0x000015f0
103# define NV40_PMC_BACKLIGHT_MASK 0x001f0000 104# define NV40_PMC_BACKLIGHT_MASK 0x001f0000
104#define NV40_PMC_1700 0x00001700 105#define NV40_PMC_1700 0x00001700
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 4c7f1e403e80..ed1590577b6c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -54,11 +54,12 @@ static void
54nouveau_sgdma_clear(struct ttm_backend *be) 54nouveau_sgdma_clear(struct ttm_backend *be)
55{ 55{
56 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; 56 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
57 struct drm_device *dev = nvbe->dev; 57 struct drm_device *dev;
58
59 NV_DEBUG(nvbe->dev, "\n");
60 58
61 if (nvbe && nvbe->pages) { 59 if (nvbe && nvbe->pages) {
60 dev = nvbe->dev;
61 NV_DEBUG(dev, "\n");
62
62 if (nvbe->bound) 63 if (nvbe->bound)
63 be->func->unbind(be); 64 be->func->unbind(be);
64 65
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 09b9a46dfc0e..a4851af5b05e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -310,6 +310,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
310static unsigned int 310static unsigned int
311nouveau_vga_set_decode(void *priv, bool state) 311nouveau_vga_set_decode(void *priv, bool state)
312{ 312{
313 struct drm_device *dev = priv;
314 struct drm_nouveau_private *dev_priv = dev->dev_private;
315
316 if (dev_priv->chipset >= 0x40)
317 nv_wr32(dev, 0x88054, state);
318 else
319 nv_wr32(dev, 0x1854, state);
320
313 if (state) 321 if (state)
314 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | 322 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
315 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 323 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
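The writes added above (0x88054 on NV40 and newer, 0x1854 on earlier chips) switch legacy VGA decoding on or off in hardware before the callback reports which legacy resources the device still decodes. A hedged sketch of how such a callback is wired into the VGA arbiter, assuming the vga_client_register() API of this kernel generation:

	/* typically called once during card init; dev doubles as the
	 * cookie handed back to nouveau_vga_set_decode() as priv */
	vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);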
@@ -427,15 +435,19 @@ nouveau_card_init(struct drm_device *dev)
427 if (ret) 435 if (ret)
428 goto out_timer; 436 goto out_timer;
429 437
430 /* PGRAPH */ 438 if (nouveau_noaccel)
431 ret = engine->graph.init(dev); 439 engine->graph.accel_blocked = true;
432 if (ret) 440 else {
433 goto out_fb; 441 /* PGRAPH */
442 ret = engine->graph.init(dev);
443 if (ret)
444 goto out_fb;
434 445
435 /* PFIFO */ 446 /* PFIFO */
436 ret = engine->fifo.init(dev); 447 ret = engine->fifo.init(dev);
437 if (ret) 448 if (ret)
438 goto out_graph; 449 goto out_graph;
450 }
439 451
 440	 /* this calls irq_preinstall, registers the irq handler and	 452	 /* this calls irq_preinstall, registers the irq handler and
 441	 * calls irq_postinstall	 453	 * calls irq_postinstall
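nouveau_noaccel gates both PGRAPH and PFIFO bring-up, leaving the card usable for modesetting only. A sketch of the module parameter that would feed it, assuming the usual declaration style in nouveau_drv.c (that file's hunk is outside this excerpt):

	/* hypothetical declaration; the real one lives in nouveau_drv.c */
	MODULE_PARM_DESC(noaccel, "disable acceleration");
	int nouveau_noaccel = 0;
	module_param_named(noaccel, nouveau_noaccel, int, 0400);

With noaccel set (e.g. nouveau.noaccel=1), engine->graph.accel_blocked stays true and the fbcon changes further down fall back to software drawing.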
@@ -479,9 +491,11 @@ nouveau_card_init(struct drm_device *dev)
479out_irq: 491out_irq:
480 drm_irq_uninstall(dev); 492 drm_irq_uninstall(dev);
481out_fifo: 493out_fifo:
482 engine->fifo.takedown(dev); 494 if (!nouveau_noaccel)
495 engine->fifo.takedown(dev);
483out_graph: 496out_graph:
484 engine->graph.takedown(dev); 497 if (!nouveau_noaccel)
498 engine->graph.takedown(dev);
485out_fb: 499out_fb:
486 engine->fb.takedown(dev); 500 engine->fb.takedown(dev);
487out_timer: 501out_timer:
@@ -518,13 +532,16 @@ static void nouveau_card_takedown(struct drm_device *dev)
518 dev_priv->channel = NULL; 532 dev_priv->channel = NULL;
519 } 533 }
520 534
521 engine->fifo.takedown(dev); 535 if (!nouveau_noaccel) {
522 engine->graph.takedown(dev); 536 engine->fifo.takedown(dev);
537 engine->graph.takedown(dev);
538 }
523 engine->fb.takedown(dev); 539 engine->fb.takedown(dev);
524 engine->timer.takedown(dev); 540 engine->timer.takedown(dev);
525 engine->mc.takedown(dev); 541 engine->mc.takedown(dev);
526 542
527 mutex_lock(&dev->struct_mutex); 543 mutex_lock(&dev->struct_mutex);
544 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
528 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT); 545 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
529 mutex_unlock(&dev->struct_mutex); 546 mutex_unlock(&dev->struct_mutex);
530 nouveau_sgdma_takedown(dev); 547 nouveau_sgdma_takedown(dev);
@@ -816,6 +833,15 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
816 case NOUVEAU_GETPARAM_VM_VRAM_BASE: 833 case NOUVEAU_GETPARAM_VM_VRAM_BASE:
817 getparam->value = dev_priv->vm_vram_base; 834 getparam->value = dev_priv->vm_vram_base;
818 break; 835 break;
836 case NOUVEAU_GETPARAM_GRAPH_UNITS:
837 /* NV40 and NV50 versions are quite different, but register
838 * address is the same. User is supposed to know the card
839 * family anyway... */
840 if (dev_priv->chipset >= 0x40) {
841 getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS);
842 break;
843 }
844 /* FALLTHRU */
819 default: 845 default:
820 NV_ERROR(dev, "unknown parameter %lld\n", getparam->param); 846 NV_ERROR(dev, "unknown parameter %lld\n", getparam->param);
821 return -EINVAL; 847 return -EINVAL;
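NOUVEAU_GETPARAM_GRAPH_UNITS hands userspace the raw NV40_PMC_GRAPH_UNITS register so it can discover which graph units are populated. A minimal userspace sketch, assuming the struct drm_nouveau_getparam layout from nouveau_drm.h and libdrm's drmCommandWriteRead():

	#include <stdint.h>
	#include <xf86drm.h>
	#include "nouveau_drm.h"

	static int query_graph_units(int fd, uint64_t *units)
	{
		struct drm_nouveau_getparam gp = {
			.param = NOUVEAU_GETPARAM_GRAPH_UNITS,
		};
		int ret = drmCommandWriteRead(fd, DRM_NOUVEAU_GETPARAM,
					      &gp, sizeof(gp));
		if (ret)
			return ret;  /* pre-NV40 chipsets fall through to -EINVAL */
		*units = gp.value;   /* raw register contents */
		return 0;
	}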
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index d910873c1368..fd01caabd5c3 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -27,7 +27,7 @@
27#include "nouveau_dma.h" 27#include "nouveau_dma.h"
28#include "nouveau_fbcon.h" 28#include "nouveau_fbcon.h"
29 29
30static void 30void
31nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) 31nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
32{ 32{
33 struct nouveau_fbcon_par *par = info->par; 33 struct nouveau_fbcon_par *par = info->par;
@@ -54,7 +54,7 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
54 FIRE_RING(chan); 54 FIRE_RING(chan);
55} 55}
56 56
57static void 57void
58nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) 58nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
59{ 59{
60 struct nouveau_fbcon_par *par = info->par; 60 struct nouveau_fbcon_par *par = info->par;
@@ -88,7 +88,7 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
88 FIRE_RING(chan); 88 FIRE_RING(chan);
89} 89}
90 90
91static void 91void
92nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) 92nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
93{ 93{
94 struct nouveau_fbcon_par *par = info->par; 94 struct nouveau_fbcon_par *par = info->par;
@@ -307,9 +307,6 @@ nv04_fbcon_accel_init(struct fb_info *info)
307 307
308 FIRE_RING(chan); 308 FIRE_RING(chan);
309 309
310 info->fbops->fb_fillrect = nv04_fbcon_fillrect;
311 info->fbops->fb_copyarea = nv04_fbcon_copyarea;
312 info->fbops->fb_imageblit = nv04_fbcon_imageblit;
313 return 0; 310 return 0;
314} 311}
315 312
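With the static qualifiers and the late fb_fillrect/fb_copyarea/fb_imageblit assignments removed, hook selection moves out of the accel path, so the fbcon setup code can install the cfb_* software fallbacks whenever acceleration is unavailable. A hypothetical sketch of that selection (the corresponding nouveau_fbcon.c hunk is outside this excerpt):

	if (dev_priv->engine.graph.accel_blocked) {
		info->fbops->fb_fillrect = cfb_fillrect;   /* software */
		info->fbops->fb_copyarea = cfb_copyarea;
		info->fbops->fb_imageblit = cfb_imageblit;
	} else if (dev_priv->card_type < NV_50) {
		info->fbops->fb_fillrect = nv04_fbcon_fillrect;
		info->fbops->fb_copyarea = nv04_fbcon_copyarea;
		info->fbops->fb_imageblit = nv04_fbcon_imageblit;
	} else {
		info->fbops->fb_fillrect = nv50_fbcon_fillrect;
		info->fbops->fb_copyarea = nv50_fbcon_copyarea;
		info->fbops->fb_imageblit = nv50_fbcon_imageblit;
	}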
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index a20c206625a2..a3b9563a6f60 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -30,7 +30,7 @@ nv04_instmem_determine_amount(struct drm_device *dev)
30 * of vram. For now, only reserve a small piece until we know 30 * of vram. For now, only reserve a small piece until we know
31 * more about what each chipset requires. 31 * more about what each chipset requires.
32 */ 32 */
33 switch (dev_priv->chipset & 0xf0) { 33 switch (dev_priv->chipset) {
34 case 0x40: 34 case 0x40:
35 case 0x47: 35 case 0x47:
36 case 0x49: 36 case 0x49:
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 118d3285fd8c..d1a651e3400c 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -298,14 +298,17 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
298static void 298static void
299nv50_crtc_destroy(struct drm_crtc *crtc) 299nv50_crtc_destroy(struct drm_crtc *crtc)
300{ 300{
301 struct drm_device *dev = crtc->dev; 301 struct drm_device *dev;
302 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 302 struct nouveau_crtc *nv_crtc;
303
304 NV_DEBUG_KMS(dev, "\n");
305 303
306 if (!crtc) 304 if (!crtc)
307 return; 305 return;
308 306
307 dev = crtc->dev;
308 nv_crtc = nouveau_crtc(crtc);
309
310 NV_DEBUG_KMS(dev, "\n");
311
309 drm_crtc_cleanup(&nv_crtc->base); 312 drm_crtc_cleanup(&nv_crtc->base);
310 313
311 nv50_cursor_fini(nv_crtc); 314 nv50_cursor_fini(nv_crtc);
@@ -432,6 +435,7 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
432 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 435 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
433 struct drm_device *dev = crtc->dev; 436 struct drm_device *dev = crtc->dev;
434 struct drm_encoder *encoder; 437 struct drm_encoder *encoder;
438 uint32_t dac = 0, sor = 0;
435 439
436 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); 440 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
437 441
@@ -439,9 +443,28 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
439 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 443 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
440 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 444 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
441 445
442 if (drm_helper_encoder_in_use(encoder)) 446 if (!drm_helper_encoder_in_use(encoder))
443 continue; 447 continue;
444 448
449 if (nv_encoder->dcb->type == OUTPUT_ANALOG ||
450 nv_encoder->dcb->type == OUTPUT_TV)
451 dac |= (1 << nv_encoder->or);
452 else
453 sor |= (1 << nv_encoder->or);
454 }
455
456 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
457 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
458
459 if (nv_encoder->dcb->type == OUTPUT_ANALOG ||
460 nv_encoder->dcb->type == OUTPUT_TV) {
461 if (dac & (1 << nv_encoder->or))
462 continue;
463 } else {
464 if (sor & (1 << nv_encoder->or))
465 continue;
466 }
467
445 nv_encoder->disconnect(nv_encoder); 468 nv_encoder->disconnect(nv_encoder);
446 } 469 }
447 470
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index e4f279ee61cf..0f57cdf7ccb2 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -3,7 +3,7 @@
3#include "nouveau_dma.h" 3#include "nouveau_dma.h"
4#include "nouveau_fbcon.h" 4#include "nouveau_fbcon.h"
5 5
6static void 6void
7nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) 7nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
8{ 8{
9 struct nouveau_fbcon_par *par = info->par; 9 struct nouveau_fbcon_par *par = info->par;
@@ -46,7 +46,7 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
46 FIRE_RING(chan); 46 FIRE_RING(chan);
47} 47}
48 48
49static void 49void
50nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) 50nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
51{ 51{
52 struct nouveau_fbcon_par *par = info->par; 52 struct nouveau_fbcon_par *par = info->par;
@@ -81,7 +81,7 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
81 FIRE_RING(chan); 81 FIRE_RING(chan);
82} 82}
83 83
84static void 84void
85nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) 85nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
86{ 86{
87 struct nouveau_fbcon_par *par = info->par; 87 struct nouveau_fbcon_par *par = info->par;
@@ -262,9 +262,6 @@ nv50_fbcon_accel_init(struct fb_info *info)
262 OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys + 262 OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys +
263 dev_priv->vm_vram_base); 263 dev_priv->vm_vram_base);
264 264
265 info->fbops->fb_fillrect = nv50_fbcon_fillrect;
266 info->fbops->fb_copyarea = nv50_fbcon_copyarea;
267 info->fbops->fb_imageblit = nv50_fbcon_imageblit;
268 return 0; 265 return 0;
269} 266}
270 267
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 39caf167587d..204a79ff10f4 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -272,7 +272,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
272 return ret; 272 return ret;
273 ramfc = chan->ramfc->gpuobj; 273 ramfc = chan->ramfc->gpuobj;
274 274
275 ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 256, 275 ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 1024,
276 0, &chan->cache); 276 0, &chan->cache);
277 if (ret) 277 if (ret)
278 return ret; 278 return ret;
@@ -317,17 +317,20 @@ void
317nv50_fifo_destroy_context(struct nouveau_channel *chan) 317nv50_fifo_destroy_context(struct nouveau_channel *chan)
318{ 318{
319 struct drm_device *dev = chan->dev; 319 struct drm_device *dev = chan->dev;
320 struct nouveau_gpuobj_ref *ramfc = chan->ramfc;
320 321
321 NV_DEBUG(dev, "ch%d\n", chan->id); 322 NV_DEBUG(dev, "ch%d\n", chan->id);
322 323
323 nouveau_gpuobj_ref_del(dev, &chan->ramfc); 324 /* This will ensure the channel is seen as disabled. */
324 nouveau_gpuobj_ref_del(dev, &chan->cache); 325 chan->ramfc = NULL;
325
326 nv50_fifo_channel_disable(dev, chan->id, false); 326 nv50_fifo_channel_disable(dev, chan->id, false);
327 327
328 /* Dummy channel, also used on ch 127 */ 328 /* Dummy channel, also used on ch 127 */
329 if (chan->id == 0) 329 if (chan->id == 0)
330 nv50_fifo_channel_disable(dev, 127, false); 330 nv50_fifo_channel_disable(dev, 127, false);
331
332 nouveau_gpuobj_ref_del(dev, &ramfc);
333 nouveau_gpuobj_ref_del(dev, &chan->cache);
331} 334}
332 335
333int 336int
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index ca79f32be44c..6d504801b514 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -84,7 +84,7 @@ nv50_graph_init_regs__nv(struct drm_device *dev)
84 nv_wr32(dev, 0x400804, 0xc0000000); 84 nv_wr32(dev, 0x400804, 0xc0000000);
85 nv_wr32(dev, 0x406800, 0xc0000000); 85 nv_wr32(dev, 0x406800, 0xc0000000);
86 nv_wr32(dev, 0x400c04, 0xc0000000); 86 nv_wr32(dev, 0x400c04, 0xc0000000);
87 nv_wr32(dev, 0x401804, 0xc0000000); 87 nv_wr32(dev, 0x401800, 0xc0000000);
88 nv_wr32(dev, 0x405018, 0xc0000000); 88 nv_wr32(dev, 0x405018, 0xc0000000);
89 nv_wr32(dev, 0x402000, 0xc0000000); 89 nv_wr32(dev, 0x402000, 0xc0000000);
90 90
@@ -165,6 +165,12 @@ nv50_graph_channel(struct drm_device *dev)
165 uint32_t inst; 165 uint32_t inst;
166 int i; 166 int i;
167 167
168 /* Be sure we're not in the middle of a context switch or bad things
169 * will happen, such as unloading the wrong pgraph context.
170 */
171 if (!nv_wait(0x400300, 0x00000001, 0x00000000))
172 NV_ERROR(dev, "Ctxprog is still running\n");
173
168 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR); 174 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
169 if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED)) 175 if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
170 return NULL; 176 return NULL;
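nv_wait() polls a register until the masked value matches, failing after a timeout; the new check uses it to make sure no ctxprog is mid-flight before CTXCTL_CUR is trusted. Roughly equivalent logic, assuming nv_wait(reg, mask, val) semantics from nouveau_drv.h:

	#include <linux/delay.h>

	/* sketch: poll until (reg & mask) == val, false on timeout */
	static bool poll_reg(struct drm_device *dev, uint32_t reg,
			     uint32_t mask, uint32_t val,
			     unsigned int timeout_us)
	{
		while (timeout_us--) {
			if ((nv_rd32(dev, reg) & mask) == val)
				return true;
			udelay(1);
		}
		return false;
	}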
@@ -275,19 +281,18 @@ nv50_graph_load_context(struct nouveau_channel *chan)
275int 281int
276nv50_graph_unload_context(struct drm_device *dev) 282nv50_graph_unload_context(struct drm_device *dev)
277{ 283{
278 uint32_t inst, fifo = nv_rd32(dev, 0x400500); 284 uint32_t inst;
279 285
280 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR); 286 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
281 if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED)) 287 if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
282 return 0; 288 return 0;
283 inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE; 289 inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;
284 290
285 nv_wr32(dev, 0x400500, fifo & ~1); 291 nouveau_wait_for_idle(dev);
286 nv_wr32(dev, 0x400784, inst); 292 nv_wr32(dev, 0x400784, inst);
287 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20); 293 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
288 nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01); 294 nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
289 nouveau_wait_for_idle(dev); 295 nouveau_wait_for_idle(dev);
290 nv_wr32(dev, 0x400500, fifo);
291 296
292 nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst); 297 nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
293 return 0; 298 return 0;
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index e395c16d30f5..c2fff543b06f 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -90,11 +90,25 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
90{ 90{
91 struct drm_device *dev = encoder->dev; 91 struct drm_device *dev = encoder->dev;
92 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 92 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
93 struct drm_encoder *enc;
93 uint32_t val; 94 uint32_t val;
94 int or = nv_encoder->or; 95 int or = nv_encoder->or;
95 96
96 NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode); 97 NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode);
97 98
99 nv_encoder->last_dpms = mode;
100 list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
101 struct nouveau_encoder *nvenc = nouveau_encoder(enc);
102
103 if (nvenc == nv_encoder ||
104 nvenc->disconnect != nv50_sor_disconnect ||
105 nvenc->dcb->or != nv_encoder->dcb->or)
106 continue;
107
108 if (nvenc->last_dpms == DRM_MODE_DPMS_ON)
109 return;
110 }
111
98 /* wait for it to be done */ 112 /* wait for it to be done */
99 if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or), 113 if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or),
100 NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) { 114 NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) {
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index 5982321be4d5..1c02d23f6fcc 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -1,10 +1,14 @@
1config DRM_RADEON_KMS 1config DRM_RADEON_KMS
2 bool "Enable modesetting on radeon by default" 2 bool "Enable modesetting on radeon by default - NEW DRIVER"
3 depends on DRM_RADEON 3 depends on DRM_RADEON
4 help 4 help
5 Choose this option if you want kernel modesetting enabled by default, 5 Choose this option if you want kernel modesetting enabled by default.
6 and you have a new enough userspace to support this. Running old 6
7 userspaces with this enabled will cause pain. 7 This is a completely new driver. It's only part of the existing drm
8 for compatibility reasons. It requires an entirely different graphics
9 stack above it and works very differently from the old drm stack.
 10	 i.e. don't enable this unless you know what you are doing; it may
11 cause issues or bugs compared to the previous userspace driver stack.
8 12
 9	 When kernel modesetting is enabled the IOCTLs of the radeon/drm	 13	 When kernel modesetting is enabled the IOCTLs of the radeon/drm
 10	 driver are considered invalid and an error message is printed	 14	 driver are considered invalid and an error message is printed
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 388140a7e651..2a3df5599ab4 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -24,6 +24,7 @@
24 24
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/sched.h> 26#include <linux/sched.h>
27#include <asm/unaligned.h>
27 28
28#define ATOM_DEBUG 29#define ATOM_DEBUG
29 30
@@ -212,7 +213,9 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
212 case ATOM_ARG_PS: 213 case ATOM_ARG_PS:
213 idx = U8(*ptr); 214 idx = U8(*ptr);
214 (*ptr)++; 215 (*ptr)++;
215 val = le32_to_cpu(ctx->ps[idx]); 216 /* get_unaligned_le32 avoids unaligned accesses from atombios
217 * tables, noticed on a DEC Alpha. */
218 val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
216 if (print) 219 if (print)
217 DEBUG("PS[0x%02X,0x%04X]", idx, val); 220 DEBUG("PS[0x%02X,0x%04X]", idx, val);
218 break; 221 break;
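ctx->ps points at caller-supplied parameter space that atombios tables do not guarantee to be 32-bit aligned, so the old le32_to_cpu(ctx->ps[idx]) load could fault on strict-alignment machines such as Alpha. The helper pattern, sketched against an arbitrary byte buffer:

	#include <linux/types.h>
	#include <asm/unaligned.h>

	static u32 read_le32(const u8 *buf, size_t off)
	{
		/* get_unaligned_le32() degrades to byte-wise loads on CPUs
		 * that cannot issue unaligned 32-bit reads */
		return get_unaligned_le32(buf + off);
	}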
@@ -246,6 +249,9 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
246 case ATOM_WS_ATTRIBUTES: 249 case ATOM_WS_ATTRIBUTES:
247 val = gctx->io_attr; 250 val = gctx->io_attr;
248 break; 251 break;
252 case ATOM_WS_REGPTR:
253 val = gctx->reg_block;
254 break;
249 default: 255 default:
250 val = ctx->ws[idx]; 256 val = ctx->ws[idx];
251 } 257 }
@@ -385,6 +391,32 @@ static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
385 return atom_get_src_int(ctx, attr, ptr, NULL, 1); 391 return atom_get_src_int(ctx, attr, ptr, NULL, 1);
386} 392}
387 393
394static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
395{
396 uint32_t val = 0xCDCDCDCD;
397
398 switch (align) {
399 case ATOM_SRC_DWORD:
400 val = U32(*ptr);
401 (*ptr) += 4;
402 break;
403 case ATOM_SRC_WORD0:
404 case ATOM_SRC_WORD8:
405 case ATOM_SRC_WORD16:
406 val = U16(*ptr);
407 (*ptr) += 2;
408 break;
409 case ATOM_SRC_BYTE0:
410 case ATOM_SRC_BYTE8:
411 case ATOM_SRC_BYTE16:
412 case ATOM_SRC_BYTE24:
413 val = U8(*ptr);
414 (*ptr)++;
415 break;
416 }
417 return val;
418}
419
388static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr, 420static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
389 int *ptr, uint32_t *saved, int print) 421 int *ptr, uint32_t *saved, int print)
390{ 422{
@@ -482,6 +514,9 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
482 case ATOM_WS_ATTRIBUTES: 514 case ATOM_WS_ATTRIBUTES:
483 gctx->io_attr = val; 515 gctx->io_attr = val;
484 break; 516 break;
517 case ATOM_WS_REGPTR:
518 gctx->reg_block = val;
519 break;
485 default: 520 default:
486 ctx->ws[idx] = val; 521 ctx->ws[idx] = val;
487 } 522 }
@@ -677,7 +712,7 @@ static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
677 SDEBUG(" dst: "); 712 SDEBUG(" dst: ");
678 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 713 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
679 SDEBUG(" src1: "); 714 SDEBUG(" src1: ");
680 src1 = atom_get_src(ctx, attr, ptr); 715 src1 = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
681 SDEBUG(" src2: "); 716 SDEBUG(" src2: ");
682 src2 = atom_get_src(ctx, attr, ptr); 717 src2 = atom_get_src(ctx, attr, ptr);
683 dst &= src1; 718 dst &= src1;
@@ -809,6 +844,38 @@ static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
809 SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block); 844 SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block);
810} 845}
811 846
847static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
848{
849 uint8_t attr = U8((*ptr)++), shift;
850 uint32_t saved, dst;
851 int dptr = *ptr;
852 attr &= 0x38;
853 attr |= atom_def_dst[attr >> 3] << 6;
854 SDEBUG(" dst: ");
855 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
856 shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
857 SDEBUG(" shift: %d\n", shift);
858 dst <<= shift;
859 SDEBUG(" dst: ");
860 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
861}
862
863static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
864{
865 uint8_t attr = U8((*ptr)++), shift;
866 uint32_t saved, dst;
867 int dptr = *ptr;
868 attr &= 0x38;
869 attr |= atom_def_dst[attr >> 3] << 6;
870 SDEBUG(" dst: ");
871 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
872 shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
873 SDEBUG(" shift: %d\n", shift);
874 dst >>= shift;
875 SDEBUG(" dst: ");
876 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
877}
878
812static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg) 879static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
813{ 880{
814 uint8_t attr = U8((*ptr)++), shift; 881 uint8_t attr = U8((*ptr)++), shift;
@@ -818,7 +885,7 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
818 attr |= atom_def_dst[attr >> 3] << 6; 885 attr |= atom_def_dst[attr >> 3] << 6;
819 SDEBUG(" dst: "); 886 SDEBUG(" dst: ");
820 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 887 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
821 shift = U8((*ptr)++); 888 shift = atom_get_src(ctx, attr, ptr);
822 SDEBUG(" shift: %d\n", shift); 889 SDEBUG(" shift: %d\n", shift);
823 dst <<= shift; 890 dst <<= shift;
824 SDEBUG(" dst: "); 891 SDEBUG(" dst: ");
@@ -834,7 +901,7 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
834 attr |= atom_def_dst[attr >> 3] << 6; 901 attr |= atom_def_dst[attr >> 3] << 6;
835 SDEBUG(" dst: "); 902 SDEBUG(" dst: ");
836 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 903 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
837 shift = U8((*ptr)++); 904 shift = atom_get_src(ctx, attr, ptr);
838 SDEBUG(" shift: %d\n", shift); 905 SDEBUG(" shift: %d\n", shift);
839 dst >>= shift; 906 dst >>= shift;
840 SDEBUG(" dst: "); 907 SDEBUG(" dst: ");
@@ -937,18 +1004,18 @@ static struct {
937 atom_op_or, ATOM_ARG_FB}, { 1004 atom_op_or, ATOM_ARG_FB}, {
938 atom_op_or, ATOM_ARG_PLL}, { 1005 atom_op_or, ATOM_ARG_PLL}, {
939 atom_op_or, ATOM_ARG_MC}, { 1006 atom_op_or, ATOM_ARG_MC}, {
940 atom_op_shl, ATOM_ARG_REG}, { 1007 atom_op_shift_left, ATOM_ARG_REG}, {
941 atom_op_shl, ATOM_ARG_PS}, { 1008 atom_op_shift_left, ATOM_ARG_PS}, {
942 atom_op_shl, ATOM_ARG_WS}, { 1009 atom_op_shift_left, ATOM_ARG_WS}, {
943 atom_op_shl, ATOM_ARG_FB}, { 1010 atom_op_shift_left, ATOM_ARG_FB}, {
944 atom_op_shl, ATOM_ARG_PLL}, { 1011 atom_op_shift_left, ATOM_ARG_PLL}, {
945 atom_op_shl, ATOM_ARG_MC}, { 1012 atom_op_shift_left, ATOM_ARG_MC}, {
946 atom_op_shr, ATOM_ARG_REG}, { 1013 atom_op_shift_right, ATOM_ARG_REG}, {
947 atom_op_shr, ATOM_ARG_PS}, { 1014 atom_op_shift_right, ATOM_ARG_PS}, {
948 atom_op_shr, ATOM_ARG_WS}, { 1015 atom_op_shift_right, ATOM_ARG_WS}, {
949 atom_op_shr, ATOM_ARG_FB}, { 1016 atom_op_shift_right, ATOM_ARG_FB}, {
950 atom_op_shr, ATOM_ARG_PLL}, { 1017 atom_op_shift_right, ATOM_ARG_PLL}, {
951 atom_op_shr, ATOM_ARG_MC}, { 1018 atom_op_shift_right, ATOM_ARG_MC}, {
952 atom_op_mul, ATOM_ARG_REG}, { 1019 atom_op_mul, ATOM_ARG_REG}, {
953 atom_op_mul, ATOM_ARG_PS}, { 1020 atom_op_mul, ATOM_ARG_PS}, {
954 atom_op_mul, ATOM_ARG_WS}, { 1021 atom_op_mul, ATOM_ARG_WS}, {
@@ -1058,8 +1125,6 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
1058 1125
1059 SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps); 1126 SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
1060 1127
1061 /* reset reg block */
1062 ctx->reg_block = 0;
1063 ectx.ctx = ctx; 1128 ectx.ctx = ctx;
1064 ectx.ps_shift = ps / 4; 1129 ectx.ps_shift = ps / 4;
1065 ectx.start = base; 1130 ectx.start = base;
@@ -1096,6 +1161,12 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
1096void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) 1161void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
1097{ 1162{
1098 mutex_lock(&ctx->mutex); 1163 mutex_lock(&ctx->mutex);
1164 /* reset reg block */
1165 ctx->reg_block = 0;
1166 /* reset fb window */
1167 ctx->fb_base = 0;
1168 /* reset io mode */
1169 ctx->io_mode = ATOM_IO_MM;
1099 atom_execute_table_locked(ctx, index, params); 1170 atom_execute_table_locked(ctx, index, params);
1100 mutex_unlock(&ctx->mutex); 1171 mutex_unlock(&ctx->mutex);
1101} 1172}
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index 47fd943f6d14..bc73781423a1 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -91,6 +91,7 @@
91#define ATOM_WS_AND_MASK 0x45 91#define ATOM_WS_AND_MASK 0x45
92#define ATOM_WS_FB_WINDOW 0x46 92#define ATOM_WS_FB_WINDOW 0x46
93#define ATOM_WS_ATTRIBUTES 0x47 93#define ATOM_WS_ATTRIBUTES 0x47
94#define ATOM_WS_REGPTR 0x48
94 95
95#define ATOM_IIO_NOP 0 96#define ATOM_IIO_NOP 0
96#define ATOM_IIO_START 1 97#define ATOM_IIO_START 1
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 260fcf59f00c..af464e351fbd 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -307,7 +307,6 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
307 args.susModeMiscInfo.usAccess = cpu_to_le16(misc); 307 args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
308 args.ucCRTC = radeon_crtc->crtc_id; 308 args.ucCRTC = radeon_crtc->crtc_id;
309 309
310 printk("executing set crtc dtd timing\n");
311 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 310 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
312} 311}
313 312
@@ -347,7 +346,6 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
347 args.susModeMiscInfo.usAccess = cpu_to_le16(misc); 346 args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
348 args.ucCRTC = radeon_crtc->crtc_id; 347 args.ucCRTC = radeon_crtc->crtc_id;
349 348
350 printk("executing set crtc timing\n");
351 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 349 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
352} 350}
353 351
@@ -409,59 +407,57 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
409 } 407 }
410} 408}
411 409
412void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) 410union adjust_pixel_clock {
411 ADJUST_DISPLAY_PLL_PS_ALLOCATION v1;
412};
413
414static u32 atombios_adjust_pll(struct drm_crtc *crtc,
415 struct drm_display_mode *mode,
416 struct radeon_pll *pll)
413{ 417{
414 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
415 struct drm_device *dev = crtc->dev; 418 struct drm_device *dev = crtc->dev;
416 struct radeon_device *rdev = dev->dev_private; 419 struct radeon_device *rdev = dev->dev_private;
417 struct drm_encoder *encoder = NULL; 420 struct drm_encoder *encoder = NULL;
418 struct radeon_encoder *radeon_encoder = NULL; 421 struct radeon_encoder *radeon_encoder = NULL;
419 uint8_t frev, crev; 422 u32 adjusted_clock = mode->clock;
420 int index;
421 SET_PIXEL_CLOCK_PS_ALLOCATION args;
422 PIXEL_CLOCK_PARAMETERS *spc1_ptr;
423 PIXEL_CLOCK_PARAMETERS_V2 *spc2_ptr;
424 PIXEL_CLOCK_PARAMETERS_V3 *spc3_ptr;
425 uint32_t pll_clock = mode->clock;
426 uint32_t adjusted_clock;
427 uint32_t ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
428 struct radeon_pll *pll;
429 int pll_flags = 0;
430 423
431 memset(&args, 0, sizeof(args)); 424 /* reset the pll flags */
425 pll->flags = 0;
432 426
433 if (ASIC_IS_AVIVO(rdev)) { 427 if (ASIC_IS_AVIVO(rdev)) {
434 if ((rdev->family == CHIP_RS600) || 428 if ((rdev->family == CHIP_RS600) ||
435 (rdev->family == CHIP_RS690) || 429 (rdev->family == CHIP_RS690) ||
436 (rdev->family == CHIP_RS740)) 430 (rdev->family == CHIP_RS740))
437 pll_flags |= (RADEON_PLL_USE_FRAC_FB_DIV | 431 pll->flags |= (RADEON_PLL_USE_FRAC_FB_DIV |
438 RADEON_PLL_PREFER_CLOSEST_LOWER); 432 RADEON_PLL_PREFER_CLOSEST_LOWER);
439 433
440 if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */ 434 if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */
441 pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; 435 pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
442 else 436 else
443 pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; 437 pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
444 } else { 438 } else {
445 pll_flags |= RADEON_PLL_LEGACY; 439 pll->flags |= RADEON_PLL_LEGACY;
446 440
447 if (mode->clock > 200000) /* range limits??? */ 441 if (mode->clock > 200000) /* range limits??? */
448 pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; 442 pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
449 else 443 else
450 pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; 444 pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
451 445
452 } 446 }
453 447
454 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 448 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
455 if (encoder->crtc == crtc) { 449 if (encoder->crtc == crtc) {
456 if (!ASIC_IS_AVIVO(rdev)) {
457 if (encoder->encoder_type !=
458 DRM_MODE_ENCODER_DAC)
459 pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
460 if (encoder->encoder_type ==
461 DRM_MODE_ENCODER_LVDS)
462 pll_flags |= RADEON_PLL_USE_REF_DIV;
463 }
464 radeon_encoder = to_radeon_encoder(encoder); 450 radeon_encoder = to_radeon_encoder(encoder);
451 if (ASIC_IS_AVIVO(rdev)) {
452 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
453 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
454 adjusted_clock = mode->clock * 2;
455 } else {
456 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
457 pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
458 if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS)
459 pll->flags |= RADEON_PLL_USE_REF_DIV;
460 }
465 break; 461 break;
466 } 462 }
467 } 463 }
@@ -471,46 +467,101 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
471 * special hw requirements. 467 * special hw requirements.
472 */ 468 */
473 if (ASIC_IS_DCE3(rdev)) { 469 if (ASIC_IS_DCE3(rdev)) {
474 ADJUST_DISPLAY_PLL_PS_ALLOCATION adjust_pll_args; 470 union adjust_pixel_clock args;
471 struct radeon_encoder_atom_dig *dig;
472 u8 frev, crev;
473 int index;
475 474
476 if (!encoder) 475 if (!radeon_encoder->enc_priv)
477 return; 476 return adjusted_clock;
478 477 dig = radeon_encoder->enc_priv;
479 memset(&adjust_pll_args, 0, sizeof(adjust_pll_args));
480 adjust_pll_args.usPixelClock = cpu_to_le16(mode->clock / 10);
481 adjust_pll_args.ucTransmitterID = radeon_encoder->encoder_id;
482 adjust_pll_args.ucEncodeMode = atombios_get_encoder_mode(encoder);
483 478
484 index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll); 479 index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
485 atom_execute_table(rdev->mode_info.atom_context, 480 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
486 index, (uint32_t *)&adjust_pll_args); 481 &crev);
487 adjusted_clock = le16_to_cpu(adjust_pll_args.usPixelClock) * 10; 482
488 } else { 483 memset(&args, 0, sizeof(args));
489 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ 484
490 if (ASIC_IS_AVIVO(rdev) && 485 switch (frev) {
491 (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)) 486 case 1:
492 adjusted_clock = mode->clock * 2; 487 switch (crev) {
493 else 488 case 1:
494 adjusted_clock = mode->clock; 489 case 2:
490 args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
491 args.v1.ucTransmitterID = radeon_encoder->encoder_id;
492 args.v1.ucEncodeMode = atombios_get_encoder_mode(encoder);
493
494 atom_execute_table(rdev->mode_info.atom_context,
495 index, (uint32_t *)&args);
496 adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
497 break;
498 default:
499 DRM_ERROR("Unknown table version %d %d\n", frev, crev);
500 return adjusted_clock;
501 }
502 break;
503 default:
504 DRM_ERROR("Unknown table version %d %d\n", frev, crev);
505 return adjusted_clock;
506 }
495 } 507 }
508 return adjusted_clock;
509}
510
511union set_pixel_clock {
512 SET_PIXEL_CLOCK_PS_ALLOCATION base;
513 PIXEL_CLOCK_PARAMETERS v1;
514 PIXEL_CLOCK_PARAMETERS_V2 v2;
515 PIXEL_CLOCK_PARAMETERS_V3 v3;
516};
517
518void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
519{
520 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
521 struct drm_device *dev = crtc->dev;
522 struct radeon_device *rdev = dev->dev_private;
523 struct drm_encoder *encoder = NULL;
524 struct radeon_encoder *radeon_encoder = NULL;
525 u8 frev, crev;
526 int index;
527 union set_pixel_clock args;
528 u32 pll_clock = mode->clock;
529 u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
530 struct radeon_pll *pll;
531 u32 adjusted_clock;
532
533 memset(&args, 0, sizeof(args));
534
535 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
536 if (encoder->crtc == crtc) {
537 radeon_encoder = to_radeon_encoder(encoder);
538 break;
539 }
540 }
541
542 if (!radeon_encoder)
543 return;
496 544
497 if (radeon_crtc->crtc_id == 0) 545 if (radeon_crtc->crtc_id == 0)
498 pll = &rdev->clock.p1pll; 546 pll = &rdev->clock.p1pll;
499 else 547 else
500 pll = &rdev->clock.p2pll; 548 pll = &rdev->clock.p2pll;
501 549
550 /* adjust pixel clock as needed */
551 adjusted_clock = atombios_adjust_pll(crtc, mode, pll);
552
502 if (ASIC_IS_AVIVO(rdev)) { 553 if (ASIC_IS_AVIVO(rdev)) {
503 if (radeon_new_pll) 554 if (radeon_new_pll)
504 radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, 555 radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock,
505 &fb_div, &frac_fb_div, 556 &fb_div, &frac_fb_div,
506 &ref_div, &post_div, pll_flags); 557 &ref_div, &post_div);
507 else 558 else
508 radeon_compute_pll(pll, adjusted_clock, &pll_clock, 559 radeon_compute_pll(pll, adjusted_clock, &pll_clock,
509 &fb_div, &frac_fb_div, 560 &fb_div, &frac_fb_div,
510 &ref_div, &post_div, pll_flags); 561 &ref_div, &post_div);
511 } else 562 } else
512 radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, 563 radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
513 &ref_div, &post_div, pll_flags); 564 &ref_div, &post_div);
514 565
515 index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); 566 index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
516 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, 567 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
@@ -520,45 +571,38 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
520 case 1: 571 case 1:
521 switch (crev) { 572 switch (crev) {
522 case 1: 573 case 1:
523 spc1_ptr = (PIXEL_CLOCK_PARAMETERS *) & args.sPCLKInput; 574 args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
524 spc1_ptr->usPixelClock = cpu_to_le16(mode->clock / 10); 575 args.v1.usRefDiv = cpu_to_le16(ref_div);
525 spc1_ptr->usRefDiv = cpu_to_le16(ref_div); 576 args.v1.usFbDiv = cpu_to_le16(fb_div);
526 spc1_ptr->usFbDiv = cpu_to_le16(fb_div); 577 args.v1.ucFracFbDiv = frac_fb_div;
527 spc1_ptr->ucFracFbDiv = frac_fb_div; 578 args.v1.ucPostDiv = post_div;
528 spc1_ptr->ucPostDiv = post_div; 579 args.v1.ucPpll =
529 spc1_ptr->ucPpll =
530 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; 580 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
531 spc1_ptr->ucCRTC = radeon_crtc->crtc_id; 581 args.v1.ucCRTC = radeon_crtc->crtc_id;
532 spc1_ptr->ucRefDivSrc = 1; 582 args.v1.ucRefDivSrc = 1;
533 break; 583 break;
534 case 2: 584 case 2:
535 spc2_ptr = 585 args.v2.usPixelClock = cpu_to_le16(mode->clock / 10);
536 (PIXEL_CLOCK_PARAMETERS_V2 *) & args.sPCLKInput; 586 args.v2.usRefDiv = cpu_to_le16(ref_div);
537 spc2_ptr->usPixelClock = cpu_to_le16(mode->clock / 10); 587 args.v2.usFbDiv = cpu_to_le16(fb_div);
538 spc2_ptr->usRefDiv = cpu_to_le16(ref_div); 588 args.v2.ucFracFbDiv = frac_fb_div;
539 spc2_ptr->usFbDiv = cpu_to_le16(fb_div); 589 args.v2.ucPostDiv = post_div;
540 spc2_ptr->ucFracFbDiv = frac_fb_div; 590 args.v2.ucPpll =
541 spc2_ptr->ucPostDiv = post_div;
542 spc2_ptr->ucPpll =
543 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; 591 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
544 spc2_ptr->ucCRTC = radeon_crtc->crtc_id; 592 args.v2.ucCRTC = radeon_crtc->crtc_id;
545 spc2_ptr->ucRefDivSrc = 1; 593 args.v2.ucRefDivSrc = 1;
546 break; 594 break;
547 case 3: 595 case 3:
548 if (!encoder) 596 args.v3.usPixelClock = cpu_to_le16(mode->clock / 10);
549 return; 597 args.v3.usRefDiv = cpu_to_le16(ref_div);
550 spc3_ptr = 598 args.v3.usFbDiv = cpu_to_le16(fb_div);
551 (PIXEL_CLOCK_PARAMETERS_V3 *) & args.sPCLKInput; 599 args.v3.ucFracFbDiv = frac_fb_div;
552 spc3_ptr->usPixelClock = cpu_to_le16(mode->clock / 10); 600 args.v3.ucPostDiv = post_div;
553 spc3_ptr->usRefDiv = cpu_to_le16(ref_div); 601 args.v3.ucPpll =
554 spc3_ptr->usFbDiv = cpu_to_le16(fb_div);
555 spc3_ptr->ucFracFbDiv = frac_fb_div;
556 spc3_ptr->ucPostDiv = post_div;
557 spc3_ptr->ucPpll =
558 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; 602 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
559 spc3_ptr->ucMiscInfo = (radeon_crtc->crtc_id << 2); 603 args.v3.ucMiscInfo = (radeon_crtc->crtc_id << 2);
560 spc3_ptr->ucTransmitterId = radeon_encoder->encoder_id; 604 args.v3.ucTransmitterId = radeon_encoder->encoder_id;
561 spc3_ptr->ucEncoderMode = 605 args.v3.ucEncoderMode =
562 atombios_get_encoder_mode(encoder); 606 atombios_get_encoder_mode(encoder);
563 break; 607 break;
564 default: 608 default:
@@ -571,12 +615,11 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
571 return; 615 return;
572 } 616 }
573 617
574 printk("executing set pll\n");
575 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 618 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
576} 619}
577 620
578int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, 621static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
579 struct drm_framebuffer *old_fb) 622 struct drm_framebuffer *old_fb)
580{ 623{
581 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 624 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
582 struct drm_device *dev = crtc->dev; 625 struct drm_device *dev = crtc->dev;
@@ -706,6 +749,42 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
706 return 0; 749 return 0;
707} 750}
708 751
752int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
753 struct drm_framebuffer *old_fb)
754{
755 struct drm_device *dev = crtc->dev;
756 struct radeon_device *rdev = dev->dev_private;
757
758 if (ASIC_IS_AVIVO(rdev))
759 return avivo_crtc_set_base(crtc, x, y, old_fb);
760 else
761 return radeon_crtc_set_base(crtc, x, y, old_fb);
762}
763
764/* properly set additional regs when using atombios */
765static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
766{
767 struct drm_device *dev = crtc->dev;
768 struct radeon_device *rdev = dev->dev_private;
769 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
770 u32 disp_merge_cntl;
771
772 switch (radeon_crtc->crtc_id) {
773 case 0:
774 disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
775 disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
776 WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
777 break;
778 case 1:
779 disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
780 disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
781 WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl);
782 WREG32(RADEON_FP_H2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_H_SYNC_STRT_WID));
783 WREG32(RADEON_FP_V2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_V_SYNC_STRT_WID));
784 break;
785 }
786}
787
709int atombios_crtc_mode_set(struct drm_crtc *crtc, 788int atombios_crtc_mode_set(struct drm_crtc *crtc,
710 struct drm_display_mode *mode, 789 struct drm_display_mode *mode,
711 struct drm_display_mode *adjusted_mode, 790 struct drm_display_mode *adjusted_mode,
@@ -727,8 +806,8 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
727 else { 806 else {
728 if (radeon_crtc->crtc_id == 0) 807 if (radeon_crtc->crtc_id == 0)
729 atombios_set_crtc_dtd_timing(crtc, adjusted_mode); 808 atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
730 radeon_crtc_set_base(crtc, x, y, old_fb); 809 atombios_crtc_set_base(crtc, x, y, old_fb);
731 radeon_legacy_atom_set_surface(crtc); 810 radeon_legacy_atom_fixup(crtc);
732 } 811 }
733 atombios_overscan_setup(crtc, mode, adjusted_mode); 812 atombios_overscan_setup(crtc, mode, adjusted_mode);
734 atombios_scaler_setup(crtc); 813 atombios_scaler_setup(crtc);
@@ -746,8 +825,8 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
746 825
747static void atombios_crtc_prepare(struct drm_crtc *crtc) 826static void atombios_crtc_prepare(struct drm_crtc *crtc)
748{ 827{
749 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
750 atombios_lock_crtc(crtc, 1); 828 atombios_lock_crtc(crtc, 1);
829 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
751} 830}
752 831
753static void atombios_crtc_commit(struct drm_crtc *crtc) 832static void atombios_crtc_commit(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 3eb0ca5b3d73..99915a682d59 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -332,11 +332,13 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
332 PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args; 332 PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args;
333 int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction); 333 int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
334 unsigned char *base; 334 unsigned char *base;
335 int retry_count = 0;
335 336
336 memset(&args, 0, sizeof(args)); 337 memset(&args, 0, sizeof(args));
337 338
338 base = (unsigned char *)rdev->mode_info.atom_context->scratch; 339 base = (unsigned char *)rdev->mode_info.atom_context->scratch;
339 340
341retry:
340 memcpy(base, req_bytes, num_bytes); 342 memcpy(base, req_bytes, num_bytes);
341 343
342 args.lpAuxRequest = 0; 344 args.lpAuxRequest = 0;
@@ -347,10 +349,12 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
347 349
348 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 350 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
349 351
350 if (args.ucReplyStatus) { 352 if (args.ucReplyStatus && !args.ucDataOutLen) {
351 DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x\n", 353 if (args.ucReplyStatus == 0x20 && retry_count++ < 10)
354 goto retry;
355 DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n",
352 req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3], 356 req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
353 chan->rec.i2c_id, args.ucReplyStatus); 357 chan->rec.i2c_id, args.ucReplyStatus, retry_count);
354 return false; 358 return false;
355 } 359 }
356 360
@@ -468,7 +472,7 @@ void radeon_dp_set_link_config(struct drm_connector *connector,
468 struct radeon_connector *radeon_connector; 472 struct radeon_connector *radeon_connector;
469 struct radeon_connector_atom_dig *dig_connector; 473 struct radeon_connector_atom_dig *dig_connector;
470 474
471 if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) || 475 if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
472 (connector->connector_type != DRM_MODE_CONNECTOR_eDP)) 476 (connector->connector_type != DRM_MODE_CONNECTOR_eDP))
473 return; 477 return;
474 478
@@ -583,7 +587,7 @@ void dp_link_train(struct drm_encoder *encoder,
583 u8 train_set[4]; 587 u8 train_set[4];
584 int i; 588 int i;
585 589
586 if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) || 590 if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
587 (connector->connector_type != DRM_MODE_CONNECTOR_eDP)) 591 (connector->connector_type != DRM_MODE_CONNECTOR_eDP))
588 return; 592 return;
589 593
@@ -596,21 +600,14 @@ void dp_link_train(struct drm_encoder *encoder,
596 return; 600 return;
597 dig_connector = radeon_connector->con_priv; 601 dig_connector = radeon_connector->con_priv;
598 602
599 if (ASIC_IS_DCE32(rdev)) { 603 if (dig->dig_encoder)
600 if (dig->dig_block) 604 enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
601 enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER; 605 else
602 else 606 enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
603 enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER; 607 if (dig_connector->linkb)
604 if (dig_connector->linkb) 608 enc_id |= ATOM_DP_CONFIG_LINK_B;
605 enc_id |= ATOM_DP_CONFIG_LINK_B; 609 else
606 else 610 enc_id |= ATOM_DP_CONFIG_LINK_A;
607 enc_id |= ATOM_DP_CONFIG_LINK_A;
608 } else {
609 if (dig_connector->linkb)
610 enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER | ATOM_DP_CONFIG_LINK_B;
611 else
612 enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER | ATOM_DP_CONFIG_LINK_A;
613 }
614 611
615 memset(link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); 612 memset(link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
616 if (dig_connector->dp_clock == 270000) 613 if (dig_connector->dp_clock == 270000)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 8760d66e058a..c0d4650cdb79 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -354,11 +354,17 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
354 return RREG32(RADEON_CRTC2_CRNT_FRAME); 354 return RREG32(RADEON_CRTC2_CRNT_FRAME);
355} 355}
356 356
357/* Whoever calls radeon_fence_emit should call ring_lock and ask
358 * for enough space (today callers are ib schedule and buffer move) */
357void r100_fence_ring_emit(struct radeon_device *rdev, 359void r100_fence_ring_emit(struct radeon_device *rdev,
358 struct radeon_fence *fence) 360 struct radeon_fence *fence)
359{ 361{
360 /* Who ever call radeon_fence_emit should call ring_lock and ask 362 /* We have to make sure that caches are flushed before
361 * for enough space (today caller are ib schedule and buffer move) */ 363 * the CPU reads anything from VRAM. */
364 radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
365 radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL);
366 radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
367 radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
362 /* Wait until IDLE & CLEAN */ 368 /* Wait until IDLE & CLEAN */
363 radeon_ring_write(rdev, PACKET0(0x1720, 0)); 369 radeon_ring_write(rdev, PACKET0(0x1720, 0));
364 radeon_ring_write(rdev, (1 << 16) | (1 << 17)); 370 radeon_ring_write(rdev, (1 << 16) | (1 << 17));
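Per the comment now placed above r100_fence_ring_emit(), the emitter assumes its caller already holds the ring with enough dwords reserved (today those callers are the IB scheduler and the buffer-move path). An illustrative sketch of that calling convention, not a call site added by this diff:

    /* Illustrative caller pattern assumed by r100_fence_ring_emit(). */
    r = radeon_ring_lock(rdev, 64);          /* reserve space up front */
    if (!r) {
            radeon_fence_emit(rdev, fence);  /* cache flush + idle wait + fence */
            radeon_ring_unlock_commit(rdev);
    }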
@@ -1504,6 +1510,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
1504 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 1510 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1505 return -EINVAL; 1511 return -EINVAL;
1506 } 1512 }
1513 track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0));
1507 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); 1514 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1508 track->immd_dwords = pkt->count - 1; 1515 track->immd_dwords = pkt->count - 1;
1509 r = r100_cs_track_check(p->rdev, track); 1516 r = r100_cs_track_check(p->rdev, track);
@@ -3368,7 +3375,6 @@ int r100_suspend(struct radeon_device *rdev)
3368 3375
3369void r100_fini(struct radeon_device *rdev) 3376void r100_fini(struct radeon_device *rdev)
3370{ 3377{
3371 r100_suspend(rdev);
3372 r100_cp_fini(rdev); 3378 r100_cp_fini(rdev);
3373 r100_wb_fini(rdev); 3379 r100_wb_fini(rdev);
3374 r100_ib_fini(rdev); 3380 r100_ib_fini(rdev);
@@ -3399,9 +3405,7 @@ int r100_mc_init(struct radeon_device *rdev)
3399 if (rdev->flags & RADEON_IS_AGP) { 3405 if (rdev->flags & RADEON_IS_AGP) {
3400 r = radeon_agp_init(rdev); 3406 r = radeon_agp_init(rdev);
3401 if (r) { 3407 if (r) {
3402 printk(KERN_WARNING "[drm] Disabling AGP\n"); 3408 radeon_agp_disable(rdev);
3403 rdev->flags &= ~RADEON_IS_AGP;
3404 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
3405 } else { 3409 } else {
3406 rdev->mc.gtt_location = rdev->mc.agp_base; 3410 rdev->mc.gtt_location = rdev->mc.agp_base;
3407 } 3411 }
@@ -3482,13 +3486,12 @@ int r100_init(struct radeon_device *rdev)
3482 if (r) { 3486 if (r) {
3483 /* Something went wrong with the accel init, stop accel */ 3487 /* Something went wrong with the accel init, stop accel */
3484 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 3488 dev_err(rdev->dev, "Disabling GPU acceleration\n");
3485 r100_suspend(rdev);
3486 r100_cp_fini(rdev); 3489 r100_cp_fini(rdev);
3487 r100_wb_fini(rdev); 3490 r100_wb_fini(rdev);
3488 r100_ib_fini(rdev); 3491 r100_ib_fini(rdev);
3492 radeon_irq_kms_fini(rdev);
3489 if (rdev->flags & RADEON_IS_PCI) 3493 if (rdev->flags & RADEON_IS_PCI)
3490 r100_pci_gart_fini(rdev); 3494 r100_pci_gart_fini(rdev);
3491 radeon_irq_kms_fini(rdev);
3492 rdev->accel_working = false; 3495 rdev->accel_working = false;
3493 } 3496 }
3494 return 0; 3497 return 0;
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 20942127c46b..ff1e0cd608bf 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -371,13 +371,16 @@ int r200_packet0_check(struct radeon_cs_parser *p,
371 case 5: 371 case 5:
372 case 6: 372 case 6:
373 case 7: 373 case 7:
374 /* 1D/2D */
374 track->textures[i].tex_coord_type = 0; 375 track->textures[i].tex_coord_type = 0;
375 break; 376 break;
376 case 1: 377 case 1:
377 track->textures[i].tex_coord_type = 1; 378 /* CUBE */
379 track->textures[i].tex_coord_type = 2;
378 break; 380 break;
379 case 2: 381 case 2:
380 track->textures[i].tex_coord_type = 2; 382 /* 3D */
383 track->textures[i].tex_coord_type = 1;
381 break; 384 break;
382 } 385 }
383 break; 386 break;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 0051d11b907c..43b55a030b4d 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -506,11 +506,14 @@ void r300_vram_info(struct radeon_device *rdev)
506 506
507 /* DDR for all card after R300 & IGP */ 507 /* DDR for all card after R300 & IGP */
508 rdev->mc.vram_is_ddr = true; 508 rdev->mc.vram_is_ddr = true;
509
509 tmp = RREG32(RADEON_MEM_CNTL); 510 tmp = RREG32(RADEON_MEM_CNTL);
510 if (tmp & R300_MEM_NUM_CHANNELS_MASK) { 511 tmp &= R300_MEM_NUM_CHANNELS_MASK;
511 rdev->mc.vram_width = 128; 512 switch (tmp) {
512 } else { 513 case 0: rdev->mc.vram_width = 64; break;
513 rdev->mc.vram_width = 64; 514 case 1: rdev->mc.vram_width = 128; break;
515 case 2: rdev->mc.vram_width = 256; break;
516 default: rdev->mc.vram_width = 128; break;
514 } 517 }
515 518
516 r100_vram_init_sizes(rdev); 519 r100_vram_init_sizes(rdev);
@@ -1327,7 +1330,6 @@ int r300_suspend(struct radeon_device *rdev)
1327 1330
1328void r300_fini(struct radeon_device *rdev) 1331void r300_fini(struct radeon_device *rdev)
1329{ 1332{
1330 r300_suspend(rdev);
1331 r100_cp_fini(rdev); 1333 r100_cp_fini(rdev);
1332 r100_wb_fini(rdev); 1334 r100_wb_fini(rdev);
1333 r100_ib_fini(rdev); 1335 r100_ib_fini(rdev);
@@ -1418,15 +1420,15 @@ int r300_init(struct radeon_device *rdev)
1418 if (r) { 1420 if (r) {
1419 /* Something went wrong with the accel init, stop accel */ 1421 /* Something went wrong with the accel init, stop accel */
1420 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 1422 dev_err(rdev->dev, "Disabling GPU acceleration\n");
1421 r300_suspend(rdev);
1422 r100_cp_fini(rdev); 1423 r100_cp_fini(rdev);
1423 r100_wb_fini(rdev); 1424 r100_wb_fini(rdev);
1424 r100_ib_fini(rdev); 1425 r100_ib_fini(rdev);
1426 radeon_irq_kms_fini(rdev);
1425 if (rdev->flags & RADEON_IS_PCIE) 1427 if (rdev->flags & RADEON_IS_PCIE)
1426 rv370_pcie_gart_fini(rdev); 1428 rv370_pcie_gart_fini(rdev);
1427 if (rdev->flags & RADEON_IS_PCI) 1429 if (rdev->flags & RADEON_IS_PCI)
1428 r100_pci_gart_fini(rdev); 1430 r100_pci_gart_fini(rdev);
1429 radeon_irq_kms_fini(rdev); 1431 radeon_agp_fini(rdev);
1430 rdev->accel_working = false; 1432 rdev->accel_working = false;
1431 } 1433 }
1432 return 0; 1434 return 0;
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 053404e71a9d..d9373246c97f 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -50,9 +50,7 @@ int r420_mc_init(struct radeon_device *rdev)
50 if (rdev->flags & RADEON_IS_AGP) { 50 if (rdev->flags & RADEON_IS_AGP) {
51 r = radeon_agp_init(rdev); 51 r = radeon_agp_init(rdev);
52 if (r) { 52 if (r) {
53 printk(KERN_WARNING "[drm] Disabling AGP\n"); 53 radeon_agp_disable(rdev);
54 rdev->flags &= ~RADEON_IS_AGP;
55 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
56 } else { 54 } else {
57 rdev->mc.gtt_location = rdev->mc.agp_base; 55 rdev->mc.gtt_location = rdev->mc.agp_base;
58 } 56 }
@@ -391,16 +389,15 @@ int r420_init(struct radeon_device *rdev)
391 if (r) { 389 if (r) {
392 /* Something went wrong with the accel init, stop accel */ 390 /* Something went wrong with the accel init, stop accel */
393 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 391 dev_err(rdev->dev, "Disabling GPU acceleration\n");
394 r420_suspend(rdev);
395 r100_cp_fini(rdev); 392 r100_cp_fini(rdev);
396 r100_wb_fini(rdev); 393 r100_wb_fini(rdev);
397 r100_ib_fini(rdev); 394 r100_ib_fini(rdev);
395 radeon_irq_kms_fini(rdev);
398 if (rdev->flags & RADEON_IS_PCIE) 396 if (rdev->flags & RADEON_IS_PCIE)
399 rv370_pcie_gart_fini(rdev); 397 rv370_pcie_gart_fini(rdev);
400 if (rdev->flags & RADEON_IS_PCI) 398 if (rdev->flags & RADEON_IS_PCI)
401 r100_pci_gart_fini(rdev); 399 r100_pci_gart_fini(rdev);
402 radeon_agp_fini(rdev); 400 radeon_agp_fini(rdev);
403 radeon_irq_kms_fini(rdev);
404 rdev->accel_working = false; 401 rdev->accel_working = false;
405 } 402 }
406 return 0; 403 return 0;
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 9a189072f2b9..ddf5731eba0d 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -294,13 +294,12 @@ int r520_init(struct radeon_device *rdev)
294 if (r) { 294 if (r) {
295 /* Something went wrong with the accel init, stop accel */ 295 /* Something went wrong with the accel init, stop accel */
296 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 296 dev_err(rdev->dev, "Disabling GPU acceleration\n");
297 rv515_suspend(rdev);
298 r100_cp_fini(rdev); 297 r100_cp_fini(rdev);
299 r100_wb_fini(rdev); 298 r100_wb_fini(rdev);
300 r100_ib_fini(rdev); 299 r100_ib_fini(rdev);
300 radeon_irq_kms_fini(rdev);
301 rv370_pcie_gart_fini(rdev); 301 rv370_pcie_gart_fini(rdev);
302 radeon_agp_fini(rdev); 302 radeon_agp_fini(rdev);
303 radeon_irq_kms_fini(rdev);
304 rdev->accel_working = false; 303 rdev->accel_working = false;
305 } 304 }
306 return 0; 305 return 0;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index f5ff3490929f..2ffcf5a03551 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -624,7 +624,6 @@ int r600_mc_init(struct radeon_device *rdev)
624 fixed20_12 a; 624 fixed20_12 a;
625 u32 tmp; 625 u32 tmp;
626 int chansize, numchan; 626 int chansize, numchan;
627 int r;
628 627
629 /* Get VRAM information */ 628 /* Get VRAM information */
630 rdev->mc.vram_is_ddr = true; 629 rdev->mc.vram_is_ddr = true;
@@ -667,9 +666,6 @@ int r600_mc_init(struct radeon_device *rdev)
667 rdev->mc.real_vram_size = rdev->mc.aper_size; 666 rdev->mc.real_vram_size = rdev->mc.aper_size;
668 667
669 if (rdev->flags & RADEON_IS_AGP) { 668 if (rdev->flags & RADEON_IS_AGP) {
670 r = radeon_agp_init(rdev);
671 if (r)
672 return r;
673 /* gtt_size is setup by radeon_agp_init */ 669 /* gtt_size is setup by radeon_agp_init */
674 rdev->mc.gtt_location = rdev->mc.agp_base; 670 rdev->mc.gtt_location = rdev->mc.agp_base;
675 tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size; 671 tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
@@ -1658,6 +1654,12 @@ void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
1658 rdev->cp.align_mask = 16 - 1; 1654 rdev->cp.align_mask = 16 - 1;
1659} 1655}
1660 1656
1657void r600_cp_fini(struct radeon_device *rdev)
1658{
1659 r600_cp_stop(rdev);
1660 radeon_ring_fini(rdev);
1661}
1662
1661 1663
1662/* 1664/*
1663 * GPU scratch registers helpers function. 1665 * GPU scratch registers helpers function.
@@ -1792,23 +1794,24 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
1792 radeon_ring_write(rdev, RB_INT_STAT); 1794 radeon_ring_write(rdev, RB_INT_STAT);
1793} 1795}
1794 1796
1795int r600_copy_dma(struct radeon_device *rdev,
1796 uint64_t src_offset,
1797 uint64_t dst_offset,
1798 unsigned num_pages,
1799 struct radeon_fence *fence)
1800{
1801 /* FIXME: implement */
1802 return 0;
1803}
1804
1805int r600_copy_blit(struct radeon_device *rdev, 1797int r600_copy_blit(struct radeon_device *rdev,
1806 uint64_t src_offset, uint64_t dst_offset, 1798 uint64_t src_offset, uint64_t dst_offset,
1807 unsigned num_pages, struct radeon_fence *fence) 1799 unsigned num_pages, struct radeon_fence *fence)
1808{ 1800{
1809 r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE); 1801 int r;
1802
1803 mutex_lock(&rdev->r600_blit.mutex);
1804 rdev->r600_blit.vb_ib = NULL;
1805 r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
1806 if (r) {
1807 if (rdev->r600_blit.vb_ib)
1808 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
1809 mutex_unlock(&rdev->r600_blit.mutex);
1810 return r;
1811 }
1810 r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE); 1812 r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
1811 r600_blit_done_copy(rdev, fence); 1813 r600_blit_done_copy(rdev, fence);
1814 mutex_unlock(&rdev->r600_blit.mutex);
1812 return 0; 1815 return 0;
1813} 1816}
1814 1817
@@ -1864,26 +1867,25 @@ int r600_startup(struct radeon_device *rdev)
1864 return r; 1867 return r;
1865 } 1868 }
1866 r600_gpu_init(rdev); 1869 r600_gpu_init(rdev);
1867 1870 r = r600_blit_init(rdev);
1868 if (!rdev->r600_blit.shader_obj) { 1871 if (r) {
1869 r = r600_blit_init(rdev); 1872 r600_blit_fini(rdev);
1873 rdev->asic->copy = NULL;
1874 dev_warn(rdev->dev, "failed blitter (%d), falling back to memcpy\n", r);
1875 }
1876 /* pin copy shader into vram */
1877 if (rdev->r600_blit.shader_obj) {
1878 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
1879 if (unlikely(r != 0))
1880 return r;
1881 r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
1882 &rdev->r600_blit.shader_gpu_addr);
1883 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
1870 if (r) { 1884 if (r) {
1871 DRM_ERROR("radeon: failed blitter (%d).\n", r); 1885 dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
1872 return r; 1886 return r;
1873 } 1887 }
1874 } 1888 }
1875
1876 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
1877 if (unlikely(r != 0))
1878 return r;
1879 r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
1880 &rdev->r600_blit.shader_gpu_addr);
1881 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
1882 if (r) {
1883 dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
1884 return r;
1885 }
1886
1887 /* Enable IRQ */ 1889 /* Enable IRQ */
1888 r = r600_irq_init(rdev); 1890 r = r600_irq_init(rdev);
1889 if (r) { 1891 if (r) {
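With blitter init failures now non-fatal, rdev->asic->copy is simply cleared and buffer moves degrade to memcpy. A sketch of how a copy call site would honor that (the dispatch itself lives outside this diff, so the guard below is illustrative):

    /* Illustrative guard at a copy call site; not part of this diff. */
    if (rdev->asic->copy)
            r = radeon_copy(rdev, src_offset, dst_offset, num_pages, fence);
    else
            r = -ENOSYS;    /* caller then falls back to a CPU copy */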
@@ -1948,6 +1950,13 @@ int r600_resume(struct radeon_device *rdev)
1948 DRM_ERROR("radeon: failed testing IB (%d).\n", r); 1950 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
1949 return r; 1951 return r;
1950 } 1952 }
1953
1954 r = r600_audio_init(rdev);
1955 if (r) {
1956 DRM_ERROR("radeon: audio resume failed\n");
1957 return r;
1958 }
1959
1951 return r; 1960 return r;
1952} 1961}
1953 1962
@@ -1955,17 +1964,21 @@ int r600_suspend(struct radeon_device *rdev)
1955{ 1964{
1956 int r; 1965 int r;
1957 1966
1967 r600_audio_fini(rdev);
1958 /* FIXME: we should wait for ring to be empty */ 1968 /* FIXME: we should wait for ring to be empty */
1959 r600_cp_stop(rdev); 1969 r600_cp_stop(rdev);
1960 rdev->cp.ready = false; 1970 rdev->cp.ready = false;
1971 r600_irq_suspend(rdev);
1961 r600_wb_disable(rdev); 1972 r600_wb_disable(rdev);
1962 r600_pcie_gart_disable(rdev); 1973 r600_pcie_gart_disable(rdev);
1963 /* unpin shaders bo */ 1974 /* unpin shaders bo */
1964 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); 1975 if (rdev->r600_blit.shader_obj) {
1965 if (unlikely(r != 0)) 1976 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
1966 return r; 1977 if (!r) {
1967 radeon_bo_unpin(rdev->r600_blit.shader_obj); 1978 radeon_bo_unpin(rdev->r600_blit.shader_obj);
1968 radeon_bo_unreserve(rdev->r600_blit.shader_obj); 1979 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
1980 }
1981 }
1969 return 0; 1982 return 0;
1970} 1983}
1971 1984
@@ -2026,6 +2039,11 @@ int r600_init(struct radeon_device *rdev)
2026 r = radeon_fence_driver_init(rdev); 2039 r = radeon_fence_driver_init(rdev);
2027 if (r) 2040 if (r)
2028 return r; 2041 return r;
2042 if (rdev->flags & RADEON_IS_AGP) {
2043 r = radeon_agp_init(rdev);
2044 if (r)
2045 radeon_agp_disable(rdev);
2046 }
2029 r = r600_mc_init(rdev); 2047 r = r600_mc_init(rdev);
2030 if (r) 2048 if (r)
2031 return r; 2049 return r;
@@ -2051,22 +2069,25 @@ int r600_init(struct radeon_device *rdev)
2051 rdev->accel_working = true; 2069 rdev->accel_working = true;
2052 r = r600_startup(rdev); 2070 r = r600_startup(rdev);
2053 if (r) { 2071 if (r) {
2054 r600_suspend(rdev); 2072 dev_err(rdev->dev, "disabling GPU acceleration\n");
2073 r600_cp_fini(rdev);
2055 r600_wb_fini(rdev); 2074 r600_wb_fini(rdev);
2056 radeon_ring_fini(rdev); 2075 r600_irq_fini(rdev);
2076 radeon_irq_kms_fini(rdev);
2057 r600_pcie_gart_fini(rdev); 2077 r600_pcie_gart_fini(rdev);
2058 rdev->accel_working = false; 2078 rdev->accel_working = false;
2059 } 2079 }
2060 if (rdev->accel_working) { 2080 if (rdev->accel_working) {
2061 r = radeon_ib_pool_init(rdev); 2081 r = radeon_ib_pool_init(rdev);
2062 if (r) { 2082 if (r) {
2063 DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r); 2083 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
2064 rdev->accel_working = false;
2065 }
2066 r = r600_ib_test(rdev);
2067 if (r) {
2068 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
2069 rdev->accel_working = false; 2084 rdev->accel_working = false;
2085 } else {
2086 r = r600_ib_test(rdev);
2087 if (r) {
2088 dev_err(rdev->dev, "IB test failed (%d).\n", r);
2089 rdev->accel_working = false;
2090 }
2070 } 2091 }
2071 } 2092 }
2072 2093
@@ -2078,20 +2099,17 @@ int r600_init(struct radeon_device *rdev)
2078 2099
2079void r600_fini(struct radeon_device *rdev) 2100void r600_fini(struct radeon_device *rdev)
2080{ 2101{
2081 /* Suspend operations */
2082 r600_suspend(rdev);
2083
2084 r600_audio_fini(rdev); 2102 r600_audio_fini(rdev);
2085 r600_blit_fini(rdev); 2103 r600_blit_fini(rdev);
2104 r600_cp_fini(rdev);
2105 r600_wb_fini(rdev);
2086 r600_irq_fini(rdev); 2106 r600_irq_fini(rdev);
2087 radeon_irq_kms_fini(rdev); 2107 radeon_irq_kms_fini(rdev);
2088 radeon_ring_fini(rdev);
2089 r600_wb_fini(rdev);
2090 r600_pcie_gart_fini(rdev); 2108 r600_pcie_gart_fini(rdev);
2109 radeon_agp_fini(rdev);
2091 radeon_gem_fini(rdev); 2110 radeon_gem_fini(rdev);
2092 radeon_fence_driver_fini(rdev); 2111 radeon_fence_driver_fini(rdev);
2093 radeon_clocks_fini(rdev); 2112 radeon_clocks_fini(rdev);
2094 radeon_agp_fini(rdev);
2095 radeon_bo_fini(rdev); 2113 radeon_bo_fini(rdev);
2096 radeon_atombios_fini(rdev); 2114 radeon_atombios_fini(rdev);
2097 kfree(rdev->bios); 2115 kfree(rdev->bios);
@@ -2197,14 +2215,14 @@ void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
2197 rb_bufsz = drm_order(ring_size / 4); 2215 rb_bufsz = drm_order(ring_size / 4);
2198 ring_size = (1 << rb_bufsz) * 4; 2216 ring_size = (1 << rb_bufsz) * 4;
2199 rdev->ih.ring_size = ring_size; 2217 rdev->ih.ring_size = ring_size;
2200 rdev->ih.align_mask = 4 - 1; 2218 rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
2219 rdev->ih.rptr = 0;
2201} 2220}
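r600_ih_ring_init() now rounds the ring to a power of two and derives ptr_mask from the final size, which is what makes the pointer masking used later valid. Worked through with a hypothetical 64 KiB ring:

    /* Worked example, assuming a 64 KiB IH ring is requested. */
    unsigned ring_size = 65536;
    unsigned rb_bufsz  = drm_order(ring_size / 4);  /* = 14 */
    ring_size          = (1 << rb_bufsz) * 4;       /* = 65536, unchanged */
    unsigned ptr_mask  = ring_size - 1;             /* = 0xFFFF */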
2202 2221
2203static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size) 2222static int r600_ih_ring_alloc(struct radeon_device *rdev)
2204{ 2223{
2205 int r; 2224 int r;
2206 2225
2207 rdev->ih.ring_size = ring_size;
2208 /* Allocate ring buffer */ 2226 /* Allocate ring buffer */
2209 if (rdev->ih.ring_obj == NULL) { 2227 if (rdev->ih.ring_obj == NULL) {
2210 r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size, 2228 r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
@@ -2234,9 +2252,6 @@ static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size)
2234 return r; 2252 return r;
2235 } 2253 }
2236 } 2254 }
2237 rdev->ih.ptr_mask = (rdev->cp.ring_size / 4) - 1;
2238 rdev->ih.rptr = 0;
2239
2240 return 0; 2255 return 0;
2241} 2256}
2242 2257
@@ -2386,7 +2401,7 @@ int r600_irq_init(struct radeon_device *rdev)
2386 u32 interrupt_cntl, ih_cntl, ih_rb_cntl; 2401 u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
2387 2402
2388 /* allocate ring */ 2403 /* allocate ring */
2389 ret = r600_ih_ring_alloc(rdev, rdev->ih.ring_size); 2404 ret = r600_ih_ring_alloc(rdev);
2390 if (ret) 2405 if (ret)
2391 return ret; 2406 return ret;
2392 2407
@@ -2449,10 +2464,15 @@ int r600_irq_init(struct radeon_device *rdev)
2449 return ret; 2464 return ret;
2450} 2465}
2451 2466
2452void r600_irq_fini(struct radeon_device *rdev) 2467void r600_irq_suspend(struct radeon_device *rdev)
2453{ 2468{
2454 r600_disable_interrupts(rdev); 2469 r600_disable_interrupts(rdev);
2455 r600_rlc_stop(rdev); 2470 r600_rlc_stop(rdev);
2471}
2472
2473void r600_irq_fini(struct radeon_device *rdev)
2474{
2475 r600_irq_suspend(rdev);
2456 r600_ih_ring_fini(rdev); 2476 r600_ih_ring_fini(rdev);
2457} 2477}
2458 2478
@@ -2467,8 +2487,12 @@ int r600_irq_set(struct radeon_device *rdev)
2467 return -EINVAL; 2487 return -EINVAL;
2468 } 2488 }
2469 /* don't enable anything if the ih is disabled */ 2489 /* don't enable anything if the ih is disabled */
2470 if (!rdev->ih.enabled) 2490 if (!rdev->ih.enabled) {
2491 r600_disable_interrupts(rdev);
2492 /* force the active interrupt state to all disabled */
2493 r600_disable_interrupt_state(rdev);
2471 return 0; 2494 return 0;
2495 }
2472 2496
2473 if (ASIC_IS_DCE3(rdev)) { 2497 if (ASIC_IS_DCE3(rdev)) {
2474 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; 2498 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
@@ -2638,16 +2662,18 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
2638 wptr = RREG32(IH_RB_WPTR); 2662 wptr = RREG32(IH_RB_WPTR);
2639 2663
2640 if (wptr & RB_OVERFLOW) { 2664 if (wptr & RB_OVERFLOW) {
2641 WARN_ON(1); 2665 /* When a ring buffer overflow happens, start parsing interrupts
2642 /* XXX deal with overflow */ 2666 * from the last non-overwritten vector (wptr + 16). Hopefully
2643 DRM_ERROR("IH RB overflow\n"); 2667 * this should allow us to catch up.
2668 */
2669 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
2670 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
2671 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
2644 tmp = RREG32(IH_RB_CNTL); 2672 tmp = RREG32(IH_RB_CNTL);
2645 tmp |= IH_WPTR_OVERFLOW_CLEAR; 2673 tmp |= IH_WPTR_OVERFLOW_CLEAR;
2646 WREG32(IH_RB_CNTL, tmp); 2674 WREG32(IH_RB_CNTL, tmp);
2647 } 2675 }
2648 wptr = wptr & WPTR_OFFSET_MASK; 2676 return (wptr & rdev->ih.ptr_mask);
2649
2650 return wptr;
2651} 2677}
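Both the overflow recovery and the normal read loop below rely on that power-of-two sizing: advancing a byte offset and wrapping it collapse into a single AND with ptr_mask. As a standalone helper (our naming; each IH vector is 16 bytes):

    /* Illustrative wrap arithmetic for the IH ring. */
    static unsigned ih_next_rptr(unsigned rptr, unsigned ptr_mask)
    {
            return (rptr + 16) & ptr_mask;  /* step one vector, wrap for free */
    }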
2652 2678
2653/* r600 IV Ring 2679/* r600 IV Ring
@@ -2683,12 +2709,13 @@ int r600_irq_process(struct radeon_device *rdev)
2683 u32 wptr = r600_get_ih_wptr(rdev); 2709 u32 wptr = r600_get_ih_wptr(rdev);
2684 u32 rptr = rdev->ih.rptr; 2710 u32 rptr = rdev->ih.rptr;
2685 u32 src_id, src_data; 2711 u32 src_id, src_data;
2686 u32 last_entry = rdev->ih.ring_size - 16;
2687 u32 ring_index, disp_int, disp_int_cont, disp_int_cont2; 2712 u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
2688 unsigned long flags; 2713 unsigned long flags;
2689 bool queue_hotplug = false; 2714 bool queue_hotplug = false;
2690 2715
2691 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); 2716 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
2717 if (!rdev->ih.enabled)
2718 return IRQ_NONE;
2692 2719
2693 spin_lock_irqsave(&rdev->ih.lock, flags); 2720 spin_lock_irqsave(&rdev->ih.lock, flags);
2694 2721
@@ -2817,10 +2844,8 @@ restart_ih:
2817 } 2844 }
2818 2845
2819 /* wptr/rptr are in bytes! */ 2846 /* wptr/rptr are in bytes! */
2820 if (rptr == last_entry) 2847 rptr += 16;
2821 rptr = 0; 2848 rptr &= rdev->ih.ptr_mask;
2822 else
2823 rptr += 16;
2824 } 2849 }
2825 /* make sure wptr hasn't changed while processing */ 2850 /* make sure wptr hasn't changed while processing */
2826 wptr = r600_get_ih_wptr(rdev); 2851 wptr = r600_get_ih_wptr(rdev);
@@ -2888,3 +2913,18 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev)
2888 return 0; 2913 return 0;
2889#endif 2914#endif
2890} 2915}
2916
2917/**
2918 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
2919 * @rdev: radeon device structure
2920 * @bo: buffer object which userspace is waiting on to go idle
2921 *
2922 * Some R6XX/R7XX chips don't seem to take into account an HDP flush
2923 * performed through the ring buffer; this leads to corruption in rendering,
2924 * see http://bugzilla.kernel.org/show_bug.cgi?id=15186. To avoid this we
2925 * perform the HDP flush directly by writing the register through MMIO.
2926 */
2927void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
2928{
2929 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
2930}
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 99e2c3891a7d..0dcb6904c4ff 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -35,7 +35,7 @@
35 */ 35 */
36static int r600_audio_chipset_supported(struct radeon_device *rdev) 36static int r600_audio_chipset_supported(struct radeon_device *rdev)
37{ 37{
38 return rdev->family >= CHIP_R600 38 return (rdev->family >= CHIP_R600 && rdev->family < CHIP_RV710)
39 || rdev->family == CHIP_RS600 39 || rdev->family == CHIP_RS600
40 || rdev->family == CHIP_RS690 40 || rdev->family == CHIP_RS690
41 || rdev->family == CHIP_RS740; 41 || rdev->family == CHIP_RS740;
@@ -261,7 +261,6 @@ void r600_audio_fini(struct radeon_device *rdev)
261 if (!r600_audio_chipset_supported(rdev)) 261 if (!r600_audio_chipset_supported(rdev))
262 return; 262 return;
263 263
264 WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
265
266 del_timer(&rdev->audio_timer); 264 del_timer(&rdev->audio_timer);
265 WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
267} 266}
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 8787ea89dc6e..af1c3ca8a4cb 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -449,6 +449,7 @@ int r600_blit_init(struct radeon_device *rdev)
449 u32 packet2s[16]; 449 u32 packet2s[16];
450 int num_packet2s = 0; 450 int num_packet2s = 0;
451 451
452 mutex_init(&rdev->r600_blit.mutex);
452 rdev->r600_blit.state_offset = 0; 453 rdev->r600_blit.state_offset = 0;
453 454
454 if (rdev->family >= CHIP_RV770) 455 if (rdev->family >= CHIP_RV770)
@@ -512,14 +513,16 @@ void r600_blit_fini(struct radeon_device *rdev)
512{ 513{
513 int r; 514 int r;
514 515
516 if (rdev->r600_blit.shader_obj == NULL)
517 return;
518 /* If we can't reserve the bo, unref should be enough to destroy
519 * it when it becomes idle.
520 */
515 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); 521 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
516 if (unlikely(r != 0)) { 522 if (!r) {
517 dev_err(rdev->dev, "(%d) can't finish r600 blit\n", r); 523 radeon_bo_unpin(rdev->r600_blit.shader_obj);
518 goto out_unref; 524 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
519 } 525 }
520 radeon_bo_unpin(rdev->r600_blit.shader_obj);
521 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
522out_unref:
523 radeon_bo_unref(&rdev->r600_blit.shader_obj); 526 radeon_bo_unref(&rdev->r600_blit.shader_obj);
524} 527}
525 528
@@ -555,7 +558,8 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
555 int dwords_per_loop = 76, num_loops; 558 int dwords_per_loop = 76, num_loops;
556 559
557 r = r600_vb_ib_get(rdev); 560 r = r600_vb_ib_get(rdev);
558 WARN_ON(r); 561 if (r)
562 return r;
559 563
560 /* set_render_target emits 2 extra dwords on rv6xx */ 564 /* set_render_target emits 2 extra dwords on rv6xx */
561 if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) 565 if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770)
@@ -581,7 +585,8 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
581 ring_size += 5; /* done copy */ 585 ring_size += 5; /* done copy */
582 ring_size += 7; /* fence emit for done copy */ 586 ring_size += 7; /* fence emit for done copy */
583 r = radeon_ring_lock(rdev, ring_size); 587 r = radeon_ring_lock(rdev, ring_size);
584 WARN_ON(r); 588 if (r)
589 return r;
585 590
586 set_default_state(rdev); /* 14 */ 591 set_default_state(rdev); /* 14 */
587 set_shaders(rdev); /* 26 */ 592 set_shaders(rdev); /* 26 */
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 44060b92d9e6..e4c45ec16507 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -36,6 +36,10 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
36typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**); 36typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
37static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm; 37static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
38 38
39struct r600_cs_track {
40 u32 cb_color0_base_last;
41};
42
39/** 43/**
40 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet 44 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
41 * @parser: parser structure holding parsing context. 45 * @parser: parser structure holding parsing context.
@@ -177,6 +181,28 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
177} 181}
178 182
179/** 183/**
184 * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
185 * @parser: parser structure holding parsing context.
186 *
187 * Check whether the next packet is a relocation packet3 (a NOP carrying
188 * relocation information); returns 1 if it is, 0 otherwise.
189 **/
190static inline int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
191{
192 struct radeon_cs_packet p3reloc;
193 int r;
194
195 r = r600_cs_packet_parse(p, &p3reloc, p->idx);
196 if (r) {
197 return 0;
198 }
199 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
200 return 0;
201 }
202 return 1;
203}
204
205/**
180 * r600_cs_packet_next_vline() - parse userspace VLINE packet 206 * r600_cs_packet_next_vline() - parse userspace VLINE packet
181 * @parser: parser structure holding parsing context. 207 * @parser: parser structure holding parsing context.
182 * 208 *
@@ -337,6 +363,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
337 struct radeon_cs_packet *pkt) 363 struct radeon_cs_packet *pkt)
338{ 364{
339 struct radeon_cs_reloc *reloc; 365 struct radeon_cs_reloc *reloc;
366 struct r600_cs_track *track;
340 volatile u32 *ib; 367 volatile u32 *ib;
341 unsigned idx; 368 unsigned idx;
342 unsigned i; 369 unsigned i;
@@ -344,6 +371,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
344 int r; 371 int r;
345 u32 idx_value; 372 u32 idx_value;
346 373
374 track = (struct r600_cs_track *)p->track;
347 ib = p->ib->ptr; 375 ib = p->ib->ptr;
348 idx = pkt->idx + 1; 376 idx = pkt->idx + 1;
349 idx_value = radeon_get_ib_value(p, idx); 377 idx_value = radeon_get_ib_value(p, idx);
@@ -503,9 +531,60 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
503 for (i = 0; i < pkt->count; i++) { 531 for (i = 0; i < pkt->count; i++) {
504 reg = start_reg + (4 * i); 532 reg = start_reg + (4 * i);
505 switch (reg) { 533 switch (reg) {
534 /* These registers were added late, and there is userspace
535 * which does provide relocations for them but sets a
536 * 0 offset. In order to avoid breaking old userspace
537 * we detect this and set the address to point to the last
538 * CB_COLOR0_BASE. Note that if userspace doesn't set
539 * CB_COLOR0_BASE before these registers we will report an
540 * error. Old userspace always sets CB_COLOR0_BASE
541 * before any of these.
542 */
543 case R_0280E0_CB_COLOR0_FRAG:
544 case R_0280E4_CB_COLOR1_FRAG:
545 case R_0280E8_CB_COLOR2_FRAG:
546 case R_0280EC_CB_COLOR3_FRAG:
547 case R_0280F0_CB_COLOR4_FRAG:
548 case R_0280F4_CB_COLOR5_FRAG:
549 case R_0280F8_CB_COLOR6_FRAG:
550 case R_0280FC_CB_COLOR7_FRAG:
551 case R_0280C0_CB_COLOR0_TILE:
552 case R_0280C4_CB_COLOR1_TILE:
553 case R_0280C8_CB_COLOR2_TILE:
554 case R_0280CC_CB_COLOR3_TILE:
555 case R_0280D0_CB_COLOR4_TILE:
556 case R_0280D4_CB_COLOR5_TILE:
557 case R_0280D8_CB_COLOR6_TILE:
558 case R_0280DC_CB_COLOR7_TILE:
559 if (!r600_cs_packet_next_is_pkt3_nop(p)) {
560 if (!track->cb_color0_base_last) {
561 dev_err(p->dev, "Broken old userspace? No cb_color0_base supplied before trying to write 0x%08X\n", reg);
562 return -EINVAL;
563 }
564 ib[idx+1+i] = track->cb_color0_base_last;
565 printk_once(KERN_WARNING "radeon: You have old & broken userspace "
566 "please consider updating mesa & xf86-video-ati\n");
567 } else {
568 r = r600_cs_packet_next_reloc(p, &reloc);
569 if (r) {
570 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
571 return -EINVAL;
572 }
573 ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
574 }
575 break;
506 case DB_DEPTH_BASE: 576 case DB_DEPTH_BASE:
507 case DB_HTILE_DATA_BASE: 577 case DB_HTILE_DATA_BASE:
508 case CB_COLOR0_BASE: 578 case CB_COLOR0_BASE:
579 r = r600_cs_packet_next_reloc(p, &reloc);
580 if (r) {
581 DRM_ERROR("bad SET_CONTEXT_REG "
582 "0x%04X\n", reg);
583 return -EINVAL;
584 }
585 ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
586 track->cb_color0_base_last = ib[idx+1+i];
587 break;
509 case CB_COLOR1_BASE: 588 case CB_COLOR1_BASE:
510 case CB_COLOR2_BASE: 589 case CB_COLOR2_BASE:
511 case CB_COLOR3_BASE: 590 case CB_COLOR3_BASE:
@@ -678,8 +757,11 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
678int r600_cs_parse(struct radeon_cs_parser *p) 757int r600_cs_parse(struct radeon_cs_parser *p)
679{ 758{
680 struct radeon_cs_packet pkt; 759 struct radeon_cs_packet pkt;
760 struct r600_cs_track *track;
681 int r; 761 int r;
682 762
763 track = kzalloc(sizeof(*track), GFP_KERNEL);
764 p->track = track;
683 do { 765 do {
684 r = r600_cs_packet_parse(p, &pkt, p->idx); 766 r = r600_cs_packet_parse(p, &pkt, p->idx);
685 if (r) { 767 if (r) {
@@ -757,6 +839,7 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
757 /* initialize parser */ 839 /* initialize parser */
758 memset(&parser, 0, sizeof(struct radeon_cs_parser)); 840 memset(&parser, 0, sizeof(struct radeon_cs_parser));
759 parser.filp = filp; 841 parser.filp = filp;
842 parser.dev = &dev->pdev->dev;
760 parser.rdev = NULL; 843 parser.rdev = NULL;
761 parser.family = family; 844 parser.family = family;
762 parser.ib = &fake_ib; 845 parser.ib = &fake_ib;
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 05894edadab4..30480881aed1 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -882,4 +882,29 @@
882#define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17) 882#define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17)
883 883
884#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480 884#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
885
886#define R_0280E0_CB_COLOR0_FRAG 0x0280E0
887#define S_0280E0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0)
888#define G_0280E0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF)
889#define C_0280E0_BASE_256B 0x00000000
890#define R_0280E4_CB_COLOR1_FRAG 0x0280E4
891#define R_0280E8_CB_COLOR2_FRAG 0x0280E8
892#define R_0280EC_CB_COLOR3_FRAG 0x0280EC
893#define R_0280F0_CB_COLOR4_FRAG 0x0280F0
894#define R_0280F4_CB_COLOR5_FRAG 0x0280F4
895#define R_0280F8_CB_COLOR6_FRAG 0x0280F8
896#define R_0280FC_CB_COLOR7_FRAG 0x0280FC
897#define R_0280C0_CB_COLOR0_TILE 0x0280C0
898#define S_0280C0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0)
899#define G_0280C0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF)
900#define C_0280C0_BASE_256B 0x00000000
901#define R_0280C4_CB_COLOR1_TILE 0x0280C4
902#define R_0280C8_CB_COLOR2_TILE 0x0280C8
903#define R_0280CC_CB_COLOR3_TILE 0x0280CC
904#define R_0280D0_CB_COLOR4_TILE 0x0280D0
905#define R_0280D4_CB_COLOR5_TILE 0x0280D4
906#define R_0280D8_CB_COLOR6_TILE 0x0280D8
907#define R_0280DC_CB_COLOR7_TILE 0x0280DC
908
909
885#endif 910#endif
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index eb5f99b9469d..f57480ba1355 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -410,13 +410,13 @@ struct r600_ih {
410 unsigned wptr_old; 410 unsigned wptr_old;
411 unsigned ring_size; 411 unsigned ring_size;
412 uint64_t gpu_addr; 412 uint64_t gpu_addr;
413 uint32_t align_mask;
414 uint32_t ptr_mask; 413 uint32_t ptr_mask;
415 spinlock_t lock; 414 spinlock_t lock;
416 bool enabled; 415 bool enabled;
417}; 416};
418 417
419struct r600_blit { 418struct r600_blit {
419 struct mutex mutex;
420 struct radeon_bo *shader_obj; 420 struct radeon_bo *shader_obj;
421 u64 shader_gpu_addr; 421 u64 shader_gpu_addr;
422 u32 vs_offset, ps_offset; 422 u32 vs_offset, ps_offset;
@@ -465,6 +465,7 @@ struct radeon_cs_chunk {
465}; 465};
466 466
467struct radeon_cs_parser { 467struct radeon_cs_parser {
468 struct device *dev;
468 struct radeon_device *rdev; 469 struct radeon_device *rdev;
469 struct drm_file *filp; 470 struct drm_file *filp;
470 /* chunks */ 471 /* chunks */
@@ -660,6 +661,13 @@ struct radeon_asic {
660 void (*hpd_fini)(struct radeon_device *rdev); 661 void (*hpd_fini)(struct radeon_device *rdev);
661 bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd); 662 bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
662 void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd); 663 void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
664 /* ioctl hw specific callback. Some hw might want to perform a special
665 * operation on a specific ioctl. For instance, on wait idle some hw
666 * might want to perform an HDP flush through MMIO, as it seems that
667 * some R6XX/R7XX hw doesn't take the HDP flush into account if programmed
668 * through the ring.
669 */
670 void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
663}; 671};
664 672
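Because the pre-R600 asic tables (see radeon_asic.h below) set the hook to NULL, callers must guard the indirect call. A minimal dispatch sketch (the real call site, in the GEM wait-idle ioctl path, is outside this diff):

    /* Illustrative dispatch through the asic table. */
    static void radeon_bo_wait_idle_flush(struct radeon_device *rdev,
                                          struct radeon_bo *bo)
    {
            if (rdev->asic->ioctl_wait_idle)
                    rdev->asic->ioctl_wait_idle(rdev, bo);
    }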
665/* 673/*
@@ -847,7 +855,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
847 855
848static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) 856static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
849{ 857{
850 if (reg < 0x10000) 858 if (reg < rdev->rmmio_size)
851 return readl(((void __iomem *)rdev->rmmio) + reg); 859 return readl(((void __iomem *)rdev->rmmio) + reg);
852 else { 860 else {
853 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); 861 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
@@ -857,7 +865,7 @@ static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
857 865
858static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 866static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
859{ 867{
860 if (reg < 0x10000) 868 if (reg < rdev->rmmio_size)
861 writel(v, ((void __iomem *)rdev->rmmio) + reg); 869 writel(v, ((void __iomem *)rdev->rmmio) + reg);
862 else { 870 else {
863 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); 871 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
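Bounding the fast path by rmmio_size instead of a hard-coded 0x10000 means a register past the mapped window always takes the index/data detour instead of a stray direct access. The indirect read path, shown standalone (the write side mirrors it through the same pair):

    /* Illustrative indexed read for registers beyond the mapped window. */
    static u32 rreg_indexed(struct radeon_device *rdev, u32 reg)
    {
            writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
            return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
    }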
@@ -1017,6 +1025,8 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
1017#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd)) 1025#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd))
1018 1026
1019/* Common functions */ 1027/* Common functions */
1028/* AGP */
1029extern void radeon_agp_disable(struct radeon_device *rdev);
1020extern int radeon_gart_table_vram_pin(struct radeon_device *rdev); 1030extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
1021extern int radeon_modeset_init(struct radeon_device *rdev); 1031extern int radeon_modeset_init(struct radeon_device *rdev);
1022extern void radeon_modeset_fini(struct radeon_device *rdev); 1032extern void radeon_modeset_fini(struct radeon_device *rdev);
@@ -1140,6 +1150,7 @@ extern bool r600_card_posted(struct radeon_device *rdev);
1140extern void r600_cp_stop(struct radeon_device *rdev); 1150extern void r600_cp_stop(struct radeon_device *rdev);
1141extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); 1151extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
1142extern int r600_cp_resume(struct radeon_device *rdev); 1152extern int r600_cp_resume(struct radeon_device *rdev);
1153extern void r600_cp_fini(struct radeon_device *rdev);
1143extern int r600_count_pipe_bits(uint32_t val); 1154extern int r600_count_pipe_bits(uint32_t val);
1144extern int r600_gart_clear_page(struct radeon_device *rdev, int i); 1155extern int r600_gart_clear_page(struct radeon_device *rdev, int i);
1145extern int r600_mc_wait_for_idle(struct radeon_device *rdev); 1156extern int r600_mc_wait_for_idle(struct radeon_device *rdev);
@@ -1160,7 +1171,8 @@ extern int r600_irq_init(struct radeon_device *rdev);
1160extern void r600_irq_fini(struct radeon_device *rdev); 1171extern void r600_irq_fini(struct radeon_device *rdev);
1161extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size); 1172extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
1162extern int r600_irq_set(struct radeon_device *rdev); 1173extern int r600_irq_set(struct radeon_device *rdev);
1163 1174extern void r600_irq_suspend(struct radeon_device *rdev);
1175/* r600 audio */
1164extern int r600_audio_init(struct radeon_device *rdev); 1176extern int r600_audio_init(struct radeon_device *rdev);
1165extern int r600_audio_tmds_index(struct drm_encoder *encoder); 1177extern int r600_audio_tmds_index(struct drm_encoder *encoder);
1166extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock); 1178extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index 220f454ea9fa..c0681a5556dc 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -144,9 +144,19 @@ int radeon_agp_init(struct radeon_device *rdev)
144 144
145 ret = drm_agp_info(rdev->ddev, &info); 145 ret = drm_agp_info(rdev->ddev, &info);
146 if (ret) { 146 if (ret) {
147 drm_agp_release(rdev->ddev);
147 DRM_ERROR("Unable to get AGP info: %d\n", ret); 148 DRM_ERROR("Unable to get AGP info: %d\n", ret);
148 return ret; 149 return ret;
149 } 150 }
151
152 if (rdev->ddev->agp->agp_info.aper_size < 32) {
153 drm_agp_release(rdev->ddev);
154 dev_warn(rdev->dev, "AGP aperture too small (%zuM) "
155 "need at least 32M, disabling AGP\n",
156 rdev->ddev->agp->agp_info.aper_size);
157 return -EINVAL;
158 }
159
150 mode.mode = info.mode; 160 mode.mode = info.mode;
151 agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode; 161 agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
152 is_v3 = !!(agp_status & RADEON_AGPv3_MODE); 162 is_v3 = !!(agp_status & RADEON_AGPv3_MODE);
@@ -221,6 +231,7 @@ int radeon_agp_init(struct radeon_device *rdev)
221 ret = drm_agp_enable(rdev->ddev, mode); 231 ret = drm_agp_enable(rdev->ddev, mode);
222 if (ret) { 232 if (ret) {
223 DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode); 233 DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
234 drm_agp_release(rdev->ddev);
224 return ret; 235 return ret;
225 } 236 }
226 237
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index f2fbd2e4e9df..05ee1aeac3fd 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -117,6 +117,7 @@ static struct radeon_asic r100_asic = {
117 .hpd_fini = &r100_hpd_fini, 117 .hpd_fini = &r100_hpd_fini,
118 .hpd_sense = &r100_hpd_sense, 118 .hpd_sense = &r100_hpd_sense,
119 .hpd_set_polarity = &r100_hpd_set_polarity, 119 .hpd_set_polarity = &r100_hpd_set_polarity,
120 .ioctl_wait_idle = NULL,
120}; 121};
121 122
122 123
@@ -176,6 +177,7 @@ static struct radeon_asic r300_asic = {
176 .hpd_fini = &r100_hpd_fini, 177 .hpd_fini = &r100_hpd_fini,
177 .hpd_sense = &r100_hpd_sense, 178 .hpd_sense = &r100_hpd_sense,
178 .hpd_set_polarity = &r100_hpd_set_polarity, 179 .hpd_set_polarity = &r100_hpd_set_polarity,
180 .ioctl_wait_idle = NULL,
179}; 181};
180 182
181/* 183/*
@@ -219,6 +221,7 @@ static struct radeon_asic r420_asic = {
219 .hpd_fini = &r100_hpd_fini, 221 .hpd_fini = &r100_hpd_fini,
220 .hpd_sense = &r100_hpd_sense, 222 .hpd_sense = &r100_hpd_sense,
221 .hpd_set_polarity = &r100_hpd_set_polarity, 223 .hpd_set_polarity = &r100_hpd_set_polarity,
224 .ioctl_wait_idle = NULL,
222}; 225};
223 226
224 227
@@ -267,6 +270,7 @@ static struct radeon_asic rs400_asic = {
267 .hpd_fini = &r100_hpd_fini, 270 .hpd_fini = &r100_hpd_fini,
268 .hpd_sense = &r100_hpd_sense, 271 .hpd_sense = &r100_hpd_sense,
269 .hpd_set_polarity = &r100_hpd_set_polarity, 272 .hpd_set_polarity = &r100_hpd_set_polarity,
273 .ioctl_wait_idle = NULL,
270}; 274};
271 275
272 276
@@ -323,6 +327,7 @@ static struct radeon_asic rs600_asic = {
323 .hpd_fini = &rs600_hpd_fini, 327 .hpd_fini = &rs600_hpd_fini,
324 .hpd_sense = &rs600_hpd_sense, 328 .hpd_sense = &rs600_hpd_sense,
325 .hpd_set_polarity = &rs600_hpd_set_polarity, 329 .hpd_set_polarity = &rs600_hpd_set_polarity,
330 .ioctl_wait_idle = NULL,
326}; 331};
327 332
328 333
@@ -370,6 +375,7 @@ static struct radeon_asic rs690_asic = {
370 .hpd_fini = &rs600_hpd_fini, 375 .hpd_fini = &rs600_hpd_fini,
371 .hpd_sense = &rs600_hpd_sense, 376 .hpd_sense = &rs600_hpd_sense,
372 .hpd_set_polarity = &rs600_hpd_set_polarity, 377 .hpd_set_polarity = &rs600_hpd_set_polarity,
378 .ioctl_wait_idle = NULL,
373}; 379};
374 380
375 381
@@ -421,6 +427,7 @@ static struct radeon_asic rv515_asic = {
421 .hpd_fini = &rs600_hpd_fini, 427 .hpd_fini = &rs600_hpd_fini,
422 .hpd_sense = &rs600_hpd_sense, 428 .hpd_sense = &rs600_hpd_sense,
423 .hpd_set_polarity = &rs600_hpd_set_polarity, 429 .hpd_set_polarity = &rs600_hpd_set_polarity,
430 .ioctl_wait_idle = NULL,
424}; 431};
425 432
426 433
@@ -463,6 +470,7 @@ static struct radeon_asic r520_asic = {
463 .hpd_fini = &rs600_hpd_fini, 470 .hpd_fini = &rs600_hpd_fini,
464 .hpd_sense = &rs600_hpd_sense, 471 .hpd_sense = &rs600_hpd_sense,
465 .hpd_set_polarity = &rs600_hpd_set_polarity, 472 .hpd_set_polarity = &rs600_hpd_set_polarity,
473 .ioctl_wait_idle = NULL,
466}; 474};
467 475
468/* 476/*
@@ -504,6 +512,7 @@ void r600_hpd_fini(struct radeon_device *rdev);
504bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); 512bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
505void r600_hpd_set_polarity(struct radeon_device *rdev, 513void r600_hpd_set_polarity(struct radeon_device *rdev,
506 enum radeon_hpd_id hpd); 514 enum radeon_hpd_id hpd);
515extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
507 516
508static struct radeon_asic r600_asic = { 517static struct radeon_asic r600_asic = {
509 .init = &r600_init, 518 .init = &r600_init,
@@ -538,6 +547,7 @@ static struct radeon_asic r600_asic = {
538 .hpd_fini = &r600_hpd_fini, 547 .hpd_fini = &r600_hpd_fini,
539 .hpd_sense = &r600_hpd_sense, 548 .hpd_sense = &r600_hpd_sense,
540 .hpd_set_polarity = &r600_hpd_set_polarity, 549 .hpd_set_polarity = &r600_hpd_set_polarity,
550 .ioctl_wait_idle = r600_ioctl_wait_idle,
541}; 551};
542 552
543/* 553/*
@@ -582,6 +592,7 @@ static struct radeon_asic rv770_asic = {
582 .hpd_fini = &r600_hpd_fini, 592 .hpd_fini = &r600_hpd_fini,
583 .hpd_sense = &r600_hpd_sense, 593 .hpd_sense = &r600_hpd_sense,
584 .hpd_set_polarity = &r600_hpd_set_polarity, 594 .hpd_set_polarity = &r600_hpd_set_polarity,
595 .ioctl_wait_idle = r600_ioctl_wait_idle,
585}; 596};
586 597
587#endif 598#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index fa82ca74324e..2dcda6115874 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -287,6 +287,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
287 *connector_type = DRM_MODE_CONNECTOR_DVID; 287 *connector_type = DRM_MODE_CONNECTOR_DVID;
288 } 288 }
289 289
290 /* XFX Pine Group device rv730 reports no VGA DDC lines
291 * even though they are wired up to record 0x93
292 */
293 if ((dev->pdev->device == 0x9498) &&
294 (dev->pdev->subsystem_vendor == 0x1682) &&
295 (dev->pdev->subsystem_device == 0x2452)) {
296 struct radeon_device *rdev = dev->dev_private;
297 *i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
298 }
290 return true; 299 return true;
291} 300}
292 301
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 4ddfd4b5bc51..7932dc4d6b90 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -65,31 +65,42 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
65 if (r) { 65 if (r) {
66 goto out_cleanup; 66 goto out_cleanup;
67 } 67 }
68 start_jiffies = jiffies; 68
69 for (i = 0; i < n; i++) { 69 /* r100 doesn't have dma engine so skip the test */
70 r = radeon_fence_create(rdev, &fence); 70 if (rdev->asic->copy_dma) {
71 if (r) { 71
72 goto out_cleanup; 72 start_jiffies = jiffies;
73 for (i = 0; i < n; i++) {
74 r = radeon_fence_create(rdev, &fence);
75 if (r) {
76 goto out_cleanup;
77 }
78
79 r = radeon_copy_dma(rdev, saddr, daddr,
80 size / RADEON_GPU_PAGE_SIZE, fence);
81
82 if (r) {
83 goto out_cleanup;
84 }
85 r = radeon_fence_wait(fence, false);
86 if (r) {
87 goto out_cleanup;
88 }
89 radeon_fence_unref(&fence);
73 } 90 }
74 r = radeon_copy_dma(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence); 91 end_jiffies = jiffies;
75 if (r) { 92 time = end_jiffies - start_jiffies;
76 goto out_cleanup; 93 time = jiffies_to_msecs(time);
94 if (time > 0) {
95 i = ((n * size) >> 10) / time;
96 printk(KERN_INFO "radeon: dma %u bo moves of %ukb from"
97 " %d to %d in %lums (%ukb/ms %ukb/s %uM/s)\n",
98 n, size >> 10,
99 sdomain, ddomain, time,
100 i, i * 1000, (i * 1000) / 1024);
77 } 101 }
78 r = radeon_fence_wait(fence, false);
79 if (r) {
80 goto out_cleanup;
81 }
82 radeon_fence_unref(&fence);
83 }
84 end_jiffies = jiffies;
85 time = end_jiffies - start_jiffies;
86 time = jiffies_to_msecs(time);
87 if (time > 0) {
88 i = ((n * size) >> 10) / time;
89 printk(KERN_INFO "radeon: dma %u bo moves of %ukb from %d to %d"
90 " in %lums (%ukb/ms %ukb/s %uM/s)\n", n, size >> 10,
91 sdomain, ddomain, time, i, i * 1000, (i * 1000) / 1024);
92 } 102 }
103
93 start_jiffies = jiffies; 104 start_jiffies = jiffies;
94 for (i = 0; i < n; i++) { 105 for (i = 0; i < n; i++) {
95 r = radeon_fence_create(rdev, &fence); 106 r = radeon_fence_create(rdev, &fence);
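As a units check on the throughput printk above, ((n * size) >> 10) / time yields KiB per millisecond. With hypothetical numbers:

    /* Hypothetical run: 1024 moves of 1 MiB finishing in 512 ms. */
    unsigned n = 1024, size = 1024 * 1024;
    unsigned long time = 512;
    unsigned i = ((n * size) >> 10) / time; /* 1048576 KiB / 512 ms = 2048 */
    /* i                 = 2048    KiB/ms
     * i * 1000          = 2048000 KiB/s
     * (i * 1000) / 1024 = 2000    MiB/s, matching the printed columns */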
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 812f24dbc2a8..73c4405bf42f 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -56,7 +56,7 @@ uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
56 else if (post_div == 3) 56 else if (post_div == 3)
57 sclk >>= 2; 57 sclk >>= 2;
58 else if (post_div == 4) 58 else if (post_div == 4)
59 sclk >>= 4; 59 sclk >>= 3;
60 60
61 return sclk; 61 return sclk;
62} 62}
@@ -86,7 +86,7 @@ uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
86 else if (post_div == 3) 86 else if (post_div == 3)
87 mclk >>= 2; 87 mclk >>= 2;
88 else if (post_div == 4) 88 else if (post_div == 4)
89 mclk >>= 4; 89 mclk >>= 3;
90 90
91 return mclk; 91 return mclk;
92} 92}
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 579c8920e081..e7b19440102e 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -971,8 +971,7 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
971 lvds->native_mode.vdisplay); 971 lvds->native_mode.vdisplay);
972 972
973 lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c); 973 lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c);
974 if (lvds->panel_vcc_delay > 2000 || lvds->panel_vcc_delay < 0) 974 lvds->panel_vcc_delay = min_t(u16, lvds->panel_vcc_delay, 2000);
975 lvds->panel_vcc_delay = 2000;
976 975
977 lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24); 976 lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24);
978 lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf; 977 lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 55266416fa47..238188540017 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -580,16 +580,18 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
580 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 580 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
581 struct drm_encoder *encoder; 581 struct drm_encoder *encoder;
582 struct drm_encoder_helper_funcs *encoder_funcs; 582 struct drm_encoder_helper_funcs *encoder_funcs;
583 bool dret; 583 bool dret = false;
584 enum drm_connector_status ret = connector_status_disconnected; 584 enum drm_connector_status ret = connector_status_disconnected;
585 585
586 encoder = radeon_best_single_encoder(connector); 586 encoder = radeon_best_single_encoder(connector);
587 if (!encoder) 587 if (!encoder)
588 ret = connector_status_disconnected; 588 ret = connector_status_disconnected;
589 589
590 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); 590 if (radeon_connector->ddc_bus) {
591 dret = radeon_ddc_probe(radeon_connector); 591 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
592 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); 592 dret = radeon_ddc_probe(radeon_connector);
593 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
594 }
593 if (dret) { 595 if (dret) {
594 if (radeon_connector->edid) { 596 if (radeon_connector->edid) {
595 kfree(radeon_connector->edid); 597 kfree(radeon_connector->edid);
@@ -740,11 +742,13 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
740 struct drm_mode_object *obj; 742 struct drm_mode_object *obj;
741 int i; 743 int i;
742 enum drm_connector_status ret = connector_status_disconnected; 744 enum drm_connector_status ret = connector_status_disconnected;
743 bool dret; 745 bool dret = false;
744 746
745 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); 747 if (radeon_connector->ddc_bus) {
746 dret = radeon_ddc_probe(radeon_connector); 748 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
747 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); 749 dret = radeon_ddc_probe(radeon_connector);
750 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
751 }
748 if (dret) { 752 if (dret) {
749 if (radeon_connector->edid) { 753 if (radeon_connector->edid) {
750 kfree(radeon_connector->edid); 754 kfree(radeon_connector->edid);
@@ -1343,7 +1347,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
1343 radeon_connector->dac_load_detect = false; 1347 radeon_connector->dac_load_detect = false;
1344 drm_connector_attach_property(&radeon_connector->base, 1348 drm_connector_attach_property(&radeon_connector->base,
1345 rdev->mode_info.load_detect_property, 1349 rdev->mode_info.load_detect_property,
1346 1); 1350 radeon_connector->dac_load_detect);
1347 drm_connector_attach_property(&radeon_connector->base, 1351 drm_connector_attach_property(&radeon_connector->base,
1348 rdev->mode_info.tv_std_property, 1352 rdev->mode_info.tv_std_property,
1349 radeon_combios_get_tv_info(rdev)); 1353 radeon_combios_get_tv_info(rdev));
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 65590a0f1d93..1190148cf5e6 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -189,7 +189,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
189{ 189{
190 unsigned i; 190 unsigned i;
191 191
192 if (error) { 192 if (error && parser->ib) {
193 radeon_bo_list_unvalidate(&parser->validated, 193 radeon_bo_list_unvalidate(&parser->validated,
194 parser->ib->fence); 194 parser->ib->fence);
195 } else { 195 } else {
@@ -231,6 +231,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
231 memset(&parser, 0, sizeof(struct radeon_cs_parser)); 231 memset(&parser, 0, sizeof(struct radeon_cs_parser));
232 parser.filp = filp; 232 parser.filp = filp;
233 parser.rdev = rdev; 233 parser.rdev = rdev;
234 parser.dev = rdev->dev;
234 r = radeon_cs_parser_init(&parser, data); 235 r = radeon_cs_parser_init(&parser, data);
235 if (r) { 236 if (r) {
236 DRM_ERROR("Failed to initialize parser !\n"); 237 DRM_ERROR("Failed to initialize parser !\n");
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0c51f8e46613..768b1509fa03 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -544,6 +544,7 @@ void radeon_agp_disable(struct radeon_device *rdev)
 		rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
 		rdev->asic->gart_set_page = &r100_pci_gart_set_page;
 	}
+	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
 }
 
 void radeon_check_arguments(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 0ec491ead2ff..7e17a362b54b 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -278,7 +278,7 @@ static void radeon_print_display_setup(struct drm_device *dev)
 		DRM_INFO(" %s\n", connector_names[connector->connector_type]);
 		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
 			DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]);
-		if (radeon_connector->ddc_bus)
+		if (radeon_connector->ddc_bus) {
 			DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
 				 radeon_connector->ddc_bus->rec.mask_clk_reg,
 				 radeon_connector->ddc_bus->rec.mask_data_reg,
@@ -288,6 +288,15 @@ static void radeon_print_display_setup(struct drm_device *dev)
 				 radeon_connector->ddc_bus->rec.en_data_reg,
 				 radeon_connector->ddc_bus->rec.y_clk_reg,
 				 radeon_connector->ddc_bus->rec.y_data_reg);
+		} else {
+			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
+			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
+			    connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
+			    connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
+			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
+				DRM_INFO(" DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
+		}
 		DRM_INFO(" Encoders:\n");
 		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 			radeon_encoder = to_radeon_encoder(encoder);
@@ -357,7 +366,8 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
 	if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
 	    (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
 		struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
-		if (dig->dp_i2c_bus)
+		if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
+		     dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus)
 			radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter);
 	}
 	if (!radeon_connector->ddc_bus)
@@ -410,11 +420,12 @@ void radeon_compute_pll(struct radeon_pll *pll,
 			uint32_t *fb_div_p,
 			uint32_t *frac_fb_div_p,
 			uint32_t *ref_div_p,
-			uint32_t *post_div_p,
-			int flags)
+			uint32_t *post_div_p)
 {
 	uint32_t min_ref_div = pll->min_ref_div;
 	uint32_t max_ref_div = pll->max_ref_div;
+	uint32_t min_post_div = pll->min_post_div;
+	uint32_t max_post_div = pll->max_post_div;
 	uint32_t min_fractional_feed_div = 0;
 	uint32_t max_fractional_feed_div = 0;
 	uint32_t best_vco = pll->best_vco;
@@ -430,7 +441,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
 	DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
 	freq = freq * 1000;
 
-	if (flags & RADEON_PLL_USE_REF_DIV)
+	if (pll->flags & RADEON_PLL_USE_REF_DIV)
 		min_ref_div = max_ref_div = pll->reference_div;
 	else {
 		while (min_ref_div < max_ref_div-1) {
@@ -445,19 +456,22 @@ void radeon_compute_pll(struct radeon_pll *pll,
 		}
 	}
 
-	if (flags & RADEON_PLL_USE_FRAC_FB_DIV) {
+	if (pll->flags & RADEON_PLL_USE_POST_DIV)
+		min_post_div = max_post_div = pll->post_div;
+
+	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
 		min_fractional_feed_div = pll->min_frac_feedback_div;
 		max_fractional_feed_div = pll->max_frac_feedback_div;
 	}
 
-	for (post_div = pll->min_post_div; post_div <= pll->max_post_div; ++post_div) {
+	for (post_div = min_post_div; post_div <= max_post_div; ++post_div) {
 		uint32_t ref_div;
 
-		if ((flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
+		if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
 			continue;
 
 		/* legacy radeons only have a few post_divs */
-		if (flags & RADEON_PLL_LEGACY) {
+		if (pll->flags & RADEON_PLL_LEGACY) {
 			if ((post_div == 5) ||
 			    (post_div == 7) ||
 			    (post_div == 9) ||
@@ -504,7 +518,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
 				tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div;
 				current_freq = radeon_div(tmp, ref_div * post_div);
 
-				if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
+				if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
 					error = freq - current_freq;
 					error = error < 0 ? 0xffffffff : error;
 				} else
@@ -531,12 +545,12 @@ void radeon_compute_pll(struct radeon_pll *pll,
 					best_freq = current_freq;
 					best_error = error;
 					best_vco_diff = vco_diff;
-				} else if (((flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
-					   ((flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
-					   ((flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
-					   ((flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
-					   ((flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
-					   ((flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
+				} else if (((pll->flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
+					   ((pll->flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
+					   ((pll->flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
+					   ((pll->flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
+					   ((pll->flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
+					   ((pll->flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
 					best_post_div = post_div;
 					best_ref_div = ref_div;
 					best_feedback_div = feedback_div;
@@ -572,8 +586,7 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
 			      uint32_t *fb_div_p,
 			      uint32_t *frac_fb_div_p,
 			      uint32_t *ref_div_p,
-			      uint32_t *post_div_p,
-			      int flags)
+			      uint32_t *post_div_p)
 {
 	fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq;
 	fixed20_12 pll_out_max, pll_out_min;
@@ -667,7 +680,6 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
 		radeonfb_remove(dev, fb);
 
 	if (radeon_fb->obj) {
-		radeon_gem_object_unpin(radeon_fb->obj);
 		mutex_lock(&dev->struct_mutex);
 		drm_gem_object_unreference(radeon_fb->obj);
 		mutex_unlock(&dev->struct_mutex);
@@ -715,7 +727,11 @@ radeon_user_framebuffer_create(struct drm_device *dev,
 	struct drm_gem_object *obj;
 
 	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
-
+	if (obj == NULL) {
+		dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
+			"can't create framebuffer\n", mode_cmd->handle);
+		return NULL;
+	}
 	return radeon_framebuffer_create(dev, mode_cmd, obj);
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 82eb551970b9..3c91724457ca 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -156,6 +156,26 @@ radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t
 	return ret;
 }
 
+static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_DDI:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+		return true;
+	default:
+		return false;
+	}
+}
 void
 radeon_link_encoder_connector(struct drm_device *dev)
 {
@@ -202,7 +222,7 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		radeon_connector = to_radeon_connector(connector);
-		if (radeon_encoder->devices & radeon_connector->devices)
+		if (radeon_encoder->active_device & radeon_connector->devices)
 			return connector;
 	}
 	return NULL;
@@ -676,31 +696,11 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
 
 	memset(&args, 0, sizeof(args));
 
-	if (ASIC_IS_DCE32(rdev)) {
-		if (dig->dig_block)
-			index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
-		else
-			index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
-		num = dig->dig_block + 1;
-	} else {
-		switch (radeon_encoder->encoder_id) {
-		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
-			/* XXX doesn't really matter which dig encoder we pick as long as it's
-			 * not already in use
-			 */
-			if (dig_connector->linkb)
-				index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
-			else
-				index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
-			num = 1;
-			break;
-		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
-			/* Only dig2 encoder can drive LVTMA */
-			index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
-			num = 2;
-			break;
-		}
-	}
+	if (dig->dig_encoder)
+		index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
+	else
+		index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
+	num = dig->dig_encoder + 1;
 
 	atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
 
@@ -822,7 +822,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
 		args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
 	}
 	if (ASIC_IS_DCE32(rdev)) {
-		if (dig->dig_block)
+		if (dig->dig_encoder == 1)
			args.v2.acConfig.ucEncoderSel = 1;
 		if (dig_connector->linkb)
 			args.v2.acConfig.ucLinkSel = 1;
@@ -849,17 +849,16 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
 			args.v2.acConfig.fCoherentMode = 1;
 		}
 	} else {
-
 		args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
 
+		if (dig->dig_encoder)
+			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
+		else
+			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
+
 		switch (radeon_encoder->encoder_id) {
 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
-			/* XXX doesn't really matter which dig encoder we pick as long as it's
-			 * not already in use
-			 */
-			if (dig_connector->linkb)
-				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
-			else
-				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
 			if (rdev->flags & RADEON_IS_IGP) {
 				if (radeon_encoder->pixel_clock > 165000) {
 					if (dig_connector->igp_lane_info & 0x3)
@@ -878,10 +877,6 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
 				}
 			}
 			break;
-		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
-			/* Only dig2 encoder can drive LVTMA */
-			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
-			break;
 		}
 
 		if (radeon_encoder->pixel_clock > 165000)
@@ -1046,6 +1041,7 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
 	union crtc_sourc_param args;
 	int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
 	uint8_t frev, crev;
+	struct radeon_encoder_atom_dig *dig;
 
 	memset(&args, 0, sizeof(args));
 
@@ -1109,40 +1105,16 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
-			if (ASIC_IS_DCE32(rdev)) {
-				if (radeon_crtc->crtc_id)
-					args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
-				else
-					args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
-			} else {
-				struct drm_connector *connector;
-				struct radeon_connector *radeon_connector;
-				struct radeon_connector_atom_dig *dig_connector;
-
-				connector = radeon_get_connector_for_encoder(encoder);
-				if (!connector)
-					return;
-				radeon_connector = to_radeon_connector(connector);
-				if (!radeon_connector->con_priv)
-					return;
-				dig_connector = radeon_connector->con_priv;
-
-				/* XXX doesn't really matter which dig encoder we pick as long as it's
-				 * not already in use
-				 */
-				if (dig_connector->linkb)
-					args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
-				else
-					args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
-			}
+		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+			dig = radeon_encoder->enc_priv;
+			if (dig->dig_encoder)
+				args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
+			else
+				args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
 			break;
 		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
 			args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
 			break;
-		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
-			/* Only dig2 encoder can drive LVTMA */
-			args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
-			break;
 		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
 			if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
 				args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
@@ -1202,6 +1174,47 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
 	}
 }
 
+static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_encoder *test_encoder;
+	struct radeon_encoder_atom_dig *dig;
+	uint32_t dig_enc_in_use = 0;
+	/* on DCE32 and encoder can driver any block so just crtc id */
+	if (ASIC_IS_DCE32(rdev)) {
+		return radeon_crtc->crtc_id;
+	}
+
+	/* on DCE3 - LVTMA can only be driven by DIGB */
+	list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
+		struct radeon_encoder *radeon_test_encoder;
+
+		if (encoder == test_encoder)
+			continue;
+
+		if (!radeon_encoder_is_digital(test_encoder))
+			continue;
+
+		radeon_test_encoder = to_radeon_encoder(test_encoder);
+		dig = radeon_test_encoder->enc_priv;
+
+		if (dig->dig_encoder >= 0)
+			dig_enc_in_use |= (1 << dig->dig_encoder);
+	}
+
+	if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA) {
+		if (dig_enc_in_use & 0x2)
+			DRM_ERROR("LVDS required digital encoder 2 but it was in use - stealing\n");
+		return 1;
+	}
+	if (!(dig_enc_in_use & 1))
+		return 0;
+	return 1;
+}
+
 static void
 radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
 			     struct drm_display_mode *mode,
@@ -1214,12 +1227,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
 
 	if (radeon_encoder->active_device &
 	    (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
-		if (radeon_encoder->enc_priv) {
-			struct radeon_encoder_atom_dig *dig;
-
-			dig = radeon_encoder->enc_priv;
-			dig->dig_block = radeon_crtc->crtc_id;
-		}
+		struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+		if (dig)
+			dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
 	}
 	radeon_encoder->pixel_clock = adjusted_mode->clock;
 
@@ -1379,7 +1389,13 @@ static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
 static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
 {
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig;
 	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+	if (radeon_encoder_is_digital(encoder)) {
+		dig = radeon_encoder->enc_priv;
+		dig->dig_encoder = -1;
+	}
 	radeon_encoder->active_device = 0;
 }
 
@@ -1436,6 +1452,7 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
 
 	/* coherent mode by default */
 	dig->coherent_mode = true;
+	dig->dig_encoder = -1;
 
 	return dig;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 3ba213d1b06c..d71e346e9ab5 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -248,7 +248,7 @@ int radeonfb_create(struct drm_device *dev,
 	if (ret)
 		goto out_unref;
 
-	memset_io(fbptr, 0xff, aligned_size);
+	memset_io(fbptr, 0x0, aligned_size);
 
 	strcpy(info->fix.id, "radeondrmfb");
 
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 0e1325e18534..db8e9a355a01 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -308,6 +308,9 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 	}
 	robj = gobj->driver_private;
 	r = radeon_bo_wait(robj, NULL, false);
+	/* callback hw specific functions if any */
+	if (robj->rdev->asic->ioctl_wait_idle)
+		robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
 	mutex_lock(&dev->struct_mutex);
 	drm_gem_object_unreference(gobj);
 	mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index cc27485a07ad..b6d8081e1246 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -339,69 +339,6 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
 	}
 }
 
-/* properly set crtc bpp when using atombios */
-void radeon_legacy_atom_set_surface(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct radeon_device *rdev = dev->dev_private;
-	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-	int format;
-	uint32_t crtc_gen_cntl;
-	uint32_t disp_merge_cntl;
-	uint32_t crtc_pitch;
-
-	switch (crtc->fb->bits_per_pixel) {
-	case 8:
-		format = 2;
-		break;
-	case 15: /* 555 */
-		format = 3;
-		break;
-	case 16: /* 565 */
-		format = 4;
-		break;
-	case 24: /* RGB */
-		format = 5;
-		break;
-	case 32: /* xRGB */
-		format = 6;
-		break;
-	default:
-		return;
-	}
-
-	crtc_pitch = ((((crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8)) * crtc->fb->bits_per_pixel) +
-		       ((crtc->fb->bits_per_pixel * 8) - 1)) /
-		      (crtc->fb->bits_per_pixel * 8));
-	crtc_pitch |= crtc_pitch << 16;
-
-	WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);
-
-	switch (radeon_crtc->crtc_id) {
-	case 0:
-		disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
-		disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
-		WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
-
-		crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0xfffff0ff;
-		crtc_gen_cntl |= (format << 8);
-		crtc_gen_cntl |= RADEON_CRTC_EXT_DISP_EN;
-		WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
-		break;
-	case 1:
-		disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
-		disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
-		WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl);
-
-		crtc_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0xfffff0ff;
-		crtc_gen_cntl |= (format << 8);
-		WREG32(RADEON_CRTC2_GEN_CNTL, crtc_gen_cntl);
-		WREG32(RADEON_FP_H2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_H_SYNC_STRT_WID));
-		WREG32(RADEON_FP_V2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_V_SYNC_STRT_WID));
-		break;
-	}
-}
-
 int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 			 struct drm_framebuffer *old_fb)
 {
@@ -755,7 +692,6 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
 	uint32_t post_divider = 0;
 	uint32_t freq = 0;
 	uint8_t pll_gain;
-	int pll_flags = RADEON_PLL_LEGACY;
 	bool use_bios_divs = false;
 	/* PLL registers */
 	uint32_t pll_ref_div = 0;
@@ -789,10 +725,12 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
 	else
 		pll = &rdev->clock.p1pll;
 
+	pll->flags = RADEON_PLL_LEGACY;
+
 	if (mode->clock > 200000) /* range limits??? */
-		pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
+		pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
 	else
-		pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+		pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 		if (encoder->crtc == crtc) {
@@ -804,7 +742,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
 		}
 
 		if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
-			pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
+			pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
 		if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
 			if (!rdev->is_atom_bios) {
 				struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -819,7 +757,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
 					}
 				}
 			}
-			pll_flags |= RADEON_PLL_USE_REF_DIV;
+			pll->flags |= RADEON_PLL_USE_REF_DIV;
 		}
 	}
 }
@@ -829,8 +767,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
 	if (!use_bios_divs) {
 		radeon_compute_pll(pll, mode->clock,
 				   &freq, &feedback_div, &frac_fb_div,
-				   &reference_div, &post_divider,
-				   pll_flags);
+				   &reference_div, &post_divider);
 
 		for (post_div = &post_divs[0]; post_div->divider; ++post_div) {
 			if (post_div->divider == post_divider)
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 91cb041cb40d..e81b2aeb6a8f 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -125,16 +125,24 @@ struct radeon_tmds_pll {
 #define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9)
 #define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10)
 #define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
+#define RADEON_PLL_USE_POST_DIV (1 << 12)
 
 struct radeon_pll {
-	uint16_t reference_freq;
-	uint16_t reference_div;
+	/* reference frequency */
+	uint32_t reference_freq;
+
+	/* fixed dividers */
+	uint32_t reference_div;
+	uint32_t post_div;
+
+	/* pll in/out limits */
 	uint32_t pll_in_min;
 	uint32_t pll_in_max;
 	uint32_t pll_out_min;
 	uint32_t pll_out_max;
-	uint16_t xclk;
+	uint32_t best_vco;
 
+	/* divider limits */
 	uint32_t min_ref_div;
 	uint32_t max_ref_div;
 	uint32_t min_post_div;
@@ -143,7 +151,12 @@ struct radeon_pll {
 	uint32_t max_feedback_div;
 	uint32_t min_frac_feedback_div;
 	uint32_t max_frac_feedback_div;
-	uint32_t best_vco;
+
+	/* flags for the current clock */
+	uint32_t flags;
+
+	/* pll id */
+	uint32_t id;
 };
 
 struct radeon_i2c_chan {
@@ -286,7 +299,7 @@ struct radeon_atom_ss {
 struct radeon_encoder_atom_dig {
 	/* atom dig */
 	bool coherent_mode;
-	int dig_block;
+	int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */
 	/* atom lvds */
 	uint32_t lvds_misc;
 	uint16_t panel_pwr_delay;
@@ -417,8 +430,7 @@ extern void radeon_compute_pll(struct radeon_pll *pll,
 			       uint32_t *fb_div_p,
 			       uint32_t *frac_fb_div_p,
 			       uint32_t *ref_div_p,
-			       uint32_t *post_div_p,
-			       int flags);
+			       uint32_t *post_div_p);
 
 extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
 				     uint64_t freq,
@@ -426,8 +438,7 @@ extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
 				     uint32_t *fb_div_p,
 				     uint32_t *frac_fb_div_p,
 				     uint32_t *ref_div_p,
-				     uint32_t *post_div_p,
-				     int flags);
+				     uint32_t *post_div_p);
 
 extern void radeon_setup_encoder_clones(struct drm_device *dev);
 
@@ -453,7 +464,6 @@ extern void atombios_crtc_dpms(struct drm_crtc *crtc, int mode);
 
 extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 				struct drm_framebuffer *old_fb);
-extern void radeon_legacy_atom_set_surface(struct drm_crtc *crtc);
 
 extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
 				  struct drm_file *file_priv,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 4e636de877b2..d72a71bff218 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -220,7 +220,8 @@ int radeon_bo_unpin(struct radeon_bo *bo)
 
 int radeon_bo_evict_vram(struct radeon_device *rdev)
 {
-	if (rdev->flags & RADEON_IS_IGP) {
+	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
+	if (0 && (rdev->flags & RADEON_IS_IGP)) {
 		if (rdev->mc.igp_sideport_enabled == false)
 			/* Useless to evict on IGP chips */
 			return 0;
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r200 b/drivers/gpu/drm/radeon/reg_srcs/r200
index 6021c8849a16..c29ac434ac9c 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r200
+++ b/drivers/gpu/drm/radeon/reg_srcs/r200
@@ -91,6 +91,8 @@ r200 0x3294
0x22b8 SE_TCL_TEX_CYL_WRAP_CTL
0x22c0 SE_TCL_UCP_VERT_BLEND_CNTL
0x22c4 SE_TCL_POINT_SPRITE_CNTL
+0x22d0 SE_PVS_CNTL
+0x22d4 SE_PVS_CONST_CNTL
0x2648 RE_POINTSIZE
0x26c0 RE_TOP_LEFT
0x26c4 RE_MISC
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 9f5418983e2a..287fcebfb4e6 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -223,15 +223,31 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
 	return 0;
 }
 
+int rs400_mc_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+	uint32_t tmp;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		/* read MC_STATUS */
+		tmp = RREG32(0x0150);
+		if (tmp & (1 << 2)) {
+			return 0;
+		}
+		DRM_UDELAY(1);
+	}
+	return -1;
+}
+
 void rs400_gpu_init(struct radeon_device *rdev)
 {
 	/* FIXME: HDP same place on rs400 ? */
 	r100_hdp_reset(rdev);
 	/* FIXME: is this correct ? */
 	r420_pipes_init(rdev);
-	if (r300_mc_wait_for_idle(rdev)) {
-		printk(KERN_WARNING "Failed to wait MC idle while "
-		       "programming pipes. Bad things might happen.\n");
+	if (rs400_mc_wait_for_idle(rdev)) {
+		printk(KERN_WARNING "rs400: Failed to wait MC idle while "
+		       "programming pipes. Bad things might happen. %08x\n", RREG32(0x150));
 	}
 }
 
@@ -370,8 +386,8 @@ void rs400_mc_program(struct radeon_device *rdev)
 	r100_mc_stop(rdev, &save);
 
 	/* Wait for mc idle */
-	if (r300_mc_wait_for_idle(rdev))
-		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
+	if (rs400_mc_wait_for_idle(rdev))
+		dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n");
 	WREG32(R_000148_MC_FB_LOCATION,
 		S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
 		S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
@@ -448,7 +464,6 @@ int rs400_suspend(struct radeon_device *rdev)
 
 void rs400_fini(struct radeon_device *rdev)
 {
-	rs400_suspend(rdev);
 	r100_cp_fini(rdev);
 	r100_wb_fini(rdev);
 	r100_ib_fini(rdev);
@@ -527,7 +542,6 @@ int rs400_init(struct radeon_device *rdev)
 	if (r) {
 		/* Somethings want wront with the accel init stop accel */
 		dev_err(rdev->dev, "Disabling GPU acceleration\n");
-		rs400_suspend(rdev);
 		r100_cp_fini(rdev);
 		r100_wb_fini(rdev);
 		r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index d5255751e7b3..c3818562a13e 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -610,7 +610,6 @@ int rs600_suspend(struct radeon_device *rdev)
 
 void rs600_fini(struct radeon_device *rdev)
 {
-	rs600_suspend(rdev);
 	r100_cp_fini(rdev);
 	r100_wb_fini(rdev);
 	r100_ib_fini(rdev);
@@ -689,7 +688,6 @@ int rs600_init(struct radeon_device *rdev)
 	if (r) {
 		/* Somethings want wront with the accel init stop accel */
 		dev_err(rdev->dev, "Disabling GPU acceleration\n");
-		rs600_suspend(rdev);
 		r100_cp_fini(rdev);
 		r100_wb_fini(rdev);
 		r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index cd31da913771..06e2771aee5a 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -676,7 +676,6 @@ int rs690_suspend(struct radeon_device *rdev)
 
 void rs690_fini(struct radeon_device *rdev)
 {
-	rs690_suspend(rdev);
 	r100_cp_fini(rdev);
 	r100_wb_fini(rdev);
 	r100_ib_fini(rdev);
@@ -756,7 +755,6 @@ int rs690_init(struct radeon_device *rdev)
 	if (r) {
 		/* Somethings want wront with the accel init stop accel */
 		dev_err(rdev->dev, "Disabling GPU acceleration\n");
-		rs690_suspend(rdev);
 		r100_cp_fini(rdev);
 		r100_wb_fini(rdev);
 		r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 62756717b044..0e1e6b8632b8 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -537,7 +537,6 @@ void rv515_set_safe_registers(struct radeon_device *rdev)
 
 void rv515_fini(struct radeon_device *rdev)
 {
-	rv515_suspend(rdev);
 	r100_cp_fini(rdev);
 	r100_wb_fini(rdev);
 	r100_ib_fini(rdev);
@@ -615,13 +614,12 @@ int rv515_init(struct radeon_device *rdev)
 	if (r) {
 		/* Somethings want wront with the accel init stop accel */
 		dev_err(rdev->dev, "Disabling GPU acceleration\n");
-		rv515_suspend(rdev);
 		r100_cp_fini(rdev);
 		r100_wb_fini(rdev);
 		r100_ib_fini(rdev);
+		radeon_irq_kms_fini(rdev);
 		rv370_pcie_gart_fini(rdev);
 		radeon_agp_fini(rdev);
-		radeon_irq_kms_fini(rdev);
 		rdev->accel_working = false;
 	}
 	return 0;
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 59c71245fb91..5943d561fd1e 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -779,7 +779,6 @@ int rv770_mc_init(struct radeon_device *rdev)
 	fixed20_12 a;
 	u32 tmp;
 	int chansize, numchan;
-	int r;
 
 	/* Get VRAM informations */
 	rdev->mc.vram_is_ddr = true;
@@ -822,9 +821,6 @@ int rv770_mc_init(struct radeon_device *rdev)
 		rdev->mc.real_vram_size = rdev->mc.aper_size;
 
 	if (rdev->flags & RADEON_IS_AGP) {
-		r = radeon_agp_init(rdev);
-		if (r)
-			return r;
 		/* gtt_size is setup by radeon_agp_init */
 		rdev->mc.gtt_location = rdev->mc.agp_base;
 		tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
@@ -891,26 +887,25 @@ static int rv770_startup(struct radeon_device *rdev)
 		return r;
 	}
 	rv770_gpu_init(rdev);
-
-	if (!rdev->r600_blit.shader_obj) {
-		r = r600_blit_init(rdev);
+	r = r600_blit_init(rdev);
+	if (r) {
+		r600_blit_fini(rdev);
+		rdev->asic->copy = NULL;
+		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+	}
+	/* pin copy shader into vram */
+	if (rdev->r600_blit.shader_obj) {
+		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+		if (unlikely(r != 0))
+			return r;
+		r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+				  &rdev->r600_blit.shader_gpu_addr);
+		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
 		if (r) {
-			DRM_ERROR("radeon: failed blitter (%d).\n", r);
+			DRM_ERROR("failed to pin blit object %d\n", r);
 			return r;
 		}
 	}
-
-	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
-	if (unlikely(r != 0))
-		return r;
-	r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
-			  &rdev->r600_blit.shader_gpu_addr);
-	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
-	if (r) {
-		DRM_ERROR("failed to pin blit object %d\n", r);
-		return r;
-	}
-
 	/* Enable IRQ */
 	r = r600_irq_init(rdev);
 	if (r) {
@@ -972,13 +967,16 @@ int rv770_suspend(struct radeon_device *rdev)
 	/* FIXME: we should wait for ring to be empty */
 	r700_cp_stop(rdev);
 	rdev->cp.ready = false;
+	r600_irq_suspend(rdev);
 	r600_wb_disable(rdev);
 	rv770_pcie_gart_disable(rdev);
 	/* unpin shaders bo */
-	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
-	if (likely(r == 0)) {
-		radeon_bo_unpin(rdev->r600_blit.shader_obj);
-		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+	if (rdev->r600_blit.shader_obj) {
+		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+		if (likely(r == 0)) {
+			radeon_bo_unpin(rdev->r600_blit.shader_obj);
+			radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+		}
 	}
 	return 0;
 }
@@ -1037,6 +1035,11 @@ int rv770_init(struct radeon_device *rdev)
 	r = radeon_fence_driver_init(rdev);
 	if (r)
 		return r;
+	if (rdev->flags & RADEON_IS_AGP) {
+		r = radeon_agp_init(rdev);
+		if (r)
+			radeon_agp_disable(rdev);
+	}
 	r = rv770_mc_init(rdev);
 	if (r)
 		return r;
@@ -1062,22 +1065,25 @@ int rv770_init(struct radeon_device *rdev)
 	rdev->accel_working = true;
 	r = rv770_startup(rdev);
 	if (r) {
-		rv770_suspend(rdev);
+		dev_err(rdev->dev, "disabling GPU acceleration\n");
+		r600_cp_fini(rdev);
 		r600_wb_fini(rdev);
-		radeon_ring_fini(rdev);
+		r600_irq_fini(rdev);
+		radeon_irq_kms_fini(rdev);
 		rv770_pcie_gart_fini(rdev);
 		rdev->accel_working = false;
 	}
 	if (rdev->accel_working) {
 		r = radeon_ib_pool_init(rdev);
 		if (r) {
-			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
+			dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
 			rdev->accel_working = false;
-		}
-		r = r600_ib_test(rdev);
-		if (r) {
-			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
-			rdev->accel_working = false;
+		} else {
+			r = r600_ib_test(rdev);
+			if (r) {
+				dev_err(rdev->dev, "IB test failed (%d).\n", r);
+				rdev->accel_working = false;
+			}
 		}
 	}
 	return 0;
@@ -1085,13 +1091,11 @@ int rv770_init(struct radeon_device *rdev)
 
 void rv770_fini(struct radeon_device *rdev)
 {
-	rv770_suspend(rdev);
-
 	r600_blit_fini(rdev);
+	r600_cp_fini(rdev);
+	r600_wb_fini(rdev);
 	r600_irq_fini(rdev);
 	radeon_irq_kms_fini(rdev);
-	radeon_ring_fini(rdev);
-	r600_wb_fini(rdev);
 	rv770_pcie_gart_fini(rdev);
 	radeon_gem_fini(rdev);
 	radeon_fence_driver_fini(rdev);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 2920f9a279e1..c7320ce4567d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -426,7 +426,8 @@ moved:
 			bdev->man[bo->mem.mem_type].gpu_offset;
 		bo->cur_placement = bo->mem.placement;
 		spin_unlock(&bo->lock);
-	}
+	} else
+		bo->offset = 0;
 
 	return 0;
 
@@ -523,52 +524,44 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 {
 	struct ttm_bo_global *glob = bdev->glob;
-	struct ttm_buffer_object *entry, *nentry;
-	struct list_head *list, *next;
-	int ret;
+	struct ttm_buffer_object *entry = NULL;
+	int ret = 0;
 
 	spin_lock(&glob->lru_lock);
-	list_for_each_safe(list, next, &bdev->ddestroy) {
-		entry = list_entry(list, struct ttm_buffer_object, ddestroy);
-		nentry = NULL;
+	if (list_empty(&bdev->ddestroy))
+		goto out_unlock;
 
-		/*
-		 * Protect the next list entry from destruction while we
-		 * unlock the lru_lock.
-		 */
+	entry = list_first_entry(&bdev->ddestroy,
+		struct ttm_buffer_object, ddestroy);
+	kref_get(&entry->list_kref);
 
-		if (next != &bdev->ddestroy) {
-			nentry = list_entry(next, struct ttm_buffer_object,
-					    ddestroy);
+	for (;;) {
+		struct ttm_buffer_object *nentry = NULL;
+
+		if (entry->ddestroy.next != &bdev->ddestroy) {
+			nentry = list_first_entry(&entry->ddestroy,
+				struct ttm_buffer_object, ddestroy);
 			kref_get(&nentry->list_kref);
 		}
-		kref_get(&entry->list_kref);
 
 		spin_unlock(&glob->lru_lock);
 		ret = ttm_bo_cleanup_refs(entry, remove_all);
 		kref_put(&entry->list_kref, ttm_bo_release_list);
+		entry = nentry;
+
+		if (ret || !entry)
+			goto out;
 
 		spin_lock(&glob->lru_lock);
-		if (nentry) {
-			bool next_onlist = !list_empty(next);
-			spin_unlock(&glob->lru_lock);
-			kref_put(&nentry->list_kref, ttm_bo_release_list);
-			spin_lock(&glob->lru_lock);
-			/*
-			 * Someone might have raced us and removed the
-			 * next entry from the list. We don't bother restarting
-			 * list traversal.
-			 */
-
-			if (!next_onlist)
-				break;
-		}
-		if (ret)
+		if (list_empty(&entry->ddestroy))
 			break;
 	}
-	ret = !list_empty(&bdev->ddestroy);
-	spin_unlock(&glob->lru_lock);
 
+out_unlock:
+	spin_unlock(&glob->lru_lock);
+out:
+	if (entry)
+		kref_put(&entry->list_kref, ttm_bo_release_list);
 	return ret;
 }
 
@@ -950,6 +943,14 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
 				~TTM_PL_MASK_MEMTYPE);
 
+
+		if (mem_type == TTM_PL_SYSTEM) {
+			mem->mem_type = mem_type;
+			mem->placement = cur_flags;
+			mem->mm_node = NULL;
+			return 0;
+		}
+
 		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
 						interruptible, no_wait);
 		if (ret == 0 && mem->mm_node) {
@@ -1019,6 +1020,12 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
 			     struct ttm_mem_reg *mem)
 {
 	int i;
+	struct drm_mm_node *node = mem->mm_node;
+
+	if (node && placement->lpfn != 0 &&
+	    (node->start < placement->fpfn ||
+	     node->start + node->size > placement->lpfn))
+		return -1;
 
 	for (i = 0; i < placement->num_placement; i++) {
 		if ((placement->placement[i] & mem->placement &
@@ -1844,6 +1851,9 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 	 * anyone tries to access a ttm page.
 	 */
 
+	if (bo->bdev->driver->swap_notify)
+		bo->bdev->driver->swap_notify(bo);
+
 	ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
 out:
 
@@ -1864,3 +1874,4 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
 	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
 		;
 }
+EXPORT_SYMBOL(ttm_bo_swapout_all);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 2ecf7d0c64f6..5ca37a58a98c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -53,7 +53,6 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 {
 	struct ttm_tt *ttm = bo->ttm;
 	struct ttm_mem_reg *old_mem = &bo->mem;
-	uint32_t save_flags = old_mem->placement;
 	int ret;
 
 	if (old_mem->mem_type != TTM_PL_SYSTEM) {
@@ -62,7 +61,6 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
 				TTM_PL_MASK_MEM);
 		old_mem->mem_type = TTM_PL_SYSTEM;
-		save_flags = old_mem->placement;
 	}
 
 	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
@@ -77,7 +75,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 
 	*old_mem = *new_mem;
 	new_mem->mm_node = NULL;
-	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
+
 	return 0;
 }
 EXPORT_SYMBOL(ttm_bo_move_ttm);
@@ -219,7 +217,6 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	void *old_iomap;
 	void *new_iomap;
 	int ret;
-	uint32_t save_flags = old_mem->placement;
 	unsigned long i;
 	unsigned long page;
 	unsigned long add = 0;
@@ -270,7 +267,6 @@ out2:
 
 	*old_mem = *new_mem;
 	new_mem->mm_node = NULL;
-	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
 
 	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
 		ttm_tt_unbind(ttm);
@@ -537,7 +533,6 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
 	struct ttm_mem_reg *old_mem = &bo->mem;
 	int ret;
-	uint32_t save_flags = old_mem->placement;
 	struct ttm_buffer_object *ghost_obj;
 	void *tmp_obj = NULL;
 
@@ -598,7 +593,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 
 	*old_mem = *new_mem;
 	new_mem->mm_node = NULL;
-	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
+
 	return 0;
 }
 EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
index f619ebcaa4ec..3d172ef04ee1 100644
--- a/drivers/gpu/drm/ttm/ttm_lock.c
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -288,6 +288,7 @@ void ttm_suspend_unlock(struct ttm_lock *lock)
 	wake_up_all(&lock->queue);
 	spin_unlock(&lock->lock);
 }
+EXPORT_SYMBOL(ttm_suspend_unlock);
 
 static bool __ttm_suspend_lock(struct ttm_lock *lock)
 {
@@ -309,3 +310,4 @@ void ttm_suspend_lock(struct ttm_lock *lock)
 {
 	wait_event(lock->queue, __ttm_suspend_lock(lock));
 }
+EXPORT_SYMBOL(ttm_suspend_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 1099abac824b..75e9d6f86ba4 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -109,8 +109,8 @@ struct ttm_ref_object {
 	struct drm_hash_item hash;
 	struct list_head head;
 	struct kref kref;
-	struct ttm_base_object *obj;
 	enum ttm_ref_type ref_type;
+	struct ttm_base_object *obj;
 	struct ttm_object_file *tfile;
 };
 
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 9c2b1cc5dba5..e2123af7775a 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -198,17 +198,26 @@ EXPORT_SYMBOL(ttm_tt_populate);
 static inline int ttm_tt_set_page_caching(struct page *p,
 					  enum ttm_caching_state c_state)
 {
+	int ret = 0;
+
 	if (PageHighMem(p))
 		return 0;
 
-	switch (c_state) {
-	case tt_cached:
-		return set_pages_wb(p, 1);
-	case tt_wc:
-		return set_memory_wc((unsigned long) page_address(p), 1);
-	default:
-		return set_pages_uc(p, 1);
+	if (get_page_memtype(p) != -1) {
+		/* p isn't in the default caching state, set it to
+		 * writeback first to free its current memtype. */
+
+		ret = set_pages_wb(p, 1);
+		if (ret)
+			return ret;
 	}
+
+	if (c_state == tt_wc)
+		ret = set_memory_wc((unsigned long) page_address(p), 1);
+	else if (c_state == tt_uncached)
+		ret = set_pages_uc(p, 1);
+
+	return ret;
 }
 #else /* CONFIG_X86 */
 static inline int ttm_tt_set_page_caching(struct page *p,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index d6f2d2b882e9..825ebe3d89d5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -48,6 +48,15 @@ struct ttm_placement vmw_vram_placement = {
 	.busy_placement = &vram_placement_flags
 };
 
+struct ttm_placement vmw_vram_sys_placement = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.num_placement = 1,
+	.placement = &vram_placement_flags,
+	.num_busy_placement = 1,
+	.busy_placement = &sys_placement_flags
+};
+
 struct ttm_placement vmw_vram_ne_placement = {
 	.fpfn = 0,
 	.lpfn = 0,
@@ -172,6 +181,18 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 	return 0;
 }
 
+static void vmw_move_notify(struct ttm_buffer_object *bo,
+			    struct ttm_mem_reg *new_mem)
+{
+	if (new_mem->mem_type != TTM_PL_SYSTEM)
+		vmw_dmabuf_gmr_unbind(bo);
+}
+
+static void vmw_swap_notify(struct ttm_buffer_object *bo)
+{
+	vmw_dmabuf_gmr_unbind(bo);
+}
+
 /**
  * FIXME: We're using the old vmware polling method to sync.
  * Do this with fences instead.
@@ -225,5 +246,7 @@ struct ttm_bo_driver vmw_bo_driver = {
 	.sync_obj_wait = vmw_sync_obj_wait,
 	.sync_obj_flush = vmw_sync_obj_flush,
 	.sync_obj_unref = vmw_sync_obj_unref,
-	.sync_obj_ref = vmw_sync_obj_ref
+	.sync_obj_ref = vmw_sync_obj_ref,
+	.move_notify = vmw_move_notify,
+	.swap_notify = vmw_swap_notify
 };
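For context, the two callbacks wired up above use TTM's notification hooks: move_notify() fires when a buffer object changes placement and swap_notify() before swapout, giving the driver a chance to drop GMR bindings, which only cover system pages. A minimal sketch of the driver-side wiring, with the vmw_* names taken from this patch and the rest from the 2010-era TTM API:

static void sketch_move_notify(struct ttm_buffer_object *bo,
			       struct ttm_mem_reg *new_mem)
{
	/* a GMR maps system pages, so leaving TTM_PL_SYSTEM invalidates it */
	if (new_mem->mem_type != TTM_PL_SYSTEM)
		vmw_dmabuf_gmr_unbind(bo);
}

static struct ttm_bo_driver sketch_driver = {
	/* fault, sync and placement hooks elided */
	.move_notify = sketch_move_notify,
	.swap_notify = vmw_dmabuf_gmr_unbind,	/* same unbind on swapout */
};
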
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 1db1ef30be2b..a6e8f687fa64 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -147,6 +147,8 @@ static char *vmw_devname = "vmwgfx";
 
 static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
 static void vmw_master_init(struct vmw_master *);
+static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
+			      void *ptr);
 
 static void vmw_print_capabilities(uint32_t capabilities)
 {
@@ -207,6 +209,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 {
 	struct vmw_private *dev_priv;
 	int ret;
+	uint32_t svga_id;
 
 	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
 	if (unlikely(dev_priv == NULL)) {
@@ -217,6 +220,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 
 	dev_priv->dev = dev;
 	dev_priv->vmw_chipset = chipset;
+	dev_priv->last_read_sequence = (uint32_t) -100;
 	mutex_init(&dev_priv->hw_mutex);
 	mutex_init(&dev_priv->cmdbuf_mutex);
 	rwlock_init(&dev_priv->resource_lock);
@@ -236,6 +240,16 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
 
 	mutex_lock(&dev_priv->hw_mutex);
+
+	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
+	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
+	if (svga_id != SVGA_ID_2) {
+		ret = -ENOSYS;
+		DRM_ERROR("Unsuported SVGA ID 0x%x\n", svga_id);
+		mutex_unlock(&dev_priv->hw_mutex);
+		goto out_err0;
+	}
+
 	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
 
 	if (dev_priv->capabilities & SVGA_CAP_GMR) {
@@ -351,6 +365,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		vmw_fb_init(dev_priv);
 	}
 
+	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
+	register_pm_notifier(&dev_priv->pm_nb);
+
+	DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? "Have 3D\n" : "No 3D\n");
+
 	return 0;
 
 out_no_device:
@@ -385,6 +404,8 @@ static int vmw_driver_unload(struct drm_device *dev)
 
 	DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n");
 
+	unregister_pm_notifier(&dev_priv->pm_nb);
+
 	if (!dev_priv->stealth) {
 		vmw_fb_close(dev_priv);
 		vmw_kms_close(dev_priv);
@@ -650,6 +671,57 @@ static void vmw_remove(struct pci_dev *pdev)
 	drm_put_dev(dev);
 }
 
+static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
+			      void *ptr)
+{
+	struct vmw_private *dev_priv =
+		container_of(nb, struct vmw_private, pm_nb);
+	struct vmw_master *vmaster = dev_priv->active_master;
+
+	switch (val) {
+	case PM_HIBERNATION_PREPARE:
+	case PM_SUSPEND_PREPARE:
+		ttm_suspend_lock(&vmaster->lock);
+
+		/**
+		 * This empties VRAM and unbinds all GMR bindings.
+		 * Buffer contents is moved to swappable memory.
+		 */
+		ttm_bo_swapout_all(&dev_priv->bdev);
+		break;
+	case PM_POST_HIBERNATION:
+	case PM_POST_SUSPEND:
+		ttm_suspend_unlock(&vmaster->lock);
+		break;
+	case PM_RESTORE_PREPARE:
+		break;
+	case PM_POST_RESTORE:
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+/**
+ * These might not be needed with the virtual SVGA device.
+ */
+
+int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+	return 0;
+}
+
+int vmw_pci_resume(struct pci_dev *pdev)
+{
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	return pci_enable_device(pdev);
+}
+
 static struct drm_driver driver = {
 	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
 	DRIVER_MODESET,
@@ -689,7 +761,9 @@ static struct drm_driver driver = {
 	 .name = VMWGFX_DRIVER_NAME,
 	 .id_table = vmw_pci_id_list,
 	 .probe = vmw_probe,
-	 .remove = vmw_remove
+	 .remove = vmw_remove,
+	 .suspend = vmw_pci_suspend,
+	 .resume = vmw_pci_resume
 	 },
 	.name = VMWGFX_DRIVER_NAME,
 	.desc = VMWGFX_DRIVER_DESC,
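The PM plumbing added above follows the standard kernel pattern from <linux/suspend.h>: embed a notifier_block, register it with register_pm_notifier(), and quiesce the memory manager when a PM_*_PREPARE event arrives. A hedged, minimal sketch of that pattern (the suspend-time actions are placeholders; the real driver takes the TTM suspend lock and calls ttm_bo_swapout_all() as shown in the hunk):

#include <linux/suspend.h>

static int sketch_pm_notifier(struct notifier_block *nb,
			      unsigned long val, void *ptr)
{
	switch (val) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		/* block new command submission, evict to swappable pages */
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		/* allow command submission again */
		break;
	}
	return 0;
}

static struct notifier_block sketch_pm_nb = {
	.notifier_call = sketch_pm_notifier,
};
/* at load:   register_pm_notifier(&sketch_pm_nb);
 * at unload: unregister_pm_notifier(&sketch_pm_nb); */
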
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index e61bd85b6975..356dc935ec13 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -32,16 +32,17 @@
 #include "drmP.h"
 #include "vmwgfx_drm.h"
 #include "drm_hashtab.h"
+#include "linux/suspend.h"
 #include "ttm/ttm_bo_driver.h"
 #include "ttm/ttm_object.h"
 #include "ttm/ttm_lock.h"
 #include "ttm/ttm_execbuf_util.h"
 #include "ttm/ttm_module.h"
 
-#define VMWGFX_DRIVER_DATE "20090724"
-#define VMWGFX_DRIVER_MAJOR 0
-#define VMWGFX_DRIVER_MINOR 1
-#define VMWGFX_DRIVER_PATCHLEVEL 2
+#define VMWGFX_DRIVER_DATE "20100209"
+#define VMWGFX_DRIVER_MAJOR 1
+#define VMWGFX_DRIVER_MINOR 0
+#define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
 #define VMWGFX_MAX_RELOCATIONS 2048
@@ -95,6 +96,8 @@ struct vmw_surface {
 	struct drm_vmw_size *sizes;
 	uint32_t num_sizes;
 
+	bool scanout;
+
 	/* TODO so far just a extra pointer */
 	struct vmw_cursor_snooper snooper;
 };
@@ -110,6 +113,7 @@ struct vmw_fifo_state {
 	unsigned long static_buffer_size;
 	bool using_bounce_buffer;
 	uint32_t capabilities;
+	struct mutex fifo_mutex;
 	struct rw_semaphore rwsem;
 };
 
@@ -210,7 +214,7 @@ struct vmw_private {
 	 * Fencing and IRQs.
 	 */
 
-	uint32_t fence_seq;
+	atomic_t fence_seq;
 	wait_queue_head_t fence_queue;
 	wait_queue_head_t fifo_queue;
 	atomic_t fence_queue_waiters;
@@ -258,6 +262,7 @@ struct vmw_private {
 
 	struct vmw_master *active_master;
 	struct vmw_master fbdev_master;
+	struct notifier_block pm_nb;
 };
 
 static inline struct vmw_private *vmw_priv(struct drm_device *dev)
@@ -353,6 +358,7 @@ extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
 				       struct vmw_dma_buffer *bo);
 extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
 				struct vmw_dma_buffer *bo);
+extern void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo);
 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv);
 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
@@ -386,6 +392,7 @@ extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
 			       uint32_t *sequence);
 extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
 extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma);
+extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
 
 /**
  * TTM glue - vmwgfx_ttm_glue.c
@@ -401,6 +408,7 @@ extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
 
 extern struct ttm_placement vmw_vram_placement;
 extern struct ttm_placement vmw_vram_ne_placement;
+extern struct ttm_placement vmw_vram_sys_placement;
 extern struct ttm_placement vmw_sys_placement;
 extern struct ttm_bo_driver vmw_bo_driver;
 extern int vmw_dma_quiescent(struct drm_device *dev);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 2e92da567403..d69caf92ffe7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -490,10 +490,29 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 	if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
 		return 0;
 
+	/**
+	 * Put BO in VRAM, only if there is space.
+	 */
+
+	ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false);
+	if (unlikely(ret == -ERESTARTSYS))
+		return ret;
+
+	/**
+	 * Otherwise, set it up as GMR.
+	 */
+
+	if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
+		return 0;
+
 	ret = vmw_gmr_bind(dev_priv, bo);
 	if (likely(ret == 0 || ret == -ERESTARTSYS))
 		return ret;
 
+	/**
+	 * If that failed, try VRAM again, this time evicting
+	 * previous contents.
+	 */
+
 	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
 	return ret;
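The reworked validation above amounts to a three-step placement policy. Sketched out of the diff context (same calls and placements as the patch; error handling simplified):

static int placement_policy_sketch(struct vmw_private *dev_priv,
				   struct ttm_buffer_object *bo)
{
	int ret;

	/* 1. VRAM if there is free space; vmw_vram_sys_placement's busy
	 *    placement falls back to system memory instead of evicting. */
	ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false);
	if (ret == -ERESTARTSYS)
		return ret;
	if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
		return 0;

	/* 2. Otherwise bind the buffer as a guest memory region. */
	ret = vmw_gmr_bind(dev_priv, bo);
	if (ret == 0 || ret == -ERESTARTSYS)
		return ret;

	/* 3. Last resort: force VRAM, evicting previous contents. */
	return ttm_bo_validate(bo, &vmw_vram_placement, true, false);
}
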
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 641dde76ada1..4f4f6432be8b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -649,14 +649,6 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
 	if (unlikely(ret != 0))
 		goto err_unlock;
 
-	if (vmw_bo->gmr_bound) {
-		vmw_gmr_unbind(vmw_priv, vmw_bo->gmr_id);
-		spin_lock(&bo->glob->lru_lock);
-		ida_remove(&vmw_priv->gmr_ida, vmw_bo->gmr_id);
-		spin_unlock(&bo->glob->lru_lock);
-		vmw_bo->gmr_bound = NULL;
-	}
-
 	ret = ttm_bo_validate(bo, &ne_placement, false, false);
 	ttm_bo_unreserve(bo);
 err_unlock:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 01feb48af333..39d43a01d846 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -29,6 +29,25 @@
 #include "drmP.h"
 #include "ttm/ttm_placement.h"
 
+bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
+{
+	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	uint32_t fifo_min, hwversion;
+
+	fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN);
+	if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
+		return false;
+
+	hwversion = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
+	if (hwversion == 0)
+		return false;
+
+	if (hwversion < SVGA3D_HWVERSION_WS65_B1)
+		return false;
+
+	return true;
+}
+
 int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 {
 	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
@@ -55,6 +74,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 	fifo->reserved_size = 0;
 	fifo->using_bounce_buffer = false;
 
+	mutex_init(&fifo->fifo_mutex);
 	init_rwsem(&fifo->rwsem);
 
 	/*
@@ -98,8 +118,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 		 (unsigned int) min,
 		 (unsigned int) fifo->capabilities);
 
-	dev_priv->fence_seq = (uint32_t) -100;
-	dev_priv->last_read_sequence = (uint32_t) -100;
+	atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
 	iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
 
 	return vmw_fifo_send_fence(dev_priv, &dummy);
@@ -265,7 +284,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
 	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
 	int ret;
 
-	down_write(&fifo_state->rwsem);
+	mutex_lock(&fifo_state->fifo_mutex);
 	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
 	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
 	next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
@@ -333,7 +352,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
 	}
 out_err:
 	fifo_state->reserved_size = 0;
-	up_write(&fifo_state->rwsem);
+	mutex_unlock(&fifo_state->fifo_mutex);
 	return NULL;
 }
 
@@ -408,6 +427,7 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
 
 	}
 
+	down_write(&fifo_state->rwsem);
 	if (fifo_state->using_bounce_buffer || reserveable) {
 		next_cmd += bytes;
 		if (next_cmd >= max)
@@ -419,8 +439,9 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
 	if (reserveable)
 		iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
 	mb();
-	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
 	up_write(&fifo_state->rwsem);
+	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+	mutex_unlock(&fifo_state->fifo_mutex);
 }
 
 int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
@@ -433,9 +454,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
 
 	fm = vmw_fifo_reserve(dev_priv, bytes);
 	if (unlikely(fm == NULL)) {
-		down_write(&fifo_state->rwsem);
-		*sequence = dev_priv->fence_seq;
-		up_write(&fifo_state->rwsem);
+		*sequence = atomic_read(&dev_priv->fence_seq);
 		ret = -ENOMEM;
 		(void)vmw_fallback_wait(dev_priv, false, true, *sequence,
 					false, 3*HZ);
@@ -443,7 +462,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
 	}
 
 	do {
-		*sequence = dev_priv->fence_seq++;
+		*sequence = atomic_add_return(1, &dev_priv->fence_seq);
 	} while (*sequence == 0);
 
 	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
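With fence_seq now an atomic_t, sequence allocation no longer needs the FIFO rwsem: atomic_add_return() increments and fetches in a single step, so concurrent submitters each get a unique number, and the do/while skips 0, which the code above treats as a reserved value. The same idiom in isolation (a sketch, not the driver function):

static uint32_t next_sequence_sketch(atomic_t *fence_seq)
{
	uint32_t seq;

	do {
		seq = atomic_add_return(1, fence_seq);	/* unique per caller */
	} while (seq == 0);				/* 0 stays reserved */

	return seq;
}
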
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 5fa6a4ed238a..1c7a316454d8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -43,11 +43,17 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 		param->value = vmw_overlay_num_free_overlays(dev_priv);
 		break;
 	case DRM_VMW_PARAM_3D:
-		param->value = dev_priv->capabilities & SVGA_CAP_3D ? 1 : 0;
+		param->value = vmw_fifo_have_3d(dev_priv) ? 1 : 0;
 		break;
 	case DRM_VMW_PARAM_FIFO_OFFSET:
 		param->value = dev_priv->mmio_start;
 		break;
+	case DRM_VMW_PARAM_HW_CAPS:
+		param->value = dev_priv->capabilities;
+		break;
+	case DRM_VMW_PARAM_FIFO_CAPS:
+		param->value = dev_priv->fifo.capabilities;
+		break;
 	default:
 		DRM_ERROR("Illegal vmwgfx get param request: %d\n",
 			  param->param);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index d40086fc8647..4d7cb5393860 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -85,19 +85,12 @@ bool vmw_fence_signaled(struct vmw_private *dev_priv,
 		return true;
 
 	/**
-	 * Below is to signal stale fences that have wrapped.
-	 * First, block fence submission.
-	 */
-
-	down_read(&fifo_state->rwsem);
-
-	/**
 	 * Then check if the sequence is higher than what we've actually
 	 * emitted. Then the fence is stale and signaled.
 	 */
 
-	ret = ((dev_priv->fence_seq - sequence) > VMW_FENCE_WRAP);
-	up_read(&fifo_state->rwsem);
+	ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
+	       > VMW_FENCE_WRAP);
 
 	return ret;
 }
@@ -127,7 +120,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 
 	if (fifo_idle)
 		down_read(&fifo_state->rwsem);
-	signal_seq = dev_priv->fence_seq;
+	signal_seq = atomic_read(&dev_priv->fence_seq);
 	ret = 0;
 
 	for (;;) {
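The stale-fence check above relies on modular uint32_t arithmetic rather than the rwsem it used to take: (last_emitted - sequence), computed mod 2^32, stays small for recently emitted fences and becomes huge for ancient ones, even when the counter has wrapped. A self-contained illustration (the wrap constant here is a stand-in, not the driver's actual VMW_FENCE_WRAP value):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint32_t wrap = 1u << 24;		/* stand-in threshold */
	uint32_t emitted = 0x01000005u;

	assert(emitted - 0x01000003u <= wrap);	/* recent: may still be pending */
	assert(emitted - 3u > wrap);		/* ancient: treat as signaled */

	/* across a counter wrap: emitted = 5 after passing 2^32 */
	assert((uint32_t)(5u - 0xfffffffbu) <= wrap);	/* difference is 10 */
	return 0;
}
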
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index b1af76e371c3..31f9afed0a63 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -553,9 +553,7 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
 	} *cmd;
 	int i, increment = 1;
 
-	if (!num_clips ||
-	    !(dev_priv->fifo.capabilities &
-	      SVGA_FIFO_CAP_SCREEN_OBJECT)) {
+	if (!num_clips) {
 		num_clips = 1;
 		clips = &norect;
 		norect.x1 = norect.y1 = 0;
@@ -574,10 +572,10 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
 
 	for (i = 0; i < num_clips; i++, clips += increment) {
 		cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
-		cmd[i].body.x = cpu_to_le32(clips[i].x1);
-		cmd[i].body.y = cpu_to_le32(clips[i].y1);
-		cmd[i].body.width = cpu_to_le32(clips[i].x2 - clips[i].x1);
-		cmd[i].body.height = cpu_to_le32(clips[i].y2 - clips[i].y1);
+		cmd[i].body.x = cpu_to_le32(clips->x1);
+		cmd[i].body.y = cpu_to_le32(clips->y1);
+		cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
+		cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
 	}
 
 	vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);
@@ -709,6 +707,9 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
 	if (ret)
 		goto try_dmabuf;
 
+	if (!surface->scanout)
+		goto err_not_scanout;
+
 	ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
 					      mode_cmd->width, mode_cmd->height);
 
@@ -742,6 +743,13 @@ try_dmabuf:
 	}
 
 	return &vfb->base;
+
+err_not_scanout:
+	DRM_ERROR("surface not marked as scanout\n");
+	/* vmw_user_surface_lookup takes one ref */
+	vmw_surface_unreference(&surface);
+
+	return NULL;
 }
 
 static int vmw_kms_fb_changed(struct drm_device *dev)
@@ -761,10 +769,10 @@ int vmw_kms_init(struct vmw_private *dev_priv)
 
 	drm_mode_config_init(dev);
 	dev->mode_config.funcs = &vmw_kms_funcs;
-	dev->mode_config.min_width = 640;
-	dev->mode_config.min_height = 480;
-	dev->mode_config.max_width = 2048;
-	dev->mode_config.max_height = 2048;
+	dev->mode_config.min_width = 1;
+	dev->mode_config.min_height = 1;
+	dev->mode_config.max_width = dev_priv->fb_max_width;
+	dev->mode_config.max_height = dev_priv->fb_max_height;
 
 	ret = vmw_kms_init_legacy_display_system(dev_priv);
 
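The dirty-clip hunk above also fixes a stride bug: the loop advances clips by increment each pass, so additionally indexing clips[i] skipped rectangles and eventually walked past the array. The same bug in miniature, as a standalone C demo:

#include <stdio.h>

struct rect { int x1; };

int main(void)
{
	struct rect clips[4] = { {0}, {1}, {2}, {3} };
	const struct rect *p = clips;
	int i, increment = 1;

	for (i = 0; i < 4; i++, p += increment)
		printf("%d ", p->x1);	/* prints 0 1 2 3 */
	/* p[i].x1 here would read clips[0], clips[2], clips[4]...:
	 * every other entry, then past the end of the array. */
	printf("\n");
	return 0;
}
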
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index bb6e6a096d25..5b6eabeb7f51 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -104,7 +104,6 @@ static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
 			  bool pin, bool interruptible)
 {
 	struct ttm_buffer_object *bo = &buf->base;
-	struct ttm_bo_global *glob = bo->glob;
 	struct ttm_placement *overlay_placement = &vmw_vram_placement;
 	int ret;
 
@@ -116,14 +115,6 @@ static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		goto err;
 
-	if (buf->gmr_bound) {
-		vmw_gmr_unbind(dev_priv, buf->gmr_id);
-		spin_lock(&glob->lru_lock);
-		ida_remove(&dev_priv->gmr_ida, buf->gmr_id);
-		spin_unlock(&glob->lru_lock);
-		buf->gmr_bound = NULL;
-	}
-
 	if (pin)
 		overlay_placement = &vmw_vram_ne_placement;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index c012d5927f65..f8fbbc67a406 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -574,6 +574,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 
 	srf->flags = req->flags;
 	srf->format = req->format;
+	srf->scanout = req->scanout;
 	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
 	srf->num_sizes = 0;
 	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
@@ -599,6 +600,26 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 	if (unlikely(ret != 0))
 		goto out_err1;
 
+	if (srf->scanout &&
+	    srf->num_sizes == 1 &&
+	    srf->sizes[0].width == 64 &&
+	    srf->sizes[0].height == 64 &&
+	    srf->format == SVGA3D_A8R8G8B8) {
+
+		srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
+		/* clear the image */
+		if (srf->snooper.image) {
+			memset(srf->snooper.image, 0x00, 64 * 64 * 4);
+		} else {
+			DRM_ERROR("Failed to allocate cursor_image\n");
+			ret = -ENOMEM;
+			goto out_err1;
+		}
+	} else {
+		srf->snooper.image = NULL;
+	}
+	srf->snooper.crtc = NULL;
+
 	user_srf->base.shareable = false;
 	user_srf->base.tfile = NULL;
 
@@ -622,24 +643,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 		return ret;
 	}
 
-	if (srf->flags & (1 << 9) &&
-	    srf->num_sizes == 1 &&
-	    srf->sizes[0].width == 64 &&
-	    srf->sizes[0].height == 64 &&
-	    srf->format == SVGA3D_A8R8G8B8) {
-
-		srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
-		/* clear the image */
-		if (srf->snooper.image)
-			memset(srf->snooper.image, 0x00, 64 * 64 * 4);
-		else
-			DRM_ERROR("Failed to allocate cursor_image\n");
-
-	} else {
-		srf->snooper.image = NULL;
-	}
-	srf->snooper.crtc = NULL;
-
 	rep->sid = user_srf->base.hash.key;
 	if (rep->sid == SVGA3D_INVALID_ID)
 		DRM_ERROR("Created bad Surface ID.\n");
@@ -754,20 +757,29 @@ static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
 	return bo_user_size + page_array_size;
 }
 
-void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
+void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo)
 {
 	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
 	struct ttm_bo_global *glob = bo->glob;
 	struct vmw_private *dev_priv =
 		container_of(bo->bdev, struct vmw_private, bdev);
 
-	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
 	if (vmw_bo->gmr_bound) {
 		vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
 		spin_lock(&glob->lru_lock);
 		ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
 		spin_unlock(&glob->lru_lock);
+		vmw_bo->gmr_bound = false;
 	}
+}
+
+void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
+{
+	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
+	struct ttm_bo_global *glob = bo->glob;
+
+	vmw_dmabuf_gmr_unbind(bo);
+	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
 	kfree(vmw_bo);
 }
 
@@ -813,18 +825,10 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
 static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
 {
 	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
-	struct vmw_dma_buffer *vmw_bo = &vmw_user_bo->dma;
 	struct ttm_bo_global *glob = bo->glob;
-	struct vmw_private *dev_priv =
-		container_of(bo->bdev, struct vmw_private, bdev);
 
+	vmw_dmabuf_gmr_unbind(bo);
 	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
-	if (vmw_bo->gmr_bound) {
-		vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
-		spin_lock(&glob->lru_lock);
-		ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
-		spin_unlock(&glob->lru_lock);
-	}
 	kfree(vmw_user_bo);
 }
 
@@ -868,7 +872,7 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 	}
 
 	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
-			      &vmw_vram_placement, true,
+			      &vmw_vram_sys_placement, true,
 			      &vmw_user_dmabuf_destroy);
 	if (unlikely(ret != 0))
 		return ret;