Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.c | 3
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.c | 4
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 86
-rw-r--r--  drivers/gpu/drm/exynos/exynos_ddc.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_buf.c | 79
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_buf.h | 22
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_connector.c | 22
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_connector.h | 22
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_core.c | 22
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c | 52
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.h | 23
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dmabuf.c | 29
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dmabuf.h | 22
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 24
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 22
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_encoder.c | 22
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_encoder.h | 22
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c | 22
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.h | 22
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 44
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.h | 22
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.c | 130
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.h | 22
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 38
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 22
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.h | 22
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.c | 62
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.h | 22
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_hdmi.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_hdmi.h | 22
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_iommu.c | 22
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_iommu.h | 22
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.c | 28
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.h | 26
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_rotator.c | 34
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_rotator.h | 22
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 36
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.h | 22
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 8
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.h | 22
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmiphy.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 45
-rw-r--r--  drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 102
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 23
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 23
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 56
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 47
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 188
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 76
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 10
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/client.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/handle.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv50.c | 46
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc | 5
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h | 17
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h | 147
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc | 13
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h | 157
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nve0.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/client.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/gpio.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/base.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c | 128
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/init.c | 66
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nve0.c | 28
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/gpio/base.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/instmem/base.c | 35
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mxm/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/vm/base.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 30
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_pm.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_prime.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dfp.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fence.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fence.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 92
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 30
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 14
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 112
-rw-r--r--  drivers/gpu/drm/radeon/nid.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 105
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 72
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 51
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 45
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 49
-rw-r--r--  drivers/gpu/drm/radeon/radeon_i2c.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_prime.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 26
-rw-r--r--  drivers/gpu/drm/radeon/radeon_semaphore.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/rv515 | 2
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 74
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 84
-rw-r--r--  drivers/gpu/drm/radeon/sid.h | 18
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_drv.c | 10
-rw-r--r--  drivers/gpu/drm/tegra/dc.c | 11
-rw-r--r--  drivers/gpu/drm/tegra/drm.h | 18
-rw-r--r--  drivers/gpu/drm/tegra/hdmi.c | 25
-rw-r--r--  drivers/gpu/drm/tegra/host1x.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 15
-rw-r--r--  drivers/gpu/drm/udl/udl_connector.c | 17
138 files changed, 2111 insertions, 1496 deletions
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 31123b6a0be5..2d2c2f8d6dc6 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -60,8 +60,7 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
 
 MODULE_DEVICE_TABLE(pci, pciidlist);
 
-static int __devinit
-ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	return drm_get_pci_dev(pdev, ent, &driver);
 }
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index dcd1a8c029eb..8ecb601152ef 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -56,8 +56,8 @@ static int cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
 	return 0;
 }
 
-static int __devinit
-cirrus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int cirrus_pci_probe(struct pci_dev *pdev,
+			    const struct pci_device_id *ent)
 {
 	int ret;
 
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 0761a03cdbb2..2aa331499f81 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -184,19 +184,27 @@ EXPORT_SYMBOL(drm_mm_get_block_generic);
  * -ENOSPC if no suitable free area is available. The preallocated memory node
  * must be cleared.
  */
-int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
-		       unsigned long size, unsigned alignment)
+int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
+			       unsigned long size, unsigned alignment,
+			       unsigned long color)
 {
 	struct drm_mm_node *hole_node;
 
-	hole_node = drm_mm_search_free(mm, size, alignment, false);
+	hole_node = drm_mm_search_free_generic(mm, size, alignment,
+					       color, 0);
 	if (!hole_node)
 		return -ENOSPC;
 
-	drm_mm_insert_helper(hole_node, node, size, alignment, 0);
-
+	drm_mm_insert_helper(hole_node, node, size, alignment, color);
 	return 0;
 }
+EXPORT_SYMBOL(drm_mm_insert_node_generic);
+
+int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
+		       unsigned long size, unsigned alignment)
+{
+	return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
+}
 EXPORT_SYMBOL(drm_mm_insert_node);
 
 static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
@@ -213,11 +221,13 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
 
 	BUG_ON(!hole_node->hole_follows || node->allocated);
 
-	if (mm->color_adjust)
-		mm->color_adjust(hole_node, color, &adj_start, &adj_end);
-
 	if (adj_start < start)
 		adj_start = start;
+	if (adj_end > end)
+		adj_end = end;
+
+	if (mm->color_adjust)
+		mm->color_adjust(hole_node, color, &adj_start, &adj_end);
 
 	if (alignment) {
 		unsigned tmp = adj_start % alignment;
@@ -275,22 +285,31 @@ EXPORT_SYMBOL(drm_mm_get_block_range_generic);
  * -ENOSPC if no suitable free area is available. This is for range
  * restricted allocations. The preallocated memory node must be cleared.
  */
-int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
-				unsigned long size, unsigned alignment,
-				unsigned long start, unsigned long end)
+int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
+					unsigned long size, unsigned alignment, unsigned long color,
+					unsigned long start, unsigned long end)
 {
 	struct drm_mm_node *hole_node;
 
-	hole_node = drm_mm_search_free_in_range(mm, size, alignment,
-						start, end, false);
+	hole_node = drm_mm_search_free_in_range_generic(mm,
+							size, alignment, color,
+							start, end, 0);
 	if (!hole_node)
 		return -ENOSPC;
 
-	drm_mm_insert_helper_range(hole_node, node, size, alignment, 0,
+	drm_mm_insert_helper_range(hole_node, node,
+				   size, alignment, color,
 				   start, end);
-
 	return 0;
 }
+EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
+
+int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
+				unsigned long size, unsigned alignment,
+				unsigned long start, unsigned long end)
+{
+	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
+}
 EXPORT_SYMBOL(drm_mm_insert_node_in_range);
 
 /**
@@ -489,7 +508,7 @@ void drm_mm_init_scan(struct drm_mm *mm,
 	mm->scan_size = size;
 	mm->scanned_blocks = 0;
 	mm->scan_hit_start = 0;
-	mm->scan_hit_size = 0;
+	mm->scan_hit_end = 0;
 	mm->scan_check_range = 0;
 	mm->prev_scanned_node = NULL;
 }
@@ -516,7 +535,7 @@ void drm_mm_init_scan_with_range(struct drm_mm *mm,
 	mm->scan_size = size;
 	mm->scanned_blocks = 0;
 	mm->scan_hit_start = 0;
-	mm->scan_hit_size = 0;
+	mm->scan_hit_end = 0;
 	mm->scan_start = start;
 	mm->scan_end = end;
 	mm->scan_check_range = 1;
@@ -535,8 +554,7 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
 	struct drm_mm *mm = node->mm;
 	struct drm_mm_node *prev_node;
 	unsigned long hole_start, hole_end;
-	unsigned long adj_start;
-	unsigned long adj_end;
+	unsigned long adj_start, adj_end;
 
 	mm->scanned_blocks++;
 
@@ -553,14 +571,8 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
 	node->node_list.next = &mm->prev_scanned_node->node_list;
 	mm->prev_scanned_node = node;
 
-	hole_start = drm_mm_hole_node_start(prev_node);
-	hole_end = drm_mm_hole_node_end(prev_node);
-
-	adj_start = hole_start;
-	adj_end = hole_end;
-
-	if (mm->color_adjust)
-		mm->color_adjust(prev_node, mm->scan_color, &adj_start, &adj_end);
+	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
+	adj_end = hole_end = drm_mm_hole_node_end(prev_node);
 
 	if (mm->scan_check_range) {
 		if (adj_start < mm->scan_start)
@@ -569,11 +581,14 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
 			adj_end = mm->scan_end;
 	}
 
+	if (mm->color_adjust)
+		mm->color_adjust(prev_node, mm->scan_color,
+				 &adj_start, &adj_end);
+
 	if (check_free_hole(adj_start, adj_end,
 			    mm->scan_size, mm->scan_alignment)) {
 		mm->scan_hit_start = hole_start;
-		mm->scan_hit_size = hole_end;
-
+		mm->scan_hit_end = hole_end;
 		return 1;
 	}
 
@@ -609,19 +624,10 @@ int drm_mm_scan_remove_block(struct drm_mm_node *node)
 			       node_list);
 
 	prev_node->hole_follows = node->scanned_preceeds_hole;
-	INIT_LIST_HEAD(&node->node_list);
 	list_add(&node->node_list, &prev_node->node_list);
 
-	/* Only need to check for containement because start&size for the
-	 * complete resulting free block (not just the desired part) is
-	 * stored. */
-	if (node->start >= mm->scan_hit_start &&
-	    node->start + node->size
-			<= mm->scan_hit_start + mm->scan_hit_size) {
-		return 1;
-	}
-
-	return 0;
+	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
+		node->start < mm->scan_hit_end);
 }
 EXPORT_SYMBOL(drm_mm_scan_remove_block);
 
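
The comment quoted at the top of this file's diff documents the contract of the preallocated-node interface: the caller supplies a cleared drm_mm_node and gets -ENOSPC back if no hole fits. Below is a minimal sketch (not part of this patch) of how a driver would drive the new _generic entry point against an already-initialized struct drm_mm; the function name and the size/color values are illustrative only.

#include <linux/slab.h>
#include <drm/drm_mm.h>

/* Sketch only: reserve and release one range from an initialized drm_mm. */
static int example_alloc_range(struct drm_mm *mm, unsigned long size)
{
	struct drm_mm_node *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);	/* node must start out cleared */
	if (!node)
		return -ENOMEM;

	/* no alignment constraint; color 0 keeps the old (uncolored) behaviour */
	ret = drm_mm_insert_node_generic(mm, node, size, 0, 0);
	if (ret) {					/* typically -ENOSPC */
		kfree(node);
		return ret;
	}

	/* node->start is now the offset of the reserved range */

	drm_mm_remove_node(node);
	kfree(node);
	return 0;
}

The old drm_mm_insert_node() and drm_mm_insert_node_in_range() entry points remain as wrappers that pass color 0, so existing callers keep their behaviour.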
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c
index bef43e0342a6..4e9b5ba8edff 100644
--- a/drivers/gpu/drm/exynos/exynos_ddc.c
+++ b/drivers/gpu/drm/exynos/exynos_ddc.c
@@ -66,6 +66,6 @@ struct i2c_driver ddc_driver = {
 	},
 	.id_table = ddc_idtable,
 	.probe = s5p_ddc_probe,
-	.remove = __devexit_p(s5p_ddc_remove),
+	.remove = s5p_ddc_remove,
 	.command = NULL,
 };
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 9601bad47a2e..57affae9568b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -3,24 +3,10 @@
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd. 3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * Author: Inki Dae <inki.dae@samsung.com> 4 * Author: Inki Dae <inki.dae@samsung.com>
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * This program is free software; you can redistribute it and/or modify it
7 * copy of this software and associated documentation files (the "Software"), 7 * under the terms of the GNU General Public License as published by the
8 * to deal in the Software without restriction, including without limitation 8 * Free Software Foundation; either version 2 of the License, or (at your
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * option) any later version.
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 */ 10 */
25 11
26#include <drm/drmP.h> 12#include <drm/drmP.h>
@@ -29,6 +15,7 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_gem.h"
 #include "exynos_drm_buf.h"
+#include "exynos_drm_iommu.h"
 
 static int lowlevel_buffer_allocate(struct drm_device *dev,
 		unsigned int flags, struct exynos_drm_gem_buf *buf)
@@ -51,7 +38,7 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
 	 * region will be allocated else physically contiguous
 	 * as possible.
 	 */
-	if (flags & EXYNOS_BO_CONTIG)
+	if (!(flags & EXYNOS_BO_NONCONTIG))
 		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);
 
 	/*
@@ -66,14 +53,45 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
 	dma_set_attr(attr, &buf->dma_attrs);
 	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);
 
-	buf->pages = dma_alloc_attrs(dev->dev, buf->size,
-			&buf->dma_addr, GFP_KERNEL, &buf->dma_attrs);
-	if (!buf->pages) {
-		DRM_ERROR("failed to allocate buffer.\n");
-		return -ENOMEM;
+	nr_pages = buf->size >> PAGE_SHIFT;
+
+	if (!is_drm_iommu_supported(dev)) {
+		dma_addr_t start_addr;
+		unsigned int i = 0;
+
+		buf->pages = kzalloc(sizeof(struct page) * nr_pages,
+					GFP_KERNEL);
+		if (!buf->pages) {
+			DRM_ERROR("failed to allocate pages.\n");
+			return -ENOMEM;
+		}
+
+		buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
+					&buf->dma_addr, GFP_KERNEL,
+					&buf->dma_attrs);
+		if (!buf->kvaddr) {
+			DRM_ERROR("failed to allocate buffer.\n");
+			kfree(buf->pages);
+			return -ENOMEM;
+		}
+
+		start_addr = buf->dma_addr;
+		while (i < nr_pages) {
+			buf->pages[i] = phys_to_page(start_addr);
+			start_addr += PAGE_SIZE;
+			i++;
+		}
+	} else {
+
+		buf->pages = dma_alloc_attrs(dev->dev, buf->size,
+					&buf->dma_addr, GFP_KERNEL,
+					&buf->dma_attrs);
+		if (!buf->pages) {
+			DRM_ERROR("failed to allocate buffer.\n");
+			return -ENOMEM;
+		}
 	}
 
-	nr_pages = buf->size >> PAGE_SHIFT;
 	buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
 	if (!buf->sgt) {
 		DRM_ERROR("failed to get sg table.\n");
@@ -92,6 +110,9 @@ err_free_attrs:
 			(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
 	buf->dma_addr = (dma_addr_t)NULL;
 
+	if (!is_drm_iommu_supported(dev))
+		kfree(buf->pages);
+
 	return ret;
 }
 
@@ -114,8 +135,14 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
 	kfree(buf->sgt);
 	buf->sgt = NULL;
 
-	dma_free_attrs(dev->dev, buf->size, buf->pages,
+	if (!is_drm_iommu_supported(dev)) {
+		dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
 			(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
+		kfree(buf->pages);
+	} else
+		dma_free_attrs(dev->dev, buf->size, buf->pages,
+			(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
+
 	buf->dma_addr = (dma_addr_t)NULL;
 }
 
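
Both branches of the allocation above go through the DMA attributes interface of this kernel generation (struct dma_attrs, later replaced by plain flag bits in 4.8). A minimal, hedged sketch of that allocation pattern in isolation; the helper name and parameters are placeholders, not code from this patch.

#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>

/* Sketch only: writecombined, optionally contiguous DMA allocation. */
static void *example_alloc(struct device *dev, size_t size, bool contig,
			   dma_addr_t *dma_addr)
{
	struct dma_attrs attrs;

	init_dma_attrs(&attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	if (contig)
		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs);

	/* returns the CPU address; *dma_addr receives the device (bus/IOVA) address */
	return dma_alloc_attrs(dev, size, dma_addr, GFP_KERNEL, &attrs);
}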
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
index 25cf16285033..a6412f19673c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.h
@@ -3,24 +3,10 @@
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd. 3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * Author: Inki Dae <inki.dae@samsung.com> 4 * Author: Inki Dae <inki.dae@samsung.com>
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * This program is free software; you can redistribute it and/or modify it
7 * copy of this software and associated documentation files (the "Software"), 7 * under the terms of the GNU General Public License as published by the
8 * to deal in the Software without restriction, including without limitation 8 * Free Software Foundation; either version 2 of the License, or (at your
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * option) any later version.
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 */ 10 */
25 11
26#ifndef _EXYNOS_DRM_BUF_H_ 12#ifndef _EXYNOS_DRM_BUF_H_
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index 0f68a2872673..ab37437bad8a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -5,24 +5,10 @@
5 * Joonyoung Shim <jy0922.shim@samsung.com> 5 * Joonyoung Shim <jy0922.shim@samsung.com>
6 * Seung-Woo Kim <sw0312.kim@samsung.com> 6 * Seung-Woo Kim <sw0312.kim@samsung.com>
7 * 7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 8 * This program is free software; you can redistribute it and/or modify it
9 * copy of this software and associated documentation files (the "Software"), 9 * under the terms of the GNU General Public License as published by the
10 * to deal in the Software without restriction, including without limitation 10 * Free Software Foundation; either version 2 of the License, or (at your
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 11 * option) any later version.
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 */ 12 */
27 13
28#include <drm/drmP.h> 14#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.h b/drivers/gpu/drm/exynos/exynos_drm_connector.h
index 22f6cc442c3d..547c6b590357 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.h
@@ -5,24 +5,10 @@
5 * Joonyoung Shim <jy0922.shim@samsung.com> 5 * Joonyoung Shim <jy0922.shim@samsung.com>
6 * Seung-Woo Kim <sw0312.kim@samsung.com> 6 * Seung-Woo Kim <sw0312.kim@samsung.com>
7 * 7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 8 * This program is free software; you can redistribute it and/or modify it
9 * copy of this software and associated documentation files (the "Software"), 9 * under the terms of the GNU General Public License as published by the
10 * to deal in the Software without restriction, including without limitation 10 * Free Software Foundation; either version 2 of the License, or (at your
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 11 * option) any later version.
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 */ 12 */
27 13
28#ifndef _EXYNOS_DRM_CONNECTOR_H_ 14#ifndef _EXYNOS_DRM_CONNECTOR_H_
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index 94026ad76a77..4667c9f67acd 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -6,24 +6,10 @@
6 * Joonyoung Shim <jy0922.shim@samsung.com> 6 * Joonyoung Shim <jy0922.shim@samsung.com>
7 * Seung-Woo Kim <sw0312.kim@samsung.com> 7 * Seung-Woo Kim <sw0312.kim@samsung.com>
8 * 8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a 9 * This program is free software; you can redistribute it and/or modify it
10 * copy of this software and associated documentation files (the "Software"), 10 * under the terms of the GNU General Public License as published by the
11 * to deal in the Software without restriction, including without limitation 11 * Free Software Foundation; either version 2 of the License, or (at your
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 12 * option) any later version.
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26 * OTHER DEALINGS IN THE SOFTWARE.
27 */ 13 */
28 14
29#include <drm/drmP.h> 15#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 2efa4b031d73..e8894bc9e6d5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -6,24 +6,10 @@
6 * Joonyoung Shim <jy0922.shim@samsung.com> 6 * Joonyoung Shim <jy0922.shim@samsung.com>
7 * Seung-Woo Kim <sw0312.kim@samsung.com> 7 * Seung-Woo Kim <sw0312.kim@samsung.com>
8 * 8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a 9 * This program is free software; you can redistribute it and/or modify it
10 * copy of this software and associated documentation files (the "Software"), 10 * under the terms of the GNU General Public License as published by the
11 * to deal in the Software without restriction, including without limitation 11 * Free Software Foundation; either version 2 of the License, or (at your
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 12 * option) any later version.
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26 * OTHER DEALINGS IN THE SOFTWARE.
27 */ 13 */
28 14
29#include <drm/drmP.h> 15#include <drm/drmP.h>
@@ -407,3 +393,33 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
 	exynos_drm_fn_encoder(private->crtc[crtc], &crtc,
 			exynos_drm_disable_vblank);
 }
+
+void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int crtc)
+{
+	struct exynos_drm_private *dev_priv = dev->dev_private;
+	struct drm_pending_vblank_event *e, *t;
+	struct timeval now;
+	unsigned long flags;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+
+	list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
+			base.link) {
+		/* if event's pipe isn't same as crtc then ignore it. */
+		if (crtc != e->pipe)
+			continue;
+
+		do_gettimeofday(&now);
+		e->event.sequence = 0;
+		e->event.tv_sec = now.tv_sec;
+		e->event.tv_usec = now.tv_usec;
+
+		list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+		wake_up_interruptible(&e->base.file_priv->event_wait);
+		drm_vblank_put(dev, crtc);
+	}
+
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+}
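
exynos_drm_crtc_finish_pageflip() is meant to be called from a display controller's vblank interrupt once the flip has taken effect, after the vblank itself has been handled. A hedged sketch of that call pattern; example_ctx and example_display_irq are hypothetical names, not code from this patch.

#include <linux/interrupt.h>
#include <drm/drmP.h>
#include "exynos_drm_crtc.h"

struct example_ctx {			/* hypothetical driver state */
	struct drm_device *drm_dev;
	int pipe;
};

static irqreturn_t example_display_irq(int irq, void *arg)
{
	struct example_ctx *ctx = arg;

	/* ... acknowledge/clear the hardware vblank status here ... */

	drm_handle_vblank(ctx->drm_dev, ctx->pipe);
	exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);

	return IRQ_HANDLED;
}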
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 6bae8d8c250e..3e197e6ae7d9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -6,24 +6,10 @@
6 * Joonyoung Shim <jy0922.shim@samsung.com> 6 * Joonyoung Shim <jy0922.shim@samsung.com>
7 * Seung-Woo Kim <sw0312.kim@samsung.com> 7 * Seung-Woo Kim <sw0312.kim@samsung.com>
8 * 8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a 9 * This program is free software; you can redistribute it and/or modify it
10 * copy of this software and associated documentation files (the "Software"), 10 * under the terms of the GNU General Public License as published by the
11 * to deal in the Software without restriction, including without limitation 11 * Free Software Foundation; either version 2 of the License, or (at your
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 12 * option) any later version.
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26 * OTHER DEALINGS IN THE SOFTWARE.
27 */ 13 */
28 14
29#ifndef _EXYNOS_DRM_CRTC_H_ 15#ifndef _EXYNOS_DRM_CRTC_H_
@@ -32,5 +18,6 @@
 int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr);
 int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc);
 void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
+void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int crtc);
 
 #endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index 61d5a8402eb8..9df97714b6c0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -3,24 +3,10 @@
3 * Copyright (c) 2012 Samsung Electronics Co., Ltd. 3 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
4 * Author: Inki Dae <inki.dae@samsung.com> 4 * Author: Inki Dae <inki.dae@samsung.com>
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * This program is free software; you can redistribute it and/or modify it
7 * copy of this software and associated documentation files (the "Software"), 7 * under the terms of the GNU General Public License as published by the
8 * to deal in the Software without restriction, including without limitation 8 * Free Software Foundation; either version 2 of the License, or (at your
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * option) any later version.
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 */ 10 */
25 11
26#include <drm/drmP.h> 12#include <drm/drmP.h>
@@ -222,7 +208,7 @@ struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
 	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
 
 	return dma_buf_export(exynos_gem_obj, &exynos_dmabuf_ops,
-				exynos_gem_obj->base.size, 0600);
+				exynos_gem_obj->base.size, flags);
 }
 
 struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
@@ -246,7 +232,12 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
 
 	/* is it from our device? */
 	if (obj->dev == drm_dev) {
+		/*
+		 * Importing dmabuf exported from out own gem increases
+		 * refcount on gem itself instead of f_count of dmabuf.
+		 */
 		drm_gem_object_reference(obj);
+		dma_buf_put(dma_buf);
 		return obj;
 	}
 	}
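
The comment added above explains the self-import case: when a dma-buf that this driver exported comes back through PRIME, the import path must take a reference on the underlying GEM object and drop the dma-buf file reference it was handed. A rough sketch of that branch in a generic import callback; example_prime_import and example_dmabuf_ops are placeholders, dma_buf->priv is assumed to hold the exporting GEM object, and the foreign-buffer path is omitted.

#include <linux/dma-buf.h>
#include <linux/err.h>
#include <drm/drmP.h>

extern const struct dma_buf_ops example_dmabuf_ops;	/* this driver's exporter ops */

static struct drm_gem_object *
example_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
{
	/* did we export this buffer ourselves? */
	if (dma_buf->ops == &example_dmabuf_ops) {
		struct drm_gem_object *obj = dma_buf->priv;

		if (obj->dev == dev) {
			/* hold the GEM object, not the dma-buf file */
			drm_gem_object_reference(obj);
			dma_buf_put(dma_buf);
			return obj;
		}
	}

	/* otherwise attach and map the foreign buffer (not shown) */
	return ERR_PTR(-EINVAL);
}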
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
index 662a8f98ccdb..49acfafb4fdb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
@@ -3,24 +3,10 @@
3 * Copyright (c) 2012 Samsung Electronics Co., Ltd. 3 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
4 * Author: Inki Dae <inki.dae@samsung.com> 4 * Author: Inki Dae <inki.dae@samsung.com>
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * This program is free software; you can redistribute it and/or modify it
7 * copy of this software and associated documentation files (the "Software"), 7 * under the terms of the GNU General Public License as published by the
8 * to deal in the Software without restriction, including without limitation 8 * Free Software Foundation; either version 2 of the License, or (at your
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * option) any later version.
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 */ 10 */
25 11
26#ifndef _EXYNOS_DRM_DMABUF_H_ 12#ifndef _EXYNOS_DRM_DMABUF_H_
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index e0a8e8024b01..3da5c2d214d8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -5,24 +5,10 @@
5 * Joonyoung Shim <jy0922.shim@samsung.com> 5 * Joonyoung Shim <jy0922.shim@samsung.com>
6 * Seung-Woo Kim <sw0312.kim@samsung.com> 6 * Seung-Woo Kim <sw0312.kim@samsung.com>
7 * 7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 8 * This program is free software; you can redistribute it and/or modify it
9 * copy of this software and associated documentation files (the "Software"), 9 * under the terms of the GNU General Public License as published by the
10 * to deal in the Software without restriction, including without limitation 10 * Free Software Foundation; either version 2 of the License, or (at your
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 11 * option) any later version.
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 */ 12 */
27 13
28#include <drm/drmP.h> 14#include <drm/drmP.h>
@@ -325,7 +311,7 @@ static int exynos_drm_platform_remove(struct platform_device *pdev)
 
 static struct platform_driver exynos_drm_platform_driver = {
 	.probe		= exynos_drm_platform_probe,
-	.remove		= __devexit_p(exynos_drm_platform_remove),
+	.remove		= exynos_drm_platform_remove,
 	.driver		= {
 		.owner	= THIS_MODULE,
 		.name	= "exynos-drm",
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index f5a97745bf93..b9e51bc09e81 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -6,24 +6,10 @@
6 * Joonyoung Shim <jy0922.shim@samsung.com> 6 * Joonyoung Shim <jy0922.shim@samsung.com>
7 * Seung-Woo Kim <sw0312.kim@samsung.com> 7 * Seung-Woo Kim <sw0312.kim@samsung.com>
8 * 8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a 9 * This program is free software; you can redistribute it and/or modify it
10 * copy of this software and associated documentation files (the "Software"), 10 * under the terms of the GNU General Public License as published by the
11 * to deal in the Software without restriction, including without limitation 11 * Free Software Foundation; either version 2 of the License, or (at your
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 12 * option) any later version.
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26 * OTHER DEALINGS IN THE SOFTWARE.
27 */ 13 */
28 14
29#ifndef _EXYNOS_DRM_DRV_H_ 15#ifndef _EXYNOS_DRM_DRV_H_
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index 301485215a70..c63721f64aec 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -6,24 +6,10 @@
6 * Joonyoung Shim <jy0922.shim@samsung.com> 6 * Joonyoung Shim <jy0922.shim@samsung.com>
7 * Seung-Woo Kim <sw0312.kim@samsung.com> 7 * Seung-Woo Kim <sw0312.kim@samsung.com>
8 * 8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a 9 * This program is free software; you can redistribute it and/or modify it
10 * copy of this software and associated documentation files (the "Software"), 10 * under the terms of the GNU General Public License as published by the
11 * to deal in the Software without restriction, including without limitation 11 * Free Software Foundation; either version 2 of the License, or (at your
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 12 * option) any later version.
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26 * OTHER DEALINGS IN THE SOFTWARE.
27 */ 13 */
28 14
29#include <drm/drmP.h> 15#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
index 88bb25a2a917..89e2fb0770af 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -5,24 +5,10 @@
5 * Joonyoung Shim <jy0922.shim@samsung.com> 5 * Joonyoung Shim <jy0922.shim@samsung.com>
6 * Seung-Woo Kim <sw0312.kim@samsung.com> 6 * Seung-Woo Kim <sw0312.kim@samsung.com>
7 * 7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 8 * This program is free software; you can redistribute it and/or modify it
9 * copy of this software and associated documentation files (the "Software"), 9 * under the terms of the GNU General Public License as published by the
10 * to deal in the Software without restriction, including without limitation 10 * Free Software Foundation; either version 2 of the License, or (at your
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 11 * option) any later version.
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 */ 12 */
27 13
28#ifndef _EXYNOS_DRM_ENCODER_H_ 14#ifndef _EXYNOS_DRM_ENCODER_H_
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 5426cc5a5e8d..294c0513f587 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -6,24 +6,10 @@
6 * Joonyoung Shim <jy0922.shim@samsung.com> 6 * Joonyoung Shim <jy0922.shim@samsung.com>
7 * Seung-Woo Kim <sw0312.kim@samsung.com> 7 * Seung-Woo Kim <sw0312.kim@samsung.com>
8 * 8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a 9 * This program is free software; you can redistribute it and/or modify it
10 * copy of this software and associated documentation files (the "Software"), 10 * under the terms of the GNU General Public License as published by the
11 * to deal in the Software without restriction, including without limitation 11 * Free Software Foundation; either version 2 of the License, or (at your
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 12 * option) any later version.
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26 * OTHER DEALINGS IN THE SOFTWARE.
27 */ 13 */
28 14
29#include <drm/drmP.h> 15#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h
index 96262e54f76d..517471b37566 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h
@@ -5,24 +5,10 @@
5 * Joonyoung Shim <jy0922.shim@samsung.com> 5 * Joonyoung Shim <jy0922.shim@samsung.com>
6 * Seung-Woo Kim <sw0312.kim@samsung.com> 6 * Seung-Woo Kim <sw0312.kim@samsung.com>
7 * 7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 8 * This program is free software; you can redistribute it and/or modify it
9 * copy of this software and associated documentation files (the "Software"), 9 * under the terms of the GNU General Public License as published by the
10 * to deal in the Software without restriction, including without limitation 10 * Free Software Foundation; either version 2 of the License, or (at your
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 11 * option) any later version.
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 */ 12 */
27 13
28#ifndef _EXYNOS_DRM_FB_H_ 14#ifndef _EXYNOS_DRM_FB_H_
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index f433eb7533a9..71f867340a88 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -6,24 +6,10 @@
6 * Joonyoung Shim <jy0922.shim@samsung.com> 6 * Joonyoung Shim <jy0922.shim@samsung.com>
7 * Seung-Woo Kim <sw0312.kim@samsung.com> 7 * Seung-Woo Kim <sw0312.kim@samsung.com>
8 * 8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a 9 * This program is free software; you can redistribute it and/or modify it
10 * copy of this software and associated documentation files (the "Software"), 10 * under the terms of the GNU General Public License as published by the
11 * to deal in the Software without restriction, including without limitation 11 * Free Software Foundation; either version 2 of the License, or (at your
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 12 * option) any later version.
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26 * OTHER DEALINGS IN THE SOFTWARE.
27 */ 13 */
28 14
29#include <drm/drmP.h> 15#include <drm/drmP.h>
@@ -34,6 +20,7 @@
34#include "exynos_drm_drv.h" 20#include "exynos_drm_drv.h"
35#include "exynos_drm_fb.h" 21#include "exynos_drm_fb.h"
36#include "exynos_drm_gem.h" 22#include "exynos_drm_gem.h"
23#include "exynos_drm_iommu.h"
37 24
38#define MAX_CONNECTOR 4 25#define MAX_CONNECTOR 4
39#define PREFERRED_BPP 32 26#define PREFERRED_BPP 32
@@ -111,9 +98,18 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
111 98
112 /* map pages with kernel virtual space. */ 99 /* map pages with kernel virtual space. */
113 if (!buffer->kvaddr) { 100 if (!buffer->kvaddr) {
114 unsigned int nr_pages = buffer->size >> PAGE_SHIFT; 101 if (is_drm_iommu_supported(dev)) {
115 buffer->kvaddr = vmap(buffer->pages, nr_pages, VM_MAP, 102 unsigned int nr_pages = buffer->size >> PAGE_SHIFT;
103
104 buffer->kvaddr = vmap(buffer->pages, nr_pages, VM_MAP,
116 pgprot_writecombine(PAGE_KERNEL)); 105 pgprot_writecombine(PAGE_KERNEL));
106 } else {
107 phys_addr_t dma_addr = buffer->dma_addr;
108 if (dma_addr)
109 buffer->kvaddr = phys_to_virt(dma_addr);
110 else
111 buffer->kvaddr = (void __iomem *)NULL;
112 }
117 if (!buffer->kvaddr) { 113 if (!buffer->kvaddr) {
118 DRM_ERROR("failed to map pages to kernel space.\n"); 114 DRM_ERROR("failed to map pages to kernel space.\n");
119 return -EIO; 115 return -EIO;
@@ -128,8 +124,12 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
128 124
129 dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr; 125 dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
130 fbi->screen_base = buffer->kvaddr + offset; 126 fbi->screen_base = buffer->kvaddr + offset;
131 fbi->fix.smem_start = (unsigned long) 127 if (is_drm_iommu_supported(dev))
128 fbi->fix.smem_start = (unsigned long)
132 (page_to_phys(sg_page(buffer->sgt->sgl)) + offset); 129 (page_to_phys(sg_page(buffer->sgt->sgl)) + offset);
130 else
131 fbi->fix.smem_start = (unsigned long)buffer->dma_addr;
132
133 fbi->screen_size = size; 133 fbi->screen_size = size;
134 fbi->fix.smem_len = size; 134 fbi->fix.smem_len = size;
135 135
@@ -320,7 +320,7 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev,
320 struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj; 320 struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
321 struct drm_framebuffer *fb; 321 struct drm_framebuffer *fb;
322 322
323 if (exynos_gem_obj->buffer->kvaddr) 323 if (is_drm_iommu_supported(dev) && exynos_gem_obj->buffer->kvaddr)
324 vunmap(exynos_gem_obj->buffer->kvaddr); 324 vunmap(exynos_gem_obj->buffer->kvaddr);
325 325
326 /* release drm framebuffer and real buffer */ 326 /* release drm framebuffer and real buffer */
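
Taken together, the exynos_drm_fbdev.c hunks make the kernel mapping of the framebuffer depend on whether the device sits behind an IOMMU: with an IOMMU the backing pages are scattered and have to be stitched together with vmap(), without one the buffer is physically contiguous and the CPU address can be derived from the DMA address directly, and only the vmap() case needs a matching vunmap() in the destroy path. A minimal sketch of that decision, reusing the field and helper names visible in the diff (illustrative, not the literal driver code):

```c
#include <linux/vmalloc.h>	/* vmap() */
#include <linux/io.h>		/* phys_to_virt() */

/* Sketch: obtain a kernel virtual address for the fbdev buffer. */
static int fbdev_map_kernel_sketch(struct drm_device *dev,
				   struct exynos_drm_gem_buf *buffer)
{
	if (is_drm_iommu_supported(dev)) {
		/* device-contiguous but physically scattered pages:
		 * build a write-combined kernel mapping over them */
		unsigned int nr_pages = buffer->size >> PAGE_SHIFT;

		buffer->kvaddr = vmap(buffer->pages, nr_pages, VM_MAP,
				      pgprot_writecombine(PAGE_KERNEL));
	} else {
		/* physically contiguous allocation: the DMA address is a
		 * physical address the CPU can reach via the linear map */
		buffer->kvaddr = buffer->dma_addr ?
				 phys_to_virt(buffer->dma_addr) : NULL;
	}

	return buffer->kvaddr ? 0 : -EIO;
}
```

That split also explains the destroy hunk: only the IOMMU path created a vmap() mapping, so vunmap() is now gated on is_drm_iommu_supported().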
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
index ccfce8a1a451..e16d7f0ae192 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
@@ -6,24 +6,10 @@
6 * Joonyoung Shim <jy0922.shim@samsung.com> 6 * Joonyoung Shim <jy0922.shim@samsung.com>
7 * Seung-Woo Kim <sw0312.kim@samsung.com> 7 * Seung-Woo Kim <sw0312.kim@samsung.com>
8 * 8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a 9 * This program is free software; you can redistribute it and/or modify it
10 * copy of this software and associated documentation files (the "Software"), 10 * under the terms of the GNU General Public License as published by the
11 * to deal in the Software without restriction, including without limitation 11 * Free Software Foundation; either version 2 of the License, or (at your
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 12 * option) any later version.
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26 * OTHER DEALINGS IN THE SOFTWARE.
27 */ 13 */
28 14
29#ifndef _EXYNOS_DRM_FBDEV_H_ 15#ifndef _EXYNOS_DRM_FBDEV_H_
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 61ea24296b52..67a83e69544b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -25,7 +25,7 @@
25#include "exynos_drm_fimc.h" 25#include "exynos_drm_fimc.h"
26 26
27/* 27/*
28 * FIMC is stand for Fully Interactive Mobile Camera and
 28 * FIMC stands for Fully Interactive Mobile Camera and
29 * supports image scaler/rotator and input/output DMA operations. 29 * supports image scaler/rotator and input/output DMA operations.
30 * input DMA reads image data from the memory. 30 * input DMA reads image data from the memory.
31 * output DMA writes image data to memory. 31 * output DMA writes image data to memory.
@@ -163,19 +163,29 @@ struct fimc_context {
163 bool suspended; 163 bool suspended;
164}; 164};
165 165
166static void fimc_sw_reset(struct fimc_context *ctx, bool pattern)
 166static void fimc_sw_reset(struct fimc_context *ctx)
167{ 167{
168 u32 cfg; 168 u32 cfg;
169 169
170 DRM_DEBUG_KMS("%s:pattern[%d]\n", __func__, pattern);
 170 DRM_DEBUG_KMS("%s\n", __func__);
171
172 /* stop dma operation */
173 cfg = fimc_read(EXYNOS_CISTATUS);
174 if (EXYNOS_CISTATUS_GET_ENVID_STATUS(cfg)) {
175 cfg = fimc_read(EXYNOS_MSCTRL);
176 cfg &= ~EXYNOS_MSCTRL_ENVID;
177 fimc_write(cfg, EXYNOS_MSCTRL);
178 }
171 179
172 cfg = fimc_read(EXYNOS_CISRCFMT); 180 cfg = fimc_read(EXYNOS_CISRCFMT);
173 cfg |= EXYNOS_CISRCFMT_ITU601_8BIT; 181 cfg |= EXYNOS_CISRCFMT_ITU601_8BIT;
174 if (pattern)
175 cfg |= EXYNOS_CIGCTRL_TESTPATTERN_COLOR_BAR;
176
177 fimc_write(cfg, EXYNOS_CISRCFMT); 182 fimc_write(cfg, EXYNOS_CISRCFMT);
178 183
184 /* disable image capture */
185 cfg = fimc_read(EXYNOS_CIIMGCPT);
186 cfg &= ~(EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
187 fimc_write(cfg, EXYNOS_CIIMGCPT);
188
179 /* s/w reset */ 189 /* s/w reset */
180 cfg = fimc_read(EXYNOS_CIGCTRL); 190 cfg = fimc_read(EXYNOS_CIGCTRL);
181 cfg |= (EXYNOS_CIGCTRL_SWRST); 191 cfg |= (EXYNOS_CIGCTRL_SWRST);
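
Read in one piece, the reworked reset path quiesces the block before asserting the reset bit: stop the input DMA if ENVID is still set, force ITU601 8-bit on the source format register, drop the image-capture enables and only then set SWRST; the test-pattern argument is gone, so fimc_ippdrv_reset() further down simply calls fimc_sw_reset(ctx). A consolidated sketch of the resulting sequence (fimc_read()/fimc_write() are assumed to be the driver's readl/writel wrappers over ctx->regs, register names as in the diff):

```c
/* Sketch of the post-patch reset ordering; not the literal driver code. */
static void fimc_sw_reset_sketch(struct fimc_context *ctx)
{
	u32 cfg;

	/* 1. stop any in-flight input DMA */
	if (EXYNOS_CISTATUS_GET_ENVID_STATUS(fimc_read(EXYNOS_CISTATUS))) {
		cfg = fimc_read(EXYNOS_MSCTRL);
		cfg &= ~EXYNOS_MSCTRL_ENVID;
		fimc_write(cfg, EXYNOS_MSCTRL);
	}

	/* 2. keep the source format sane across the reset */
	cfg = fimc_read(EXYNOS_CISRCFMT);
	cfg |= EXYNOS_CISRCFMT_ITU601_8BIT;
	fimc_write(cfg, EXYNOS_CISRCFMT);

	/* 3. disable image capture */
	cfg = fimc_read(EXYNOS_CIIMGCPT);
	cfg &= ~(EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
	fimc_write(cfg, EXYNOS_CIIMGCPT);

	/* 4. finally assert the software reset bit */
	cfg = fimc_read(EXYNOS_CIGCTRL);
	cfg |= EXYNOS_CIGCTRL_SWRST;
	fimc_write(cfg, EXYNOS_CIGCTRL);
}
```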
@@ -695,7 +705,7 @@ static int fimc_src_set_addr(struct device *dev,
695{ 705{
696 struct fimc_context *ctx = get_fimc_context(dev); 706 struct fimc_context *ctx = get_fimc_context(dev);
697 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 707 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
698 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd; 708 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
699 struct drm_exynos_ipp_property *property; 709 struct drm_exynos_ipp_property *property;
700 struct drm_exynos_ipp_config *config; 710 struct drm_exynos_ipp_config *config;
701 711
@@ -705,10 +715,6 @@ static int fimc_src_set_addr(struct device *dev,
705 } 715 }
706 716
707 property = &c_node->property; 717 property = &c_node->property;
708 if (!property) {
709 DRM_ERROR("failed to get property.\n");
710 return -EINVAL;
711 }
712 718
713 DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__, 719 DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
714 property->prop_id, buf_id, buf_type); 720 property->prop_id, buf_id, buf_type);
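
The deleted checks here (and the identical ones removed from fimc_dst_set_addr(), fimc_ippdrv_start(), and the gsc and ipp code below) are dead code rather than lost error handling: property is assigned the address of a member embedded by value in c_node, and the address of a member of a non-NULL structure is itself never NULL. A tiny illustration with hypothetical types (not the driver's):

```c
#include <linux/errno.h>

struct demo_property { int prop_id; };		/* hypothetical */

struct demo_cmd_node {				/* hypothetical */
	struct demo_property property;		/* embedded by value */
};

static int demo_get_prop_id(struct demo_cmd_node *c_node)
{
	struct demo_property *property;

	if (!c_node)
		return -EINVAL;			/* the only test that can fail */

	property = &c_node->property;
	/*
	 * property == (void *)c_node + offsetof(..., property), which is
	 * non-NULL whenever c_node is, so a following "if (!property)"
	 * branch can never be taken; removing it changes no behaviour.
	 */
	return property->prop_id;
}
```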
@@ -1206,7 +1212,7 @@ static int fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
1206 } 1212 }
1207 1213
1208 /* sequence id */ 1214 /* sequence id */
1209 cfg &= (~mask); 1215 cfg &= ~mask;
1210 cfg |= (enable << buf_id); 1216 cfg |= (enable << buf_id);
1211 fimc_write(cfg, EXYNOS_CIFCNTSEQ); 1217 fimc_write(cfg, EXYNOS_CIFCNTSEQ);
1212 1218
@@ -1231,7 +1237,7 @@ static int fimc_dst_set_addr(struct device *dev,
1231{ 1237{
1232 struct fimc_context *ctx = get_fimc_context(dev); 1238 struct fimc_context *ctx = get_fimc_context(dev);
1233 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 1239 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1234 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd; 1240 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
1235 struct drm_exynos_ipp_property *property; 1241 struct drm_exynos_ipp_property *property;
1236 struct drm_exynos_ipp_config *config; 1242 struct drm_exynos_ipp_config *config;
1237 1243
@@ -1241,10 +1247,6 @@ static int fimc_dst_set_addr(struct device *dev,
1241 } 1247 }
1242 1248
1243 property = &c_node->property; 1249 property = &c_node->property;
1244 if (!property) {
1245 DRM_ERROR("failed to get property.\n");
1246 return -EINVAL;
1247 }
1248 1250
1249 DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__, 1251 DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
1250 property->prop_id, buf_id, buf_type); 1252 property->prop_id, buf_id, buf_type);
@@ -1317,7 +1319,7 @@ static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
1317{ 1319{
1318 struct fimc_context *ctx = dev_id; 1320 struct fimc_context *ctx = dev_id;
1319 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 1321 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1320 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd; 1322 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
1321 struct drm_exynos_ipp_event_work *event_work = 1323 struct drm_exynos_ipp_event_work *event_work =
1322 c_node->event_work; 1324 c_node->event_work;
1323 int buf_id; 1325 int buf_id;
@@ -1395,6 +1397,7 @@ static inline bool fimc_check_drm_flip(enum drm_exynos_flip flip)
1395 case EXYNOS_DRM_FLIP_NONE: 1397 case EXYNOS_DRM_FLIP_NONE:
1396 case EXYNOS_DRM_FLIP_VERTICAL: 1398 case EXYNOS_DRM_FLIP_VERTICAL:
1397 case EXYNOS_DRM_FLIP_HORIZONTAL: 1399 case EXYNOS_DRM_FLIP_HORIZONTAL:
1400 case EXYNOS_DRM_FLIP_BOTH:
1398 return true; 1401 return true;
1399 default: 1402 default:
1400 DRM_DEBUG_KMS("%s:invalid flip\n", __func__); 1403 DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
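
The added case closes a real gap: a 180 degree rotation is requested as vertical and horizontal flip combined, and the old switch rejected that combination as invalid. With EXYNOS_DRM_FLIP_BOTH defined as the OR of the two single flips (which is what the matching gsc hunk below implies), the validation helper after this hunk reads:

```c
static inline bool fimc_check_drm_flip(enum drm_exynos_flip flip)
{
	switch (flip) {
	case EXYNOS_DRM_FLIP_NONE:
	case EXYNOS_DRM_FLIP_VERTICAL:
	case EXYNOS_DRM_FLIP_HORIZONTAL:
	case EXYNOS_DRM_FLIP_BOTH:		/* vertical | horizontal */
		return true;
	default:
		DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
		return false;
	}
}
```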
@@ -1543,7 +1546,7 @@ static int fimc_ippdrv_reset(struct device *dev)
1543 DRM_DEBUG_KMS("%s\n", __func__); 1546 DRM_DEBUG_KMS("%s\n", __func__);
1544 1547
1545 /* reset h/w block */ 1548 /* reset h/w block */
1546 fimc_sw_reset(ctx, false); 1549 fimc_sw_reset(ctx);
1547 1550
1548 /* reset scaler capability */ 1551 /* reset scaler capability */
1549 memset(&ctx->sc, 0x0, sizeof(ctx->sc)); 1552 memset(&ctx->sc, 0x0, sizeof(ctx->sc));
@@ -1557,7 +1560,7 @@ static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1557{ 1560{
1558 struct fimc_context *ctx = get_fimc_context(dev); 1561 struct fimc_context *ctx = get_fimc_context(dev);
1559 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 1562 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1560 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd; 1563 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
1561 struct drm_exynos_ipp_property *property; 1564 struct drm_exynos_ipp_property *property;
1562 struct drm_exynos_ipp_config *config; 1565 struct drm_exynos_ipp_config *config;
1563 struct drm_exynos_pos img_pos[EXYNOS_DRM_OPS_MAX]; 1566 struct drm_exynos_pos img_pos[EXYNOS_DRM_OPS_MAX];
@@ -1573,10 +1576,6 @@ static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1573 } 1576 }
1574 1577
1575 property = &c_node->property; 1578 property = &c_node->property;
1576 if (!property) {
1577 DRM_ERROR("failed to get property.\n");
1578 return -EINVAL;
1579 }
1580 1579
1581 fimc_handle_irq(ctx, true, false, true); 1580 fimc_handle_irq(ctx, true, false, true);
1582 1581
@@ -1714,7 +1713,7 @@ static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1714 fimc_write(cfg, EXYNOS_CIGCTRL); 1713 fimc_write(cfg, EXYNOS_CIGCTRL);
1715} 1714}
1716 1715
1717static int __devinit fimc_probe(struct platform_device *pdev) 1716static int fimc_probe(struct platform_device *pdev)
1718{ 1717{
1719 struct device *dev = &pdev->dev; 1718 struct device *dev = &pdev->dev;
1720 struct fimc_context *ctx; 1719 struct fimc_context *ctx;
@@ -1739,93 +1738,64 @@ static int __devinit fimc_probe(struct platform_device *pdev)
1739 platform_get_device_id(pdev)->driver_data; 1738 platform_get_device_id(pdev)->driver_data;
1740 1739
1741 /* clock control */ 1740 /* clock control */
1742 ctx->sclk_fimc_clk = clk_get(dev, "sclk_fimc"); 1741 ctx->sclk_fimc_clk = devm_clk_get(dev, "sclk_fimc");
1743 if (IS_ERR(ctx->sclk_fimc_clk)) { 1742 if (IS_ERR(ctx->sclk_fimc_clk)) {
1744 dev_err(dev, "failed to get src fimc clock.\n"); 1743 dev_err(dev, "failed to get src fimc clock.\n");
1745 ret = PTR_ERR(ctx->sclk_fimc_clk); 1744 return PTR_ERR(ctx->sclk_fimc_clk);
1746 goto err_ctx;
1747 } 1745 }
1748 clk_enable(ctx->sclk_fimc_clk); 1746 clk_enable(ctx->sclk_fimc_clk);
1749 1747
1750 ctx->fimc_clk = clk_get(dev, "fimc"); 1748 ctx->fimc_clk = devm_clk_get(dev, "fimc");
1751 if (IS_ERR(ctx->fimc_clk)) { 1749 if (IS_ERR(ctx->fimc_clk)) {
1752 dev_err(dev, "failed to get fimc clock.\n"); 1750 dev_err(dev, "failed to get fimc clock.\n");
1753 ret = PTR_ERR(ctx->fimc_clk);
1754 clk_disable(ctx->sclk_fimc_clk); 1751 clk_disable(ctx->sclk_fimc_clk);
1755 clk_put(ctx->sclk_fimc_clk); 1752 return PTR_ERR(ctx->fimc_clk);
1756 goto err_ctx;
1757 } 1753 }
1758 1754
1759 ctx->wb_clk = clk_get(dev, "pxl_async0"); 1755 ctx->wb_clk = devm_clk_get(dev, "pxl_async0");
1760 if (IS_ERR(ctx->wb_clk)) { 1756 if (IS_ERR(ctx->wb_clk)) {
1761 dev_err(dev, "failed to get writeback a clock.\n"); 1757 dev_err(dev, "failed to get writeback a clock.\n");
1762 ret = PTR_ERR(ctx->wb_clk);
1763 clk_disable(ctx->sclk_fimc_clk); 1758 clk_disable(ctx->sclk_fimc_clk);
1764 clk_put(ctx->sclk_fimc_clk); 1759 return PTR_ERR(ctx->wb_clk);
1765 clk_put(ctx->fimc_clk);
1766 goto err_ctx;
1767 } 1760 }
1768 1761
1769 ctx->wb_b_clk = clk_get(dev, "pxl_async1"); 1762 ctx->wb_b_clk = devm_clk_get(dev, "pxl_async1");
1770 if (IS_ERR(ctx->wb_b_clk)) { 1763 if (IS_ERR(ctx->wb_b_clk)) {
1771 dev_err(dev, "failed to get writeback b clock.\n"); 1764 dev_err(dev, "failed to get writeback b clock.\n");
1772 ret = PTR_ERR(ctx->wb_b_clk);
1773 clk_disable(ctx->sclk_fimc_clk); 1765 clk_disable(ctx->sclk_fimc_clk);
1774 clk_put(ctx->sclk_fimc_clk); 1766 return PTR_ERR(ctx->wb_b_clk);
1775 clk_put(ctx->fimc_clk);
1776 clk_put(ctx->wb_clk);
1777 goto err_ctx;
1778 } 1767 }
1779 1768
1780 parent_clk = clk_get(dev, ddata->parent_clk); 1769 parent_clk = devm_clk_get(dev, ddata->parent_clk);
1781 1770
1782 if (IS_ERR(parent_clk)) { 1771 if (IS_ERR(parent_clk)) {
1783 dev_err(dev, "failed to get parent clock.\n"); 1772 dev_err(dev, "failed to get parent clock.\n");
1784 ret = PTR_ERR(parent_clk);
1785 clk_disable(ctx->sclk_fimc_clk); 1773 clk_disable(ctx->sclk_fimc_clk);
1786 clk_put(ctx->sclk_fimc_clk); 1774 return PTR_ERR(parent_clk);
1787 clk_put(ctx->fimc_clk);
1788 clk_put(ctx->wb_clk);
1789 clk_put(ctx->wb_b_clk);
1790 goto err_ctx;
1791 } 1775 }
1792 1776
1793 if (clk_set_parent(ctx->sclk_fimc_clk, parent_clk)) { 1777 if (clk_set_parent(ctx->sclk_fimc_clk, parent_clk)) {
1794 dev_err(dev, "failed to set parent.\n"); 1778 dev_err(dev, "failed to set parent.\n");
1795 ret = -EINVAL;
1796 clk_put(parent_clk);
1797 clk_disable(ctx->sclk_fimc_clk); 1779 clk_disable(ctx->sclk_fimc_clk);
1798 clk_put(ctx->sclk_fimc_clk); 1780 return -EINVAL;
1799 clk_put(ctx->fimc_clk);
1800 clk_put(ctx->wb_clk);
1801 clk_put(ctx->wb_b_clk);
1802 goto err_ctx;
1803 } 1781 }
1804 1782
1805 clk_put(parent_clk); 1783 devm_clk_put(dev, parent_clk);
1806 clk_set_rate(ctx->sclk_fimc_clk, pdata->clk_rate); 1784 clk_set_rate(ctx->sclk_fimc_clk, pdata->clk_rate);
1807 1785
1808 /* resource memory */ 1786 /* resource memory */
1809 ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1787 ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1810 if (!ctx->regs_res) {
1811 dev_err(dev, "failed to find registers.\n");
1812 ret = -ENOENT;
1813 goto err_clk;
1814 }
1815
1816 ctx->regs = devm_request_and_ioremap(dev, ctx->regs_res); 1788 ctx->regs = devm_request_and_ioremap(dev, ctx->regs_res);
1817 if (!ctx->regs) { 1789 if (!ctx->regs) {
1818 dev_err(dev, "failed to map registers.\n"); 1790 dev_err(dev, "failed to map registers.\n");
1819 ret = -ENXIO; 1791 return -ENXIO;
1820 goto err_clk;
1821 } 1792 }
1822 1793
1823 /* resource irq */ 1794 /* resource irq */
1824 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 1795 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1825 if (!res) { 1796 if (!res) {
1826 dev_err(dev, "failed to request irq resource.\n"); 1797 dev_err(dev, "failed to request irq resource.\n");
1827 ret = -ENOENT; 1798 return -ENOENT;
1828 goto err_get_regs;
1829 } 1799 }
1830 1800
1831 ctx->irq = res->start; 1801 ctx->irq = res->start;
@@ -1833,7 +1803,7 @@ static int __devinit fimc_probe(struct platform_device *pdev)
1833 IRQF_ONESHOT, "drm_fimc", ctx); 1803 IRQF_ONESHOT, "drm_fimc", ctx);
1834 if (ret < 0) { 1804 if (ret < 0) {
1835 dev_err(dev, "failed to request irq.\n"); 1805 dev_err(dev, "failed to request irq.\n");
1836 goto err_get_regs; 1806 return ret;
1837 } 1807 }
1838 1808
1839 /* context initailization */ 1809 /* context initailization */
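
Most of the probe() churn in this file comes from switching clk_get() to devm_clk_get(): a device-managed clock reference is released automatically when probe fails or the device is unbound, so each early return no longer needs the growing ladder of clk_put() calls and goto labels. The shape of the pattern, reduced to two clocks, as a sketch (names are illustrative):

```c
/* Sketch of the devm-managed clock pattern; not the full fimc probe. */
static int demo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct clk *sclk, *gate;

	sclk = devm_clk_get(dev, "sclk_fimc");
	if (IS_ERR(sclk))
		return PTR_ERR(sclk);	/* nothing to unwind */

	gate = devm_clk_get(dev, "fimc");
	if (IS_ERR(gate))
		return PTR_ERR(gate);	/* sclk is dropped by devres too */

	clk_enable(sclk);
	/*
	 * Later failures still have to undo side effects such as
	 * clk_enable() or request_irq(), but never clk_put().
	 */
	return 0;
}
```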
@@ -1879,19 +1849,11 @@ err_ippdrv_register:
1879 pm_runtime_disable(dev); 1849 pm_runtime_disable(dev);
1880err_get_irq: 1850err_get_irq:
1881 free_irq(ctx->irq, ctx); 1851 free_irq(ctx->irq, ctx);
1882err_get_regs: 1852
1883 devm_iounmap(dev, ctx->regs);
1884err_clk:
1885 clk_put(ctx->sclk_fimc_clk);
1886 clk_put(ctx->fimc_clk);
1887 clk_put(ctx->wb_clk);
1888 clk_put(ctx->wb_b_clk);
1889err_ctx:
1890 devm_kfree(dev, ctx);
1891 return ret; 1853 return ret;
1892} 1854}
1893 1855
1894static int __devexit fimc_remove(struct platform_device *pdev) 1856static int fimc_remove(struct platform_device *pdev)
1895{ 1857{
1896 struct device *dev = &pdev->dev; 1858 struct device *dev = &pdev->dev;
1897 struct fimc_context *ctx = get_fimc_context(dev); 1859 struct fimc_context *ctx = get_fimc_context(dev);
@@ -1905,14 +1867,6 @@ static int __devexit fimc_remove(struct platform_device *pdev)
1905 pm_runtime_disable(dev); 1867 pm_runtime_disable(dev);
1906 1868
1907 free_irq(ctx->irq, ctx); 1869 free_irq(ctx->irq, ctx);
1908 devm_iounmap(dev, ctx->regs);
1909
1910 clk_put(ctx->sclk_fimc_clk);
1911 clk_put(ctx->fimc_clk);
1912 clk_put(ctx->wb_clk);
1913 clk_put(ctx->wb_b_clk);
1914
1915 devm_kfree(dev, ctx);
1916 1870
1917 return 0; 1871 return 0;
1918} 1872}
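
remove() shrinks for the same reason: memory from devm_kzalloc(), the devm_request_and_ioremap() register mapping and the devm clocks are all released by the driver core after remove() returns, so the hand-rolled devm_iounmap()/clk_put()/devm_kfree() calls were redundant at best. Only resources that are not device-managed still need explicit teardown, which is why free_irq() stays. A sketch of the resulting shape (hypothetical context struct):

```c
struct demo_ctx { int irq; };			/* hypothetical */

static int demo_remove(struct platform_device *pdev)
{
	struct demo_ctx *ctx = platform_get_drvdata(pdev);

	pm_runtime_disable(&pdev->dev);
	free_irq(ctx->irq, ctx);	/* request_threaded_irq() is not devm */

	/*
	 * ctx (devm_kzalloc), the register mapping and the clocks are
	 * released by devres once this function has returned.
	 */
	return 0;
}
```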
@@ -1990,7 +1944,7 @@ static const struct dev_pm_ops fimc_pm_ops = {
1990 1944
1991struct platform_driver fimc_driver = { 1945struct platform_driver fimc_driver = {
1992 .probe = fimc_probe, 1946 .probe = fimc_probe,
1993 .remove = __devexit_p(fimc_remove), 1947 .remove = fimc_remove,
1994 .id_table = fimc_driver_ids, 1948 .id_table = fimc_driver_ids,
1995 .driver = { 1949 .driver = {
1996 .name = "exynos-drm-fimc", 1950 .name = "exynos-drm-fimc",
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.h b/drivers/gpu/drm/exynos/exynos_drm_fimc.h
index dc970fa0d888..127a424c5fdf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.h
@@ -6,24 +6,10 @@
6 * Jinyoung Jeon <jy0.jeon@samsung.com> 6 * Jinyoung Jeon <jy0.jeon@samsung.com>
7 * Sangmin Lee <lsmin.lee@samsung.com> 7 * Sangmin Lee <lsmin.lee@samsung.com>
8 * 8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a 9 * This program is free software; you can redistribute it and/or modify it
10 * copy of this software and associated documentation files (the "Software"), 10 * under the terms of the GNU General Public License as published by the
11 * to deal in the Software without restriction, including without limitation 11 * Free Software Foundation; either version 2 of the License, or (at your
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 12 * option) any later version.
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26 * OTHER DEALINGS IN THE SOFTWARE.
27 */ 13 */
28 14
29#ifndef _EXYNOS_DRM_FIMC_H_ 15#ifndef _EXYNOS_DRM_FIMC_H_
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index bf0d9baca2bc..9537761931ee 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -663,34 +663,6 @@ static struct exynos_drm_manager fimd_manager = {
663 .display_ops = &fimd_display_ops, 663 .display_ops = &fimd_display_ops,
664}; 664};
665 665
666static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
667{
668 struct exynos_drm_private *dev_priv = drm_dev->dev_private;
669 struct drm_pending_vblank_event *e, *t;
670 struct timeval now;
671 unsigned long flags;
672
673 spin_lock_irqsave(&drm_dev->event_lock, flags);
674
675 list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
676 base.link) {
677 /* if event's pipe isn't same as crtc then ignore it. */
678 if (crtc != e->pipe)
679 continue;
680
681 do_gettimeofday(&now);
682 e->event.sequence = 0;
683 e->event.tv_sec = now.tv_sec;
684 e->event.tv_usec = now.tv_usec;
685
686 list_move_tail(&e->base.link, &e->base.file_priv->event_list);
687 wake_up_interruptible(&e->base.file_priv->event_wait);
688 drm_vblank_put(drm_dev, crtc);
689 }
690
691 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
692}
693
694static irqreturn_t fimd_irq_handler(int irq, void *dev_id) 666static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
695{ 667{
696 struct fimd_context *ctx = (struct fimd_context *)dev_id; 668 struct fimd_context *ctx = (struct fimd_context *)dev_id;
@@ -710,7 +682,7 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
710 goto out; 682 goto out;
711 683
712 drm_handle_vblank(drm_dev, manager->pipe); 684 drm_handle_vblank(drm_dev, manager->pipe);
713 fimd_finish_pageflip(drm_dev, manager->pipe); 685 exynos_drm_crtc_finish_pageflip(drm_dev, manager->pipe);
714 686
715 /* set wait vsync event to zero and wake up queue. */ 687 /* set wait vsync event to zero and wake up queue. */
716 if (atomic_read(&ctx->wait_vsync_event)) { 688 if (atomic_read(&ctx->wait_vsync_event)) {
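
The deleted fimd_finish_pageflip() is not lost functionality: the diffstat shows exynos_drm_crtc.c growing by roughly the same amount, and the IRQ handler now calls a shared exynos_drm_crtc_finish_pageflip() so FIMD, the mixer and VIDI complete page-flip events in one place. In outline, the completion work it has to do, modelled on the body removed above (the common implementation may differ in detail):

```c
/* Sketch of page-flip completion, based on the deleted fimd helper. */
static void finish_pageflip_sketch(struct drm_device *drm_dev, int crtc)
{
	struct exynos_drm_private *dev_priv = drm_dev->dev_private;
	struct drm_pending_vblank_event *e, *t;
	struct timeval now;
	unsigned long flags;

	spin_lock_irqsave(&drm_dev->event_lock, flags);

	list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
				 base.link) {
		if (crtc != e->pipe)	/* event queued for another crtc */
			continue;

		/* stamp the event and hand it back to the waiting client */
		do_gettimeofday(&now);
		e->event.sequence = 0;
		e->event.tv_sec = now.tv_sec;
		e->event.tv_usec = now.tv_usec;
		list_move_tail(&e->base.link, &e->base.file_priv->event_list);
		wake_up_interruptible(&e->base.file_priv->event_wait);

		/* drop the vblank reference taken when the flip was queued */
		drm_vblank_put(drm_dev, crtc);
	}

	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
}
```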
@@ -898,7 +870,7 @@ static int fimd_activate(struct fimd_context *ctx, bool enable)
898 return 0; 870 return 0;
899} 871}
900 872
901static int __devinit fimd_probe(struct platform_device *pdev) 873static int fimd_probe(struct platform_device *pdev)
902{ 874{
903 struct device *dev = &pdev->dev; 875 struct device *dev = &pdev->dev;
904 struct fimd_context *ctx; 876 struct fimd_context *ctx;
@@ -997,7 +969,7 @@ static int __devinit fimd_probe(struct platform_device *pdev)
997 return 0; 969 return 0;
998} 970}
999 971
1000static int __devexit fimd_remove(struct platform_device *pdev) 972static int fimd_remove(struct platform_device *pdev)
1001{ 973{
1002 struct device *dev = &pdev->dev; 974 struct device *dev = &pdev->dev;
1003 struct fimd_context *ctx = platform_get_drvdata(pdev); 975 struct fimd_context *ctx = platform_get_drvdata(pdev);
@@ -1046,7 +1018,7 @@ static int fimd_resume(struct device *dev)
1046 * of pm runtime would still be 1 so in this case, fimd driver 1018 * of pm runtime would still be 1 so in this case, fimd driver
1047 * should be on directly not drawing on pm runtime interface. 1019 * should be on directly not drawing on pm runtime interface.
1048 */ 1020 */
1049 if (pm_runtime_suspended(dev)) {
 1021 if (!pm_runtime_suspended(dev)) {
1050 int ret; 1022 int ret;
1051 1023
1052 ret = fimd_activate(ctx, true); 1024 ret = fimd_activate(ctx, true);
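
The one-character fix above inverts the condition: per the comment kept in context, if the system went to sleep while FIMD was runtime-active, the runtime-PM usage count is still positive on wake-up, so the runtime-resume hook will not be invoked and the driver has to power the block up itself; the old code did that in exactly the opposite case. A compressed sketch of the intended flow (assuming a get_fimd_context() accessor like the fimc/gsc ones elsewhere in this diff):

```c
/* Sketch of the corrected system-resume logic. */
static int fimd_resume_sketch(struct device *dev)
{
	struct fimd_context *ctx = get_fimd_context(dev);

	/*
	 * Device was runtime-active when the system suspended: runtime PM
	 * thinks it is still up, so bring the hardware back on directly.
	 */
	if (!pm_runtime_suspended(dev)) {
		int ret = fimd_activate(ctx, true);
		if (ret < 0)
			return ret;
	}

	/* Otherwise runtime PM powers the block up on first use. */
	return 0;
}
```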
@@ -1105,7 +1077,7 @@ static const struct dev_pm_ops fimd_pm_ops = {
1105 1077
1106struct platform_driver fimd_driver = { 1078struct platform_driver fimd_driver = {
1107 .probe = fimd_probe, 1079 .probe = fimd_probe,
1108 .remove = __devexit_p(fimd_remove), 1080 .remove = fimd_remove,
1109 .id_table = fimd_driver_ids, 1081 .id_table = fimd_driver_ids,
1110 .driver = { 1082 .driver = {
1111 .name = "exynos4-fb", 1083 .name = "exynos4-fb",
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 6ffa0763c078..36c3905536a6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1090,7 +1090,7 @@ static void g2d_close(struct drm_device *drm_dev, struct device *dev,
1090 kfree(file_priv->g2d_priv); 1090 kfree(file_priv->g2d_priv);
1091} 1091}
1092 1092
1093static int __devinit g2d_probe(struct platform_device *pdev) 1093static int g2d_probe(struct platform_device *pdev)
1094{ 1094{
1095 struct device *dev = &pdev->dev; 1095 struct device *dev = &pdev->dev;
1096 struct resource *res; 1096 struct resource *res;
@@ -1188,7 +1188,7 @@ err_destroy_slab:
1188 return ret; 1188 return ret;
1189} 1189}
1190 1190
1191static int __devexit g2d_remove(struct platform_device *pdev) 1191static int g2d_remove(struct platform_device *pdev)
1192{ 1192{
1193 struct g2d_data *g2d = platform_get_drvdata(pdev); 1193 struct g2d_data *g2d = platform_get_drvdata(pdev);
1194 1194
@@ -1242,7 +1242,7 @@ static SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume);
1242 1242
1243struct platform_driver g2d_driver = { 1243struct platform_driver g2d_driver = {
1244 .probe = g2d_probe, 1244 .probe = g2d_probe,
1245 .remove = __devexit_p(g2d_remove), 1245 .remove = g2d_remove,
1246 .driver = { 1246 .driver = {
1247 .name = "s5p-g2d", 1247 .name = "s5p-g2d",
1248 .owner = THIS_MODULE, 1248 .owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index d48183e7e056..473180776528 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -3,24 +3,10 @@
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd. 3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * Author: Inki Dae <inki.dae@samsung.com> 4 * Author: Inki Dae <inki.dae@samsung.com>
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * This program is free software; you can redistribute it and/or modify it
7 * copy of this software and associated documentation files (the "Software"), 7 * under the terms of the GNU General Public License as published by the
8 * to deal in the Software without restriction, including without limitation 8 * Free Software Foundation; either version 2 of the License, or (at your
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * option) any later version.
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 */ 10 */
25 11
26#include <drm/drmP.h> 12#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index f11f2afd5bfc..35ebac47dc2b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -3,24 +3,10 @@
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd. 3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * Authoer: Inki Dae <inki.dae@samsung.com> 4 * Authoer: Inki Dae <inki.dae@samsung.com>
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * This program is free software; you can redistribute it and/or modify it
7 * copy of this software and associated documentation files (the "Software"), 7 * under the terms of the GNU General Public License as published by the
8 * to deal in the Software without restriction, including without limitation 8 * Free Software Foundation; either version 2 of the License, or (at your
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * option) any later version.
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 */ 10 */
25 11
26#ifndef _EXYNOS_DRM_GEM_H_ 12#ifndef _EXYNOS_DRM_GEM_H_
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 5639353d47b9..8140753ec9c8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -25,7 +25,7 @@
25#include "exynos_drm_gsc.h" 25#include "exynos_drm_gsc.h"
26 26
27/* 27/*
28 * GSC is stand for General SCaler and 28 * GSC stands for General SCaler and
29 * supports image scaler/rotator and input/output DMA operations. 29 * supports image scaler/rotator and input/output DMA operations.
30 * input DMA reads image data from the memory. 30 * input DMA reads image data from the memory.
31 * output DMA writes image data to memory. 31 * output DMA writes image data to memory.
@@ -711,7 +711,7 @@ static int gsc_src_set_addr(struct device *dev,
711{ 711{
712 struct gsc_context *ctx = get_gsc_context(dev); 712 struct gsc_context *ctx = get_gsc_context(dev);
713 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 713 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
714 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd; 714 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
715 struct drm_exynos_ipp_property *property; 715 struct drm_exynos_ipp_property *property;
716 716
717 if (!c_node) { 717 if (!c_node) {
@@ -720,10 +720,6 @@ static int gsc_src_set_addr(struct device *dev,
720 } 720 }
721 721
722 property = &c_node->property; 722 property = &c_node->property;
723 if (!property) {
724 DRM_ERROR("failed to get property.\n");
725 return -EFAULT;
726 }
727 723
728 DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__, 724 DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
729 property->prop_id, buf_id, buf_type); 725 property->prop_id, buf_id, buf_type);
@@ -1171,7 +1167,7 @@ static int gsc_dst_set_addr(struct device *dev,
1171{ 1167{
1172 struct gsc_context *ctx = get_gsc_context(dev); 1168 struct gsc_context *ctx = get_gsc_context(dev);
1173 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 1169 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1174 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd; 1170 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
1175 struct drm_exynos_ipp_property *property; 1171 struct drm_exynos_ipp_property *property;
1176 1172
1177 if (!c_node) { 1173 if (!c_node) {
@@ -1180,10 +1176,6 @@ static int gsc_dst_set_addr(struct device *dev,
1180 } 1176 }
1181 1177
1182 property = &c_node->property; 1178 property = &c_node->property;
1183 if (!property) {
1184 DRM_ERROR("failed to get property.\n");
1185 return -EFAULT;
1186 }
1187 1179
1188 DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__, 1180 DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
1189 property->prop_id, buf_id, buf_type); 1181 property->prop_id, buf_id, buf_type);
@@ -1312,7 +1304,7 @@ static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
1312{ 1304{
1313 struct gsc_context *ctx = dev_id; 1305 struct gsc_context *ctx = dev_id;
1314 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 1306 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1315 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd; 1307 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
1316 struct drm_exynos_ipp_event_work *event_work = 1308 struct drm_exynos_ipp_event_work *event_work =
1317 c_node->event_work; 1309 c_node->event_work;
1318 u32 status; 1310 u32 status;
@@ -1399,7 +1391,7 @@ static inline bool gsc_check_drm_flip(enum drm_exynos_flip flip)
1399 case EXYNOS_DRM_FLIP_NONE: 1391 case EXYNOS_DRM_FLIP_NONE:
1400 case EXYNOS_DRM_FLIP_VERTICAL: 1392 case EXYNOS_DRM_FLIP_VERTICAL:
1401 case EXYNOS_DRM_FLIP_HORIZONTAL: 1393 case EXYNOS_DRM_FLIP_HORIZONTAL:
1402 case EXYNOS_DRM_FLIP_VERTICAL | EXYNOS_DRM_FLIP_HORIZONTAL:
 1394 case EXYNOS_DRM_FLIP_BOTH:
1403 return true; 1395 return true;
1404 default: 1396 default:
1405 DRM_DEBUG_KMS("%s:invalid flip\n", __func__); 1397 DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
@@ -1549,7 +1541,7 @@ static int gsc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1549{ 1541{
1550 struct gsc_context *ctx = get_gsc_context(dev); 1542 struct gsc_context *ctx = get_gsc_context(dev);
1551 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 1543 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1552 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd; 1544 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
1553 struct drm_exynos_ipp_property *property; 1545 struct drm_exynos_ipp_property *property;
1554 struct drm_exynos_ipp_config *config; 1546 struct drm_exynos_ipp_config *config;
1555 struct drm_exynos_pos img_pos[EXYNOS_DRM_OPS_MAX]; 1547 struct drm_exynos_pos img_pos[EXYNOS_DRM_OPS_MAX];
@@ -1565,10 +1557,6 @@ static int gsc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1565 } 1557 }
1566 1558
1567 property = &c_node->property; 1559 property = &c_node->property;
1568 if (!property) {
1569 DRM_ERROR("failed to get property.\n");
1570 return -EINVAL;
1571 }
1572 1560
1573 gsc_handle_irq(ctx, true, false, true); 1561 gsc_handle_irq(ctx, true, false, true);
1574 1562
@@ -1604,7 +1592,7 @@ static int gsc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1604 exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb); 1592 exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
1605 1593
1606 /* src local path */ 1594 /* src local path */
1607 cfg = readl(GSC_IN_CON); 1595 cfg = gsc_read(GSC_IN_CON);
1608 cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK); 1596 cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
1609 cfg |= (GSC_IN_PATH_LOCAL | GSC_IN_LOCAL_FIMD_WB); 1597 cfg |= (GSC_IN_PATH_LOCAL | GSC_IN_LOCAL_FIMD_WB);
1610 gsc_write(cfg, GSC_IN_CON); 1598 gsc_write(cfg, GSC_IN_CON);
@@ -1683,7 +1671,7 @@ static void gsc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1683 gsc_write(cfg, GSC_ENABLE); 1671 gsc_write(cfg, GSC_ENABLE);
1684} 1672}
1685 1673
1686static int __devinit gsc_probe(struct platform_device *pdev) 1674static int gsc_probe(struct platform_device *pdev)
1687{ 1675{
1688 struct device *dev = &pdev->dev; 1676 struct device *dev = &pdev->dev;
1689 struct gsc_context *ctx; 1677 struct gsc_context *ctx;
@@ -1696,34 +1684,25 @@ static int __devinit gsc_probe(struct platform_device *pdev)
1696 return -ENOMEM; 1684 return -ENOMEM;
1697 1685
1698 /* clock control */ 1686 /* clock control */
1699 ctx->gsc_clk = clk_get(dev, "gscl"); 1687 ctx->gsc_clk = devm_clk_get(dev, "gscl");
1700 if (IS_ERR(ctx->gsc_clk)) { 1688 if (IS_ERR(ctx->gsc_clk)) {
1701 dev_err(dev, "failed to get gsc clock.\n"); 1689 dev_err(dev, "failed to get gsc clock.\n");
1702 ret = PTR_ERR(ctx->gsc_clk); 1690 return PTR_ERR(ctx->gsc_clk);
1703 goto err_ctx;
1704 } 1691 }
1705 1692
1706 /* resource memory */ 1693 /* resource memory */
1707 ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1694 ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1708 if (!ctx->regs_res) {
1709 dev_err(dev, "failed to find registers.\n");
1710 ret = -ENOENT;
1711 goto err_clk;
1712 }
1713
1714 ctx->regs = devm_request_and_ioremap(dev, ctx->regs_res); 1695 ctx->regs = devm_request_and_ioremap(dev, ctx->regs_res);
1715 if (!ctx->regs) { 1696 if (!ctx->regs) {
1716 dev_err(dev, "failed to map registers.\n"); 1697 dev_err(dev, "failed to map registers.\n");
1717 ret = -ENXIO; 1698 return -ENXIO;
1718 goto err_clk;
1719 } 1699 }
1720 1700
1721 /* resource irq */ 1701 /* resource irq */
1722 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 1702 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1723 if (!res) { 1703 if (!res) {
1724 dev_err(dev, "failed to request irq resource.\n"); 1704 dev_err(dev, "failed to request irq resource.\n");
1725 ret = -ENOENT; 1705 return -ENOENT;
1726 goto err_get_regs;
1727 } 1706 }
1728 1707
1729 ctx->irq = res->start; 1708 ctx->irq = res->start;
@@ -1731,7 +1710,7 @@ static int __devinit gsc_probe(struct platform_device *pdev)
1731 IRQF_ONESHOT, "drm_gsc", ctx); 1710 IRQF_ONESHOT, "drm_gsc", ctx);
1732 if (ret < 0) { 1711 if (ret < 0) {
1733 dev_err(dev, "failed to request irq.\n"); 1712 dev_err(dev, "failed to request irq.\n");
1734 goto err_get_regs; 1713 return ret;
1735 } 1714 }
1736 1715
1737 /* context initailization */ 1716 /* context initailization */
@@ -1775,16 +1754,10 @@ err_ippdrv_register:
1775 pm_runtime_disable(dev); 1754 pm_runtime_disable(dev);
1776err_get_irq: 1755err_get_irq:
1777 free_irq(ctx->irq, ctx); 1756 free_irq(ctx->irq, ctx);
1778err_get_regs:
1779 devm_iounmap(dev, ctx->regs);
1780err_clk:
1781 clk_put(ctx->gsc_clk);
1782err_ctx:
1783 devm_kfree(dev, ctx);
1784 return ret; 1757 return ret;
1785} 1758}
1786 1759
1787static int __devexit gsc_remove(struct platform_device *pdev) 1760static int gsc_remove(struct platform_device *pdev)
1788{ 1761{
1789 struct device *dev = &pdev->dev; 1762 struct device *dev = &pdev->dev;
1790 struct gsc_context *ctx = get_gsc_context(dev); 1763 struct gsc_context *ctx = get_gsc_context(dev);
@@ -1798,11 +1771,6 @@ static int __devexit gsc_remove(struct platform_device *pdev)
1798 pm_runtime_disable(dev); 1771 pm_runtime_disable(dev);
1799 1772
1800 free_irq(ctx->irq, ctx); 1773 free_irq(ctx->irq, ctx);
1801 devm_iounmap(dev, ctx->regs);
1802
1803 clk_put(ctx->gsc_clk);
1804
1805 devm_kfree(dev, ctx);
1806 1774
1807 return 0; 1775 return 0;
1808} 1776}
@@ -1860,7 +1828,7 @@ static const struct dev_pm_ops gsc_pm_ops = {
1860 1828
1861struct platform_driver gsc_driver = { 1829struct platform_driver gsc_driver = {
1862 .probe = gsc_probe, 1830 .probe = gsc_probe,
1863 .remove = __devexit_p(gsc_remove), 1831 .remove = gsc_remove,
1864 .driver = { 1832 .driver = {
1865 .name = "exynos-drm-gsc", 1833 .name = "exynos-drm-gsc",
1866 .owner = THIS_MODULE, 1834 .owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.h b/drivers/gpu/drm/exynos/exynos_drm_gsc.h
index b3c3bc618c0f..29ec1c5efcf2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.h
@@ -6,24 +6,10 @@
6 * Jinyoung Jeon <jy0.jeon@samsung.com> 6 * Jinyoung Jeon <jy0.jeon@samsung.com>
7 * Sangmin Lee <lsmin.lee@samsung.com> 7 * Sangmin Lee <lsmin.lee@samsung.com>
8 * 8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a 9 * This program is free software; you can redistribute it and/or modify it
10 * copy of this software and associated documentation files (the "Software"), 10 * under the terms of the GNU General Public License as published by the
11 * to deal in the Software without restriction, including without limitation 11 * Free Software Foundation; either version 2 of the License, or (at your
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 12 * option) any later version.
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26 * OTHER DEALINGS IN THE SOFTWARE.
27 */ 13 */
28 14
29#ifndef _EXYNOS_DRM_GSC_H_ 15#ifndef _EXYNOS_DRM_GSC_H_
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index 55793c46e3c2..850e9950b7da 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -385,7 +385,7 @@ static void hdmi_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
385 mixer_ops->iommu_on(ctx->mixer_ctx->ctx, false); 385 mixer_ops->iommu_on(ctx->mixer_ctx->ctx, false);
386} 386}
387 387
388static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev) 388static int exynos_drm_hdmi_probe(struct platform_device *pdev)
389{ 389{
390 struct device *dev = &pdev->dev; 390 struct device *dev = &pdev->dev;
391 struct exynos_drm_subdrv *subdrv; 391 struct exynos_drm_subdrv *subdrv;
@@ -413,7 +413,7 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
413 return 0; 413 return 0;
414} 414}
415 415
416static int __devexit exynos_drm_hdmi_remove(struct platform_device *pdev) 416static int exynos_drm_hdmi_remove(struct platform_device *pdev)
417{ 417{
418 struct drm_hdmi_context *ctx = platform_get_drvdata(pdev); 418 struct drm_hdmi_context *ctx = platform_get_drvdata(pdev);
419 419
@@ -426,7 +426,7 @@ static int __devexit exynos_drm_hdmi_remove(struct platform_device *pdev)
426 426
427struct platform_driver exynos_drm_common_hdmi_driver = { 427struct platform_driver exynos_drm_common_hdmi_driver = {
428 .probe = exynos_drm_hdmi_probe, 428 .probe = exynos_drm_hdmi_probe,
429 .remove = __devexit_p(exynos_drm_hdmi_remove), 429 .remove = exynos_drm_hdmi_remove,
430 .driver = { 430 .driver = {
431 .name = "exynos-drm-hdmi", 431 .name = "exynos-drm-hdmi",
432 .owner = THIS_MODULE, 432 .owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
index fcc3093ec8fe..784a7e9a766c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -3,24 +3,10 @@
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd. 3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * Authoer: Inki Dae <inki.dae@samsung.com> 4 * Authoer: Inki Dae <inki.dae@samsung.com>
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * This program is free software; you can redistribute it and/or modify it
7 * copy of this software and associated documentation files (the "Software"), 7 * under the terms of the GNU General Public License as published by the
8 * to deal in the Software without restriction, including without limitation 8 * Free Software Foundation; either version 2 of the License, or (at your
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * option) any later version.
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 */ 10 */
25 11
26#ifndef _EXYNOS_DRM_HDMI_H_ 12#ifndef _EXYNOS_DRM_HDMI_H_
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
index 2482b7f96341..3799d5c2b5df 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -3,24 +3,10 @@
3 * Copyright (c) 2012 Samsung Electronics Co., Ltd. 3 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
4 * Author: Inki Dae <inki.dae@samsung.com> 4 * Author: Inki Dae <inki.dae@samsung.com>
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * This program is free software; you can redistribute it and/or modify it
7 * copy of this software and associated documentation files (the "Software"), 7 * under the terms of the GNU General Public License as published by the
8 * to deal in the Software without restriction, including without limitation 8 * Free Software Foundation; either version 2 of the License, or (at your
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * option) any later version.
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 */ 10 */
25 11
26#include <drmP.h> 12#include <drmP.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
index 18a0ca190b98..53b7deea8ab7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -3,24 +3,10 @@
3 * Copyright (c) 2012 Samsung Electronics Co., Ltd. 3 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
4 * Authoer: Inki Dae <inki.dae@samsung.com> 4 * Authoer: Inki Dae <inki.dae@samsung.com>
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * This program is free software; you can redistribute it and/or modify it
7 * copy of this software and associated documentation files (the "Software"), 7 * under the terms of the GNU General Public License as published by the
8 * to deal in the Software without restriction, including without limitation 8 * Free Software Foundation; either version 2 of the License, or (at your
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * option) any later version.
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 */ 10 */
25 11
26#ifndef _EXYNOS_DRM_IOMMU_H_ 12#ifndef _EXYNOS_DRM_IOMMU_H_
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 49eebe948ed2..0bda96454a02 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -27,7 +27,7 @@
27#include "exynos_drm_iommu.h" 27#include "exynos_drm_iommu.h"
28 28
29/* 29/*
30 * IPP is stand for Image Post Processing and 30 * IPP stands for Image Post Processing and
31 * supports image scaler/rotator and input/output DMA operations. 31 * supports image scaler/rotator and input/output DMA operations.
32 * using FIMC, GSC, Rotator, so on. 32 * using FIMC, GSC, Rotator, so on.
33 * IPP is integration device driver of same attribute h/w 33 * IPP is integration device driver of same attribute h/w
@@ -1292,7 +1292,7 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
1292 DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id); 1292 DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
1293 1293
1294 /* store command info in ippdrv */ 1294 /* store command info in ippdrv */
1295 ippdrv->cmd = c_node; 1295 ippdrv->c_node = c_node;
1296 1296
1297 if (!ipp_check_mem_list(c_node)) { 1297 if (!ipp_check_mem_list(c_node)) {
1298 DRM_DEBUG_KMS("%s:empty memory.\n", __func__); 1298 DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
@@ -1303,7 +1303,7 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
1303 ret = ipp_set_property(ippdrv, property); 1303 ret = ipp_set_property(ippdrv, property);
1304 if (ret) { 1304 if (ret) {
1305 DRM_ERROR("failed to set property.\n"); 1305 DRM_ERROR("failed to set property.\n");
1306 ippdrv->cmd = NULL; 1306 ippdrv->c_node = NULL;
1307 return ret; 1307 return ret;
1308 } 1308 }
1309 1309
@@ -1487,11 +1487,6 @@ void ipp_sched_cmd(struct work_struct *work)
1487 mutex_lock(&c_node->cmd_lock); 1487 mutex_lock(&c_node->cmd_lock);
1488 1488
1489 property = &c_node->property; 1489 property = &c_node->property;
1490 if (!property) {
1491 DRM_ERROR("failed to get property:prop_id[%d]\n",
1492 c_node->property.prop_id);
1493 goto err_unlock;
1494 }
1495 1490
1496 switch (cmd_work->ctrl) { 1491 switch (cmd_work->ctrl) {
1497 case IPP_CTRL_PLAY: 1492 case IPP_CTRL_PLAY:
@@ -1704,7 +1699,7 @@ void ipp_sched_event(struct work_struct *work)
1704 return; 1699 return;
1705 } 1700 }
1706 1701
1707 c_node = ippdrv->cmd; 1702 c_node = ippdrv->c_node;
1708 if (!c_node) { 1703 if (!c_node) {
1709 DRM_ERROR("failed to get command node.\n"); 1704 DRM_ERROR("failed to get command node.\n");
1710 return; 1705 return;
@@ -1888,14 +1883,14 @@ err_clear:
1888 return; 1883 return;
1889} 1884}
1890 1885
1891static int __devinit ipp_probe(struct platform_device *pdev) 1886static int ipp_probe(struct platform_device *pdev)
1892{ 1887{
1893 struct device *dev = &pdev->dev; 1888 struct device *dev = &pdev->dev;
1894 struct ipp_context *ctx; 1889 struct ipp_context *ctx;
1895 struct exynos_drm_subdrv *subdrv; 1890 struct exynos_drm_subdrv *subdrv;
1896 int ret; 1891 int ret;
1897 1892
1898 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 1893 ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
1899 if (!ctx) 1894 if (!ctx)
1900 return -ENOMEM; 1895 return -ENOMEM;
1901 1896
@@ -1916,8 +1911,7 @@ static int __devinit ipp_probe(struct platform_device *pdev)
1916 ctx->event_workq = create_singlethread_workqueue("ipp_event"); 1911 ctx->event_workq = create_singlethread_workqueue("ipp_event");
1917 if (!ctx->event_workq) { 1912 if (!ctx->event_workq) {
1918 dev_err(dev, "failed to create event workqueue\n"); 1913 dev_err(dev, "failed to create event workqueue\n");
1919 ret = -EINVAL; 1914 return -EINVAL;
1920 goto err_clear;
1921 } 1915 }
1922 1916
1923 /* 1917 /*
@@ -1958,12 +1952,10 @@ err_cmd_workq:
1958 destroy_workqueue(ctx->cmd_workq); 1952 destroy_workqueue(ctx->cmd_workq);
1959err_event_workq: 1953err_event_workq:
1960 destroy_workqueue(ctx->event_workq); 1954 destroy_workqueue(ctx->event_workq);
1961err_clear:
1962 kfree(ctx);
1963 return ret; 1955 return ret;
1964} 1956}
1965 1957
1966static int __devexit ipp_remove(struct platform_device *pdev) 1958static int ipp_remove(struct platform_device *pdev)
1967{ 1959{
1968 struct ipp_context *ctx = platform_get_drvdata(pdev); 1960 struct ipp_context *ctx = platform_get_drvdata(pdev);
1969 1961
@@ -1985,8 +1977,6 @@ static int __devexit ipp_remove(struct platform_device *pdev)
1985 destroy_workqueue(ctx->cmd_workq); 1977 destroy_workqueue(ctx->cmd_workq);
1986 destroy_workqueue(ctx->event_workq); 1978 destroy_workqueue(ctx->event_workq);
1987 1979
1988 kfree(ctx);
1989
1990 return 0; 1980 return 0;
1991} 1981}
1992 1982
@@ -2050,7 +2040,7 @@ static const struct dev_pm_ops ipp_pm_ops = {
2050 2040
2051struct platform_driver ipp_driver = { 2041struct platform_driver ipp_driver = {
2052 .probe = ipp_probe, 2042 .probe = ipp_probe,
2053 .remove = __devexit_p(ipp_remove), 2043 .remove = ipp_remove,
2054 .driver = { 2044 .driver = {
2055 .name = "exynos-drm-ipp", 2045 .name = "exynos-drm-ipp",
2056 .owner = THIS_MODULE, 2046 .owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
index 28ffac95386c..4cadbea7dbde 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
@@ -6,24 +6,10 @@
6 * Jinyoung Jeon <jy0.jeon@samsung.com> 6 * Jinyoung Jeon <jy0.jeon@samsung.com>
7 * Sangmin Lee <lsmin.lee@samsung.com> 7 * Sangmin Lee <lsmin.lee@samsung.com>
8 * 8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a 9 * This program is free software; you can redistribute it and/or modify it
10 * copy of this software and associated documentation files (the "Software"), 10 * under the terms of the GNU General Public License as published by the
11 * to deal in the Software without restriction, including without limitation 11 * Free Software Foundation; either version 2 of the License, or (at your
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 12 * option) any later version.
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26 * OTHER DEALINGS IN THE SOFTWARE.
27 */ 13 */
28 14
29#ifndef _EXYNOS_DRM_IPP_H_ 15#ifndef _EXYNOS_DRM_IPP_H_
@@ -160,7 +146,7 @@ struct exynos_drm_ipp_ops {
160 * @dedicated: dedicated ipp device. 146 * @dedicated: dedicated ipp device.
161 * @ops: source, destination operations. 147 * @ops: source, destination operations.
162 * @event_workq: event work queue. 148 * @event_workq: event work queue.
163 * @cmd: current command information. 149 * @c_node: current command information.
164 * @cmd_list: list head for command information. 150 * @cmd_list: list head for command information.
165 * @prop_list: property informations of current ipp driver. 151 * @prop_list: property informations of current ipp driver.
166 * @check_property: check property about format, size, buffer. 152 * @check_property: check property about format, size, buffer.
@@ -178,7 +164,7 @@ struct exynos_drm_ippdrv {
178 bool dedicated; 164 bool dedicated;
179 struct exynos_drm_ipp_ops *ops[EXYNOS_DRM_OPS_MAX]; 165 struct exynos_drm_ipp_ops *ops[EXYNOS_DRM_OPS_MAX];
180 struct workqueue_struct *event_workq; 166 struct workqueue_struct *event_workq;
181 struct drm_exynos_ipp_cmd_node *cmd; 167 struct drm_exynos_ipp_cmd_node *c_node;
182 struct list_head cmd_list; 168 struct list_head cmd_list;
183 struct drm_exynos_ipp_prop_list *prop_list; 169 struct drm_exynos_ipp_prop_list *prop_list;
184 170
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 1c2366083c70..e9e83ef688f0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -139,7 +139,7 @@ static irqreturn_t rotator_irq_handler(int irq, void *arg)
139{ 139{
140 struct rot_context *rot = arg; 140 struct rot_context *rot = arg;
141 struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv; 141 struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
142 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd; 142 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
143 struct drm_exynos_ipp_event_work *event_work = c_node->event_work; 143 struct drm_exynos_ipp_event_work *event_work = c_node->event_work;
144 enum rot_irq_status irq_status; 144 enum rot_irq_status irq_status;
145 u32 val; 145 u32 val;
@@ -513,6 +513,7 @@ static inline bool rotator_check_drm_flip(enum drm_exynos_flip flip)
513 case EXYNOS_DRM_FLIP_NONE: 513 case EXYNOS_DRM_FLIP_NONE:
514 case EXYNOS_DRM_FLIP_VERTICAL: 514 case EXYNOS_DRM_FLIP_VERTICAL:
515 case EXYNOS_DRM_FLIP_HORIZONTAL: 515 case EXYNOS_DRM_FLIP_HORIZONTAL:
516 case EXYNOS_DRM_FLIP_BOTH:
516 return true; 517 return true;
517 default: 518 default:
518 DRM_DEBUG_KMS("%s:invalid flip\n", __func__); 519 DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
@@ -638,7 +639,7 @@ static int rotator_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
 	return 0;
 }
 
-static int __devinit rotator_probe(struct platform_device *pdev)
+static int rotator_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct rot_context *rot;
@@ -655,34 +656,26 @@ static int __devinit rotator_probe(struct platform_device *pdev)
 		platform_get_device_id(pdev)->driver_data;
 
 	rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!rot->regs_res) {
-		dev_err(dev, "failed to find registers\n");
-		ret = -ENOENT;
-		goto err_get_resource;
-	}
-
 	rot->regs = devm_request_and_ioremap(dev, rot->regs_res);
 	if (!rot->regs) {
 		dev_err(dev, "failed to map register\n");
-		ret = -ENXIO;
-		goto err_get_resource;
+		return -ENXIO;
 	}
 
 	rot->irq = platform_get_irq(pdev, 0);
 	if (rot->irq < 0) {
 		dev_err(dev, "failed to get irq\n");
-		ret = rot->irq;
-		goto err_get_irq;
+		return rot->irq;
 	}
 
 	ret = request_threaded_irq(rot->irq, NULL, rotator_irq_handler,
 			IRQF_ONESHOT, "drm_rotator", rot);
 	if (ret < 0) {
 		dev_err(dev, "failed to request irq\n");
-		goto err_get_irq;
+		return ret;
 	}
 
-	rot->clock = clk_get(dev, "rotator");
+	rot->clock = devm_clk_get(dev, "rotator");
 	if (IS_ERR_OR_NULL(rot->clock)) {
 		dev_err(dev, "failed to get clock\n");
 		ret = PTR_ERR(rot->clock);
@@ -720,17 +713,12 @@ static int __devinit rotator_probe(struct platform_device *pdev)
 err_ippdrv_register:
 	devm_kfree(dev, ippdrv->prop_list);
 	pm_runtime_disable(dev);
-	clk_put(rot->clock);
 err_clk_get:
 	free_irq(rot->irq, rot);
-err_get_irq:
-	devm_iounmap(dev, rot->regs);
-err_get_resource:
-	devm_kfree(dev, rot);
 	return ret;
 }
 
-static int __devexit rotator_remove(struct platform_device *pdev)
+static int rotator_remove(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct rot_context *rot = dev_get_drvdata(dev);
@@ -740,12 +728,8 @@ static int __devexit rotator_remove(struct platform_device *pdev)
 	exynos_drm_ippdrv_unregister(ippdrv);
 
 	pm_runtime_disable(dev);
-	clk_put(rot->clock);
 
 	free_irq(rot->irq, rot);
-	devm_iounmap(dev, rot->regs);
-
-	devm_kfree(dev, rot);
 
 	return 0;
 }
@@ -845,7 +829,7 @@ static const struct dev_pm_ops rotator_pm_ops = {
 
 struct platform_driver rotator_driver = {
 	.probe		= rotator_probe,
-	.remove		= __devexit_p(rotator_remove),
+	.remove		= rotator_remove,
 	.id_table	= rotator_driver_ids,
 	.driver		= {
 		.name	= "exynos-rot",
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.h b/drivers/gpu/drm/exynos/exynos_drm_rotator.h
index a2d7a14a52b6..71a0b4c0c1e8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.h
@@ -5,24 +5,10 @@
  *	YoungJun Cho <yj44.cho@samsung.com>
  *	Eunchul Kim <chulspro.kim@samsung.com>
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
  */
 
 #ifndef _EXYNOS_DRM_ROTATOR_H_
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 99bfc38dfaa2..d0ca3c4e06c6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -372,34 +372,6 @@ static struct exynos_drm_manager vidi_manager = {
 	.display_ops = &vidi_display_ops,
 };
 
-static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
-{
-	struct exynos_drm_private *dev_priv = drm_dev->dev_private;
-	struct drm_pending_vblank_event *e, *t;
-	struct timeval now;
-	unsigned long flags;
-
-	spin_lock_irqsave(&drm_dev->event_lock, flags);
-
-	list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
-			base.link) {
-		/* if event's pipe isn't same as crtc then ignore it. */
-		if (crtc != e->pipe)
-			continue;
-
-		do_gettimeofday(&now);
-		e->event.sequence = 0;
-		e->event.tv_sec = now.tv_sec;
-		e->event.tv_usec = now.tv_usec;
-
-		list_move_tail(&e->base.link, &e->base.file_priv->event_list);
-		wake_up_interruptible(&e->base.file_priv->event_wait);
-		drm_vblank_put(drm_dev, crtc);
-	}
-
-	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
-}
-
 static void vidi_fake_vblank_handler(struct work_struct *work)
 {
 	struct vidi_context *ctx = container_of(work, struct vidi_context,
@@ -424,7 +396,7 @@ static void vidi_fake_vblank_handler(struct work_struct *work)
 
 	mutex_unlock(&ctx->lock);
 
-	vidi_finish_pageflip(subdrv->drm_dev, manager->pipe);
+	exynos_drm_crtc_finish_pageflip(subdrv->drm_dev, manager->pipe);
 }
 
 static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
@@ -609,7 +581,7 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
 	return 0;
 }
 
-static int __devinit vidi_probe(struct platform_device *pdev)
+static int vidi_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct vidi_context *ctx;
@@ -645,7 +617,7 @@ static int __devinit vidi_probe(struct platform_device *pdev)
 	return 0;
 }
 
-static int __devexit vidi_remove(struct platform_device *pdev)
+static int vidi_remove(struct platform_device *pdev)
 {
 	struct vidi_context *ctx = platform_get_drvdata(pdev);
 
@@ -683,7 +655,7 @@ static const struct dev_pm_ops vidi_pm_ops = {
 
 struct platform_driver vidi_driver = {
 	.probe		= vidi_probe,
-	.remove		= __devexit_p(vidi_remove),
+	.remove		= vidi_remove,
 	.driver		= {
 		.name	= "exynos-drm-vidi",
 		.owner	= THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.h b/drivers/gpu/drm/exynos/exynos_drm_vidi.h
index a4babe4e65d7..1e5fdaa36ccc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.h
@@ -3,24 +3,10 @@
  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
  * Author: Inki Dae <inki.dae@samsung.com>
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
  */
 
 #ifndef _EXYNOS_DRM_VIDI_H_
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 2c46b6c0b82c..41ff79d8ac8e 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -2305,7 +2305,7 @@ static irqreturn_t hdmi_internal_irq_thread(int irq, void *arg)
 	return IRQ_HANDLED;
 }
 
-static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
+static int hdmi_resources_init(struct hdmi_context *hdata)
 {
 	struct device *dev = hdata->dev;
 	struct hdmi_resources *res = &hdata->res;
@@ -2451,7 +2451,7 @@ static struct of_device_id hdmi_match_types[] = {
 };
 #endif
 
-static int __devinit hdmi_probe(struct platform_device *pdev)
+static int hdmi_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct exynos_drm_hdmi_context *drm_hdmi_ctx;
@@ -2607,7 +2607,7 @@ err_ddc:
 	return ret;
 }
 
-static int __devexit hdmi_remove(struct platform_device *pdev)
+static int hdmi_remove(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct exynos_drm_hdmi_context *ctx = platform_get_drvdata(pdev);
@@ -2708,7 +2708,7 @@ static const struct dev_pm_ops hdmi_pm_ops = {
 
 struct platform_driver hdmi_driver = {
 	.probe		= hdmi_probe,
-	.remove		= __devexit_p(hdmi_remove),
+	.remove		= hdmi_remove,
 	.id_table = hdmi_driver_types,
 	.driver		= {
 		.name	= "exynos-hdmi",
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.h b/drivers/gpu/drm/exynos/exynos_hdmi.h
index 1c3b6d8f1fe7..0ddf3957de15 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.h
@@ -5,24 +5,10 @@
  *	Inki Dae <inki.dae@samsung.com>
  *	Seung-Woo Kim <sw0312.kim@samsung.com>
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
  */
 
 #ifndef _EXYNOS_HDMI_H_
diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
index 6206056f4a33..ea49d132ecf6 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmiphy.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
@@ -64,7 +64,7 @@ struct i2c_driver hdmiphy_driver = {
 	},
 	.id_table = hdmiphy_id,
 	.probe		= hdmiphy_probe,
-	.remove		= __devexit_p(hdmiphy_remove),
+	.remove		= hdmiphy_remove,
 	.command		= NULL,
 };
 EXPORT_SYMBOL(hdmiphy_driver);
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 21db89530fc7..c187ea33b748 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -35,6 +35,7 @@
 #include <drm/exynos_drm.h>
 
 #include "exynos_drm_drv.h"
+#include "exynos_drm_crtc.h"
 #include "exynos_drm_hdmi.h"
 #include "exynos_drm_iommu.h"
 
@@ -949,35 +950,6 @@ static struct exynos_mixer_ops mixer_ops = {
 	.win_disable		= mixer_win_disable,
 };
 
-/* for pageflip event */
-static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
-{
-	struct exynos_drm_private *dev_priv = drm_dev->dev_private;
-	struct drm_pending_vblank_event *e, *t;
-	struct timeval now;
-	unsigned long flags;
-
-	spin_lock_irqsave(&drm_dev->event_lock, flags);
-
-	list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
-			base.link) {
-		/* if event's pipe isn't same as crtc then ignore it. */
-		if (crtc != e->pipe)
-			continue;
-
-		do_gettimeofday(&now);
-		e->event.sequence = 0;
-		e->event.tv_sec = now.tv_sec;
-		e->event.tv_usec = now.tv_usec;
-
-		list_move_tail(&e->base.link, &e->base.file_priv->event_list);
-		wake_up_interruptible(&e->base.file_priv->event_wait);
-		drm_vblank_put(drm_dev, crtc);
-	}
-
-	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
-}
-
 static irqreturn_t mixer_irq_handler(int irq, void *arg)
 {
 	struct exynos_drm_hdmi_context *drm_hdmi_ctx = arg;
@@ -1006,7 +978,8 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
 	}
 
 	drm_handle_vblank(drm_hdmi_ctx->drm_dev, ctx->pipe);
-	mixer_finish_pageflip(drm_hdmi_ctx->drm_dev, ctx->pipe);
+	exynos_drm_crtc_finish_pageflip(drm_hdmi_ctx->drm_dev,
+			ctx->pipe);
 
 	/* set wait vsync event to zero and wake up queue. */
 	if (atomic_read(&ctx->wait_vsync_event)) {
@@ -1029,8 +1002,8 @@ out:
 	return IRQ_HANDLED;
 }
 
-static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
-				struct platform_device *pdev)
+static int mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
+				struct platform_device *pdev)
 {
 	struct mixer_context *mixer_ctx = ctx->ctx;
 	struct device *dev = &pdev->dev;
@@ -1081,8 +1054,8 @@ static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
 	return 0;
 }
 
-static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
-			struct platform_device *pdev)
+static int vp_resources_init(struct exynos_drm_hdmi_context *ctx,
+			struct platform_device *pdev)
 {
 	struct mixer_context *mixer_ctx = ctx->ctx;
 	struct device *dev = &pdev->dev;
@@ -1155,7 +1128,7 @@ static struct of_device_id mixer_match_types[] = {
 	}
 };
 
-static int __devinit mixer_probe(struct platform_device *pdev)
+static int mixer_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct exynos_drm_hdmi_context *drm_hdmi_ctx;
@@ -1316,6 +1289,6 @@ struct platform_driver mixer_driver = {
 		.of_match_table = mixer_match_types,
 	},
 	.probe = mixer_probe,
-	.remove = __devexit_p(mixer_remove),
+	.remove = mixer_remove,
 	.id_table	= mixer_driver_types,
 };
diff --git a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
index 4a07ab596174..771ff66711af 100644
--- a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
+++ b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
@@ -700,7 +700,7 @@ static struct i2c_driver tc35876x_bridge_i2c_driver = {
 	},
 	.id_table = tc35876x_bridge_id,
 	.probe = tc35876x_bridge_probe,
-	.remove = __devexit_p(tc35876x_bridge_remove),
+	.remove = tc35876x_bridge_remove,
 };
 
 /* LCD panel I2C */
@@ -741,7 +741,7 @@ static struct i2c_driver cmi_lcd_i2c_driver = {
 	},
 	.id_table = cmi_lcd_i2c_id,
 	.probe = cmi_lcd_i2c_probe,
-	.remove = __devexit_p(cmi_lcd_i2c_remove),
+	.remove = cmi_lcd_i2c_remove,
 };
 
 /* HACK to create I2C device while it's not created by platform code */
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e6a11ca85eaf..7944d301518a 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -641,6 +641,7 @@ static void i915_ring_error_state(struct seq_file *m,
 	seq_printf(m, "%s command stream:\n", ring_str(ring));
 	seq_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
 	seq_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
+	seq_printf(m, " CTL: 0x%08x\n", error->ctl[ring]);
 	seq_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
 	seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
 	seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
@@ -693,6 +694,8 @@ static int i915_error_state(struct seq_file *m, void *unused)
 	seq_printf(m, "EIR: 0x%08x\n", error->eir);
 	seq_printf(m, "IER: 0x%08x\n", error->ier);
 	seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+	seq_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
+	seq_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
 	seq_printf(m, "CCID: 0x%08x\n", error->ccid);
 
 	for (i = 0; i < dev_priv->num_fence_regs; i++)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 8f63cd5de4b4..99daa896105d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -989,6 +989,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_HAS_SECURE_BATCHES:
 		value = capable(CAP_SYS_ADMIN);
 		break;
+	case I915_PARAM_HAS_PINNED_BATCHES:
+		value = 1;
+		break;
 	default:
 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
 				 param->param);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 530db83ef320..117265840b1f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -877,8 +877,7 @@ int i915_reset(struct drm_device *dev)
 	return 0;
 }
 
-static int __devinit
-i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	struct intel_device_info *intel_info =
 		(struct intel_device_info *) ent->driver_data;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 557843dd4b2e..12ab3bdea54d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -188,10 +188,13 @@ struct drm_i915_error_state {
188 u32 pgtbl_er; 188 u32 pgtbl_er;
189 u32 ier; 189 u32 ier;
190 u32 ccid; 190 u32 ccid;
191 u32 derrmr;
192 u32 forcewake;
191 bool waiting[I915_NUM_RINGS]; 193 bool waiting[I915_NUM_RINGS];
192 u32 pipestat[I915_MAX_PIPES]; 194 u32 pipestat[I915_MAX_PIPES];
193 u32 tail[I915_NUM_RINGS]; 195 u32 tail[I915_NUM_RINGS];
194 u32 head[I915_NUM_RINGS]; 196 u32 head[I915_NUM_RINGS];
197 u32 ctl[I915_NUM_RINGS];
195 u32 ipeir[I915_NUM_RINGS]; 198 u32 ipeir[I915_NUM_RINGS];
196 u32 ipehr[I915_NUM_RINGS]; 199 u32 ipehr[I915_NUM_RINGS];
197 u32 instdone[I915_NUM_RINGS]; 200 u32 instdone[I915_NUM_RINGS];
@@ -780,6 +783,7 @@ typedef struct drm_i915_private {
780 struct i915_hw_ppgtt *aliasing_ppgtt; 783 struct i915_hw_ppgtt *aliasing_ppgtt;
781 784
782 struct shrinker inactive_shrinker; 785 struct shrinker inactive_shrinker;
786 bool shrinker_no_lock_stealing;
783 787
784 /** 788 /**
785 * List of objects currently involved in rendering. 789 * List of objects currently involved in rendering.
@@ -1100,6 +1104,7 @@ struct drm_i915_gem_object {
1100 */ 1104 */
1101 atomic_t pending_flip; 1105 atomic_t pending_flip;
1102}; 1106};
1107#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
1103 1108
1104#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) 1109#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
1105 1110
@@ -1166,6 +1171,9 @@ struct drm_i915_file_private {
1166#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \ 1171#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \
1167 (dev)->pci_device == 0x0152 || \ 1172 (dev)->pci_device == 0x0152 || \
1168 (dev)->pci_device == 0x015a) 1173 (dev)->pci_device == 0x015a)
1174#define IS_SNB_GT1(dev) ((dev)->pci_device == 0x0102 || \
1175 (dev)->pci_device == 0x0106 || \
1176 (dev)->pci_device == 0x010A)
1169#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) 1177#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
1170#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) 1178#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
1171#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) 1179#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
@@ -1196,6 +1204,9 @@ struct drm_i915_file_private {
1196#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) 1204#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
1197#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) 1205#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
1198 1206
1207/* Early gen2 have a totally busted CS tlb and require pinned batches. */
1208#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
1209
1199/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 1210/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
1200 * rows, which changed the alignment requirements and fence programming. 1211 * rows, which changed the alignment requirements and fence programming.
1201 */ 1212 */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 742206e45103..8febea6daa08 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1517,9 +1517,11 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1517 if (obj->base.map_list.map) 1517 if (obj->base.map_list.map)
1518 return 0; 1518 return 0;
1519 1519
1520 dev_priv->mm.shrinker_no_lock_stealing = true;
1521
1520 ret = drm_gem_create_mmap_offset(&obj->base); 1522 ret = drm_gem_create_mmap_offset(&obj->base);
1521 if (ret != -ENOSPC) 1523 if (ret != -ENOSPC)
1522 return ret; 1524 goto out;
1523 1525
1524 /* Badly fragmented mmap space? The only way we can recover 1526 /* Badly fragmented mmap space? The only way we can recover
1525 * space is by destroying unwanted objects. We can't randomly release 1527 * space is by destroying unwanted objects. We can't randomly release
@@ -1531,10 +1533,14 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1531 i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT); 1533 i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
1532 ret = drm_gem_create_mmap_offset(&obj->base); 1534 ret = drm_gem_create_mmap_offset(&obj->base);
1533 if (ret != -ENOSPC) 1535 if (ret != -ENOSPC)
1534 return ret; 1536 goto out;
1535 1537
1536 i915_gem_shrink_all(dev_priv); 1538 i915_gem_shrink_all(dev_priv);
1537 return drm_gem_create_mmap_offset(&obj->base); 1539 ret = drm_gem_create_mmap_offset(&obj->base);
1540out:
1541 dev_priv->mm.shrinker_no_lock_stealing = false;
1542
1543 return ret;
1538} 1544}
1539 1545
1540static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj) 1546static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
@@ -1711,7 +1717,8 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1711} 1717}
1712 1718
1713static long 1719static long
1714i915_gem_purge(struct drm_i915_private *dev_priv, long target) 1720__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1721 bool purgeable_only)
1715{ 1722{
1716 struct drm_i915_gem_object *obj, *next; 1723 struct drm_i915_gem_object *obj, *next;
1717 long count = 0; 1724 long count = 0;
@@ -1719,7 +1726,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1719 list_for_each_entry_safe(obj, next, 1726 list_for_each_entry_safe(obj, next,
1720 &dev_priv->mm.unbound_list, 1727 &dev_priv->mm.unbound_list,
1721 gtt_list) { 1728 gtt_list) {
1722 if (i915_gem_object_is_purgeable(obj) && 1729 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
1723 i915_gem_object_put_pages(obj) == 0) { 1730 i915_gem_object_put_pages(obj) == 0) {
1724 count += obj->base.size >> PAGE_SHIFT; 1731 count += obj->base.size >> PAGE_SHIFT;
1725 if (count >= target) 1732 if (count >= target)
@@ -1730,7 +1737,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1730 list_for_each_entry_safe(obj, next, 1737 list_for_each_entry_safe(obj, next,
1731 &dev_priv->mm.inactive_list, 1738 &dev_priv->mm.inactive_list,
1732 mm_list) { 1739 mm_list) {
1733 if (i915_gem_object_is_purgeable(obj) && 1740 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
1734 i915_gem_object_unbind(obj) == 0 && 1741 i915_gem_object_unbind(obj) == 0 &&
1735 i915_gem_object_put_pages(obj) == 0) { 1742 i915_gem_object_put_pages(obj) == 0) {
1736 count += obj->base.size >> PAGE_SHIFT; 1743 count += obj->base.size >> PAGE_SHIFT;
@@ -1742,6 +1749,12 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1742 return count; 1749 return count;
1743} 1750}
1744 1751
1752static long
1753i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1754{
1755 return __i915_gem_shrink(dev_priv, target, true);
1756}
1757
1745static void 1758static void
1746i915_gem_shrink_all(struct drm_i915_private *dev_priv) 1759i915_gem_shrink_all(struct drm_i915_private *dev_priv)
1747{ 1760{
@@ -2890,7 +2903,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2890{ 2903{
2891 struct drm_device *dev = obj->base.dev; 2904 struct drm_device *dev = obj->base.dev;
2892 drm_i915_private_t *dev_priv = dev->dev_private; 2905 drm_i915_private_t *dev_priv = dev->dev_private;
2893 struct drm_mm_node *free_space; 2906 struct drm_mm_node *node;
2894 u32 size, fence_size, fence_alignment, unfenced_alignment; 2907 u32 size, fence_size, fence_alignment, unfenced_alignment;
2895 bool mappable, fenceable; 2908 bool mappable, fenceable;
2896 int ret; 2909 int ret;
@@ -2936,66 +2949,54 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2936 2949
2937 i915_gem_object_pin_pages(obj); 2950 i915_gem_object_pin_pages(obj);
2938 2951
2952 node = kzalloc(sizeof(*node), GFP_KERNEL);
2953 if (node == NULL) {
2954 i915_gem_object_unpin_pages(obj);
2955 return -ENOMEM;
2956 }
2957
2939 search_free: 2958 search_free:
2940 if (map_and_fenceable) 2959 if (map_and_fenceable)
2941 free_space = drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space, 2960 ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
2942 size, alignment, obj->cache_level, 2961 size, alignment, obj->cache_level,
2943 0, dev_priv->mm.gtt_mappable_end, 2962 0, dev_priv->mm.gtt_mappable_end);
2944 false);
2945 else 2963 else
2946 free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space, 2964 ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
2947 size, alignment, obj->cache_level, 2965 size, alignment, obj->cache_level);
2948 false); 2966 if (ret) {
2949
2950 if (free_space != NULL) {
2951 if (map_and_fenceable)
2952 free_space =
2953 drm_mm_get_block_range_generic(free_space,
2954 size, alignment, obj->cache_level,
2955 0, dev_priv->mm.gtt_mappable_end,
2956 false);
2957 else
2958 free_space =
2959 drm_mm_get_block_generic(free_space,
2960 size, alignment, obj->cache_level,
2961 false);
2962 }
2963 if (free_space == NULL) {
2964 ret = i915_gem_evict_something(dev, size, alignment, 2967 ret = i915_gem_evict_something(dev, size, alignment,
2965 obj->cache_level, 2968 obj->cache_level,
2966 map_and_fenceable, 2969 map_and_fenceable,
2967 nonblocking); 2970 nonblocking);
2968 if (ret) { 2971 if (ret == 0)
2969 i915_gem_object_unpin_pages(obj); 2972 goto search_free;
2970 return ret;
2971 }
2972 2973
2973 goto search_free; 2974 i915_gem_object_unpin_pages(obj);
2975 kfree(node);
2976 return ret;
2974 } 2977 }
2975 if (WARN_ON(!i915_gem_valid_gtt_space(dev, 2978 if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
2976 free_space,
2977 obj->cache_level))) {
2978 i915_gem_object_unpin_pages(obj); 2979 i915_gem_object_unpin_pages(obj);
2979 drm_mm_put_block(free_space); 2980 drm_mm_put_block(node);
2980 return -EINVAL; 2981 return -EINVAL;
2981 } 2982 }
2982 2983
2983 ret = i915_gem_gtt_prepare_object(obj); 2984 ret = i915_gem_gtt_prepare_object(obj);
2984 if (ret) { 2985 if (ret) {
2985 i915_gem_object_unpin_pages(obj); 2986 i915_gem_object_unpin_pages(obj);
2986 drm_mm_put_block(free_space); 2987 drm_mm_put_block(node);
2987 return ret; 2988 return ret;
2988 } 2989 }
2989 2990
2990 list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list); 2991 list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
2991 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); 2992 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2992 2993
2993 obj->gtt_space = free_space; 2994 obj->gtt_space = node;
2994 obj->gtt_offset = free_space->start; 2995 obj->gtt_offset = node->start;
2995 2996
2996 fenceable = 2997 fenceable =
2997 free_space->size == fence_size && 2998 node->size == fence_size &&
2998 (free_space->start & (fence_alignment - 1)) == 0; 2999 (node->start & (fence_alignment - 1)) == 0;
2999 3000
3000 mappable = 3001 mappable =
3001 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end; 3002 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
@@ -3528,14 +3529,15 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3528 goto out; 3529 goto out;
3529 } 3530 }
3530 3531
3531 obj->user_pin_count++; 3532 if (obj->user_pin_count == 0) {
3532 obj->pin_filp = file;
3533 if (obj->user_pin_count == 1) {
3534 ret = i915_gem_object_pin(obj, args->alignment, true, false); 3533 ret = i915_gem_object_pin(obj, args->alignment, true, false);
3535 if (ret) 3534 if (ret)
3536 goto out; 3535 goto out;
3537 } 3536 }
3538 3537
3538 obj->user_pin_count++;
3539 obj->pin_filp = file;
3540
3539 /* XXX - flush the CPU caches for pinned objects 3541 /* XXX - flush the CPU caches for pinned objects
3540 * as the X server doesn't manage domains yet 3542 * as the X server doesn't manage domains yet
3541 */ 3543 */
@@ -4392,12 +4394,18 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
4392 if (!mutex_is_locked_by(&dev->struct_mutex, current)) 4394 if (!mutex_is_locked_by(&dev->struct_mutex, current))
4393 return 0; 4395 return 0;
4394 4396
4397 if (dev_priv->mm.shrinker_no_lock_stealing)
4398 return 0;
4399
4395 unlock = false; 4400 unlock = false;
4396 } 4401 }
4397 4402
4398 if (nr_to_scan) { 4403 if (nr_to_scan) {
4399 nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan); 4404 nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
4400 if (nr_to_scan > 0) 4405 if (nr_to_scan > 0)
4406 nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
4407 false);
4408 if (nr_to_scan > 0)
4401 i915_gem_shrink_all(dev_priv); 4409 i915_gem_shrink_all(dev_priv);
4402 } 4410 }
4403 4411
@@ -4405,7 +4413,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
4405 list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) 4413 list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
4406 if (obj->pages_pin_count == 0) 4414 if (obj->pages_pin_count == 0)
4407 cnt += obj->base.size >> PAGE_SHIFT; 4415 cnt += obj->base.size >> PAGE_SHIFT;
4408 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) 4416 list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list)
4409 if (obj->pin_count == 0 && obj->pages_pin_count == 0) 4417 if (obj->pin_count == 0 && obj->pages_pin_count == 0)
4410 cnt += obj->base.size >> PAGE_SHIFT; 4418 cnt += obj->base.size >> PAGE_SHIFT;
4411 4419
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 773ef77b6c22..abeaafef6d7e 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -226,7 +226,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 {
 	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
 
-	return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, 0600);
+	return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, flags);
 }
 
 static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
@@ -266,7 +266,12 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 	obj = dma_buf->priv;
 	/* is it from our device? */
 	if (obj->base.dev == dev) {
+		/*
+		 * Importing dmabuf exported from out own gem increases
+		 * refcount on gem itself instead of f_count of dmabuf.
+		 */
 		drm_gem_object_reference(&obj->base);
+		dma_buf_put(dma_buf);
 		return &obj->base;
 	}
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index ee8f97f0539e..26d08bb58218 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -539,6 +539,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
539 total = 0; 539 total = 0;
540 for (i = 0; i < count; i++) { 540 for (i = 0; i < count; i++) {
541 struct drm_i915_gem_relocation_entry __user *user_relocs; 541 struct drm_i915_gem_relocation_entry __user *user_relocs;
542 u64 invalid_offset = (u64)-1;
543 int j;
542 544
543 user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr; 545 user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;
544 546
@@ -549,6 +551,25 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
549 goto err; 551 goto err;
550 } 552 }
551 553
554 /* As we do not update the known relocation offsets after
555 * relocating (due to the complexities in lock handling),
556 * we need to mark them as invalid now so that we force the
557 * relocation processing next time. Just in case the target
558 * object is evicted and then rebound into its old
559 * presumed_offset before the next execbuffer - if that
560 * happened we would make the mistake of assuming that the
561 * relocations were valid.
562 */
563 for (j = 0; j < exec[i].relocation_count; j++) {
564 if (copy_to_user(&user_relocs[j].presumed_offset,
565 &invalid_offset,
566 sizeof(invalid_offset))) {
567 ret = -EFAULT;
568 mutex_lock(&dev->struct_mutex);
569 goto err;
570 }
571 }
572
552 reloc_offset[i] = total; 573 reloc_offset[i] = total;
553 total += exec[i].relocation_count; 574 total += exec[i].relocation_count;
554 } 575 }
@@ -808,6 +829,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
808 829
809 flags |= I915_DISPATCH_SECURE; 830 flags |= I915_DISPATCH_SECURE;
810 } 831 }
832 if (args->flags & I915_EXEC_IS_PINNED)
833 flags |= I915_DISPATCH_PINNED;
811 834
812 switch (args->flags & I915_EXEC_RING_MASK) { 835 switch (args->flags & I915_EXEC_RING_MASK) {
813 case I915_EXEC_DEFAULT: 836 case I915_EXEC_DEFAULT:
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index a4dc97f8b9f0..fe843389c7b4 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1087,6 +1087,18 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
1087 if (!ring->get_seqno) 1087 if (!ring->get_seqno)
1088 return NULL; 1088 return NULL;
1089 1089
1090 if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
1091 u32 acthd = I915_READ(ACTHD);
1092
1093 if (WARN_ON(ring->id != RCS))
1094 return NULL;
1095
1096 obj = ring->private;
1097 if (acthd >= obj->gtt_offset &&
1098 acthd < obj->gtt_offset + obj->base.size)
1099 return i915_error_object_create(dev_priv, obj);
1100 }
1101
1090 seqno = ring->get_seqno(ring, false); 1102 seqno = ring->get_seqno(ring, false);
1091 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { 1103 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
1092 if (obj->ring != ring) 1104 if (obj->ring != ring)
@@ -1145,6 +1157,7 @@ static void i915_record_ring_state(struct drm_device *dev,
1145 error->acthd[ring->id] = intel_ring_get_active_head(ring); 1157 error->acthd[ring->id] = intel_ring_get_active_head(ring);
1146 error->head[ring->id] = I915_READ_HEAD(ring); 1158 error->head[ring->id] = I915_READ_HEAD(ring);
1147 error->tail[ring->id] = I915_READ_TAIL(ring); 1159 error->tail[ring->id] = I915_READ_TAIL(ring);
1160 error->ctl[ring->id] = I915_READ_CTL(ring);
1148 1161
1149 error->cpu_ring_head[ring->id] = ring->head; 1162 error->cpu_ring_head[ring->id] = ring->head;
1150 error->cpu_ring_tail[ring->id] = ring->tail; 1163 error->cpu_ring_tail[ring->id] = ring->tail;
@@ -1239,6 +1252,16 @@ static void i915_capture_error_state(struct drm_device *dev)
1239 else 1252 else
1240 error->ier = I915_READ(IER); 1253 error->ier = I915_READ(IER);
1241 1254
1255 if (INTEL_INFO(dev)->gen >= 6)
1256 error->derrmr = I915_READ(DERRMR);
1257
1258 if (IS_VALLEYVIEW(dev))
1259 error->forcewake = I915_READ(FORCEWAKE_VLV);
1260 else if (INTEL_INFO(dev)->gen >= 7)
1261 error->forcewake = I915_READ(FORCEWAKE_MT);
1262 else if (INTEL_INFO(dev)->gen == 6)
1263 error->forcewake = I915_READ(FORCEWAKE);
1264
1242 for_each_pipe(pipe) 1265 for_each_pipe(pipe)
1243 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); 1266 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
1244 1267
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3f75cfaf1c3f..b401788e1791 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -512,11 +512,14 @@
512#define GEN7_ERR_INT 0x44040 512#define GEN7_ERR_INT 0x44040
513#define ERR_INT_MMIO_UNCLAIMED (1<<13) 513#define ERR_INT_MMIO_UNCLAIMED (1<<13)
514 514
515#define DERRMR 0x44050
516
515/* GM45+ chicken bits -- debug workaround bits that may be required 517/* GM45+ chicken bits -- debug workaround bits that may be required
516 * for various sorts of correct behavior. The top 16 bits of each are 518 * for various sorts of correct behavior. The top 16 bits of each are
517 * the enables for writing to the corresponding low bit. 519 * the enables for writing to the corresponding low bit.
518 */ 520 */
519#define _3D_CHICKEN 0x02084 521#define _3D_CHICKEN 0x02084
522#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10)
520#define _3D_CHICKEN2 0x0208c 523#define _3D_CHICKEN2 0x0208c
521/* Disables pipelining of read flushes past the SF-WIZ interface. 524/* Disables pipelining of read flushes past the SF-WIZ interface.
522 * Required on all Ironlake steppings according to the B-Spec, but the 525 * Required on all Ironlake steppings according to the B-Spec, but the
@@ -532,7 +535,8 @@
532# define MI_FLUSH_ENABLE (1 << 12) 535# define MI_FLUSH_ENABLE (1 << 12)
533 536
534#define GEN6_GT_MODE 0x20d0 537#define GEN6_GT_MODE 0x20d0
535#define GEN6_GT_MODE_HI (1 << 9) 538#define GEN6_GT_MODE_HI (1 << 9)
539#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5)
536 540
537#define GFX_MODE 0x02520 541#define GFX_MODE 0x02520
538#define GFX_MODE_GEN7 0x0229c 542#define GFX_MODE_GEN7 0x0229c
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5d127e068950..da1ad9c80bb5 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -8144,10 +8144,6 @@ intel_modeset_stage_output_state(struct drm_device *dev,
8144 DRM_DEBUG_KMS("encoder changed, full mode switch\n"); 8144 DRM_DEBUG_KMS("encoder changed, full mode switch\n");
8145 config->mode_changed = true; 8145 config->mode_changed = true;
8146 } 8146 }
8147
8148 /* Disable all disconnected encoders. */
8149 if (connector->base.status == connector_status_disconnected)
8150 connector->new_encoder = NULL;
8151 } 8147 }
8152 /* connector->new_encoder is now updated for all connectors. */ 8148 /* connector->new_encoder is now updated for all connectors. */
8153 8149
@@ -8602,19 +8598,30 @@ int intel_framebuffer_init(struct drm_device *dev,
8602{ 8598{
8603 int ret; 8599 int ret;
8604 8600
8605 if (obj->tiling_mode == I915_TILING_Y) 8601 if (obj->tiling_mode == I915_TILING_Y) {
8602 DRM_DEBUG("hardware does not support tiling Y\n");
8606 return -EINVAL; 8603 return -EINVAL;
8604 }
8607 8605
8608 if (mode_cmd->pitches[0] & 63) 8606 if (mode_cmd->pitches[0] & 63) {
8607 DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
8608 mode_cmd->pitches[0]);
8609 return -EINVAL; 8609 return -EINVAL;
8610 }
8610 8611
8611 /* FIXME <= Gen4 stride limits are bit unclear */ 8612 /* FIXME <= Gen4 stride limits are bit unclear */
8612 if (mode_cmd->pitches[0] > 32768) 8613 if (mode_cmd->pitches[0] > 32768) {
8614 DRM_DEBUG("pitch (%d) must be at less than 32768\n",
8615 mode_cmd->pitches[0]);
8613 return -EINVAL; 8616 return -EINVAL;
8617 }
8614 8618
8615 if (obj->tiling_mode != I915_TILING_NONE && 8619 if (obj->tiling_mode != I915_TILING_NONE &&
8616 mode_cmd->pitches[0] != obj->stride) 8620 mode_cmd->pitches[0] != obj->stride) {
8621 DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
8622 mode_cmd->pitches[0], obj->stride);
8617 return -EINVAL; 8623 return -EINVAL;
8624 }
8618 8625
8619 /* Reject formats not supported by any plane early. */ 8626 /* Reject formats not supported by any plane early. */
8620 switch (mode_cmd->pixel_format) { 8627 switch (mode_cmd->pixel_format) {
@@ -8625,8 +8632,10 @@ int intel_framebuffer_init(struct drm_device *dev,
8625 break; 8632 break;
8626 case DRM_FORMAT_XRGB1555: 8633 case DRM_FORMAT_XRGB1555:
8627 case DRM_FORMAT_ARGB1555: 8634 case DRM_FORMAT_ARGB1555:
8628 if (INTEL_INFO(dev)->gen > 3) 8635 if (INTEL_INFO(dev)->gen > 3) {
8636 DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
8629 return -EINVAL; 8637 return -EINVAL;
8638 }
8630 break; 8639 break;
8631 case DRM_FORMAT_XBGR8888: 8640 case DRM_FORMAT_XBGR8888:
8632 case DRM_FORMAT_ABGR8888: 8641 case DRM_FORMAT_ABGR8888:
@@ -8634,18 +8643,22 @@ int intel_framebuffer_init(struct drm_device *dev,
8634 case DRM_FORMAT_ARGB2101010: 8643 case DRM_FORMAT_ARGB2101010:
8635 case DRM_FORMAT_XBGR2101010: 8644 case DRM_FORMAT_XBGR2101010:
8636 case DRM_FORMAT_ABGR2101010: 8645 case DRM_FORMAT_ABGR2101010:
8637 if (INTEL_INFO(dev)->gen < 4) 8646 if (INTEL_INFO(dev)->gen < 4) {
8647 DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
8638 return -EINVAL; 8648 return -EINVAL;
8649 }
8639 break; 8650 break;
8640 case DRM_FORMAT_YUYV: 8651 case DRM_FORMAT_YUYV:
8641 case DRM_FORMAT_UYVY: 8652 case DRM_FORMAT_UYVY:
8642 case DRM_FORMAT_YVYU: 8653 case DRM_FORMAT_YVYU:
8643 case DRM_FORMAT_VYUY: 8654 case DRM_FORMAT_VYUY:
8644 if (INTEL_INFO(dev)->gen < 6) 8655 if (INTEL_INFO(dev)->gen < 5) {
8656 DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
8645 return -EINVAL; 8657 return -EINVAL;
8658 }
8646 break; 8659 break;
8647 default: 8660 default:
8648 DRM_DEBUG_KMS("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format); 8661 DRM_DEBUG("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format);
8649 return -EINVAL; 8662 return -EINVAL;
8650 } 8663 }
8651 8664
@@ -9167,6 +9180,23 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
9167 * the crtc fixup. */ 9180 * the crtc fixup. */
9168} 9181}
9169 9182
9183static void i915_redisable_vga(struct drm_device *dev)
9184{
9185 struct drm_i915_private *dev_priv = dev->dev_private;
9186 u32 vga_reg;
9187
9188 if (HAS_PCH_SPLIT(dev))
9189 vga_reg = CPU_VGACNTRL;
9190 else
9191 vga_reg = VGACNTRL;
9192
9193 if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
9194 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
9195 I915_WRITE(vga_reg, VGA_DISP_DISABLE);
9196 POSTING_READ(vga_reg);
9197 }
9198}
9199
9170/* Scan out the current hw modeset state, sanitizes it and maps it into the drm 9200/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
9171 * and i915 state tracking structures. */ 9201 * and i915 state tracking structures. */
9172void intel_modeset_setup_hw_state(struct drm_device *dev, 9202void intel_modeset_setup_hw_state(struct drm_device *dev,
@@ -9275,6 +9305,8 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
9275 intel_set_mode(&crtc->base, &crtc->base.mode, 9305 intel_set_mode(&crtc->base, &crtc->base.mode,
9276 crtc->base.x, crtc->base.y, crtc->base.fb); 9306 crtc->base.x, crtc->base.y, crtc->base.fb);
9277 } 9307 }
9308
9309 i915_redisable_vga(dev);
9278 } else { 9310 } else {
9279 intel_modeset_update_staged_output_state(dev); 9311 intel_modeset_update_staged_output_state(dev);
9280 } 9312 }
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1b63d55318a0..fb3715b4b09d 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -2579,7 +2579,8 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
2579 2579
2580static void 2580static void
2581intel_dp_init_panel_power_sequencer(struct drm_device *dev, 2581intel_dp_init_panel_power_sequencer(struct drm_device *dev,
2582 struct intel_dp *intel_dp) 2582 struct intel_dp *intel_dp,
2583 struct edp_power_seq *out)
2583{ 2584{
2584 struct drm_i915_private *dev_priv = dev->dev_private; 2585 struct drm_i915_private *dev_priv = dev->dev_private;
2585 struct edp_power_seq cur, vbt, spec, final; 2586 struct edp_power_seq cur, vbt, spec, final;
@@ -2650,16 +2651,35 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
2650 intel_dp->panel_power_cycle_delay = get_delay(t11_t12); 2651 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
2651#undef get_delay 2652#undef get_delay
2652 2653
2654 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
2655 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
2656 intel_dp->panel_power_cycle_delay);
2657
2658 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
2659 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
2660
2661 if (out)
2662 *out = final;
2663}
2664
2665static void
2666intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
2667 struct intel_dp *intel_dp,
2668 struct edp_power_seq *seq)
2669{
2670 struct drm_i915_private *dev_priv = dev->dev_private;
2671 u32 pp_on, pp_off, pp_div;
2672
2653 /* And finally store the new values in the power sequencer. */ 2673 /* And finally store the new values in the power sequencer. */
2654 pp_on = (final.t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | 2674 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
2655 (final.t8 << PANEL_LIGHT_ON_DELAY_SHIFT); 2675 (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
2656 pp_off = (final.t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) | 2676 pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
2657 (final.t10 << PANEL_POWER_DOWN_DELAY_SHIFT); 2677 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
2658 /* Compute the divisor for the pp clock, simply match the Bspec 2678 /* Compute the divisor for the pp clock, simply match the Bspec
2659 * formula. */ 2679 * formula. */
2660 pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1) 2680 pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1)
2661 << PP_REFERENCE_DIVIDER_SHIFT; 2681 << PP_REFERENCE_DIVIDER_SHIFT;
2662 pp_div |= (DIV_ROUND_UP(final.t11_t12, 1000) 2682 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
2663 << PANEL_POWER_CYCLE_DELAY_SHIFT); 2683 << PANEL_POWER_CYCLE_DELAY_SHIFT);
2664 2684
2665 /* Haswell doesn't have any port selection bits for the panel 2685 /* Haswell doesn't have any port selection bits for the panel
@@ -2675,14 +2695,6 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
2675 I915_WRITE(PCH_PP_OFF_DELAYS, pp_off); 2695 I915_WRITE(PCH_PP_OFF_DELAYS, pp_off);
2676 I915_WRITE(PCH_PP_DIVISOR, pp_div); 2696 I915_WRITE(PCH_PP_DIVISOR, pp_div);
2677 2697
2678
2679 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
2680 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
2681 intel_dp->panel_power_cycle_delay);
2682
2683 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
2684 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
2685
2686 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", 2698 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
2687 I915_READ(PCH_PP_ON_DELAYS), 2699 I915_READ(PCH_PP_ON_DELAYS),
2688 I915_READ(PCH_PP_OFF_DELAYS), 2700 I915_READ(PCH_PP_OFF_DELAYS),
@@ -2699,6 +2711,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
2699 struct drm_device *dev = intel_encoder->base.dev; 2711 struct drm_device *dev = intel_encoder->base.dev;
2700 struct drm_i915_private *dev_priv = dev->dev_private; 2712 struct drm_i915_private *dev_priv = dev->dev_private;
2701 struct drm_display_mode *fixed_mode = NULL; 2713 struct drm_display_mode *fixed_mode = NULL;
2714 struct edp_power_seq power_seq = { 0 };
2702 enum port port = intel_dig_port->port; 2715 enum port port = intel_dig_port->port;
2703 const char *name = NULL; 2716 const char *name = NULL;
2704 int type; 2717 int type;
@@ -2771,7 +2784,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
2771 } 2784 }
2772 2785
2773 if (is_edp(intel_dp)) 2786 if (is_edp(intel_dp))
2774 intel_dp_init_panel_power_sequencer(dev, intel_dp); 2787 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
2775 2788
2776 intel_dp_i2c_init(intel_dp, intel_connector, name); 2789 intel_dp_i2c_init(intel_dp, intel_connector, name);
2777 2790
@@ -2798,6 +2811,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
2798 return; 2811 return;
2799 } 2812 }
2800 2813
2814 /* We now know it's not a ghost, init power sequence regs. */
2815 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
2816 &power_seq);
2817
2801 ironlake_edp_panel_vdd_on(intel_dp); 2818 ironlake_edp_panel_vdd_on(intel_dp);
2802 edid = drm_get_edid(connector, &intel_dp->adapter); 2819 edid = drm_get_edid(connector, &intel_dp->adapter);
2803 if (edid) { 2820 if (edid) {
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b9a660a53677..17aee74258ad 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -776,14 +776,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
 	},
 	{
 		.callback = intel_no_lvds_dmi_callback,
-		.ident = "ZOTAC ZBOXSD-ID12/ID13",
-		.matches = {
-			DMI_MATCH(DMI_BOARD_VENDOR, "ZOTAC"),
-			DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"),
-		},
-	},
-	{
-		.callback = intel_no_lvds_dmi_callback,
 		.ident = "Gigabyte GA-D525TUD",
 		.matches = {
 			DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 496caa73eb70..3280cffe50f4 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -44,6 +44,14 @@
44 * i915.i915_enable_fbc parameter 44 * i915.i915_enable_fbc parameter
45 */ 45 */
46 46
47static bool intel_crtc_active(struct drm_crtc *crtc)
48{
49 /* Be paranoid as we can arrive here with only partial
50 * state retrieved from the hardware during setup.
51 */
52 return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
53}
54
47static void i8xx_disable_fbc(struct drm_device *dev) 55static void i8xx_disable_fbc(struct drm_device *dev)
48{ 56{
49 struct drm_i915_private *dev_priv = dev->dev_private; 57 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -405,9 +413,8 @@ void intel_update_fbc(struct drm_device *dev)
405 * - going to an unsupported config (interlace, pixel multiply, etc.) 413 * - going to an unsupported config (interlace, pixel multiply, etc.)
406 */ 414 */
407 list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { 415 list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
408 if (tmp_crtc->enabled && 416 if (intel_crtc_active(tmp_crtc) &&
409 !to_intel_crtc(tmp_crtc)->primary_disabled && 417 !to_intel_crtc(tmp_crtc)->primary_disabled) {
410 tmp_crtc->fb) {
411 if (crtc) { 418 if (crtc) {
412 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); 419 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
413 dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; 420 dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
@@ -992,7 +999,7 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
992 struct drm_crtc *crtc, *enabled = NULL; 999 struct drm_crtc *crtc, *enabled = NULL;
993 1000
994 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 1001 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
995 if (crtc->enabled && crtc->fb) { 1002 if (intel_crtc_active(crtc)) {
996 if (enabled) 1003 if (enabled)
997 return NULL; 1004 return NULL;
998 enabled = crtc; 1005 enabled = crtc;
@@ -1086,7 +1093,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
1086 int entries, tlb_miss; 1093 int entries, tlb_miss;
1087 1094
1088 crtc = intel_get_crtc_for_plane(dev, plane); 1095 crtc = intel_get_crtc_for_plane(dev, plane);
1089 if (crtc->fb == NULL || !crtc->enabled) { 1096 if (!intel_crtc_active(crtc)) {
1090 *cursor_wm = cursor->guard_size; 1097 *cursor_wm = cursor->guard_size;
1091 *plane_wm = display->guard_size; 1098 *plane_wm = display->guard_size;
1092 return false; 1099 return false;
@@ -1215,7 +1222,7 @@ static bool vlv_compute_drain_latency(struct drm_device *dev,
1215 int entries; 1222 int entries;
1216 1223
1217 crtc = intel_get_crtc_for_plane(dev, plane); 1224 crtc = intel_get_crtc_for_plane(dev, plane);
1218 if (crtc->fb == NULL || !crtc->enabled) 1225 if (!intel_crtc_active(crtc))
1219 return false; 1226 return false;
1220 1227
1221 clock = crtc->mode.clock; /* VESA DOT Clock */ 1228 clock = crtc->mode.clock; /* VESA DOT Clock */
@@ -1286,6 +1293,7 @@ static void valleyview_update_wm(struct drm_device *dev)
1286 struct drm_i915_private *dev_priv = dev->dev_private; 1293 struct drm_i915_private *dev_priv = dev->dev_private;
1287 int planea_wm, planeb_wm, cursora_wm, cursorb_wm; 1294 int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1288 int plane_sr, cursor_sr; 1295 int plane_sr, cursor_sr;
1296 int ignore_plane_sr, ignore_cursor_sr;
1289 unsigned int enabled = 0; 1297 unsigned int enabled = 0;
1290 1298
1291 vlv_update_drain_latency(dev); 1299 vlv_update_drain_latency(dev);
@@ -1302,17 +1310,23 @@ static void valleyview_update_wm(struct drm_device *dev)
1302 &planeb_wm, &cursorb_wm)) 1310 &planeb_wm, &cursorb_wm))
1303 enabled |= 2; 1311 enabled |= 2;
1304 1312
1305 plane_sr = cursor_sr = 0;
1306 if (single_plane_enabled(enabled) && 1313 if (single_plane_enabled(enabled) &&
1307 g4x_compute_srwm(dev, ffs(enabled) - 1, 1314 g4x_compute_srwm(dev, ffs(enabled) - 1,
1308 sr_latency_ns, 1315 sr_latency_ns,
1309 &valleyview_wm_info, 1316 &valleyview_wm_info,
1310 &valleyview_cursor_wm_info, 1317 &valleyview_cursor_wm_info,
1311 &plane_sr, &cursor_sr)) 1318 &plane_sr, &ignore_cursor_sr) &&
1319 g4x_compute_srwm(dev, ffs(enabled) - 1,
1320 2*sr_latency_ns,
1321 &valleyview_wm_info,
1322 &valleyview_cursor_wm_info,
1323 &ignore_plane_sr, &cursor_sr)) {
1312 I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN); 1324 I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
1313 else 1325 } else {
1314 I915_WRITE(FW_BLC_SELF_VLV, 1326 I915_WRITE(FW_BLC_SELF_VLV,
1315 I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN); 1327 I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
1328 plane_sr = cursor_sr = 0;
1329 }
1316 1330
1317 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", 1331 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
1318 planea_wm, cursora_wm, 1332 planea_wm, cursora_wm,
@@ -1352,17 +1366,18 @@ static void g4x_update_wm(struct drm_device *dev)
1352 &planeb_wm, &cursorb_wm)) 1366 &planeb_wm, &cursorb_wm))
1353 enabled |= 2; 1367 enabled |= 2;
1354 1368
1355 plane_sr = cursor_sr = 0;
1356 if (single_plane_enabled(enabled) && 1369 if (single_plane_enabled(enabled) &&
1357 g4x_compute_srwm(dev, ffs(enabled) - 1, 1370 g4x_compute_srwm(dev, ffs(enabled) - 1,
1358 sr_latency_ns, 1371 sr_latency_ns,
1359 &g4x_wm_info, 1372 &g4x_wm_info,
1360 &g4x_cursor_wm_info, 1373 &g4x_cursor_wm_info,
1361 &plane_sr, &cursor_sr)) 1374 &plane_sr, &cursor_sr)) {
1362 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); 1375 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
1363 else 1376 } else {
1364 I915_WRITE(FW_BLC_SELF, 1377 I915_WRITE(FW_BLC_SELF,
1365 I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN); 1378 I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
1379 plane_sr = cursor_sr = 0;
1380 }
1366 1381
1367 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", 1382 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
1368 planea_wm, cursora_wm, 1383 planea_wm, cursora_wm,
@@ -1468,7 +1483,7 @@ static void i9xx_update_wm(struct drm_device *dev)
1468 1483
1469 fifo_size = dev_priv->display.get_fifo_size(dev, 0); 1484 fifo_size = dev_priv->display.get_fifo_size(dev, 0);
1470 crtc = intel_get_crtc_for_plane(dev, 0); 1485 crtc = intel_get_crtc_for_plane(dev, 0);
1471 if (crtc->enabled && crtc->fb) { 1486 if (intel_crtc_active(crtc)) {
1472 int cpp = crtc->fb->bits_per_pixel / 8; 1487 int cpp = crtc->fb->bits_per_pixel / 8;
1473 if (IS_GEN2(dev)) 1488 if (IS_GEN2(dev))
1474 cpp = 4; 1489 cpp = 4;
@@ -1482,7 +1497,7 @@ static void i9xx_update_wm(struct drm_device *dev)
1482 1497
1483 fifo_size = dev_priv->display.get_fifo_size(dev, 1); 1498 fifo_size = dev_priv->display.get_fifo_size(dev, 1);
1484 crtc = intel_get_crtc_for_plane(dev, 1); 1499 crtc = intel_get_crtc_for_plane(dev, 1);
1485 if (crtc->enabled && crtc->fb) { 1500 if (intel_crtc_active(crtc)) {
1486 int cpp = crtc->fb->bits_per_pixel / 8; 1501 int cpp = crtc->fb->bits_per_pixel / 8;
1487 if (IS_GEN2(dev)) 1502 if (IS_GEN2(dev))
1488 cpp = 4; 1503 cpp = 4;
@@ -1811,8 +1826,110 @@ static void sandybridge_update_wm(struct drm_device *dev)
1811 enabled |= 2; 1826 enabled |= 2;
1812 } 1827 }
1813 1828
1814 if ((dev_priv->num_pipe == 3) && 1829 /*
1815 g4x_compute_wm0(dev, 2, 1830 * Calculate and update the self-refresh watermark only when one
1831 * display plane is used.
1832 *
 1833 * SNB supports 3 levels of watermarks.
 1834 *
 1835 * WM1/WM2/WM3 watermarks have to be enabled in the ascending order,
1836 * and disabled in the descending order
1837 *
1838 */
1839 I915_WRITE(WM3_LP_ILK, 0);
1840 I915_WRITE(WM2_LP_ILK, 0);
1841 I915_WRITE(WM1_LP_ILK, 0);
1842
1843 if (!single_plane_enabled(enabled) ||
1844 dev_priv->sprite_scaling_enabled)
1845 return;
1846 enabled = ffs(enabled) - 1;
1847
1848 /* WM1 */
1849 if (!ironlake_compute_srwm(dev, 1, enabled,
1850 SNB_READ_WM1_LATENCY() * 500,
1851 &sandybridge_display_srwm_info,
1852 &sandybridge_cursor_srwm_info,
1853 &fbc_wm, &plane_wm, &cursor_wm))
1854 return;
1855
1856 I915_WRITE(WM1_LP_ILK,
1857 WM1_LP_SR_EN |
1858 (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1859 (fbc_wm << WM1_LP_FBC_SHIFT) |
1860 (plane_wm << WM1_LP_SR_SHIFT) |
1861 cursor_wm);
1862
1863 /* WM2 */
1864 if (!ironlake_compute_srwm(dev, 2, enabled,
1865 SNB_READ_WM2_LATENCY() * 500,
1866 &sandybridge_display_srwm_info,
1867 &sandybridge_cursor_srwm_info,
1868 &fbc_wm, &plane_wm, &cursor_wm))
1869 return;
1870
1871 I915_WRITE(WM2_LP_ILK,
1872 WM2_LP_EN |
1873 (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1874 (fbc_wm << WM1_LP_FBC_SHIFT) |
1875 (plane_wm << WM1_LP_SR_SHIFT) |
1876 cursor_wm);
1877
1878 /* WM3 */
1879 if (!ironlake_compute_srwm(dev, 3, enabled,
1880 SNB_READ_WM3_LATENCY() * 500,
1881 &sandybridge_display_srwm_info,
1882 &sandybridge_cursor_srwm_info,
1883 &fbc_wm, &plane_wm, &cursor_wm))
1884 return;
1885
1886 I915_WRITE(WM3_LP_ILK,
1887 WM3_LP_EN |
1888 (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1889 (fbc_wm << WM1_LP_FBC_SHIFT) |
1890 (plane_wm << WM1_LP_SR_SHIFT) |
1891 cursor_wm);
1892}
1893
1894static void ivybridge_update_wm(struct drm_device *dev)
1895{
1896 struct drm_i915_private *dev_priv = dev->dev_private;
1897 int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
1898 u32 val;
1899 int fbc_wm, plane_wm, cursor_wm;
1900 int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
1901 unsigned int enabled;
1902
1903 enabled = 0;
1904 if (g4x_compute_wm0(dev, 0,
1905 &sandybridge_display_wm_info, latency,
1906 &sandybridge_cursor_wm_info, latency,
1907 &plane_wm, &cursor_wm)) {
1908 val = I915_READ(WM0_PIPEA_ILK);
1909 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1910 I915_WRITE(WM0_PIPEA_ILK, val |
1911 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1912 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
1913 " plane %d, " "cursor: %d\n",
1914 plane_wm, cursor_wm);
1915 enabled |= 1;
1916 }
1917
1918 if (g4x_compute_wm0(dev, 1,
1919 &sandybridge_display_wm_info, latency,
1920 &sandybridge_cursor_wm_info, latency,
1921 &plane_wm, &cursor_wm)) {
1922 val = I915_READ(WM0_PIPEB_ILK);
1923 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1924 I915_WRITE(WM0_PIPEB_ILK, val |
1925 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1926 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
1927 " plane %d, cursor: %d\n",
1928 plane_wm, cursor_wm);
1929 enabled |= 2;
1930 }
1931
1932 if (g4x_compute_wm0(dev, 2,
1816 &sandybridge_display_wm_info, latency, 1933 &sandybridge_display_wm_info, latency,
1817 &sandybridge_cursor_wm_info, latency, 1934 &sandybridge_cursor_wm_info, latency,
1818 &plane_wm, &cursor_wm)) { 1935 &plane_wm, &cursor_wm)) {
@@ -1875,12 +1992,17 @@ static void sandybridge_update_wm(struct drm_device *dev)
1875 (plane_wm << WM1_LP_SR_SHIFT) | 1992 (plane_wm << WM1_LP_SR_SHIFT) |
1876 cursor_wm); 1993 cursor_wm);
1877 1994
1878 /* WM3 */ 1995 /* WM3, note we have to correct the cursor latency */
1879 if (!ironlake_compute_srwm(dev, 3, enabled, 1996 if (!ironlake_compute_srwm(dev, 3, enabled,
1880 SNB_READ_WM3_LATENCY() * 500, 1997 SNB_READ_WM3_LATENCY() * 500,
1881 &sandybridge_display_srwm_info, 1998 &sandybridge_display_srwm_info,
1882 &sandybridge_cursor_srwm_info, 1999 &sandybridge_cursor_srwm_info,
1883 &fbc_wm, &plane_wm, &cursor_wm)) 2000 &fbc_wm, &plane_wm, &ignore_cursor_wm) ||
2001 !ironlake_compute_srwm(dev, 3, enabled,
2002 2 * SNB_READ_WM3_LATENCY() * 500,
2003 &sandybridge_display_srwm_info,
2004 &sandybridge_cursor_srwm_info,
2005 &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
1884 return; 2006 return;
1885 2007
1886 I915_WRITE(WM3_LP_ILK, 2008 I915_WRITE(WM3_LP_ILK,
@@ -1929,7 +2051,7 @@ sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
1929 int entries, tlb_miss; 2051 int entries, tlb_miss;
1930 2052
1931 crtc = intel_get_crtc_for_plane(dev, plane); 2053 crtc = intel_get_crtc_for_plane(dev, plane);
1932 if (crtc->fb == NULL || !crtc->enabled) { 2054 if (!intel_crtc_active(crtc)) {
1933 *sprite_wm = display->guard_size; 2055 *sprite_wm = display->guard_size;
1934 return false; 2056 return false;
1935 } 2057 }
@@ -3471,6 +3593,15 @@ static void gen6_init_clock_gating(struct drm_device *dev)
3471 I915_READ(ILK_DISPLAY_CHICKEN2) | 3593 I915_READ(ILK_DISPLAY_CHICKEN2) |
3472 ILK_ELPIN_409_SELECT); 3594 ILK_ELPIN_409_SELECT);
3473 3595
3596 /* WaDisableHiZPlanesWhenMSAAEnabled */
3597 I915_WRITE(_3D_CHICKEN,
3598 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
3599
3600 /* WaSetupGtModeTdRowDispatch */
3601 if (IS_SNB_GT1(dev))
3602 I915_WRITE(GEN6_GT_MODE,
3603 _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
3604
3474 I915_WRITE(WM3_LP_ILK, 0); 3605 I915_WRITE(WM3_LP_ILK, 0);
3475 I915_WRITE(WM2_LP_ILK, 0); 3606 I915_WRITE(WM2_LP_ILK, 0);
3476 I915_WRITE(WM1_LP_ILK, 0); 3607 I915_WRITE(WM1_LP_ILK, 0);
@@ -3999,7 +4130,7 @@ void intel_init_pm(struct drm_device *dev)
3999 } else if (IS_IVYBRIDGE(dev)) { 4130 } else if (IS_IVYBRIDGE(dev)) {
4000 /* FIXME: detect B0+ stepping and use auto training */ 4131 /* FIXME: detect B0+ stepping and use auto training */
4001 if (SNB_READ_WM0_LATENCY()) { 4132 if (SNB_READ_WM0_LATENCY()) {
4002 dev_priv->display.update_wm = sandybridge_update_wm; 4133 dev_priv->display.update_wm = ivybridge_update_wm;
4003 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; 4134 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
4004 } else { 4135 } else {
4005 DRM_DEBUG_KMS("Failed to read display plane latency. " 4136 DRM_DEBUG_KMS("Failed to read display plane latency. "
@@ -4119,7 +4250,8 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
4119static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv) 4250static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
4120{ 4251{
4121 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff)); 4252 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
4122 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ 4253 /* something from same cacheline, but !FORCEWAKE_MT */
4254 POSTING_READ(ECOBUS);
4123} 4255}
4124 4256
4125static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) 4257static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
@@ -4136,7 +4268,8 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
4136 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); 4268 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
4137 4269
4138 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); 4270 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
4139 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ 4271 /* something from same cacheline, but !FORCEWAKE_MT */
4272 POSTING_READ(ECOBUS);
4140 4273
4141 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1), 4274 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
4142 FORCEWAKE_ACK_TIMEOUT_MS)) 4275 FORCEWAKE_ACK_TIMEOUT_MS))
@@ -4173,14 +4306,16 @@ void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
4173static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) 4306static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
4174{ 4307{
4175 I915_WRITE_NOTRACE(FORCEWAKE, 0); 4308 I915_WRITE_NOTRACE(FORCEWAKE, 0);
4176 /* gen6_gt_check_fifodbg doubles as the POSTING_READ */ 4309 /* something from same cacheline, but !FORCEWAKE */
4310 POSTING_READ(ECOBUS);
4177 gen6_gt_check_fifodbg(dev_priv); 4311 gen6_gt_check_fifodbg(dev_priv);
4178} 4312}
4179 4313
4180static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) 4314static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
4181{ 4315{
4182 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); 4316 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
4183 /* gen6_gt_check_fifodbg doubles as the POSTING_READ */ 4317 /* something from same cacheline, but !FORCEWAKE_MT */
4318 POSTING_READ(ECOBUS);
4184 gen6_gt_check_fifodbg(dev_priv); 4319 gen6_gt_check_fifodbg(dev_priv);
4185} 4320}
4186 4321
@@ -4220,6 +4355,8 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
4220static void vlv_force_wake_reset(struct drm_i915_private *dev_priv) 4355static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
4221{ 4356{
4222 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff)); 4357 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
4358 /* something from same cacheline, but !FORCEWAKE_VLV */
4359 POSTING_READ(FORCEWAKE_ACK_VLV);
4223} 4360}
4224 4361
4225static void vlv_force_wake_get(struct drm_i915_private *dev_priv) 4362static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
@@ -4240,7 +4377,8 @@ static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
4240static void vlv_force_wake_put(struct drm_i915_private *dev_priv) 4377static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
4241{ 4378{
4242 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); 4379 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
4243 /* The below doubles as a POSTING_READ */ 4380 /* something from same cacheline, but !FORCEWAKE_VLV */
4381 POSTING_READ(FORCEWAKE_ACK_VLV);
4244 gen6_gt_check_fifodbg(dev_priv); 4382 gen6_gt_check_fifodbg(dev_priv);
4245} 4383}
4246 4384
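The clock-gating workarounds and forcewake writes in this file use i915's masked-register convention: the top 16 bits of the written value select which of the low 16 bits take effect, so no read-modify-write cycle is needed. Below is a minimal standalone sketch of that convention, assuming macro bodies equivalent to the usual _MASKED_BIT_ENABLE/_MASKED_BIT_DISABLE definitions; it is illustrative only and not part of this patch.

#include <stdint.h>
#include <stdio.h>

/* Upper 16 bits act as a write-enable mask for the lower 16 bits. */
#define MASKED_BIT_ENABLE(a)  (((a) << 16) | (a))
#define MASKED_BIT_DISABLE(a) ((a) << 16)

/* Apply a masked write to a simulated 16-bit register. */
static uint16_t masked_write(uint16_t reg, uint32_t val)
{
    uint16_t mask = val >> 16;
    uint16_t bits = val & 0xffff;

    return (uint16_t)((reg & ~mask) | (bits & mask));
}

int main(void)
{
    uint16_t reg = 0x0003;

    reg = masked_write(reg, MASKED_BIT_ENABLE(0x0100));  /* set bit 8, leave the rest */
    reg = masked_write(reg, MASKED_BIT_DISABLE(0x0001)); /* clear bit 0 only */
    printf("%#06x\n", (unsigned)reg);                    /* prints 0x0102 */
    return 0;
}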
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 2346b920bd86..ae253e04c391 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -547,9 +547,14 @@ static int init_render_ring(struct intel_ring_buffer *ring)
547 547
548static void render_ring_cleanup(struct intel_ring_buffer *ring) 548static void render_ring_cleanup(struct intel_ring_buffer *ring)
549{ 549{
550 struct drm_device *dev = ring->dev;
551
550 if (!ring->private) 552 if (!ring->private)
551 return; 553 return;
552 554
555 if (HAS_BROKEN_CS_TLB(dev))
556 drm_gem_object_unreference(to_gem_object(ring->private));
557
553 cleanup_pipe_control(ring); 558 cleanup_pipe_control(ring);
554} 559}
555 560
@@ -969,6 +974,8 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
969 return 0; 974 return 0;
970} 975}
971 976
 977/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
978#define I830_BATCH_LIMIT (256*1024)
972static int 979static int
973i830_dispatch_execbuffer(struct intel_ring_buffer *ring, 980i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
974 u32 offset, u32 len, 981 u32 offset, u32 len,
@@ -976,15 +983,47 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
976{ 983{
977 int ret; 984 int ret;
978 985
979 ret = intel_ring_begin(ring, 4); 986 if (flags & I915_DISPATCH_PINNED) {
980 if (ret) 987 ret = intel_ring_begin(ring, 4);
981 return ret; 988 if (ret)
989 return ret;
982 990
983 intel_ring_emit(ring, MI_BATCH_BUFFER); 991 intel_ring_emit(ring, MI_BATCH_BUFFER);
984 intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); 992 intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
985 intel_ring_emit(ring, offset + len - 8); 993 intel_ring_emit(ring, offset + len - 8);
986 intel_ring_emit(ring, 0); 994 intel_ring_emit(ring, MI_NOOP);
987 intel_ring_advance(ring); 995 intel_ring_advance(ring);
996 } else {
997 struct drm_i915_gem_object *obj = ring->private;
998 u32 cs_offset = obj->gtt_offset;
999
1000 if (len > I830_BATCH_LIMIT)
1001 return -ENOSPC;
1002
1003 ret = intel_ring_begin(ring, 9+3);
1004 if (ret)
1005 return ret;
 1006 /* Blit the batch (which now has all relocs applied) to the stable batch
1007 * scratch bo area (so that the CS never stumbles over its tlb
1008 * invalidation bug) ... */
1009 intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
1010 XY_SRC_COPY_BLT_WRITE_ALPHA |
1011 XY_SRC_COPY_BLT_WRITE_RGB);
1012 intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
1013 intel_ring_emit(ring, 0);
1014 intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
1015 intel_ring_emit(ring, cs_offset);
1016 intel_ring_emit(ring, 0);
1017 intel_ring_emit(ring, 4096);
1018 intel_ring_emit(ring, offset);
1019 intel_ring_emit(ring, MI_FLUSH);
1020
1021 /* ... and execute it. */
1022 intel_ring_emit(ring, MI_BATCH_BUFFER);
1023 intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
1024 intel_ring_emit(ring, cs_offset + len - 8);
1025 intel_ring_advance(ring);
1026 }
988 1027
989 return 0; 1028 return 0;
990} 1029}
@@ -1596,6 +1635,27 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
1596 ring->init = init_render_ring; 1635 ring->init = init_render_ring;
1597 ring->cleanup = render_ring_cleanup; 1636 ring->cleanup = render_ring_cleanup;
1598 1637
1638 /* Workaround batchbuffer to combat CS tlb bug. */
1639 if (HAS_BROKEN_CS_TLB(dev)) {
1640 struct drm_i915_gem_object *obj;
1641 int ret;
1642
1643 obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
1644 if (obj == NULL) {
1645 DRM_ERROR("Failed to allocate batch bo\n");
1646 return -ENOMEM;
1647 }
1648
1649 ret = i915_gem_object_pin(obj, 0, true, false);
1650 if (ret != 0) {
1651 drm_gem_object_unreference(&obj->base);
 1652 DRM_ERROR("Failed to pin batch bo\n");
1653 return ret;
1654 }
1655
1656 ring->private = obj;
1657 }
1658
1599 return intel_init_ring_buffer(dev, ring); 1659 return intel_init_ring_buffer(dev, ring);
1600} 1660}
1601 1661
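The i830 dispatch change above is essentially a bounce-buffer workaround: an untrusted batch is first copied (via an XY_SRC_COPY blit) into a permanently pinned scratch buffer capped at I830_BATCH_LIMIT, and the command streamer is then pointed at the copy so its TLB invalidation bug cannot bite. The sketch below reduces just that control flow to userspace C with hypothetical names and a plain memcpy standing in for the blit; it is not i915 code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BATCH_LIMIT (256 * 1024)        /* mirrors I830_BATCH_LIMIT */

static uint8_t scratch[BATCH_LIMIT];    /* stands in for the pinned scratch bo */

/* Return the address execution should start from, or NULL if the batch
 * is too large for the workaround buffer (-ENOSPC in the real code). */
static const uint8_t *dispatch_batch(const uint8_t *batch, size_t len, int pinned)
{
    if (pinned)
        return batch;               /* caller guarantees it is safe in place */

    if (len > BATCH_LIMIT)
        return NULL;

    memcpy(scratch, batch, len);    /* stable copy the CS can fetch from */
    return scratch;
}

int main(void)
{
    uint8_t batch[16] = { 0 };
    const uint8_t *start = dispatch_batch(batch, sizeof(batch), 0);

    printf("executing from %s copy\n", start == scratch ? "scratch" : "original");
    return 0;
}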
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 526182ed0c6d..6af87cd05725 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -94,6 +94,7 @@ struct intel_ring_buffer {
94 u32 offset, u32 length, 94 u32 offset, u32 length,
95 unsigned flags); 95 unsigned flags);
96#define I915_DISPATCH_SECURE 0x1 96#define I915_DISPATCH_SECURE 0x1
97#define I915_DISPATCH_PINNED 0x2
97 void (*cleanup)(struct intel_ring_buffer *ring); 98 void (*cleanup)(struct intel_ring_buffer *ring);
98 int (*sync_to)(struct intel_ring_buffer *ring, 99 int (*sync_to)(struct intel_ring_buffer *ring,
99 struct intel_ring_buffer *to, 100 struct intel_ring_buffer *to,
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 827dcd4edf1c..d7b060e0a231 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -120,11 +120,10 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
120 I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]); 120 I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
121 I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x); 121 I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
122 122
123 linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); 123 linear_offset = y * fb->pitches[0] + x * pixel_size;
124 sprsurf_offset = 124 sprsurf_offset =
125 intel_gen4_compute_offset_xtiled(&x, &y, 125 intel_gen4_compute_offset_xtiled(&x, &y,
126 fb->bits_per_pixel / 8, 126 pixel_size, fb->pitches[0]);
127 fb->pitches[0]);
128 linear_offset -= sprsurf_offset; 127 linear_offset -= sprsurf_offset;
129 128
130 /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET 129 /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
@@ -286,11 +285,10 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
286 I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]); 285 I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
287 I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x); 286 I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
288 287
289 linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); 288 linear_offset = y * fb->pitches[0] + x * pixel_size;
290 dvssurf_offset = 289 dvssurf_offset =
291 intel_gen4_compute_offset_xtiled(&x, &y, 290 intel_gen4_compute_offset_xtiled(&x, &y,
292 fb->bits_per_pixel / 8, 291 pixel_size, fb->pitches[0]);
293 fb->pitches[0]);
294 linear_offset -= dvssurf_offset; 292 linear_offset -= dvssurf_offset;
295 293
296 if (obj->tiling_mode != I915_TILING_NONE) 294 if (obj->tiling_mode != I915_TILING_NONE)
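The sprite hunks above only change how the per-pixel byte size is obtained, but the surrounding arithmetic is worth spelling out: the linear byte offset of pixel (x, y) is y * pitch + x * pixel_size, and the tile-aligned surface base chosen by the offset helper is subtracted back out so the remaining offset is relative to that base. A worked example with invented numbers follows; it is not driver code.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Hypothetical 1920x1080 XRGB8888 framebuffer. */
    uint32_t pitch = 1920 * 4;      /* bytes per scanline */
    uint32_t pixel_size = 4;        /* bytes per pixel */
    uint32_t x = 100, y = 50;

    uint32_t linear_offset = y * pitch + x * pixel_size;

    /* Pretend the tiling helper chose a surface base 4096 bytes in. */
    uint32_t surf_offset = 4096;
    linear_offset -= surf_offset;

    printf("offset from surface base: %u bytes\n", linear_offset);
    return 0;
}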
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 1e910117b0a2..122b571ccc7c 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -60,8 +60,7 @@ static void mgag200_kick_out_firmware_fb(struct pci_dev *pdev)
60} 60}
61 61
62 62
63static int __devinit 63static int mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
64mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
65{ 64{
66 mgag200_kick_out_firmware_fb(pdev); 65 mgag200_kick_out_firmware_fb(pdev);
67 66
diff --git a/drivers/gpu/drm/nouveau/core/core/client.c b/drivers/gpu/drm/nouveau/core/core/client.c
index c617f0480071..8bbb58f94a19 100644
--- a/drivers/gpu/drm/nouveau/core/core/client.c
+++ b/drivers/gpu/drm/nouveau/core/core/client.c
@@ -66,10 +66,8 @@ nouveau_client_create_(const char *name, u64 devname, const char *cfg,
66 66
67 ret = nouveau_handle_create(nv_object(client), ~0, ~0, 67 ret = nouveau_handle_create(nv_object(client), ~0, ~0,
68 nv_object(client), &client->root); 68 nv_object(client), &client->root);
69 if (ret) { 69 if (ret)
70 nouveau_namedb_destroy(&client->base);
71 return ret; 70 return ret;
72 }
73 71
 74 /* prevent init/fini being called, os is in charge of this */ 72 /* prevent init/fini being called, os is in charge of this */
75 atomic_set(&nv_object(client)->usecount, 2); 73 atomic_set(&nv_object(client)->usecount, 2);
diff --git a/drivers/gpu/drm/nouveau/core/core/handle.c b/drivers/gpu/drm/nouveau/core/core/handle.c
index b8d2cbf8a7a7..264c2b338ac3 100644
--- a/drivers/gpu/drm/nouveau/core/core/handle.c
+++ b/drivers/gpu/drm/nouveau/core/core/handle.c
@@ -109,7 +109,7 @@ nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle,
109 while (!nv_iclass(namedb, NV_NAMEDB_CLASS)) 109 while (!nv_iclass(namedb, NV_NAMEDB_CLASS))
110 namedb = namedb->parent; 110 namedb = namedb->parent;
111 111
112 handle = *phandle = kzalloc(sizeof(*handle), GFP_KERNEL); 112 handle = kzalloc(sizeof(*handle), GFP_KERNEL);
113 if (!handle) 113 if (!handle)
114 return -ENOMEM; 114 return -ENOMEM;
115 115
@@ -146,6 +146,9 @@ nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle,
146 } 146 }
147 147
148 hprintk(handle, TRACE, "created\n"); 148 hprintk(handle, TRACE, "created\n");
149
150 *phandle = handle;
151
149 return 0; 152 return 0;
150} 153}
151 154
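The nouveau_handle_create() change above applies a common error-handling pattern: build the object in a local variable and write it through the caller's out-parameter only once every step has succeeded, so no failing path can leave the caller holding a half-initialized pointer. Below is a minimal sketch of that pattern with made-up names; it is not nouveau code.

#include <stdlib.h>

struct widget {
    int id;
};

/* Only writes *out on success; on failure *out is left untouched. */
static int widget_create(int id, struct widget **out)
{
    struct widget *w = calloc(1, sizeof(*w));
    if (!w)
        return -1;

    if (id < 0) {          /* some later setup step fails... */
        free(w);           /* ...so clean up locally... */
        return -1;         /* ...and never touch the caller's pointer */
    }

    w->id = id;
    *out = w;              /* publish only after full initialization */
    return 0;
}

int main(void)
{
    struct widget *w = NULL;

    if (widget_create(42, &w) == 0)
        free(w);
    return 0;
}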
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 0f09af135415..ca1a7d76a95b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -851,20 +851,23 @@ exec_script(struct nv50_disp_priv *priv, int head, int id)
851 for (i = 0; !(ctrl & (1 << head)) && i < 3; i++) 851 for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
852 ctrl = nv_rd32(priv, 0x610b5c + (i * 8)); 852 ctrl = nv_rd32(priv, 0x610b5c + (i * 8));
853 853
854 if (nv_device(priv)->chipset < 0x90 || 854 if (!(ctrl & (1 << head))) {
855 nv_device(priv)->chipset == 0x92 || 855 if (nv_device(priv)->chipset < 0x90 ||
856 nv_device(priv)->chipset == 0xa0) { 856 nv_device(priv)->chipset == 0x92 ||
857 for (i = 0; !(ctrl & (1 << head)) && i < 2; i++) 857 nv_device(priv)->chipset == 0xa0) {
858 ctrl = nv_rd32(priv, 0x610b74 + (i * 8)); 858 for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
859 i += 3; 859 ctrl = nv_rd32(priv, 0x610b74 + (i * 8));
860 } else { 860 i += 4;
861 for (i = 0; !(ctrl & (1 << head)) && i < 4; i++) 861 } else {
862 ctrl = nv_rd32(priv, 0x610798 + (i * 8)); 862 for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
863 i += 3; 863 ctrl = nv_rd32(priv, 0x610798 + (i * 8));
864 i += 4;
865 }
864 } 866 }
865 867
866 if (!(ctrl & (1 << head))) 868 if (!(ctrl & (1 << head)))
867 return false; 869 return false;
870 i--;
868 871
869 data = exec_lookup(priv, head, i, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info); 872 data = exec_lookup(priv, head, i, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info);
870 if (data) { 873 if (data) {
@@ -898,20 +901,23 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
898 for (i = 0; !(ctrl & (1 << head)) && i < 3; i++) 901 for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
899 ctrl = nv_rd32(priv, 0x610b58 + (i * 8)); 902 ctrl = nv_rd32(priv, 0x610b58 + (i * 8));
900 903
901 if (nv_device(priv)->chipset < 0x90 || 904 if (!(ctrl & (1 << head))) {
902 nv_device(priv)->chipset == 0x92 || 905 if (nv_device(priv)->chipset < 0x90 ||
903 nv_device(priv)->chipset == 0xa0) { 906 nv_device(priv)->chipset == 0x92 ||
904 for (i = 0; !(ctrl & (1 << head)) && i < 2; i++) 907 nv_device(priv)->chipset == 0xa0) {
905 ctrl = nv_rd32(priv, 0x610b70 + (i * 8)); 908 for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
906 i += 3; 909 ctrl = nv_rd32(priv, 0x610b70 + (i * 8));
907 } else { 910 i += 4;
908 for (i = 0; !(ctrl & (1 << head)) && i < 4; i++) 911 } else {
909 ctrl = nv_rd32(priv, 0x610794 + (i * 8)); 912 for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
910 i += 3; 913 ctrl = nv_rd32(priv, 0x610794 + (i * 8));
914 i += 4;
915 }
911 } 916 }
912 917
913 if (!(ctrl & (1 << head))) 918 if (!(ctrl & (1 << head)))
914 return 0x0000; 919 return 0x0000;
920 i--;
915 921
916 data = exec_lookup(priv, head, i, ctrl, outp, &ver, &hdr, &cnt, &len, &info1); 922 data = exec_lookup(priv, head, i, ctrl, outp, &ver, &hdr, &cnt, &len, &info1);
917 if (!data) 923 if (!data)
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc
index 7b715fda2763..62ab231cd6b6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc
@@ -57,6 +57,11 @@ chipsets:
57.b16 #nve4_gpc_mmio_tail 57.b16 #nve4_gpc_mmio_tail
58.b16 #nve4_tpc_mmio_head 58.b16 #nve4_tpc_mmio_head
59.b16 #nve4_tpc_mmio_tail 59.b16 #nve4_tpc_mmio_tail
60.b8 0xe6 0 0 0
61.b16 #nve4_gpc_mmio_head
62.b16 #nve4_gpc_mmio_tail
63.b16 #nve4_tpc_mmio_head
64.b16 #nve4_tpc_mmio_tail
60.b8 0 0 0 0 65.b8 0 0 0 0
61 66
62// GPC mmio lists 67// GPC mmio lists
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
index 26c2165bad0f..09ee4702c8b2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
@@ -34,13 +34,16 @@ uint32_t nve0_grgpc_data[] = {
34 0x00000000, 34 0x00000000,
35/* 0x0064: chipsets */ 35/* 0x0064: chipsets */
36 0x000000e4, 36 0x000000e4,
37 0x01040080, 37 0x0110008c,
38 0x014c0104, 38 0x01580110,
39 0x000000e7, 39 0x000000e7,
40 0x01040080, 40 0x0110008c,
41 0x014c0104, 41 0x01580110,
42 0x000000e6,
43 0x0110008c,
44 0x01580110,
42 0x00000000, 45 0x00000000,
43/* 0x0080: nve4_gpc_mmio_head */ 46/* 0x008c: nve4_gpc_mmio_head */
44 0x00000380, 47 0x00000380,
45 0x04000400, 48 0x04000400,
46 0x0800040c, 49 0x0800040c,
@@ -74,8 +77,8 @@ uint32_t nve0_grgpc_data[] = {
74 0x14003100, 77 0x14003100,
75 0x000031d0, 78 0x000031d0,
76 0x040031e0, 79 0x040031e0,
77/* 0x0104: nve4_gpc_mmio_tail */ 80/* 0x0110: nve4_gpc_mmio_tail */
78/* 0x0104: nve4_tpc_mmio_head */ 81/* 0x0110: nve4_tpc_mmio_head */
79 0x00000048, 82 0x00000048,
80 0x00000064, 83 0x00000064,
81 0x00000088, 84 0x00000088,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
index acfc457654bd..0bcfa4d447e5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
@@ -754,6 +754,16 @@ ctx_mmio_exec:
 754// on load it means: "a save preceded this load" 754// on load it means: "a save preceded this load"
755// 755//
756ctx_xfer: 756ctx_xfer:
757 // according to mwk, some kind of wait for idle
758 mov $r15 0xc00
759 shl b32 $r15 6
760 mov $r14 4
761 iowr I[$r15 + 0x200] $r14
762 ctx_xfer_idle:
763 iord $r14 I[$r15 + 0x000]
764 and $r14 0x2000
765 bra ne #ctx_xfer_idle
766
757 bra not $p1 #ctx_xfer_pre 767 bra not $p1 #ctx_xfer_pre
758 bra $p2 #ctx_xfer_pre_load 768 bra $p2 #ctx_xfer_pre_load
759 ctx_xfer_pre: 769 ctx_xfer_pre:
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
index 85a8d556f484..bb03d2a1d57b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
@@ -799,79 +799,80 @@ uint32_t nvc0_grhub_code[] = {
799 0x01fa0613, 799 0x01fa0613,
800 0xf803f806, 800 0xf803f806,
801/* 0x0829: ctx_xfer */ 801/* 0x0829: ctx_xfer */
802 0x0611f400, 802 0x00f7f100,
803/* 0x082f: ctx_xfer_pre */ 803 0x06f4b60c,
804 0xf01102f4, 804 0xd004e7f0,
805 0x21f510f7, 805/* 0x0836: ctx_xfer_idle */
806 0x21f50698, 806 0xfecf80fe,
807 0x11f40631, 807 0x00e4f100,
808/* 0x083d: ctx_xfer_pre_load */ 808 0xf91bf420,
809 0x02f7f01c, 809 0xf40611f4,
810 0x065721f5, 810/* 0x0846: ctx_xfer_pre */
811 0x066621f5, 811 0xf7f01102,
812 0x067821f5, 812 0x9821f510,
813 0x21f5f4bd, 813 0x3121f506,
814 0x21f50657, 814 0x1c11f406,
815/* 0x0856: ctx_xfer_exec */ 815/* 0x0854: ctx_xfer_pre_load */
816 0x019806b8, 816 0xf502f7f0,
817 0x1427f116, 817 0xf5065721,
818 0x0624b604, 818 0xf5066621,
819 0xf10020d0, 819 0xbd067821,
820 0xf0a500e7, 820 0x5721f5f4,
821 0x1fb941e3, 821 0xb821f506,
822 0x8d21f402, 822/* 0x086d: ctx_xfer_exec */
823 0xf004e0b6, 823 0x16019806,
824 0x2cf001fc, 824 0x041427f1,
825 0x0124b602, 825 0xd00624b6,
826 0xf405f2fd, 826 0xe7f10020,
827 0x17f18d21, 827 0xe3f0a500,
828 0x13f04afc, 828 0x021fb941,
829 0x0c27f002, 829 0xb68d21f4,
830 0xf50012d0, 830 0xfcf004e0,
831 0xf1020721, 831 0x022cf001,
832 0xf047fc27, 832 0xfd0124b6,
833 0x20d00223, 833 0x21f405f2,
834 0x012cf000, 834 0xfc17f18d,
835 0xd00320b6, 835 0x0213f04a,
836 0xacf00012, 836 0xd00c27f0,
837 0x06a5f001, 837 0x21f50012,
838 0x9800b7f0, 838 0x27f10207,
839 0x0d98140c, 839 0x23f047fc,
840 0x00e7f015, 840 0x0020d002,
841 0x015c21f5, 841 0xb6012cf0,
842 0xf508a7f0, 842 0x12d00320,
843 0xf5010321, 843 0x01acf000,
844 0xf4020721, 844 0xf006a5f0,
845 0xa7f02201, 845 0x0c9800b7,
846 0xc921f40c, 846 0x150d9814,
847 0x0a1017f1, 847 0xf500e7f0,
848 0xf00614b6, 848 0xf0015c21,
849 0x12d00527, 849 0x21f508a7,
850/* 0x08dd: ctx_xfer_post_save_wait */ 850 0x21f50103,
851 0x0012cf00, 851 0x01f40207,
852 0xf40522fd, 852 0x0ca7f022,
853 0x02f4fa1b, 853 0xf1c921f4,
854/* 0x08e9: ctx_xfer_post */ 854 0xb60a1017,
855 0x02f7f032, 855 0x27f00614,
856 0x065721f5, 856 0x0012d005,
857 0x21f5f4bd, 857/* 0x08f4: ctx_xfer_post_save_wait */
858 0x21f50698, 858 0xfd0012cf,
859 0x21f50226, 859 0x1bf40522,
860 0xf4bd0666, 860 0x3202f4fa,
861 0x065721f5, 861/* 0x0900: ctx_xfer_post */
862 0x981011f4, 862 0xf502f7f0,
863 0x11fd8001, 863 0xbd065721,
864 0x070bf405, 864 0x9821f5f4,
865 0x07df21f5, 865 0x2621f506,
866/* 0x0914: ctx_xfer_no_post_mmio */ 866 0x6621f502,
867 0x064921f5, 867 0xf5f4bd06,
868/* 0x0918: ctx_xfer_done */ 868 0xf4065721,
869 0x000000f8, 869 0x01981011,
870 0x00000000, 870 0x0511fd80,
871 0x00000000, 871 0xf5070bf4,
872 0x00000000, 872/* 0x092b: ctx_xfer_no_post_mmio */
873 0x00000000, 873 0xf507df21,
874 0x00000000, 874/* 0x092f: ctx_xfer_done */
875 0xf8064921,
875 0x00000000, 876 0x00000000,
876 0x00000000, 877 0x00000000,
877 0x00000000, 878 0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc
index 138eeaa28665..7fe9d7cf486b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc
@@ -44,6 +44,9 @@ chipsets:
44.b8 0xe7 0 0 0 44.b8 0xe7 0 0 0
45.b16 #nve4_hub_mmio_head 45.b16 #nve4_hub_mmio_head
46.b16 #nve4_hub_mmio_tail 46.b16 #nve4_hub_mmio_tail
47.b8 0xe6 0 0 0
48.b16 #nve4_hub_mmio_head
49.b16 #nve4_hub_mmio_tail
47.b8 0 0 0 0 50.b8 0 0 0 0
48 51
49nve4_hub_mmio_head: 52nve4_hub_mmio_head:
@@ -680,6 +683,16 @@ ctx_mmio_exec:
 680// on load it means: "a save preceded this load" 683// on load it means: "a save preceded this load"
681// 684//
682ctx_xfer: 685ctx_xfer:
686 // according to mwk, some kind of wait for idle
687 mov $r15 0xc00
688 shl b32 $r15 6
689 mov $r14 4
690 iowr I[$r15 + 0x200] $r14
691 ctx_xfer_idle:
692 iord $r14 I[$r15 + 0x000]
693 and $r14 0x2000
694 bra ne #ctx_xfer_idle
695
683 bra not $p1 #ctx_xfer_pre 696 bra not $p1 #ctx_xfer_pre
684 bra $p2 #ctx_xfer_pre_load 697 bra $p2 #ctx_xfer_pre_load
685 ctx_xfer_pre: 698 ctx_xfer_pre:
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
index decf0c60ca3b..e3421af68ab9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
@@ -30,11 +30,13 @@ uint32_t nve0_grhub_data[] = {
30 0x00000000, 30 0x00000000,
31/* 0x005c: chipsets */ 31/* 0x005c: chipsets */
32 0x000000e4, 32 0x000000e4,
33 0x013c0070, 33 0x01440078,
34 0x000000e7, 34 0x000000e7,
35 0x013c0070, 35 0x01440078,
36 0x000000e6,
37 0x01440078,
36 0x00000000, 38 0x00000000,
37/* 0x0070: nve4_hub_mmio_head */ 39/* 0x0078: nve4_hub_mmio_head */
38 0x0417e91c, 40 0x0417e91c,
39 0x04400204, 41 0x04400204,
40 0x18404010, 42 0x18404010,
@@ -86,9 +88,7 @@ uint32_t nve0_grhub_data[] = {
86 0x00408840, 88 0x00408840,
87 0x08408900, 89 0x08408900,
88 0x00408980, 90 0x00408980,
89/* 0x013c: nve4_hub_mmio_tail */ 91/* 0x0144: nve4_hub_mmio_tail */
90 0x00000000,
91 0x00000000,
92 0x00000000, 92 0x00000000,
93 0x00000000, 93 0x00000000,
94 0x00000000, 94 0x00000000,
@@ -781,77 +781,78 @@ uint32_t nve0_grhub_code[] = {
781 0x0613f002, 781 0x0613f002,
782 0xf80601fa, 782 0xf80601fa,
783/* 0x07fb: ctx_xfer */ 783/* 0x07fb: ctx_xfer */
784 0xf400f803, 784 0xf100f803,
785 0x02f40611, 785 0xb60c00f7,
786/* 0x0801: ctx_xfer_pre */ 786 0xe7f006f4,
787 0x10f7f00d, 787 0x80fed004,
788 0x067221f5, 788/* 0x0808: ctx_xfer_idle */
789/* 0x080b: ctx_xfer_pre_load */ 789 0xf100fecf,
790 0xf01c11f4, 790 0xf42000e4,
791 0x21f502f7, 791 0x11f4f91b,
792 0x21f50631, 792 0x0d02f406,
793 0x21f50640, 793/* 0x0818: ctx_xfer_pre */
794 0xf4bd0652, 794 0xf510f7f0,
795 0x063121f5, 795 0xf4067221,
796 0x069221f5, 796/* 0x0822: ctx_xfer_pre_load */
797/* 0x0824: ctx_xfer_exec */ 797 0xf7f01c11,
798 0xf1160198, 798 0x3121f502,
799 0xb6041427, 799 0x4021f506,
800 0x20d00624, 800 0x5221f506,
801 0x00e7f100, 801 0xf5f4bd06,
802 0x41e3f0a5, 802 0xf5063121,
803 0xf4021fb9, 803/* 0x083b: ctx_xfer_exec */
804 0xe0b68d21, 804 0x98069221,
805 0x01fcf004, 805 0x27f11601,
806 0xb6022cf0, 806 0x24b60414,
807 0xf2fd0124, 807 0x0020d006,
808 0x8d21f405, 808 0xa500e7f1,
809 0x4afc17f1, 809 0xb941e3f0,
810 0xf00213f0, 810 0x21f4021f,
811 0x12d00c27, 811 0x04e0b68d,
812 0x0721f500, 812 0xf001fcf0,
813 0xfc27f102, 813 0x24b6022c,
814 0x0223f047, 814 0x05f2fd01,
815 0xf00020d0, 815 0xf18d21f4,
816 0x20b6012c, 816 0xf04afc17,
817 0x0012d003, 817 0x27f00213,
818 0xf001acf0, 818 0x0012d00c,
819 0xb7f006a5, 819 0x020721f5,
820 0x140c9800, 820 0x47fc27f1,
821 0xf0150d98, 821 0xd00223f0,
822 0x21f500e7, 822 0x2cf00020,
823 0xa7f0015c, 823 0x0320b601,
824 0x0321f508, 824 0xf00012d0,
825 0x0721f501, 825 0xa5f001ac,
826 0x2201f402, 826 0x00b7f006,
827 0xf40ca7f0, 827 0x98140c98,
828 0x17f1c921, 828 0xe7f0150d,
829 0x14b60a10, 829 0x5c21f500,
830 0x0527f006, 830 0x08a7f001,
831/* 0x08ab: ctx_xfer_post_save_wait */ 831 0x010321f5,
832 0xcf0012d0, 832 0x020721f5,
833 0x22fd0012, 833 0xf02201f4,
834 0xfa1bf405, 834 0x21f40ca7,
835/* 0x08b7: ctx_xfer_post */ 835 0x1017f1c9,
836 0xf02e02f4, 836 0x0614b60a,
837 0x21f502f7, 837 0xd00527f0,
838 0xf4bd0631, 838/* 0x08c2: ctx_xfer_post_save_wait */
839 0x067221f5, 839 0x12cf0012,
840 0x022621f5, 840 0x0522fd00,
841 0x064021f5, 841 0xf4fa1bf4,
842 0x21f5f4bd, 842/* 0x08ce: ctx_xfer_post */
843 0x11f40631, 843 0xf7f02e02,
844 0x80019810, 844 0x3121f502,
845 0xf40511fd, 845 0xf5f4bd06,
846 0x21f5070b, 846 0xf5067221,
847/* 0x08e2: ctx_xfer_no_post_mmio */ 847 0xf5022621,
848/* 0x08e2: ctx_xfer_done */ 848 0xbd064021,
849 0x00f807b1, 849 0x3121f5f4,
850 0x00000000, 850 0x1011f406,
851 0x00000000, 851 0xfd800198,
852 0x00000000, 852 0x0bf40511,
853 0x00000000, 853 0xb121f507,
854 0x00000000, 854/* 0x08f9: ctx_xfer_no_post_mmio */
855 0x00000000, 855/* 0x08f9: ctx_xfer_done */
856 0x0000f807,
856 0x00000000, 857 0x00000000,
857}; 858};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index 47a02081d708..45aff5f5085a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -516,18 +516,9 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
516{ 516{
517 struct nouveau_device *device = nv_device(parent); 517 struct nouveau_device *device = nv_device(parent);
518 struct nvc0_graph_priv *priv; 518 struct nvc0_graph_priv *priv;
519 bool enable = true;
520 int ret, i; 519 int ret, i;
521 520
522 switch (device->chipset) { 521 ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
523 case 0xd9: /* known broken without binary driver firmware */
524 enable = false;
525 break;
526 default:
527 break;
528 }
529
530 ret = nouveau_graph_create(parent, engine, oclass, enable, &priv);
531 *pobject = nv_object(priv); 522 *pobject = nv_object(priv);
532 if (ret) 523 if (ret)
533 return ret; 524 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
index 18d2210e12eb..a1e78de46456 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
@@ -121,6 +121,7 @@ nvc0_graph_class(void *obj)
121 return 0x9297; 121 return 0x9297;
122 case 0xe4: 122 case 0xe4:
123 case 0xe7: 123 case 0xe7:
124 case 0xe6:
124 return 0xa097; 125 return 0xa097;
125 default: 126 default:
126 return 0; 127 return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
index 539d4c72f192..9f82e9702b46 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
@@ -203,7 +203,7 @@ nve0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
203 struct nvc0_graph_priv *priv; 203 struct nvc0_graph_priv *priv;
204 int ret, i; 204 int ret, i;
205 205
206 ret = nouveau_graph_create(parent, engine, oclass, false, &priv); 206 ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
207 *pobject = nv_object(priv); 207 *pobject = nv_object(priv);
208 if (ret) 208 if (ret)
209 return ret; 209 return ret;
@@ -252,6 +252,7 @@ nve0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
252 priv->magic_not_rop_nr = 1; 252 priv->magic_not_rop_nr = 1;
253 break; 253 break;
254 case 0xe7: 254 case 0xe7:
255 case 0xe6:
255 priv->magic_not_rop_nr = 1; 256 priv->magic_not_rop_nr = 1;
256 break; 257 break;
257 default: 258 default:
diff --git a/drivers/gpu/drm/nouveau/core/include/core/client.h b/drivers/gpu/drm/nouveau/core/include/core/client.h
index 0193532ceac9..63acc0346ff2 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/client.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/client.h
@@ -36,6 +36,9 @@ nouveau_client(void *obj)
36 36
37int nouveau_client_create_(const char *name, u64 device, const char *cfg, 37int nouveau_client_create_(const char *name, u64 device, const char *cfg,
38 const char *dbg, int, void **); 38 const char *dbg, int, void **);
39#define nouveau_client_destroy(p) \
40 nouveau_namedb_destroy(&(p)->base)
41
39int nouveau_client_init(struct nouveau_client *); 42int nouveau_client_init(struct nouveau_client *);
40int nouveau_client_fini(struct nouveau_client *, bool suspend); 43int nouveau_client_fini(struct nouveau_client *, bool suspend);
41 44
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios.h
index d145b25e6be4..5bd1ca8cd20d 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios.h
@@ -17,6 +17,7 @@ struct nouveau_bios {
17 u8 chip; 17 u8 chip;
18 u8 minor; 18 u8 minor;
19 u8 micro; 19 u8 micro;
20 u8 patch;
20 } version; 21 } version;
21}; 22};
22 23
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
index 2bf178082a36..e6563b5cb08e 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
@@ -25,9 +25,11 @@ struct dcb_gpio_func {
25 u8 param; 25 u8 param;
26}; 26};
27 27
28u16 dcb_gpio_table(struct nouveau_bios *); 28u16 dcb_gpio_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
29u16 dcb_gpio_entry(struct nouveau_bios *, int idx, int ent, u8 *ver); 29u16 dcb_gpio_entry(struct nouveau_bios *, int idx, int ent, u8 *ver, u8 *len);
30int dcb_gpio_parse(struct nouveau_bios *, int idx, u8 func, u8 line, 30u16 dcb_gpio_parse(struct nouveau_bios *, int idx, int ent, u8 *ver, u8 *len,
31 struct dcb_gpio_func *); 31 struct dcb_gpio_func *);
32u16 dcb_gpio_match(struct nouveau_bios *, int idx, u8 func, u8 line,
33 u8 *ver, u8 *len, struct dcb_gpio_func *);
32 34
33#endif 35#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h
index e69a8bdc6e97..ca2f6bf37f46 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h
@@ -13,6 +13,7 @@ struct nvbios_init {
13 u32 nested; 13 u32 nested;
14 u16 repeat; 14 u16 repeat;
15 u16 repend; 15 u16 repend;
16 u32 ramcfg;
16}; 17};
17 18
18int nvbios_exec(struct nvbios_init *); 19int nvbios_exec(struct nvbios_init *);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
index c345097592f2..b2f3d4d0aa49 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
@@ -38,6 +38,8 @@ enum nvbios_pll_type {
38 PLL_UNK42 = 0x42, 38 PLL_UNK42 = 0x42,
39 PLL_VPLL0 = 0x80, 39 PLL_VPLL0 = 0x80,
40 PLL_VPLL1 = 0x81, 40 PLL_VPLL1 = 0x81,
41 PLL_VPLL2 = 0x82,
42 PLL_VPLL3 = 0x83,
41 PLL_MAX = 0xff 43 PLL_MAX = 0xff
42}; 44};
43 45
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
index 9ea2b12cc15d..b75e8f18e52c 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
@@ -11,7 +11,7 @@ struct nouveau_gpio {
11 struct nouveau_subdev base; 11 struct nouveau_subdev base;
12 12
13 /* hardware interfaces */ 13 /* hardware interfaces */
14 void (*reset)(struct nouveau_gpio *); 14 void (*reset)(struct nouveau_gpio *, u8 func);
15 int (*drive)(struct nouveau_gpio *, int line, int dir, int out); 15 int (*drive)(struct nouveau_gpio *, int line, int dir, int out);
16 int (*sense)(struct nouveau_gpio *, int line); 16 int (*sense)(struct nouveau_gpio *, int line);
17 void (*irq_enable)(struct nouveau_gpio *, int line, bool); 17 void (*irq_enable)(struct nouveau_gpio *, int line, bool);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index dd111947eb86..f621f69fa1a2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -447,6 +447,7 @@ nouveau_bios_ctor(struct nouveau_object *parent,
447 bios->version.chip = nv_ro08(bios, bit_i.offset + 2); 447 bios->version.chip = nv_ro08(bios, bit_i.offset + 2);
448 bios->version.minor = nv_ro08(bios, bit_i.offset + 1); 448 bios->version.minor = nv_ro08(bios, bit_i.offset + 1);
449 bios->version.micro = nv_ro08(bios, bit_i.offset + 0); 449 bios->version.micro = nv_ro08(bios, bit_i.offset + 0);
450 bios->version.patch = nv_ro08(bios, bit_i.offset + 4);
450 } else 451 } else
451 if (bmp_version(bios)) { 452 if (bmp_version(bios)) {
452 bios->version.major = nv_ro08(bios, bios->bmp_offset + 13); 453 bios->version.major = nv_ro08(bios, bios->bmp_offset + 13);
@@ -455,9 +456,9 @@ nouveau_bios_ctor(struct nouveau_object *parent,
455 bios->version.micro = nv_ro08(bios, bios->bmp_offset + 10); 456 bios->version.micro = nv_ro08(bios, bios->bmp_offset + 10);
456 } 457 }
457 458
458 nv_info(bios, "version %02x.%02x.%02x.%02x\n", 459 nv_info(bios, "version %02x.%02x.%02x.%02x.%02x\n",
459 bios->version.major, bios->version.chip, 460 bios->version.major, bios->version.chip,
460 bios->version.minor, bios->version.micro); 461 bios->version.minor, bios->version.micro, bios->version.patch);
461 462
462 return 0; 463 return 0;
463} 464}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c b/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
index c90d4aa3ae4f..c84e93fa6d95 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
@@ -27,84 +27,105 @@
27#include <subdev/bios/gpio.h> 27#include <subdev/bios/gpio.h>
28 28
29u16 29u16
30dcb_gpio_table(struct nouveau_bios *bios) 30dcb_gpio_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
31{ 31{
32 u8 ver, hdr, cnt, len; 32 u16 data = 0x0000;
33 u16 dcb = dcb_table(bios, &ver, &hdr, &cnt, &len); 33 u16 dcb = dcb_table(bios, ver, hdr, cnt, len);
34 if (dcb) { 34 if (dcb) {
35 if (ver >= 0x30 && hdr >= 0x0c) 35 if (*ver >= 0x30 && *hdr >= 0x0c)
36 return nv_ro16(bios, dcb + 0x0a); 36 data = nv_ro16(bios, dcb + 0x0a);
37 if (ver >= 0x22 && nv_ro08(bios, dcb - 1) >= 0x13) 37 else
38 return nv_ro16(bios, dcb - 0x0f); 38 if (*ver >= 0x22 && nv_ro08(bios, dcb - 1) >= 0x13)
39 data = nv_ro16(bios, dcb - 0x0f);
40
41 if (data) {
42 *ver = nv_ro08(bios, data + 0x00);
43 if (*ver < 0x30) {
44 *hdr = 3;
45 *cnt = nv_ro08(bios, data + 0x02);
46 *len = nv_ro08(bios, data + 0x01);
47 } else
48 if (*ver <= 0x41) {
49 *hdr = nv_ro08(bios, data + 0x01);
50 *cnt = nv_ro08(bios, data + 0x02);
51 *len = nv_ro08(bios, data + 0x03);
52 } else {
53 data = 0x0000;
54 }
55 }
39 } 56 }
40 return 0x0000; 57 return data;
41} 58}
42 59
43u16 60u16
44dcb_gpio_entry(struct nouveau_bios *bios, int idx, int ent, u8 *ver) 61dcb_gpio_entry(struct nouveau_bios *bios, int idx, int ent, u8 *ver, u8 *len)
45{ 62{
46 u16 gpio = dcb_gpio_table(bios); 63 u8 hdr, cnt;
47 if (gpio) { 64 u16 gpio = !idx ? dcb_gpio_table(bios, ver, &hdr, &cnt, len) : 0x0000;
48 *ver = nv_ro08(bios, gpio); 65 if (gpio && ent < cnt)
49 if (*ver < 0x30 && ent < nv_ro08(bios, gpio + 2)) 66 return gpio + hdr + (ent * *len);
50 return gpio + 3 + (ent * nv_ro08(bios, gpio + 1));
51 else if (ent < nv_ro08(bios, gpio + 2))
52 return gpio + nv_ro08(bios, gpio + 1) +
53 (ent * nv_ro08(bios, gpio + 3));
54 }
55 return 0x0000; 67 return 0x0000;
56} 68}
57 69
58int 70u16
59dcb_gpio_parse(struct nouveau_bios *bios, int idx, u8 func, u8 line, 71dcb_gpio_parse(struct nouveau_bios *bios, int idx, int ent, u8 *ver, u8 *len,
60 struct dcb_gpio_func *gpio) 72 struct dcb_gpio_func *gpio)
61{ 73{
62 u8 ver, hdr, cnt, len; 74 u16 data = dcb_gpio_entry(bios, idx, ent, ver, len);
63 u16 entry; 75 if (data) {
64 int i = -1; 76 if (*ver < 0x40) {
65 77 u16 info = nv_ro16(bios, data);
66 while ((entry = dcb_gpio_entry(bios, idx, ++i, &ver))) {
67 if (ver < 0x40) {
68 u16 data = nv_ro16(bios, entry);
69 *gpio = (struct dcb_gpio_func) { 78 *gpio = (struct dcb_gpio_func) {
70 .line = (data & 0x001f) >> 0, 79 .line = (info & 0x001f) >> 0,
71 .func = (data & 0x07e0) >> 5, 80 .func = (info & 0x07e0) >> 5,
72 .log[0] = (data & 0x1800) >> 11, 81 .log[0] = (info & 0x1800) >> 11,
73 .log[1] = (data & 0x6000) >> 13, 82 .log[1] = (info & 0x6000) >> 13,
74 .param = !!(data & 0x8000), 83 .param = !!(info & 0x8000),
75 }; 84 };
76 } else 85 } else
77 if (ver < 0x41) { 86 if (*ver < 0x41) {
78 u32 data = nv_ro32(bios, entry); 87 u32 info = nv_ro32(bios, data);
79 *gpio = (struct dcb_gpio_func) { 88 *gpio = (struct dcb_gpio_func) {
80 .line = (data & 0x0000001f) >> 0, 89 .line = (info & 0x0000001f) >> 0,
81 .func = (data & 0x0000ff00) >> 8, 90 .func = (info & 0x0000ff00) >> 8,
82 .log[0] = (data & 0x18000000) >> 27, 91 .log[0] = (info & 0x18000000) >> 27,
83 .log[1] = (data & 0x60000000) >> 29, 92 .log[1] = (info & 0x60000000) >> 29,
84 .param = !!(data & 0x80000000), 93 .param = !!(info & 0x80000000),
85 }; 94 };
86 } else { 95 } else {
87 u32 data = nv_ro32(bios, entry + 0); 96 u32 info = nv_ro32(bios, data + 0);
88 u8 data1 = nv_ro32(bios, entry + 4); 97 u8 info1 = nv_ro32(bios, data + 4);
89 *gpio = (struct dcb_gpio_func) { 98 *gpio = (struct dcb_gpio_func) {
90 .line = (data & 0x0000003f) >> 0, 99 .line = (info & 0x0000003f) >> 0,
91 .func = (data & 0x0000ff00) >> 8, 100 .func = (info & 0x0000ff00) >> 8,
92 .log[0] = (data1 & 0x30) >> 4, 101 .log[0] = (info1 & 0x30) >> 4,
93 .log[1] = (data1 & 0xc0) >> 6, 102 .log[1] = (info1 & 0xc0) >> 6,
94 .param = !!(data & 0x80000000), 103 .param = !!(info & 0x80000000),
95 }; 104 };
96 } 105 }
106 }
107
108 return data;
109}
97 110
111u16
112dcb_gpio_match(struct nouveau_bios *bios, int idx, u8 func, u8 line,
113 u8 *ver, u8 *len, struct dcb_gpio_func *gpio)
114{
115 u8 hdr, cnt, i = 0;
116 u16 data;
117
118 while ((data = dcb_gpio_parse(bios, idx, i++, ver, len, gpio))) {
98 if ((line == 0xff || line == gpio->line) && 119 if ((line == 0xff || line == gpio->line) &&
99 (func == 0xff || func == gpio->func)) 120 (func == 0xff || func == gpio->func))
100 return 0; 121 return data;
101 } 122 }
102 123
103 /* DCB 2.2, fixed TVDAC GPIO data */ 124 /* DCB 2.2, fixed TVDAC GPIO data */
104 if ((entry = dcb_table(bios, &ver, &hdr, &cnt, &len))) { 125 if ((data = dcb_table(bios, ver, &hdr, &cnt, len))) {
105 if (ver >= 0x22 && ver < 0x30 && func == DCB_GPIO_TVDAC0) { 126 if (*ver >= 0x22 && *ver < 0x30 && func == DCB_GPIO_TVDAC0) {
106 u8 conf = nv_ro08(bios, entry - 5); 127 u8 conf = nv_ro08(bios, data - 5);
107 u8 addr = nv_ro08(bios, entry - 4); 128 u8 addr = nv_ro08(bios, data - 4);
108 if (conf & 0x01) { 129 if (conf & 0x01) {
109 *gpio = (struct dcb_gpio_func) { 130 *gpio = (struct dcb_gpio_func) {
110 .func = DCB_GPIO_TVDAC0, 131 .func = DCB_GPIO_TVDAC0,
@@ -112,10 +133,11 @@ dcb_gpio_parse(struct nouveau_bios *bios, int idx, u8 func, u8 line,
112 .log[0] = !!(conf & 0x02), 133 .log[0] = !!(conf & 0x02),
113 .log[1] = !(conf & 0x02), 134 .log[1] = !(conf & 0x02),
114 }; 135 };
115 return 0; 136 *ver = 0x00;
137 return data;
116 } 138 }
117 } 139 }
118 } 140 }
119 141
120 return -EINVAL; 142 return 0x0000;
121} 143}
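The reworked parser above pulls fixed bitfields out of each DCB GPIO entry; for the pre-4.0 16-bit layout shown in dcb_gpio_parse(), bits 4:0 are the line, 10:5 the function, 12:11 and 14:13 the two logic states, and bit 15 a parameter flag. The standalone sketch below decodes one made-up entry value using exactly those masks; the field layout is taken from the code above, but the sample value and names are invented.

#include <stdint.h>
#include <stdio.h>

struct gpio_func {
    uint8_t line, func, log0, log1, param;
};

static struct gpio_func decode_gpio_v30(uint16_t info)
{
    struct gpio_func g = {
        .line  = (info & 0x001f) >> 0,
        .func  = (info & 0x07e0) >> 5,
        .log0  = (info & 0x1800) >> 11,
        .log1  = (info & 0x6000) >> 13,
        .param = !!(info & 0x8000),
    };
    return g;
}

int main(void)
{
    struct gpio_func g = decode_gpio_v30(0x8c29); /* made-up DCB 3.0 entry */

    printf("line %u func 0x%02x log %u/%u param %u\n",
           g.line, g.func, g.log0, g.log1, g.param);
    return 0;
}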
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index ae168bbb86d8..690ed438b2ad 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -2,11 +2,12 @@
2#include <core/device.h> 2#include <core/device.h>
3 3
4#include <subdev/bios.h> 4#include <subdev/bios.h>
5#include <subdev/bios/conn.h>
6#include <subdev/bios/bmp.h> 5#include <subdev/bios/bmp.h>
7#include <subdev/bios/bit.h> 6#include <subdev/bios/bit.h>
7#include <subdev/bios/conn.h>
8#include <subdev/bios/dcb.h> 8#include <subdev/bios/dcb.h>
9#include <subdev/bios/dp.h> 9#include <subdev/bios/dp.h>
10#include <subdev/bios/gpio.h>
10#include <subdev/bios/init.h> 11#include <subdev/bios/init.h>
11#include <subdev/devinit.h> 12#include <subdev/devinit.h>
12#include <subdev/clock.h> 13#include <subdev/clock.h>
@@ -410,9 +411,25 @@ init_ram_restrict_group_count(struct nvbios_init *init)
410} 411}
411 412
412static u8 413static u8
414init_ram_restrict_strap(struct nvbios_init *init)
415{
416 /* This appears to be the behaviour of the VBIOS parser, and *is*
417 * important to cache the NV_PEXTDEV_BOOT0 on later chipsets to
418 * avoid fucking up the memory controller (somehow) by reading it
419 * on every INIT_RAM_RESTRICT_ZM_GROUP opcode.
420 *
421 * Preserving the non-caching behaviour on earlier chipsets just
422 * in case *not* re-reading the strap causes similar breakage.
423 */
424 if (!init->ramcfg || init->bios->version.major < 0x70)
425 init->ramcfg = init_rd32(init, 0x101000);
 426 return (init->ramcfg & 0x0000003c) >> 2;
427}
428
429static u8
413init_ram_restrict(struct nvbios_init *init) 430init_ram_restrict(struct nvbios_init *init)
414{ 431{
415 u32 strap = (init_rd32(init, 0x101000) & 0x0000003c) >> 2; 432 u8 strap = init_ram_restrict_strap(init);
416 u16 table = init_ram_restrict_table(init); 433 u16 table = init_ram_restrict_table(init);
417 if (table) 434 if (table)
418 return nv_ro08(init->bios, table + strap); 435 return nv_ro08(init->bios, table + strap);
@@ -1517,7 +1534,6 @@ init_io(struct nvbios_init *init)
1517 mdelay(10); 1534 mdelay(10);
1518 init_wr32(init, 0x614100, 0x10000018); 1535 init_wr32(init, 0x614100, 0x10000018);
1519 init_wr32(init, 0x614900, 0x10000018); 1536 init_wr32(init, 0x614900, 0x10000018);
1520 return;
1521 } 1537 }
1522 1538
1523 value = init_rdport(init, port) & mask; 1539 value = init_rdport(init, port) & mask;
@@ -1781,7 +1797,7 @@ init_gpio(struct nvbios_init *init)
1781 init->offset += 1; 1797 init->offset += 1;
1782 1798
1783 if (init_exec(init) && gpio && gpio->reset) 1799 if (init_exec(init) && gpio && gpio->reset)
1784 gpio->reset(gpio); 1800 gpio->reset(gpio, DCB_GPIO_UNUSED);
1785} 1801}
1786 1802
1787/** 1803/**
@@ -1995,6 +2011,47 @@ init_i2c_long_if(struct nvbios_init *init)
1995 init_exec_set(init, false); 2011 init_exec_set(init, false);
1996} 2012}
1997 2013
2014/**
2015 * INIT_GPIO_NE - opcode 0xa9
2016 *
2017 */
2018static void
2019init_gpio_ne(struct nvbios_init *init)
2020{
2021 struct nouveau_bios *bios = init->bios;
2022 struct nouveau_gpio *gpio = nouveau_gpio(bios);
2023 struct dcb_gpio_func func;
2024 u8 count = nv_ro08(bios, init->offset + 1);
2025 u8 idx = 0, ver, len;
2026 u16 data, i;
2027
2028 trace("GPIO_NE\t");
2029 init->offset += 2;
2030
2031 for (i = init->offset; i < init->offset + count; i++)
2032 cont("0x%02x ", nv_ro08(bios, i));
2033 cont("\n");
2034
2035 while ((data = dcb_gpio_parse(bios, 0, idx++, &ver, &len, &func))) {
2036 if (func.func != DCB_GPIO_UNUSED) {
2037 for (i = init->offset; i < init->offset + count; i++) {
2038 if (func.func == nv_ro08(bios, i))
2039 break;
2040 }
2041
2042 trace("\tFUNC[0x%02x]", func.func);
2043 if (i == (init->offset + count)) {
2044 cont(" *");
2045 if (init_exec(init) && gpio && gpio->reset)
2046 gpio->reset(gpio, func.func);
2047 }
2048 cont("\n");
2049 }
2050 }
2051
2052 init->offset += count;
2053}
2054
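
For illustration only (not part of the patch): the 0xa9 opcode payload is a count byte followed by that many GPIO function IDs which must be left alone; every other known function gets reset. A small sketch of that exclusion-list walk, with the GPIO table and reset hook stubbed:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GPIO_UNUSED 0xff

static void reset_gpio(uint8_t func)
{
    printf("reset GPIO func 0x%02x\n", func);
}

static void gpio_ne(const uint8_t *payload, const uint8_t *funcs, int nfuncs)
{
    uint8_t count = payload[0];          /* number of exclusions */
    const uint8_t *excl = payload + 1;   /* excluded function IDs */

    for (int i = 0; i < nfuncs; i++) {
        if (funcs[i] == GPIO_UNUSED)
            continue;
        if (memchr(excl, funcs[i], count))
            continue;                    /* listed: leave this one alone */
        reset_gpio(funcs[i]);
    }
}

int main(void)
{
    const uint8_t payload[] = { 2, 0x01, 0x0c };          /* keep 0x01, 0x0c */
    const uint8_t funcs[]   = { 0x01, 0x0c, 0x21, GPIO_UNUSED, 0x42 };

    gpio_ne(payload, funcs, sizeof(funcs));
    return 0;
}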
1998static struct nvbios_init_opcode { 2055static struct nvbios_init_opcode {
1999 void (*exec)(struct nvbios_init *); 2056 void (*exec)(struct nvbios_init *);
2000} init_opcode[] = { 2057} init_opcode[] = {
@@ -2059,6 +2116,7 @@ static struct nvbios_init_opcode {
2059 [0x98] = { init_auxch }, 2116 [0x98] = { init_auxch },
2060 [0x99] = { init_zm_auxch }, 2117 [0x99] = { init_zm_auxch },
2061 [0x9a] = { init_i2c_long_if }, 2118 [0x9a] = { init_i2c_long_if },
2119 [0xa9] = { init_gpio_ne },
2062}; 2120};
2063 2121
2064#define init_opcode_nr (sizeof(init_opcode) / sizeof(init_opcode[0])) 2122#define init_opcode_nr (sizeof(init_opcode) / sizeof(init_opcode[0]))
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
index f6962c9b6c36..7c9626258a46 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
@@ -52,6 +52,8 @@ nvc0_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq)
52 switch (info.type) { 52 switch (info.type) {
53 case PLL_VPLL0: 53 case PLL_VPLL0:
54 case PLL_VPLL1: 54 case PLL_VPLL1:
55 case PLL_VPLL2:
56 case PLL_VPLL3:
55 nv_mask(priv, info.reg + 0x0c, 0x00000000, 0x00000100); 57 nv_mask(priv, info.reg + 0x0c, 0x00000000, 0x00000100);
56 nv_wr32(priv, info.reg + 0x04, (P << 16) | (N << 8) | M); 58 nv_wr32(priv, info.reg + 0x04, (P << 16) | (N << 8) | M);
57 nv_wr32(priv, info.reg + 0x10, fN << 16); 59 nv_wr32(priv, info.reg + 0x10, fN << 16);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
index 9b7881e76634..03a652876e73 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
@@ -109,6 +109,34 @@ nve0_identify(struct nouveau_device *device)
109 device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass; 109 device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
110 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; 110 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
111 break; 111 break;
112 case 0xe6:
113 device->cname = "GK106";
114 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
115 device->oclass[NVDEV_SUBDEV_GPIO ] = &nvd0_gpio_oclass;
116 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
117 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
118 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
119 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
120 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
121 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
122 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
123 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
124 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
125 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
126 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
127 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
128 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
129 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
130 device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass;
131 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
132 device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass;
133 device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass;
134 device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
135 device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
136 device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
137 device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
138 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
139 break;
112 default: 140 default:
113 nv_fatal(device, "unknown Kepler chipset\n"); 141 nv_fatal(device, "unknown Kepler chipset\n");
114 return -EINVAL; 142 return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
index 306bdf121452..7606ed15b6fa 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
@@ -145,14 +145,14 @@ nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
145 mem->memtype = type; 145 mem->memtype = type;
146 mem->size = size; 146 mem->size = size;
147 147
148 mutex_lock(&mm->mutex);
148 mutex_lock(&pfb->base.mutex);
149 do { 149 do {
150 if (back) 150 if (back)
151 ret = nouveau_mm_tail(mm, 1, size, ncmin, align, &r); 151 ret = nouveau_mm_tail(mm, 1, size, ncmin, align, &r);
152 else 152 else
153 ret = nouveau_mm_head(mm, 1, size, ncmin, align, &r); 153 ret = nouveau_mm_head(mm, 1, size, ncmin, align, &r);
154 if (ret) { 154 if (ret) {
155 mutex_unlock(&mm->mutex);
155 mutex_unlock(&pfb->base.mutex);
156 pfb->ram.put(pfb, &mem); 156 pfb->ram.put(pfb, &mem);
157 return ret; 157 return ret;
158 } 158 }
@@ -160,7 +160,7 @@ nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
160 list_add_tail(&r->rl_entry, &mem->regions); 160 list_add_tail(&r->rl_entry, &mem->regions);
161 size -= r->length; 161 size -= r->length;
162 } while (size); 162 } while (size);
163 mutex_unlock(&mm->mutex);
163 mutex_unlock(&pfb->base.mutex);
164 164
165 r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry); 165 r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
166 mem->offset = (u64)r->offset << 12; 166 mem->offset = (u64)r->offset << 12;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
index acf818c58bf0..9fb0f9b92d49 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
@@ -43,10 +43,15 @@ static int
43nouveau_gpio_find(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line, 43nouveau_gpio_find(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line,
44 struct dcb_gpio_func *func) 44 struct dcb_gpio_func *func)
45{ 45{
46 struct nouveau_bios *bios = nouveau_bios(gpio);
47 u8 ver, len;
48 u16 data;
49
46 if (line == 0xff && tag == 0xff) 50 if (line == 0xff && tag == 0xff)
47 return -EINVAL; 51 return -EINVAL;
48 52
49 if (!dcb_gpio_parse(nouveau_bios(gpio), idx, tag, line, func))
53 data = dcb_gpio_match(bios, idx, tag, line, &ver, &len, func);
54 if (data)
50 return 0; 55 return 0;
51 56
52 /* Apple iMac G4 NV18 */ 57 /* Apple iMac G4 NV18 */
@@ -265,7 +270,7 @@ nouveau_gpio_init(struct nouveau_gpio *gpio)
265 int ret = nouveau_subdev_init(&gpio->base); 270 int ret = nouveau_subdev_init(&gpio->base);
266 if (ret == 0 && gpio->reset) { 271 if (ret == 0 && gpio->reset) {
267 if (dmi_check_system(gpio_reset_ids)) 272 if (dmi_check_system(gpio_reset_ids))
268 gpio->reset(gpio); 273 gpio->reset(gpio, DCB_GPIO_UNUSED);
269 } 274 }
270 return ret; 275 return ret;
271} 276}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
index f3502c961cd9..bf13a1200f26 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
@@ -29,15 +29,15 @@ struct nv50_gpio_priv {
29}; 29};
30 30
31static void 31static void
32nv50_gpio_reset(struct nouveau_gpio *gpio)
32nv50_gpio_reset(struct nouveau_gpio *gpio, u8 match)
33{ 33{
34 struct nouveau_bios *bios = nouveau_bios(gpio); 34 struct nouveau_bios *bios = nouveau_bios(gpio);
35 struct nv50_gpio_priv *priv = (void *)gpio; 35 struct nv50_gpio_priv *priv = (void *)gpio;
36 u8 ver, len;
36 u16 entry; 37 u16 entry;
37 u8 ver;
38 int ent = -1; 38 int ent = -1;
39 39
40 while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver))) {
40 while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
41 static const u32 regs[] = { 0xe100, 0xe28c }; 41 static const u32 regs[] = { 0xe100, 0xe28c };
42 u32 data = nv_ro32(bios, entry); 42 u32 data = nv_ro32(bios, entry);
43 u8 line = (data & 0x0000001f); 43 u8 line = (data & 0x0000001f);
@@ -48,7 +48,8 @@ nv50_gpio_reset(struct nouveau_gpio *gpio)
48 u32 val = (unk1 << 16) | unk0; 48 u32 val = (unk1 << 16) | unk0;
49 u32 reg = regs[line >> 4]; line &= 0x0f; 49 u32 reg = regs[line >> 4]; line &= 0x0f;
50 50
51 if (func == 0xff)
51 if ( func == DCB_GPIO_UNUSED ||
52 (match != DCB_GPIO_UNUSED && match != func))
52 continue; 53 continue;
53 54
54 gpio->set(gpio, 0, func, line, defs); 55 gpio->set(gpio, 0, func, line, defs);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
index 8d18fcad26e0..83e8b8f16e6a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
@@ -29,15 +29,15 @@ struct nvd0_gpio_priv {
29}; 29};
30 30
31static void 31static void
32nvd0_gpio_reset(struct nouveau_gpio *gpio)
32nvd0_gpio_reset(struct nouveau_gpio *gpio, u8 match)
33{ 33{
34 struct nouveau_bios *bios = nouveau_bios(gpio); 34 struct nouveau_bios *bios = nouveau_bios(gpio);
35 struct nvd0_gpio_priv *priv = (void *)gpio; 35 struct nvd0_gpio_priv *priv = (void *)gpio;
36 u8 ver, len;
36 u16 entry; 37 u16 entry;
37 u8 ver;
38 int ent = -1; 38 int ent = -1;
39 39
40 while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver))) {
40 while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
41 u32 data = nv_ro32(bios, entry); 41 u32 data = nv_ro32(bios, entry);
42 u8 line = (data & 0x0000003f); 42 u8 line = (data & 0x0000003f);
43 u8 defs = !!(data & 0x00000080); 43 u8 defs = !!(data & 0x00000080);
@@ -45,7 +45,8 @@ nvd0_gpio_reset(struct nouveau_gpio *gpio)
45 u8 unk0 = (data & 0x00ff0000) >> 16; 45 u8 unk0 = (data & 0x00ff0000) >> 16;
46 u8 unk1 = (data & 0x1f000000) >> 24; 46 u8 unk1 = (data & 0x1f000000) >> 24;
47 47
48 if (func == 0xff)
48 if ( func == DCB_GPIO_UNUSED ||
49 (match != DCB_GPIO_UNUSED && match != func))
49 continue; 50 continue;
50 51
51 gpio->set(gpio, 0, func, line, defs); 52 gpio->set(gpio, 0, func, line, defs);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
index 1188227ca6aa..6565f3dbbe04 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
@@ -40,15 +40,21 @@ nouveau_instobj_create_(struct nouveau_object *parent,
40 if (ret) 40 if (ret)
41 return ret; 41 return ret;
42 42
43 mutex_lock(&imem->base.mutex);
43 list_add(&iobj->head, &imem->list); 44 list_add(&iobj->head, &imem->list);
45 mutex_unlock(&imem->base.mutex);
44 return 0; 46 return 0;
45} 47}
46 48
47void 49void
48nouveau_instobj_destroy(struct nouveau_instobj *iobj) 50nouveau_instobj_destroy(struct nouveau_instobj *iobj)
49{ 51{
50 if (iobj->head.prev)
51 list_del(&iobj->head);
52 struct nouveau_subdev *subdev = nv_subdev(iobj->base.engine);
53
54 mutex_lock(&subdev->mutex);
55 list_del(&iobj->head);
56 mutex_unlock(&subdev->mutex);
57
52 return nouveau_object_destroy(&iobj->base); 58 return nouveau_object_destroy(&iobj->base);
53} 59}
54 60
@@ -88,6 +94,8 @@ nouveau_instmem_init(struct nouveau_instmem *imem)
88 if (ret) 94 if (ret)
89 return ret; 95 return ret;
90 96
97 mutex_lock(&imem->base.mutex);
98
91 list_for_each_entry(iobj, &imem->list, head) { 99 list_for_each_entry(iobj, &imem->list, head) {
92 if (iobj->suspend) { 100 if (iobj->suspend) {
93 for (i = 0; i < iobj->size; i += 4) 101 for (i = 0; i < iobj->size; i += 4)
@@ -97,6 +105,8 @@ nouveau_instmem_init(struct nouveau_instmem *imem)
97 } 105 }
98 } 106 }
99 107
108 mutex_unlock(&imem->base.mutex);
109
100 return 0; 110 return 0;
101} 111}
102 112
@@ -104,17 +114,26 @@ int
104nouveau_instmem_fini(struct nouveau_instmem *imem, bool suspend) 114nouveau_instmem_fini(struct nouveau_instmem *imem, bool suspend)
105{ 115{
106 struct nouveau_instobj *iobj; 116 struct nouveau_instobj *iobj;
107 int i;
117 int i, ret = 0;
108 118
109 if (suspend) { 119 if (suspend) {
120 mutex_lock(&imem->base.mutex);
121
110 list_for_each_entry(iobj, &imem->list, head) { 122 list_for_each_entry(iobj, &imem->list, head) {
111 iobj->suspend = vmalloc(iobj->size); 123 iobj->suspend = vmalloc(iobj->size);
112 if (iobj->suspend) {
113 for (i = 0; i < iobj->size; i += 4)
114 iobj->suspend[i / 4] = nv_ro32(iobj, i);
115 } else
116 return -ENOMEM;
124 if (!iobj->suspend) {
125 ret = -ENOMEM;
126 break;
127 }
128
129 for (i = 0; i < iobj->size; i += 4)
130 iobj->suspend[i / 4] = nv_ro32(iobj, i);
117 } 131 }
132
133 mutex_unlock(&imem->base.mutex);
134
135 if (ret)
136 return ret;
118 } 137 }
119 138
120 return nouveau_subdev_fini(&imem->base, suspend); 139 return nouveau_subdev_fini(&imem->base, suspend);
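
For illustration only (not part of the patch): the reworked suspend path above allocates a snapshot buffer per object and bails out with -ENOMEM as soon as one allocation fails, instead of returning from the middle of the walk. A minimal sketch of that structure (locking omitted):

#include <stdlib.h>
#include <string.h>

struct obj {
    unsigned char *data;
    unsigned char *suspend;
    size_t size;
};

static int suspend_all(struct obj *objs, int n)
{
    int ret = 0;

    for (int i = 0; i < n; i++) {
        objs[i].suspend = malloc(objs[i].size);
        if (!objs[i].suspend) {
            ret = -12;                  /* -ENOMEM: stop, report later */
            break;
        }
        memcpy(objs[i].suspend, objs[i].data, objs[i].size);
    }
    return ret;
}

int main(void)
{
    unsigned char buf[16] = { 0 };
    struct obj o = { .data = buf, .suspend = NULL, .size = sizeof(buf) };

    return suspend_all(&o, 1) ? 1 : 0;
}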
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
index 93e3ddf7303a..e286e132c7e7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
@@ -260,7 +260,7 @@ nouveau_mxm_create_(struct nouveau_object *parent,
260 260
261 data = mxm_table(bios, &ver, &len); 261 data = mxm_table(bios, &ver, &len);
262 if (!data || !(ver = nv_ro08(bios, data))) { 262 if (!data || !(ver = nv_ro08(bios, data))) {
263 nv_info(mxm, "no VBIOS data, nothing to do\n");
263 nv_debug(mxm, "no VBIOS data, nothing to do\n");
264 return 0; 264 return 0;
265 } 265 }
266 266
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
index 082c11b75acb..77c67fc970e6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
@@ -352,7 +352,7 @@ nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
352 u64 mm_length = (offset + length) - mm_offset; 352 u64 mm_length = (offset + length) - mm_offset;
353 int ret; 353 int ret;
354 354
355 vm = *pvm = kzalloc(sizeof(*vm), GFP_KERNEL);
355 vm = kzalloc(sizeof(*vm), GFP_KERNEL);
356 if (!vm) 356 if (!vm)
357 return -ENOMEM; 357 return -ENOMEM;
358 358
@@ -376,6 +376,8 @@ nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
376 return ret; 376 return ret;
377 } 377 }
378 378
379 *pvm = vm;
380
379 return 0; 381 return 0;
380} 382}
381 383
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 5614c89148cb..69d7b1d0b9d6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1276,7 +1276,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1276 if (drm->agp.stat == ENABLED) { 1276 if (drm->agp.stat == ENABLED) {
1277 mem->bus.offset = mem->start << PAGE_SHIFT; 1277 mem->bus.offset = mem->start << PAGE_SHIFT;
1278 mem->bus.base = drm->agp.base; 1278 mem->bus.base = drm->agp.base;
1279 mem->bus.is_iomem = true;
1279 mem->bus.is_iomem = !dev->agp->cant_use_aperture;
1280 } 1280 }
1281#endif 1281#endif
1282 break; 1282 break;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index ac340ba32017..e620ba8271b4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -127,12 +127,26 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
127 struct nouveau_encoder **pnv_encoder) 127 struct nouveau_encoder **pnv_encoder)
128{ 128{
129 struct drm_device *dev = connector->dev; 129 struct drm_device *dev = connector->dev;
130 struct nouveau_connector *nv_connector = nouveau_connector(connector);
130 struct nouveau_drm *drm = nouveau_drm(dev); 131 struct nouveau_drm *drm = nouveau_drm(dev);
132 struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
131 struct nouveau_i2c *i2c = nouveau_i2c(drm->device); 133 struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
132 int i; 134 struct nouveau_i2c_port *port = NULL;
135 int i, panel = -ENODEV;
136
137 /* eDP panels need powering on by us (if the VBIOS doesn't default it
138 * to on) before doing any AUX channel transactions. LVDS panel power
139 * is handled by the SOR itself, and not required for LVDS DDC.
140 */
141 if (nv_connector->type == DCB_CONNECTOR_eDP) {
142 panel = gpio->get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
143 if (panel == 0) {
144 gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
145 msleep(300);
146 }
147 }
133 148
134 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { 149 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
135 struct nouveau_i2c_port *port = NULL;
136 struct nouveau_encoder *nv_encoder; 150 struct nouveau_encoder *nv_encoder;
137 struct drm_mode_object *obj; 151 struct drm_mode_object *obj;
138 int id; 152 int id;
@@ -150,11 +164,19 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
150 port = i2c->find(i2c, nv_encoder->dcb->i2c_index); 164 port = i2c->find(i2c, nv_encoder->dcb->i2c_index);
151 if (port && nv_probe_i2c(port, 0x50)) { 165 if (port && nv_probe_i2c(port, 0x50)) {
152 *pnv_encoder = nv_encoder; 166 *pnv_encoder = nv_encoder;
153 return port;
167 break;
154 } 168 }
169
170 port = NULL;
155 } 171 }
156 172
157 return NULL;
173 /* eDP panel not detected, restore panel power GPIO to previous
174 * state to avoid confusing the SOR for other output types.
175 */
176 if (!port && panel == 0)
177 gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel);
178
179 return port;
158} 180}
159 181
160static struct nouveau_encoder * 182static struct nouveau_encoder *
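
For illustration only (not part of the patch): the DDC-detect change powers an eDP panel on before any AUX transaction and, when no port is found, puts the panel-power GPIO back the way it was. A standalone sketch of that save/probe/restore pattern; the accessors below are stand-ins, not nouveau APIs:

#include <stdbool.h>
#include <stdio.h>

static int panel_power = 0;            /* pretend GPIO state */

static int  gpio_get(void)  { return panel_power; }
static void gpio_set(int v) { panel_power = v; }
static bool probe_edp(void) { return false; }     /* nothing answered */

static bool detect(void)
{
    int prev = gpio_get();
    bool found;

    if (prev == 0)
        gpio_set(1);                   /* power the panel for AUX probing */

    found = probe_edp();

    if (!found && prev == 0)
        gpio_set(prev);                /* restore the prior state */
    return found;
}

int main(void)
{
    printf("detected: %d, panel power: %d\n", detect(), gpio_get());
    return 0;
}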
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index e4188f24fc75..508b00a2ce0d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -225,15 +225,6 @@ nouveau_display_init(struct drm_device *dev)
225 if (ret) 225 if (ret)
226 return ret; 226 return ret;
227 227
228 /* power on internal panel if it's not already. the init tables of
229 * some vbios default this to off for some reason, causing the
230 * panel to not work after resume
231 */
232 if (gpio && gpio->get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff) == 0) {
233 gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
234 msleep(300);
235 }
236
237 /* enable polling for external displays */ 228 /* enable polling for external displays */
238 drm_kms_helper_poll_enable(dev); 229 drm_kms_helper_poll_enable(dev);
239 230
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 01c403ddb99b..8b090f1eb51d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -84,11 +84,16 @@ nouveau_cli_create(struct pci_dev *pdev, const char *name,
84 struct nouveau_cli *cli; 84 struct nouveau_cli *cli;
85 int ret; 85 int ret;
86 86
87 *pcli = NULL;
87 ret = nouveau_client_create_(name, nouveau_name(pdev), nouveau_config, 88 ret = nouveau_client_create_(name, nouveau_name(pdev), nouveau_config,
88 nouveau_debug, size, pcli); 89 nouveau_debug, size, pcli);
89 cli = *pcli; 90 cli = *pcli;
90 if (ret)
91 if (ret) {
92 if (cli)
93 nouveau_client_destroy(&cli->base);
94 *pcli = NULL;
91 return ret; 95 return ret;
96 }
92 97
93 mutex_init(&cli->mutex); 98 mutex_init(&cli->mutex);
94 return 0; 99 return 0;
@@ -189,8 +194,8 @@ nouveau_accel_init(struct nouveau_drm *drm)
189 nouveau_bo_move_init(drm); 194 nouveau_bo_move_init(drm);
190} 195}
191 196
192static int __devinit
193nouveau_drm_probe(struct pci_dev *pdev, const struct pci_device_id *pent)
197static int nouveau_drm_probe(struct pci_dev *pdev,
198 const struct pci_device_id *pent)
194{ 199{
195 struct nouveau_device *device; 200 struct nouveau_device *device;
196 struct apertures_struct *aper; 201 struct apertures_struct *aper;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index bedafd1c9539..cdb83acdffe2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -60,6 +60,7 @@ u32 nv10_fence_read(struct nouveau_channel *);
60void nv10_fence_context_del(struct nouveau_channel *); 60void nv10_fence_context_del(struct nouveau_channel *);
61void nv10_fence_destroy(struct nouveau_drm *); 61void nv10_fence_destroy(struct nouveau_drm *);
62int nv10_fence_create(struct nouveau_drm *); 62int nv10_fence_create(struct nouveau_drm *);
63void nv17_fence_resume(struct nouveau_drm *drm);
63 64
64int nv50_fence_create(struct nouveau_drm *); 65int nv50_fence_create(struct nouveau_drm *);
65int nv84_fence_create(struct nouveau_drm *); 66int nv84_fence_create(struct nouveau_drm *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index 5566172774df..a701ff5ffa5b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -698,10 +698,10 @@ static int
698nouveau_hwmon_init(struct drm_device *dev) 698nouveau_hwmon_init(struct drm_device *dev)
699{ 699{
700 struct nouveau_pm *pm = nouveau_pm(dev); 700 struct nouveau_pm *pm = nouveau_pm(dev);
701 struct nouveau_drm *drm = nouveau_drm(dev);
702 struct nouveau_therm *therm = nouveau_therm(drm->device);
701
703 701
704#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) 702#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
703 struct nouveau_drm *drm = nouveau_drm(dev);
704 struct nouveau_therm *therm = nouveau_therm(drm->device);
705 struct device *hwmon_dev; 705 struct device *hwmon_dev;
706 int ret = 0; 706 int ret = 0;
707 707
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 3543fec2355e..b8e05ae38212 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -193,6 +193,7 @@ struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
193 if (nvbo->gem) { 193 if (nvbo->gem) {
194 if (nvbo->gem->dev == dev) { 194 if (nvbo->gem->dev == dev) {
195 drm_gem_object_reference(nvbo->gem); 195 drm_gem_object_reference(nvbo->gem);
196 dma_buf_put(dma_buf);
196 return nvbo->gem; 197 return nvbo->gem;
197 } 198 }
198 } 199 }
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index 184cdf806761..39ffc07f906b 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -505,7 +505,7 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
505 505
506static inline bool is_powersaving_dpms(int mode) 506static inline bool is_powersaving_dpms(int mode)
507{ 507{
508 return (mode != DRM_MODE_DPMS_ON);
508 return mode != DRM_MODE_DPMS_ON && mode != NV_DPMS_CLEARED;
509} 509}
510 510
511static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode) 511static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index 7ae7f97a6d4d..03017f24d593 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -162,6 +162,13 @@ nv10_fence_destroy(struct nouveau_drm *drm)
162 kfree(priv); 162 kfree(priv);
163} 163}
164 164
165void nv17_fence_resume(struct nouveau_drm *drm)
166{
167 struct nv10_fence_priv *priv = drm->fence;
168
169 nouveau_bo_wr32(priv->bo, 0, priv->sequence);
170}
171
165int 172int
166nv10_fence_create(struct nouveau_drm *drm) 173nv10_fence_create(struct nouveau_drm *drm)
167{ 174{
@@ -197,6 +204,7 @@ nv10_fence_create(struct nouveau_drm *drm)
197 if (ret == 0) { 204 if (ret == 0) {
198 nouveau_bo_wr32(priv->bo, 0x000, 0x00000000); 205 nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
199 priv->base.sync = nv17_fence_sync; 206 priv->base.sync = nv17_fence_sync;
207 priv->base.resume = nv17_fence_resume;
200 } 208 }
201 } 209 }
202 210
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index c20f2727ea0b..d889f3ac0d41 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -122,6 +122,7 @@ nv50_fence_create(struct nouveau_drm *drm)
122 if (ret == 0) { 122 if (ret == 0) {
123 nouveau_bo_wr32(priv->bo, 0x000, 0x00000000); 123 nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
124 priv->base.sync = nv17_fence_sync; 124 priv->base.sync = nv17_fence_sync;
125 priv->base.resume = nv17_fence_resume;
125 } 126 }
126 127
127 if (ret) 128 if (ret)
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index f95d7fc1f5e0..4d0e60adbc6d 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2306,22 +2306,20 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *rin
2306 return radeon_ring_test_lockup(rdev, ring); 2306 return radeon_ring_test_lockup(rdev, ring);
2307} 2307}
2308 2308
2309static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
2309static void evergreen_gpu_soft_reset_gfx(struct radeon_device *rdev)
2310{ 2310{
2311 struct evergreen_mc_save save;
2312 u32 grbm_reset = 0; 2311 u32 grbm_reset = 0;
2313 2312
2314 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) 2313 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
2315 return 0;
2314 return;
2316 2315
2317 dev_info(rdev->dev, "GPU softreset \n");
2316 dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
2318 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
2319 RREG32(GRBM_STATUS)); 2317 RREG32(GRBM_STATUS));
2320 dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n", 2318 dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
2321 RREG32(GRBM_STATUS_SE0)); 2319 RREG32(GRBM_STATUS_SE0));
2322 dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n", 2320 dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
2323 RREG32(GRBM_STATUS_SE1)); 2321 RREG32(GRBM_STATUS_SE1));
2324 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", 2322 dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
2325 RREG32(SRBM_STATUS)); 2323 RREG32(SRBM_STATUS));
2326 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n", 2324 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
2327 RREG32(CP_STALLED_STAT1)); 2325 RREG32(CP_STALLED_STAT1));
@@ -2331,10 +2329,7 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
2331 RREG32(CP_BUSY_STAT)); 2329 RREG32(CP_BUSY_STAT));
2332 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", 2330 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
2333 RREG32(CP_STAT)); 2331 RREG32(CP_STAT));
2334 evergreen_mc_stop(rdev, &save);
2332
2335 if (evergreen_mc_wait_for_idle(rdev)) {
2336 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
2337 }
2338 /* Disable CP parsing/prefetching */ 2333 /* Disable CP parsing/prefetching */
2339 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT); 2334 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
2340 2335
@@ -2358,15 +2353,14 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
2358 udelay(50); 2353 udelay(50);
2359 WREG32(GRBM_SOFT_RESET, 0); 2354 WREG32(GRBM_SOFT_RESET, 0);
2360 (void)RREG32(GRBM_SOFT_RESET); 2355 (void)RREG32(GRBM_SOFT_RESET);
2361 /* Wait a little for things to settle down */
2362 udelay(50);
2363 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
2356
2357 dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
2364 RREG32(GRBM_STATUS)); 2358 RREG32(GRBM_STATUS));
2365 dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n", 2359 dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
2366 RREG32(GRBM_STATUS_SE0)); 2360 RREG32(GRBM_STATUS_SE0));
2367 dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n", 2361 dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
2368 RREG32(GRBM_STATUS_SE1)); 2362 RREG32(GRBM_STATUS_SE1));
2369 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", 2363 dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
2370 RREG32(SRBM_STATUS)); 2364 RREG32(SRBM_STATUS));
2371 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n", 2365 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
2372 RREG32(CP_STALLED_STAT1)); 2366 RREG32(CP_STALLED_STAT1));
@@ -2376,13 +2370,71 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
2376 RREG32(CP_BUSY_STAT)); 2370 RREG32(CP_BUSY_STAT));
2377 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", 2371 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
2378 RREG32(CP_STAT)); 2372 RREG32(CP_STAT));
2373}
2374
2375static void evergreen_gpu_soft_reset_dma(struct radeon_device *rdev)
2376{
2377 u32 tmp;
2378
2379 if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
2380 return;
2381
2382 dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
2383 RREG32(DMA_STATUS_REG));
2384
2385 /* Disable DMA */
2386 tmp = RREG32(DMA_RB_CNTL);
2387 tmp &= ~DMA_RB_ENABLE;
2388 WREG32(DMA_RB_CNTL, tmp);
2389
2390 /* Reset dma */
2391 WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
2392 RREG32(SRBM_SOFT_RESET);
2393 udelay(50);
2394 WREG32(SRBM_SOFT_RESET, 0);
2395
2396 dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
2397 RREG32(DMA_STATUS_REG));
2398}
2399
2400static int evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
2401{
2402 struct evergreen_mc_save save;
2403
2404 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
2405 reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
2406
2407 if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
2408 reset_mask &= ~RADEON_RESET_DMA;
2409
2410 if (reset_mask == 0)
2411 return 0;
2412
2413 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
2414
2415 evergreen_mc_stop(rdev, &save);
2416 if (evergreen_mc_wait_for_idle(rdev)) {
2417 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
2418 }
2419
2420 if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
2421 evergreen_gpu_soft_reset_gfx(rdev);
2422
2423 if (reset_mask & RADEON_RESET_DMA)
2424 evergreen_gpu_soft_reset_dma(rdev);
2425
2426 /* Wait a little for things to settle down */
2427 udelay(50);
2428
2379 evergreen_mc_resume(rdev, &save); 2429 evergreen_mc_resume(rdev, &save);
2380 return 0; 2430 return 0;
2381} 2431}
2382 2432
2383int evergreen_asic_reset(struct radeon_device *rdev) 2433int evergreen_asic_reset(struct radeon_device *rdev)
2384{ 2434{
2385 return evergreen_gpu_soft_reset(rdev);
2435 return evergreen_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
2436 RADEON_RESET_COMPUTE |
2437 RADEON_RESET_DMA));
2386} 2438}
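
For illustration only (not part of the patch): the reworked reset path takes a mask of engines to reset, clears the bits for engines that already report idle, and skips the whole sequence if nothing remains. A compact sketch of that mask-pruning flow, with the status reads stubbed:

#include <stdint.h>
#include <stdio.h>

#define RESET_GFX     (1u << 0)
#define RESET_COMPUTE (1u << 1)
#define RESET_DMA     (1u << 2)

static int gfx_busy(void) { return 0; }   /* pretend GFX is idle */
static int dma_busy(void) { return 1; }   /* pretend DMA is hung */

static int soft_reset(uint32_t reset_mask)
{
    /* drop bits for engines that do not need a reset */
    if (!gfx_busy())
        reset_mask &= ~(RESET_GFX | RESET_COMPUTE);
    if (!dma_busy())
        reset_mask &= ~RESET_DMA;
    if (reset_mask == 0)
        return 0;                          /* nothing to do */

    printf("softreset: 0x%08x\n", reset_mask);
    /* ...stop the MC, reset the selected engines, resume the MC... */
    return 0;
}

int main(void)
{
    return soft_reset(RESET_GFX | RESET_COMPUTE | RESET_DMA);
}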
2387 2439
2388/* Interrupts */ 2440/* Interrupts */
@@ -3215,7 +3267,7 @@ void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
3215 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0)); 3267 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
3216 /* flush HDP */ 3268 /* flush HDP */
3217 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0)); 3269 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
3218 radeon_ring_write(ring, (0xf << 16) | HDP_MEM_COHERENCY_FLUSH_CNTL); 3270 radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
3219 radeon_ring_write(ring, 1); 3271 radeon_ring_write(ring, 1);
3220} 3272}
3221 3273
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 74c6b42d2597..7a445666e71f 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -2654,6 +2654,35 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
2654 ib[idx+4] = upper_32_bits(offset) & 0xff; 2654 ib[idx+4] = upper_32_bits(offset) & 0xff;
2655 } 2655 }
2656 break; 2656 break;
2657 case PACKET3_MEM_WRITE:
2658 {
2659 u64 offset;
2660
2661 if (pkt->count != 3) {
2662 DRM_ERROR("bad MEM_WRITE (invalid count)\n");
2663 return -EINVAL;
2664 }
2665 r = evergreen_cs_packet_next_reloc(p, &reloc);
2666 if (r) {
2667 DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
2668 return -EINVAL;
2669 }
2670 offset = radeon_get_ib_value(p, idx+0);
2671 offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
2672 if (offset & 0x7) {
2673 DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
2674 return -EINVAL;
2675 }
2676 if ((offset + 8) > radeon_bo_size(reloc->robj)) {
2677 DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
2678 offset + 8, radeon_bo_size(reloc->robj));
2679 return -EINVAL;
2680 }
2681 offset += reloc->lobj.gpu_offset;
2682 ib[idx+0] = offset;
2683 ib[idx+1] = upper_32_bits(offset) & 0xff;
2684 break;
2685 }
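
For illustration only (not part of the patch): the new MEM_WRITE check accepts a packet with count 3, rebuilds the destination address from the low dword plus the low byte of the next one, and rejects it unless the address is qword-aligned and the 8-byte write fits inside the relocated buffer. The same checks in a tiny standalone form:

#include <stdint.h>
#include <stdio.h>

/* reassemble the 40-bit address from two dwords, then validate it */
static int check_mem_write(uint32_t addr_lo, uint32_t addr_hi, uint64_t bo_size)
{
    uint64_t offset = addr_lo;

    offset += (uint64_t)(addr_hi & 0xff) << 32;
    if (offset & 0x7)
        return -1;                 /* not qword aligned */
    if (offset + 8 > bo_size)
        return -1;                 /* 8-byte write overruns the buffer */
    return 0;
}

int main(void)
{
    printf("%d\n", check_mem_write(0x1000, 0x00, 0x2000));   /* ok */
    printf("%d\n", check_mem_write(0x2000, 0x00, 0x2000));   /* overruns */
    return 0;
}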
2657 case PACKET3_COPY_DW: 2686 case PACKET3_COPY_DW:
2658 if (pkt->count != 4) { 2687 if (pkt->count != 4) {
2659 DRM_ERROR("bad COPY_DW (invalid count)\n"); 2688 DRM_ERROR("bad COPY_DW (invalid count)\n");
@@ -3287,6 +3316,7 @@ static bool evergreen_vm_reg_valid(u32 reg)
3287 3316
3288 /* check config regs */ 3317 /* check config regs */
3289 switch (reg) { 3318 switch (reg) {
3319 case WAIT_UNTIL:
3290 case GRBM_GFX_INDEX: 3320 case GRBM_GFX_INDEX:
3291 case CP_STRMOUT_CNTL: 3321 case CP_STRMOUT_CNTL:
3292 case CP_COHER_CNTL: 3322 case CP_COHER_CNTL:
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index cb9baaac9e85..0bfd0e9e469b 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -742,8 +742,9 @@
742#define SOFT_RESET_ROM (1 << 14) 742#define SOFT_RESET_ROM (1 << 14)
743#define SOFT_RESET_SEM (1 << 15) 743#define SOFT_RESET_SEM (1 << 15)
744#define SOFT_RESET_VMC (1 << 17) 744#define SOFT_RESET_VMC (1 << 17)
745#define SOFT_RESET_DMA (1 << 20)
745#define SOFT_RESET_TST (1 << 21) 746#define SOFT_RESET_TST (1 << 21)
746#define SOFT_RESET_REGBB (1 << 22) 747#define SOFT_RESET_REGBB (1 << 22)
747#define SOFT_RESET_ORB (1 << 23) 748#define SOFT_RESET_ORB (1 << 23)
748 749
749/* display watermarks */ 750/* display watermarks */
@@ -2027,4 +2028,15 @@
2027/* cayman packet3 addition */ 2028/* cayman packet3 addition */
2028#define CAYMAN_PACKET3_DEALLOC_STATE 0x14 2029#define CAYMAN_PACKET3_DEALLOC_STATE 0x14
2029 2030
2031/* DMA regs common on r6xx/r7xx/evergreen/ni */
2032#define DMA_RB_CNTL 0xd000
2033# define DMA_RB_ENABLE (1 << 0)
2034# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */
2035# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
2036# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12)
2037# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
2038# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
2039#define DMA_STATUS_REG 0xd034
2040# define DMA_IDLE (1 << 0)
2041
2030#endif 2042#endif
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 7bdbcb00aaf2..59acabb45c9b 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1306,22 +1306,20 @@ void cayman_dma_fini(struct radeon_device *rdev)
1306 radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]); 1306 radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
1307} 1307}
1308 1308
1309static int cayman_gpu_soft_reset(struct radeon_device *rdev)
1309static void cayman_gpu_soft_reset_gfx(struct radeon_device *rdev)
1310{ 1310{
1311 struct evergreen_mc_save save;
1312 u32 grbm_reset = 0; 1311 u32 grbm_reset = 0;
1313 1312
1314 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) 1313 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
1315 return 0;
1314 return;
1316 1315
1317 dev_info(rdev->dev, "GPU softreset \n");
1316 dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
1318 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
1319 RREG32(GRBM_STATUS)); 1317 RREG32(GRBM_STATUS));
1320 dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n", 1318 dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
1321 RREG32(GRBM_STATUS_SE0)); 1319 RREG32(GRBM_STATUS_SE0));
1322 dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n", 1320 dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
1323 RREG32(GRBM_STATUS_SE1)); 1321 RREG32(GRBM_STATUS_SE1));
1324 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", 1322 dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
1325 RREG32(SRBM_STATUS)); 1323 RREG32(SRBM_STATUS));
1326 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n", 1324 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
1327 RREG32(CP_STALLED_STAT1)); 1325 RREG32(CP_STALLED_STAT1));
@@ -1331,19 +1329,7 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
1331 RREG32(CP_BUSY_STAT)); 1329 RREG32(CP_BUSY_STAT));
1332 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", 1330 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
1333 RREG32(CP_STAT)); 1331 RREG32(CP_STAT));
1334 dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
1335 RREG32(0x14F8));
1336 dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
1337 RREG32(0x14D8));
1338 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
1339 RREG32(0x14FC));
1340 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1341 RREG32(0x14DC));
1342 1332
1343 evergreen_mc_stop(rdev, &save);
1344 if (evergreen_mc_wait_for_idle(rdev)) {
1345 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
1346 }
1347 /* Disable CP parsing/prefetching */ 1333 /* Disable CP parsing/prefetching */
1348 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT); 1334 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
1349 1335
@@ -1368,16 +1354,14 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
1368 udelay(50); 1354 udelay(50);
1369 WREG32(GRBM_SOFT_RESET, 0); 1355 WREG32(GRBM_SOFT_RESET, 0);
1370 (void)RREG32(GRBM_SOFT_RESET); 1356 (void)RREG32(GRBM_SOFT_RESET);
1371 /* Wait a little for things to settle down */
1372 udelay(50);
1373 1357
1374 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", 1358 dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
1375 RREG32(GRBM_STATUS)); 1359 RREG32(GRBM_STATUS));
1376 dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n", 1360 dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
1377 RREG32(GRBM_STATUS_SE0)); 1361 RREG32(GRBM_STATUS_SE0));
1378 dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n", 1362 dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
1379 RREG32(GRBM_STATUS_SE1)); 1363 RREG32(GRBM_STATUS_SE1));
1380 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", 1364 dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
1381 RREG32(SRBM_STATUS)); 1365 RREG32(SRBM_STATUS));
1382 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n", 1366 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
1383 RREG32(CP_STALLED_STAT1)); 1367 RREG32(CP_STALLED_STAT1));
@@ -1387,13 +1371,87 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
1387 RREG32(CP_BUSY_STAT)); 1371 RREG32(CP_BUSY_STAT));
1388 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", 1372 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
1389 RREG32(CP_STAT)); 1373 RREG32(CP_STAT));
1374
1375}
1376
1377static void cayman_gpu_soft_reset_dma(struct radeon_device *rdev)
1378{
1379 u32 tmp;
1380
1381 if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
1382 return;
1383
1384 dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
1385 RREG32(DMA_STATUS_REG));
1386
1387 /* dma0 */
1388 tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
1389 tmp &= ~DMA_RB_ENABLE;
1390 WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
1391
1392 /* dma1 */
1393 tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
1394 tmp &= ~DMA_RB_ENABLE;
1395 WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
1396
1397 /* Reset dma */
1398 WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
1399 RREG32(SRBM_SOFT_RESET);
1400 udelay(50);
1401 WREG32(SRBM_SOFT_RESET, 0);
1402
1403 dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
1404 RREG32(DMA_STATUS_REG));
1405
1406}
1407
1408static int cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
1409{
1410 struct evergreen_mc_save save;
1411
1412 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
1413 reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
1414
1415 if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
1416 reset_mask &= ~RADEON_RESET_DMA;
1417
1418 if (reset_mask == 0)
1419 return 0;
1420
1421 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
1422
1423 dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
1424 RREG32(0x14F8));
1425 dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
1426 RREG32(0x14D8));
1427 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
1428 RREG32(0x14FC));
1429 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1430 RREG32(0x14DC));
1431
1432 evergreen_mc_stop(rdev, &save);
1433 if (evergreen_mc_wait_for_idle(rdev)) {
1434 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
1435 }
1436
1437 if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
1438 cayman_gpu_soft_reset_gfx(rdev);
1439
1440 if (reset_mask & RADEON_RESET_DMA)
1441 cayman_gpu_soft_reset_dma(rdev);
1442
1443 /* Wait a little for things to settle down */
1444 udelay(50);
1445
1390 evergreen_mc_resume(rdev, &save); 1446 evergreen_mc_resume(rdev, &save);
1391 return 0; 1447 return 0;
1392} 1448}
1393 1449
1394int cayman_asic_reset(struct radeon_device *rdev) 1450int cayman_asic_reset(struct radeon_device *rdev)
1395{ 1451{
1396 return cayman_gpu_soft_reset(rdev);
1452 return cayman_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
1453 RADEON_RESET_COMPUTE |
1454 RADEON_RESET_DMA));
1397} 1455}
1398 1456
1399/** 1457/**
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index b93186b8ee4b..48e5022ee921 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -65,7 +65,7 @@
65#define SOFT_RESET_VMC (1 << 17) 65#define SOFT_RESET_VMC (1 << 17)
66#define SOFT_RESET_DMA (1 << 20) 66#define SOFT_RESET_DMA (1 << 20)
67#define SOFT_RESET_TST (1 << 21) 67#define SOFT_RESET_TST (1 << 21)
68#define SOFT_RESET_REGBB (1 << 22) 68#define SOFT_RESET_REGBB (1 << 22)
69#define SOFT_RESET_ORB (1 << 23) 69#define SOFT_RESET_ORB (1 << 23)
70 70
71#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470 71#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
@@ -675,4 +675,3 @@
675#define DMA_PACKET_NOP 0xf 675#define DMA_PACKET_NOP 0xf
676 676
677#endif 677#endif
678
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 2aaf147969bd..3cb9d6089373 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1258,9 +1258,8 @@ void r600_vram_scratch_fini(struct radeon_device *rdev)
1258 * reset, it's up to the caller to determine if the GPU needs one. We 1258 * reset, it's up to the caller to determine if the GPU needs one. We
1259 * might add an helper function to check that. 1259 * might add an helper function to check that.
1260 */ 1260 */
1261static int r600_gpu_soft_reset(struct radeon_device *rdev)
1261static void r600_gpu_soft_reset_gfx(struct radeon_device *rdev)
1262{ 1262{
1263 struct rv515_mc_save save;
1264 u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) | 1263 u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
1265 S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) | 1264 S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
1266 S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) | 1265 S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
@@ -1280,14 +1279,13 @@ static int r600_gpu_soft_reset(struct radeon_device *rdev)
1280 u32 tmp; 1279 u32 tmp;
1281 1280
1282 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) 1281 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
1283 return 0;
1282 return;
1284 1283
1285 dev_info(rdev->dev, "GPU softreset \n");
1284 dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n",
1286 dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
1287 RREG32(R_008010_GRBM_STATUS)); 1285 RREG32(R_008010_GRBM_STATUS));
1288 dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n", 1286 dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n",
1289 RREG32(R_008014_GRBM_STATUS2)); 1287 RREG32(R_008014_GRBM_STATUS2));
1290 dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n", 1288 dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n",
1291 RREG32(R_000E50_SRBM_STATUS)); 1289 RREG32(R_000E50_SRBM_STATUS));
1292 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n", 1290 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
1293 RREG32(CP_STALLED_STAT1)); 1291 RREG32(CP_STALLED_STAT1));
@@ -1297,12 +1295,10 @@ static int r600_gpu_soft_reset(struct radeon_device *rdev)
1297 RREG32(CP_BUSY_STAT)); 1295 RREG32(CP_BUSY_STAT));
1298 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", 1296 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
1299 RREG32(CP_STAT)); 1297 RREG32(CP_STAT));
1300 rv515_mc_stop(rdev, &save);
1298
1301 if (r600_mc_wait_for_idle(rdev)) {
1302 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
1303 }
1304 /* Disable CP parsing/prefetching */ 1299 /* Disable CP parsing/prefetching */
1305 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); 1300 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1301
1306 /* Check if any of the rendering block is busy and reset it */ 1302 /* Check if any of the rendering block is busy and reset it */
1307 if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) || 1303 if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
1308 (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) { 1304 (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
@@ -1332,13 +1328,12 @@ static int r600_gpu_soft_reset(struct radeon_device *rdev)
1332 RREG32(R_008020_GRBM_SOFT_RESET); 1328 RREG32(R_008020_GRBM_SOFT_RESET);
1333 mdelay(15); 1329 mdelay(15);
1334 WREG32(R_008020_GRBM_SOFT_RESET, 0); 1330 WREG32(R_008020_GRBM_SOFT_RESET, 0);
1335 /* Wait a little for things to settle down */
1336 mdelay(1);
1337 dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
1331
1332 dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n",
1338 RREG32(R_008010_GRBM_STATUS)); 1333 RREG32(R_008010_GRBM_STATUS));
1339 dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n", 1334 dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n",
1340 RREG32(R_008014_GRBM_STATUS2)); 1335 RREG32(R_008014_GRBM_STATUS2));
1341 dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n", 1336 dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n",
1342 RREG32(R_000E50_SRBM_STATUS)); 1337 RREG32(R_000E50_SRBM_STATUS));
1343 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n", 1338 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
1344 RREG32(CP_STALLED_STAT1)); 1339 RREG32(CP_STALLED_STAT1));
@@ -1348,6 +1343,66 @@ static int r600_gpu_soft_reset(struct radeon_device *rdev)
1348 RREG32(CP_BUSY_STAT)); 1343 RREG32(CP_BUSY_STAT));
1349 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", 1344 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
1350 RREG32(CP_STAT)); 1345 RREG32(CP_STAT));
1346
1347}
1348
1349static void r600_gpu_soft_reset_dma(struct radeon_device *rdev)
1350{
1351 u32 tmp;
1352
1353 if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
1354 return;
1355
1356 dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
1357 RREG32(DMA_STATUS_REG));
1358
1359 /* Disable DMA */
1360 tmp = RREG32(DMA_RB_CNTL);
1361 tmp &= ~DMA_RB_ENABLE;
1362 WREG32(DMA_RB_CNTL, tmp);
1363
1364 /* Reset dma */
1365 if (rdev->family >= CHIP_RV770)
1366 WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
1367 else
1368 WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
1369 RREG32(SRBM_SOFT_RESET);
1370 udelay(50);
1371 WREG32(SRBM_SOFT_RESET, 0);
1372
1373 dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
1374 RREG32(DMA_STATUS_REG));
1375}
1376
1377static int r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
1378{
1379 struct rv515_mc_save save;
1380
1381 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
1382 reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
1383
1384 if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
1385 reset_mask &= ~RADEON_RESET_DMA;
1386
1387 if (reset_mask == 0)
1388 return 0;
1389
1390 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
1391
1392 rv515_mc_stop(rdev, &save);
1393 if (r600_mc_wait_for_idle(rdev)) {
1394 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
1395 }
1396
1397 if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
1398 r600_gpu_soft_reset_gfx(rdev);
1399
1400 if (reset_mask & RADEON_RESET_DMA)
1401 r600_gpu_soft_reset_dma(rdev);
1402
1403 /* Wait a little for things to settle down */
1404 mdelay(1);
1405
1351 rv515_mc_resume(rdev, &save); 1406 rv515_mc_resume(rdev, &save);
1352 return 0; 1407 return 0;
1353} 1408}
@@ -1395,7 +1450,9 @@ bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1395 1450
1396int r600_asic_reset(struct radeon_device *rdev) 1451int r600_asic_reset(struct radeon_device *rdev)
1397{ 1452{
1398 return r600_gpu_soft_reset(rdev);
1453 return r600_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
1454 RADEON_RESET_COMPUTE |
1455 RADEON_RESET_DMA));
1399} 1456}
1400 1457
1401u32 r6xx_remap_render_backend(struct radeon_device *rdev, 1458u32 r6xx_remap_render_backend(struct radeon_device *rdev,
@@ -2595,7 +2652,7 @@ int r600_copy_blit(struct radeon_device *rdev,
2595 * @num_gpu_pages: number of GPU pages to xfer 2652 * @num_gpu_pages: number of GPU pages to xfer
2596 * @fence: radeon fence object 2653 * @fence: radeon fence object
2597 * 2654 *
2598 * Copy GPU paging using the DMA engine (r6xx-r7xx).
2655 * Copy GPU paging using the DMA engine (r6xx).
2599 * Used by the radeon ttm implementation to move pages if 2656 * Used by the radeon ttm implementation to move pages if
2600 * registered as the asic copy callback. 2657 * registered as the asic copy callback.
2601 */ 2658 */
@@ -2618,8 +2675,8 @@ int r600_copy_dma(struct radeon_device *rdev,
2618 } 2675 }
2619 2676
2620 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4; 2677 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
2621 num_loops = DIV_ROUND_UP(size_in_dw, 0xffff);
2622 r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
2678 num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
2679 r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
2623 if (r) { 2680 if (r) {
2624 DRM_ERROR("radeon: moving bo (%d).\n", r); 2681 DRM_ERROR("radeon: moving bo (%d).\n", r);
2625 radeon_semaphore_free(rdev, &sem, NULL); 2682 radeon_semaphore_free(rdev, &sem, NULL);
@@ -2636,14 +2693,14 @@ int r600_copy_dma(struct radeon_device *rdev,
2636 2693
2637 for (i = 0; i < num_loops; i++) { 2694 for (i = 0; i < num_loops; i++) {
2638 cur_size_in_dw = size_in_dw; 2695 cur_size_in_dw = size_in_dw;
2639 if (cur_size_in_dw > 0xFFFF)
2640 cur_size_in_dw = 0xFFFF;
2696 if (cur_size_in_dw > 0xFFFE)
2697 cur_size_in_dw = 0xFFFE;
2641 size_in_dw -= cur_size_in_dw; 2698 size_in_dw -= cur_size_in_dw;
2642 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw)); 2699 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
2643 radeon_ring_write(ring, dst_offset & 0xfffffffc); 2700 radeon_ring_write(ring, dst_offset & 0xfffffffc);
2644 radeon_ring_write(ring, src_offset & 0xfffffffc); 2701 radeon_ring_write(ring, src_offset & 0xfffffffc);
2645 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
2646 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
2702 radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
2703 (upper_32_bits(src_offset) & 0xff)));
2647 src_offset += cur_size_in_dw * 4; 2704 src_offset += cur_size_in_dw * 4;
2648 dst_offset += cur_size_in_dw * 4; 2705 dst_offset += cur_size_in_dw * 4;
2649 } 2706 }
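
For illustration only (not part of the patch): with the change above each DMA copy packet covers at most 0xFFFE dwords and both upper-address bytes travel in a single descriptor dword, so a chunk costs 4 ring writes instead of 5. A sketch of the chunking arithmetic:

#include <stdint.h>
#include <stdio.h>

#define MAX_DW_PER_PACKET 0xFFFEu

int main(void)
{
    uint64_t src = (0x12ULL << 32) | 0x1000;   /* example addresses */
    uint64_t dst = (0x34ULL << 32) | 0x8000;
    uint32_t size_in_dw = 0x30000;             /* total copy size in dwords */
    uint32_t num_loops =
        (size_in_dw + MAX_DW_PER_PACKET - 1) / MAX_DW_PER_PACKET;

    printf("loops: %u, ring dwords: %u\n", num_loops, num_loops * 4 + 8);

    while (size_in_dw) {
        uint32_t cur = size_in_dw > MAX_DW_PER_PACKET ? MAX_DW_PER_PACKET
                                                      : size_in_dw;
        /* pack both upper-address bytes into one dword */
        uint32_t hi = (uint32_t)(((dst >> 32) & 0xff) << 16) |
                      (uint32_t)((src >> 32) & 0xff);

        printf("copy %u dw, dst 0x%08x, src 0x%08x, hi 0x%08x\n",
               cur, (uint32_t)dst & 0xfffffffc,
               (uint32_t)src & 0xfffffffc, hi);

        src += (uint64_t)cur * 4;
        dst += (uint64_t)cur * 4;
        size_in_dw -= cur;
    }
    return 0;
}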
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 0be768be530c..69ec24ab8d63 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -2294,6 +2294,35 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
2294 ib[idx+4] = upper_32_bits(offset) & 0xff; 2294 ib[idx+4] = upper_32_bits(offset) & 0xff;
2295 } 2295 }
2296 break; 2296 break;
2297 case PACKET3_MEM_WRITE:
2298 {
2299 u64 offset;
2300
2301 if (pkt->count != 3) {
2302 DRM_ERROR("bad MEM_WRITE (invalid count)\n");
2303 return -EINVAL;
2304 }
2305 r = r600_cs_packet_next_reloc(p, &reloc);
2306 if (r) {
2307 DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
2308 return -EINVAL;
2309 }
2310 offset = radeon_get_ib_value(p, idx+0);
2311 offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
2312 if (offset & 0x7) {
2313 DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
2314 return -EINVAL;
2315 }
2316 if ((offset + 8) > radeon_bo_size(reloc->robj)) {
2317 DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
2318 offset + 8, radeon_bo_size(reloc->robj));
2319 return -EINVAL;
2320 }
2321 offset += reloc->lobj.gpu_offset;
2322 ib[idx+0] = offset;
2323 ib[idx+1] = upper_32_bits(offset) & 0xff;
2324 break;
2325 }
2297 case PACKET3_COPY_DW: 2326 case PACKET3_COPY_DW:
2298 if (pkt->count != 4) { 2327 if (pkt->count != 4) {
2299 DRM_ERROR("bad COPY_DW (invalid count)\n"); 2328 DRM_ERROR("bad COPY_DW (invalid count)\n");
@@ -2447,8 +2476,10 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
2447 kfree(parser->relocs); 2476 kfree(parser->relocs);
2448 for (i = 0; i < parser->nchunks; i++) { 2477 for (i = 0; i < parser->nchunks; i++) {
2449 kfree(parser->chunks[i].kdata); 2478 kfree(parser->chunks[i].kdata);
2450 kfree(parser->chunks[i].kpage[0]);
2451 kfree(parser->chunks[i].kpage[1]);
2479 if (parser->rdev && (parser->rdev->flags & RADEON_IS_AGP)) {
2480 kfree(parser->chunks[i].kpage[0]);
2481 kfree(parser->chunks[i].kpage[1]);
2482 }
2452 } 2483 }
2453 kfree(parser->chunks); 2484 kfree(parser->chunks);
2454 kfree(parser->chunks_array); 2485 kfree(parser->chunks_array);
@@ -2532,16 +2563,16 @@ int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
2532 struct radeon_cs_chunk *relocs_chunk; 2563 struct radeon_cs_chunk *relocs_chunk;
2533 unsigned idx; 2564 unsigned idx;
2534 2565
2566 *cs_reloc = NULL;
2535 if (p->chunk_relocs_idx == -1) { 2567 if (p->chunk_relocs_idx == -1) {
2536 DRM_ERROR("No relocation chunk !\n"); 2568 DRM_ERROR("No relocation chunk !\n");
2537 return -EINVAL; 2569 return -EINVAL;
2538 } 2570 }
2539 *cs_reloc = NULL;
2540 relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 2571 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
2541 idx = p->dma_reloc_idx; 2572 idx = p->dma_reloc_idx;
2542 if (idx >= relocs_chunk->length_dw) {
2573 if (idx >= p->nrelocs) {
2543 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", 2574 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
2544 idx, relocs_chunk->length_dw);
2575 idx, p->nrelocs);
2545 return -EINVAL; 2576 return -EINVAL;
2546 } 2577 }
2547 *cs_reloc = p->relocs_ptr[idx]; 2578 *cs_reloc = p->relocs_ptr[idx];
@@ -2648,16 +2679,29 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
2648 } 2679 }
2649 p->idx += 7; 2680 p->idx += 7;
2650 } else { 2681 } else {
2651 src_offset = ib[idx+2];
2652 src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
2653 dst_offset = ib[idx+1];
2654 dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
2682 if (p->family >= CHIP_RV770) {
2683 src_offset = ib[idx+2];
2684 src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
2685 dst_offset = ib[idx+1];
2686 dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
2655 2687
2656 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); 2688 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2657 ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); 2689 ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
2658 ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; 2690 ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2659 ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; 2691 ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2660 p->idx += 5; 2692 p->idx += 5;
2693 } else {
2694 src_offset = ib[idx+2];
2695 src_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
2696 dst_offset = ib[idx+1];
2697 dst_offset |= ((u64)(ib[idx+3] & 0xff0000)) << 16;
2698
2699 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2700 ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
2701 ib[idx+3] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2702 ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff) << 16;
2703 p->idx += 4;
2704 }
2661 } 2705 }
2662 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { 2706 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
2663 dev_warn(p->dev, "DMA copy src buffer too small (%llu %lu)\n", 2707 dev_warn(p->dev, "DMA copy src buffer too small (%llu %lu)\n",
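
The MEM_WRITE handling in the r600 CS checker above validates that the destination address is qword aligned and that offset + 8 still fits inside the target BO before relocating it. A standalone C sketch of those two checks; check_mem_write() and the test values are invented for illustration and are not the driver's own helper:

#include <stdio.h>
#include <stdint.h>

static int check_mem_write(uint64_t offset, uint64_t bo_size)
{
	if (offset & 0x7) {
		printf("bad MEM_WRITE (address not qwords aligned)\n");
		return -1;
	}
	if ((offset + 8) > bo_size) {
		printf("bad MEM_WRITE bo too small: 0x%llx, 0x%llx\n",
		       (unsigned long long)(offset + 8),
		       (unsigned long long)bo_size);
		return -1;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", check_mem_write(0x100, 0x1000));  /* aligned and in range: 0 */
	printf("%d\n", check_mem_write(0x1000, 0x1000)); /* aligned but past the end of the BO: -1 */
	return 0;
}
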
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 5dc744d43d12..a08f657329a0 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -132,6 +132,11 @@ extern int radeon_lockup_timeout;
132#define RADEON_VA_RESERVED_SIZE (8 << 20) 132#define RADEON_VA_RESERVED_SIZE (8 << 20)
133#define RADEON_IB_VM_MAX_SIZE (64 << 10) 133#define RADEON_IB_VM_MAX_SIZE (64 << 10)
134 134
135/* reset flags */
136#define RADEON_RESET_GFX (1 << 0)
137#define RADEON_RESET_COMPUTE (1 << 1)
138#define RADEON_RESET_DMA (1 << 2)
139
135/* 140/*
136 * Errata workarounds. 141 * Errata workarounds.
137 */ 142 */
@@ -225,12 +230,13 @@ struct radeon_fence {
225int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring); 230int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
226int radeon_fence_driver_init(struct radeon_device *rdev); 231int radeon_fence_driver_init(struct radeon_device *rdev);
227void radeon_fence_driver_fini(struct radeon_device *rdev); 232void radeon_fence_driver_fini(struct radeon_device *rdev);
233void radeon_fence_driver_force_completion(struct radeon_device *rdev);
228int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring); 234int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
229void radeon_fence_process(struct radeon_device *rdev, int ring); 235void radeon_fence_process(struct radeon_device *rdev, int ring);
230bool radeon_fence_signaled(struct radeon_fence *fence); 236bool radeon_fence_signaled(struct radeon_fence *fence);
231int radeon_fence_wait(struct radeon_fence *fence, bool interruptible); 237int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
232int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring); 238int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
233void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring); 239int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
234int radeon_fence_wait_any(struct radeon_device *rdev, 240int radeon_fence_wait_any(struct radeon_device *rdev,
235 struct radeon_fence **fences, 241 struct radeon_fence **fences,
236 bool intr); 242 bool intr);
@@ -318,7 +324,6 @@ struct radeon_bo {
318 struct list_head list; 324 struct list_head list;
319 /* Protected by tbo.reserved */ 325 /* Protected by tbo.reserved */
320 u32 placements[3]; 326 u32 placements[3];
321 u32 busy_placements[3];
322 struct ttm_placement placement; 327 struct ttm_placement placement;
323 struct ttm_buffer_object tbo; 328 struct ttm_buffer_object tbo;
324 struct ttm_bo_kmap_obj kmap; 329 struct ttm_bo_kmap_obj kmap;
@@ -648,6 +653,8 @@ struct radeon_ring {
648 u32 ptr_reg_mask; 653 u32 ptr_reg_mask;
649 u32 nop; 654 u32 nop;
650 u32 idx; 655 u32 idx;
656 u64 last_semaphore_signal_addr;
657 u64 last_semaphore_wait_addr;
651}; 658};
652 659
653/* 660/*
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 596bcbe80ed0..9056fafb00ea 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1140,9 +1140,9 @@ static struct radeon_asic rv770_asic = {
1140 .copy = { 1140 .copy = {
1141 .blit = &r600_copy_blit, 1141 .blit = &r600_copy_blit,
1142 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1142 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1143 .dma = &r600_copy_dma, 1143 .dma = &rv770_copy_dma,
1144 .dma_ring_index = R600_RING_TYPE_DMA_INDEX, 1144 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
1145 .copy = &r600_copy_dma, 1145 .copy = &rv770_copy_dma,
1146 .copy_ring_index = R600_RING_TYPE_DMA_INDEX, 1146 .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
1147 }, 1147 },
1148 .surface = { 1148 .surface = {
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 5f4882cc2152..15d70e613076 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -403,6 +403,10 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
403void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); 403void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
404void r700_cp_stop(struct radeon_device *rdev); 404void r700_cp_stop(struct radeon_device *rdev);
405void r700_cp_fini(struct radeon_device *rdev); 405void r700_cp_fini(struct radeon_device *rdev);
406int rv770_copy_dma(struct radeon_device *rdev,
407 uint64_t src_offset, uint64_t dst_offset,
408 unsigned num_gpu_pages,
409 struct radeon_fence **fence);
406 410
407/* 411/*
408 * evergreen 412 * evergreen
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 4af89126e223..33a56a09ff10 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1548,6 +1548,9 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1548 of_machine_is_compatible("PowerBook6,7")) { 1548 of_machine_is_compatible("PowerBook6,7")) {
1549 /* ibook */ 1549 /* ibook */
1550 rdev->mode_info.connector_table = CT_IBOOK; 1550 rdev->mode_info.connector_table = CT_IBOOK;
1551 } else if (of_machine_is_compatible("PowerMac3,5")) {
1552 /* PowerMac G4 Silver radeon 7500 */
1553 rdev->mode_info.connector_table = CT_MAC_G4_SILVER;
1551 } else if (of_machine_is_compatible("PowerMac4,4")) { 1554 } else if (of_machine_is_compatible("PowerMac4,4")) {
1552 /* emac */ 1555 /* emac */
1553 rdev->mode_info.connector_table = CT_EMAC; 1556 rdev->mode_info.connector_table = CT_EMAC;
@@ -2212,6 +2215,54 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
2212 CONNECTOR_OBJECT_ID_SVIDEO, 2215 CONNECTOR_OBJECT_ID_SVIDEO,
2213 &hpd); 2216 &hpd);
2214 break; 2217 break;
2218 case CT_MAC_G4_SILVER:
2219 DRM_INFO("Connector Table: %d (mac g4 silver)\n",
2220 rdev->mode_info.connector_table);
2221 /* DVI-I - tv dac, int tmds */
2222 ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
2223 hpd.hpd = RADEON_HPD_1; /* ??? */
2224 radeon_add_legacy_encoder(dev,
2225 radeon_get_encoder_enum(dev,
2226 ATOM_DEVICE_DFP1_SUPPORT,
2227 0),
2228 ATOM_DEVICE_DFP1_SUPPORT);
2229 radeon_add_legacy_encoder(dev,
2230 radeon_get_encoder_enum(dev,
2231 ATOM_DEVICE_CRT2_SUPPORT,
2232 2),
2233 ATOM_DEVICE_CRT2_SUPPORT);
2234 radeon_add_legacy_connector(dev, 0,
2235 ATOM_DEVICE_DFP1_SUPPORT |
2236 ATOM_DEVICE_CRT2_SUPPORT,
2237 DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
2238 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
2239 &hpd);
2240 /* VGA - primary dac */
2241 ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
2242 hpd.hpd = RADEON_HPD_NONE;
2243 radeon_add_legacy_encoder(dev,
2244 radeon_get_encoder_enum(dev,
2245 ATOM_DEVICE_CRT1_SUPPORT,
2246 1),
2247 ATOM_DEVICE_CRT1_SUPPORT);
2248 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT,
2249 DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
2250 CONNECTOR_OBJECT_ID_VGA,
2251 &hpd);
2252 /* TV - TV DAC */
2253 ddc_i2c.valid = false;
2254 hpd.hpd = RADEON_HPD_NONE;
2255 radeon_add_legacy_encoder(dev,
2256 radeon_get_encoder_enum(dev,
2257 ATOM_DEVICE_TV1_SUPPORT,
2258 2),
2259 ATOM_DEVICE_TV1_SUPPORT);
2260 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
2261 DRM_MODE_CONNECTOR_SVIDEO,
2262 &ddc_i2c,
2263 CONNECTOR_OBJECT_ID_SVIDEO,
2264 &hpd);
2265 break;
2215 default: 2266 default:
2216 DRM_INFO("Connector table: %d (invalid)\n", 2267 DRM_INFO("Connector table: %d (invalid)\n",
2217 rdev->mode_info.connector_table); 2268 rdev->mode_info.connector_table);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 47bf162ab9c6..2399f25ec037 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -741,7 +741,7 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
741 ret = connector_status_disconnected; 741 ret = connector_status_disconnected;
742 742
743 if (radeon_connector->ddc_bus) 743 if (radeon_connector->ddc_bus)
744 dret = radeon_ddc_probe(radeon_connector); 744 dret = radeon_ddc_probe(radeon_connector, false);
745 if (dret) { 745 if (dret) {
746 radeon_connector->detected_by_load = false; 746 radeon_connector->detected_by_load = false;
747 if (radeon_connector->edid) { 747 if (radeon_connector->edid) {
@@ -947,7 +947,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
947 return connector->status; 947 return connector->status;
948 948
949 if (radeon_connector->ddc_bus) 949 if (radeon_connector->ddc_bus)
950 dret = radeon_ddc_probe(radeon_connector); 950 dret = radeon_ddc_probe(radeon_connector, false);
951 if (dret) { 951 if (dret) {
952 radeon_connector->detected_by_load = false; 952 radeon_connector->detected_by_load = false;
953 if (radeon_connector->edid) { 953 if (radeon_connector->edid) {
@@ -1401,7 +1401,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1401 if (encoder) { 1401 if (encoder) {
1402 /* setup ddc on the bridge */ 1402 /* setup ddc on the bridge */
1403 radeon_atom_ext_encoder_setup_ddc(encoder); 1403 radeon_atom_ext_encoder_setup_ddc(encoder);
1404 if (radeon_ddc_probe(radeon_connector)) /* try DDC */ 1404 /* bridge chips are always aux */
1405 if (radeon_ddc_probe(radeon_connector, true)) /* try DDC */
1405 ret = connector_status_connected; 1406 ret = connector_status_connected;
1406 else if (radeon_connector->dac_load_detect) { /* try load detection */ 1407 else if (radeon_connector->dac_load_detect) { /* try load detection */
1407 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; 1408 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
@@ -1419,7 +1420,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1419 if (radeon_dp_getdpcd(radeon_connector)) 1420 if (radeon_dp_getdpcd(radeon_connector))
1420 ret = connector_status_connected; 1421 ret = connector_status_connected;
1421 } else { 1422 } else {
1422 if (radeon_ddc_probe(radeon_connector)) 1423 /* try non-aux ddc (DP to DVI/HMDI/etc. adapter) */
1424 if (radeon_ddc_probe(radeon_connector, false))
1423 ret = connector_status_connected; 1425 ret = connector_status_connected;
1424 } 1426 }
1425 } 1427 }
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 396baba0141a..469661fd1903 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -279,13 +279,13 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
279 p->chunks[p->chunk_ib_idx].length_dw); 279 p->chunks[p->chunk_ib_idx].length_dw);
280 return -EINVAL; 280 return -EINVAL;
281 } 281 }
282 if ((p->rdev->flags & RADEON_IS_AGP)) { 282 if (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) {
283 p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL); 283 p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
284 p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL); 284 p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
285 if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL || 285 if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
286 p->chunks[p->chunk_ib_idx].kpage[1] == NULL) { 286 p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
287 kfree(p->chunks[i].kpage[0]); 287 kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
288 kfree(p->chunks[i].kpage[1]); 288 kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
289 return -ENOMEM; 289 return -ENOMEM;
290 } 290 }
291 } 291 }
@@ -583,7 +583,8 @@ static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
583 struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx]; 583 struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
584 int i; 584 int i;
585 int size = PAGE_SIZE; 585 int size = PAGE_SIZE;
586 bool copy1 = (p->rdev->flags & RADEON_IS_AGP) ? false : true; 586 bool copy1 = (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) ?
587 false : true;
587 588
588 for (i = ibc->last_copied_page + 1; i < pg_idx; i++) { 589 for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
589 if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)), 590 if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 49b06590001e..edfc54e41842 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -897,6 +897,25 @@ static void radeon_check_arguments(struct radeon_device *rdev)
897} 897}
898 898
899/** 899/**
900 * radeon_switcheroo_quirk_long_wakeup - return true if longer d3 delay is
901 * needed for waking up.
902 *
903 * @pdev: pci dev pointer
904 */
905static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
906{
907
908 /* 6600m in a macbook pro */
909 if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
910 pdev->subsystem_device == 0x00e2) {
911 printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n");
912 return true;
913 }
914
915 return false;
916}
917
918/**
900 * radeon_switcheroo_set_state - set switcheroo state 919 * radeon_switcheroo_set_state - set switcheroo state
901 * 920 *
902 * @pdev: pci dev pointer 921 * @pdev: pci dev pointer
@@ -910,10 +929,19 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
910 struct drm_device *dev = pci_get_drvdata(pdev); 929 struct drm_device *dev = pci_get_drvdata(pdev);
911 pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; 930 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
912 if (state == VGA_SWITCHEROO_ON) { 931 if (state == VGA_SWITCHEROO_ON) {
932 unsigned d3_delay = dev->pdev->d3_delay;
933
913 printk(KERN_INFO "radeon: switched on\n"); 934 printk(KERN_INFO "radeon: switched on\n");
914 /* don't suspend or resume card normally */ 935 /* don't suspend or resume card normally */
915 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 936 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
937
938 if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
939 dev->pdev->d3_delay = 20;
940
916 radeon_resume_kms(dev); 941 radeon_resume_kms(dev);
942
943 dev->pdev->d3_delay = d3_delay;
944
917 dev->switch_power_state = DRM_SWITCH_POWER_ON; 945 dev->switch_power_state = DRM_SWITCH_POWER_ON;
918 drm_kms_helper_poll_enable(dev); 946 drm_kms_helper_poll_enable(dev);
919 } else { 947 } else {
@@ -1164,6 +1192,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
1164 struct drm_crtc *crtc; 1192 struct drm_crtc *crtc;
1165 struct drm_connector *connector; 1193 struct drm_connector *connector;
1166 int i, r; 1194 int i, r;
1195 bool force_completion = false;
1167 1196
1168 if (dev == NULL || dev->dev_private == NULL) { 1197 if (dev == NULL || dev->dev_private == NULL) {
1169 return -ENODEV; 1198 return -ENODEV;
@@ -1206,8 +1235,16 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
1206 1235
1207 mutex_lock(&rdev->ring_lock); 1236 mutex_lock(&rdev->ring_lock);
1208 /* wait for gpu to finish processing current batch */ 1237 /* wait for gpu to finish processing current batch */
1209 for (i = 0; i < RADEON_NUM_RINGS; i++) 1238 for (i = 0; i < RADEON_NUM_RINGS; i++) {
1210 radeon_fence_wait_empty_locked(rdev, i); 1239 r = radeon_fence_wait_empty_locked(rdev, i);
1240 if (r) {
1241 /* delay GPU reset to resume */
1242 force_completion = true;
1243 }
1244 }
1245 if (force_completion) {
1246 radeon_fence_driver_force_completion(rdev);
1247 }
1211 mutex_unlock(&rdev->ring_lock); 1248 mutex_unlock(&rdev->ring_lock);
1212 1249
1213 radeon_save_bios_scratch_regs(rdev); 1250 radeon_save_bios_scratch_regs(rdev);
@@ -1338,7 +1375,6 @@ retry:
1338 } 1375 }
1339 1376
1340 radeon_restore_bios_scratch_regs(rdev); 1377 radeon_restore_bios_scratch_regs(rdev);
1341 drm_helper_resume_force_mode(rdev->ddev);
1342 1378
1343 if (!r) { 1379 if (!r) {
1344 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 1380 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -1358,11 +1394,14 @@ retry:
1358 } 1394 }
1359 } 1395 }
1360 } else { 1396 } else {
1397 radeon_fence_driver_force_completion(rdev);
1361 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 1398 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1362 kfree(ring_data[i]); 1399 kfree(ring_data[i]);
1363 } 1400 }
1364 } 1401 }
1365 1402
1403 drm_helper_resume_force_mode(rdev->ddev);
1404
1366 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); 1405 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
1367 if (r) { 1406 if (r) {
1368 /* bad news, how to tell it to userspace ? */ 1407 /* bad news, how to tell it to userspace ? */
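
The suspend-path hunk above turns the ring drain into a fallible wait and adds a one-shot force-completion fallback so no fence waiter blocks forever when a ring is hung. A minimal sketch of that control flow; ring_wait_idle() and force_complete_all() are hypothetical stand-ins for the radeon_fence_* calls, not the kernel API:

#include <errno.h>
#include <stdio.h>

#define NUM_RINGS 3

/* stand-ins for radeon_fence_wait_empty_locked() and
 * radeon_fence_driver_force_completion() */
static int ring_wait_idle(int ring)
{
	return ring == 1 ? -EDEADLK : 0;	/* pretend ring 1 is hung */
}

static void force_complete_all(void)
{
	printf("forcing fence completion on all rings\n");
}

int main(void)
{
	int i, r;
	int force_completion = 0;

	/* wait on every ring; remember failures instead of aborting so
	 * suspend still completes and the GPU reset is deferred to resume */
	for (i = 0; i < NUM_RINGS; i++) {
		r = ring_wait_idle(i);
		if (r)
			force_completion = 1;
	}
	if (force_completion)
		force_complete_all();
	return 0;
}
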
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 310c0e5254ba..1da2386d7cf7 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -699,10 +699,15 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
699 if (radeon_connector->router.ddc_valid) 699 if (radeon_connector->router.ddc_valid)
700 radeon_router_select_ddc_port(radeon_connector); 700 radeon_router_select_ddc_port(radeon_connector);
701 701
702 if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || 702 if (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
703 (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) || 703 ENCODER_OBJECT_ID_NONE) {
704 (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) != 704 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
705 ENCODER_OBJECT_ID_NONE)) { 705
706 if (dig->dp_i2c_bus)
707 radeon_connector->edid = drm_get_edid(&radeon_connector->base,
708 &dig->dp_i2c_bus->adapter);
709 } else if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
710 (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
706 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; 711 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
707 712
708 if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT || 713 if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 9b1a727d3c9e..d9bf96ee299a 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -68,9 +68,11 @@
68 * 2.25.0 - eg+: new info request for num SE and num SH 68 * 2.25.0 - eg+: new info request for num SE and num SH
69 * 2.26.0 - r600-eg: fix htile size computation 69 * 2.26.0 - r600-eg: fix htile size computation
70 * 2.27.0 - r600-SI: Add CS ioctl support for async DMA 70 * 2.27.0 - r600-SI: Add CS ioctl support for async DMA
71 * 2.28.0 - r600-eg: Add MEM_WRITE packet support
72 * 2.29.0 - R500 FP16 color clear registers
71 */ 73 */
72#define KMS_DRIVER_MAJOR 2 74#define KMS_DRIVER_MAJOR 2
73#define KMS_DRIVER_MINOR 27 75#define KMS_DRIVER_MINOR 29
74#define KMS_DRIVER_PATCHLEVEL 0 76#define KMS_DRIVER_PATCHLEVEL 0
75int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 77int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
76int radeon_driver_unload_kms(struct drm_device *dev); 78int radeon_driver_unload_kms(struct drm_device *dev);
@@ -305,8 +307,8 @@ static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
305 return 0; 307 return 0;
306} 308}
307 309
308static int __devinit 310static int radeon_pci_probe(struct pci_dev *pdev,
309radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 311 const struct pci_device_id *ent)
310{ 312{
311 int ret; 313 int ret;
312 314
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 410a975a8eec..34356252567a 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -609,26 +609,20 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
609 * Returns 0 if the fences have passed, error for all other cases. 609 * Returns 0 if the fences have passed, error for all other cases.
610 * Caller must hold ring lock. 610 * Caller must hold ring lock.
611 */ 611 */
612void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring) 612int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
613{ 613{
614 uint64_t seq = rdev->fence_drv[ring].sync_seq[ring]; 614 uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
615 int r;
615 616
616 while(1) { 617 r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
617 int r; 618 if (r) {
618 r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
619 if (r == -EDEADLK) { 619 if (r == -EDEADLK) {
620 mutex_unlock(&rdev->ring_lock); 620 return -EDEADLK;
621 r = radeon_gpu_reset(rdev);
622 mutex_lock(&rdev->ring_lock);
623 if (!r)
624 continue;
625 }
626 if (r) {
627 dev_err(rdev->dev, "error waiting for ring to become"
628 " idle (%d)\n", r);
629 } 621 }
630 return; 622 dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
623 ring, r);
631 } 624 }
625 return 0;
632} 626}
633 627
634/** 628/**
@@ -854,13 +848,17 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
854 */ 848 */
855void radeon_fence_driver_fini(struct radeon_device *rdev) 849void radeon_fence_driver_fini(struct radeon_device *rdev)
856{ 850{
857 int ring; 851 int ring, r;
858 852
859 mutex_lock(&rdev->ring_lock); 853 mutex_lock(&rdev->ring_lock);
860 for (ring = 0; ring < RADEON_NUM_RINGS; ring++) { 854 for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
861 if (!rdev->fence_drv[ring].initialized) 855 if (!rdev->fence_drv[ring].initialized)
862 continue; 856 continue;
863 radeon_fence_wait_empty_locked(rdev, ring); 857 r = radeon_fence_wait_empty_locked(rdev, ring);
858 if (r) {
859 /* no need to trigger GPU reset as we are unloading */
860 radeon_fence_driver_force_completion(rdev);
861 }
864 wake_up_all(&rdev->fence_queue); 862 wake_up_all(&rdev->fence_queue);
865 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg); 863 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
866 rdev->fence_drv[ring].initialized = false; 864 rdev->fence_drv[ring].initialized = false;
@@ -868,6 +866,25 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
868 mutex_unlock(&rdev->ring_lock); 866 mutex_unlock(&rdev->ring_lock);
869} 867}
870 868
869/**
870 * radeon_fence_driver_force_completion - force all fence waiter to complete
871 *
872 * @rdev: radeon device pointer
873 *
874 * In case of GPU reset failure make sure no process keep waiting on fence
875 * that will never complete.
876 */
877void radeon_fence_driver_force_completion(struct radeon_device *rdev)
878{
879 int ring;
880
881 for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
882 if (!rdev->fence_drv[ring].initialized)
883 continue;
884 radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
885 }
886}
887
871 888
872/* 889/*
873 * Fence debugfs 890 * Fence debugfs
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index c5bddd630eb9..fc60b74ee304 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -39,7 +39,7 @@ extern u32 radeon_atom_hw_i2c_func(struct i2c_adapter *adap);
39 * radeon_ddc_probe 39 * radeon_ddc_probe
40 * 40 *
41 */ 41 */
42bool radeon_ddc_probe(struct radeon_connector *radeon_connector) 42bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux)
43{ 43{
44 u8 out = 0x0; 44 u8 out = 0x0;
45 u8 buf[8]; 45 u8 buf[8];
@@ -63,7 +63,13 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
63 if (radeon_connector->router.ddc_valid) 63 if (radeon_connector->router.ddc_valid)
64 radeon_router_select_ddc_port(radeon_connector); 64 radeon_router_select_ddc_port(radeon_connector);
65 65
66 ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2); 66 if (use_aux) {
67 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
68 ret = i2c_transfer(&dig->dp_i2c_bus->adapter, msgs, 2);
69 } else {
70 ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
71 }
72
67 if (ret != 2) 73 if (ret != 2)
68 /* Couldn't find an accessible DDC on this connector */ 74 /* Couldn't find an accessible DDC on this connector */
69 return false; 75 return false;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index f5ba2241dacc..62cd512f5c8d 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -640,6 +640,14 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc
640 enum drm_connector_status found = connector_status_disconnected; 640 enum drm_connector_status found = connector_status_disconnected;
641 bool color = true; 641 bool color = true;
642 642
643 /* just don't bother on RN50 those chip are often connected to remoting
644 * console hw and often we get failure to load detect those. So to make
645 * everyone happy report the encoder as always connected.
646 */
647 if (ASIC_IS_RN50(rdev)) {
648 return connector_status_connected;
649 }
650
643 /* save the regs we need */ 651 /* save the regs we need */
644 vclk_ecp_cntl = RREG32_PLL(RADEON_VCLK_ECP_CNTL); 652 vclk_ecp_cntl = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
645 crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL); 653 crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index d818b503b42f..4003f5a68c09 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -209,7 +209,8 @@ enum radeon_connector_table {
209 CT_RN50_POWER, 209 CT_RN50_POWER,
210 CT_MAC_X800, 210 CT_MAC_X800,
211 CT_MAC_G5_9600, 211 CT_MAC_G5_9600,
212 CT_SAM440EP 212 CT_SAM440EP,
213 CT_MAC_G4_SILVER
213}; 214};
214 215
215enum radeon_dvo_chip { 216enum radeon_dvo_chip {
@@ -558,7 +559,7 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
558 u8 val); 559 u8 val);
559extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector); 560extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
560extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector); 561extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
561extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); 562extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux);
562extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); 563extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
563 564
564extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector); 565extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 883c95d8d90f..d3aface2d12d 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -84,6 +84,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
84 rbo->placement.fpfn = 0; 84 rbo->placement.fpfn = 0;
85 rbo->placement.lpfn = 0; 85 rbo->placement.lpfn = 0;
86 rbo->placement.placement = rbo->placements; 86 rbo->placement.placement = rbo->placements;
87 rbo->placement.busy_placement = rbo->placements;
87 if (domain & RADEON_GEM_DOMAIN_VRAM) 88 if (domain & RADEON_GEM_DOMAIN_VRAM)
88 rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | 89 rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
89 TTM_PL_FLAG_VRAM; 90 TTM_PL_FLAG_VRAM;
@@ -104,14 +105,6 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
104 if (!c) 105 if (!c)
105 rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; 106 rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
106 rbo->placement.num_placement = c; 107 rbo->placement.num_placement = c;
107
108 c = 0;
109 rbo->placement.busy_placement = rbo->busy_placements;
110 if (rbo->rdev->flags & RADEON_IS_AGP) {
111 rbo->busy_placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
112 } else {
113 rbo->busy_placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
114 }
115 rbo->placement.num_busy_placement = c; 108 rbo->placement.num_busy_placement = c;
116} 109}
117 110
@@ -357,6 +350,7 @@ int radeon_bo_list_validate(struct list_head *head)
357{ 350{
358 struct radeon_bo_list *lobj; 351 struct radeon_bo_list *lobj;
359 struct radeon_bo *bo; 352 struct radeon_bo *bo;
353 u32 domain;
360 int r; 354 int r;
361 355
362 r = ttm_eu_reserve_buffers(head); 356 r = ttm_eu_reserve_buffers(head);
@@ -366,9 +360,17 @@ int radeon_bo_list_validate(struct list_head *head)
366 list_for_each_entry(lobj, head, tv.head) { 360 list_for_each_entry(lobj, head, tv.head) {
367 bo = lobj->bo; 361 bo = lobj->bo;
368 if (!bo->pin_count) { 362 if (!bo->pin_count) {
363 domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;
364
365 retry:
366 radeon_ttm_placement_from_domain(bo, domain);
369 r = ttm_bo_validate(&bo->tbo, &bo->placement, 367 r = ttm_bo_validate(&bo->tbo, &bo->placement,
370 true, false); 368 true, false);
371 if (unlikely(r)) { 369 if (unlikely(r)) {
370 if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
371 domain |= RADEON_GEM_DOMAIN_GTT;
372 goto retry;
373 }
372 return r; 374 return r;
373 } 375 }
374 } 376 }
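
The radeon_object.c change adds a retry loop: if validating a BO into VRAM fails for any reason other than a pending signal, the placement is widened to VRAM|GTT and validation is retried. A compact sketch of that fallback pattern with made-up domain flags and a stubbed validate(); this is illustrative only, not the TTM interface:

#include <errno.h>
#include <stdio.h>

#define DOMAIN_VRAM (1 << 0)
#define DOMAIN_GTT  (1 << 1)
#define ERESTARTSYS 512			/* kernel-internal value, defined here for the sketch */

/* stub: pretend VRAM is full so a VRAM-only placement fails */
static int validate(unsigned domain)
{
	return domain == DOMAIN_VRAM ? -ENOMEM : 0;
}

static int validate_with_fallback(unsigned domain)
{
	int r;
retry:
	r = validate(domain);
	if (r && r != -ERESTARTSYS && domain == DOMAIN_VRAM) {
		domain |= DOMAIN_GTT;	/* widen the placement once and retry */
		goto retry;
	}
	return r;
}

int main(void)
{
	printf("validate result: %d\n", validate_with_fallback(DOMAIN_VRAM));
	return 0;
}
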
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index aa14dbb7e4fb..0bfa656aa87d 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -234,7 +234,7 @@ static void radeon_set_power_state(struct radeon_device *rdev)
234 234
235static void radeon_pm_set_clocks(struct radeon_device *rdev) 235static void radeon_pm_set_clocks(struct radeon_device *rdev)
236{ 236{
237 int i; 237 int i, r;
238 238
239 /* no need to take locks, etc. if nothing's going to change */ 239 /* no need to take locks, etc. if nothing's going to change */
240 if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) && 240 if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
@@ -248,8 +248,17 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
248 /* wait for the rings to drain */ 248 /* wait for the rings to drain */
249 for (i = 0; i < RADEON_NUM_RINGS; i++) { 249 for (i = 0; i < RADEON_NUM_RINGS; i++) {
250 struct radeon_ring *ring = &rdev->ring[i]; 250 struct radeon_ring *ring = &rdev->ring[i];
251 if (ring->ready) 251 if (!ring->ready) {
252 radeon_fence_wait_empty_locked(rdev, i); 252 continue;
253 }
254 r = radeon_fence_wait_empty_locked(rdev, i);
255 if (r) {
256 /* needs a GPU reset dont reset here */
257 mutex_unlock(&rdev->ring_lock);
258 up_write(&rdev->pm.mclk_lock);
259 mutex_unlock(&rdev->ddev->struct_mutex);
260 return;
261 }
253 } 262 }
254 263
255 radeon_unmap_vram_bos(rdev); 264 radeon_unmap_vram_bos(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index e09521858f64..26c23bb651c6 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -194,6 +194,7 @@ struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
194 bo = dma_buf->priv; 194 bo = dma_buf->priv;
195 if (bo->gem_base.dev == dev) { 195 if (bo->gem_base.dev == dev) {
196 drm_gem_object_reference(&bo->gem_base); 196 drm_gem_object_reference(&bo->gem_base);
197 dma_buf_put(dma_buf);
197 return &bo->gem_base; 198 return &bo->gem_base;
198 } 199 }
199 } 200 }
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index ebd69562ef6c..2430d80b1871 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -770,22 +770,30 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
770 int ridx = *(int*)node->info_ent->data; 770 int ridx = *(int*)node->info_ent->data;
771 struct radeon_ring *ring = &rdev->ring[ridx]; 771 struct radeon_ring *ring = &rdev->ring[ridx];
772 unsigned count, i, j; 772 unsigned count, i, j;
773 u32 tmp;
773 774
774 radeon_ring_free_size(rdev, ring); 775 radeon_ring_free_size(rdev, ring);
775 count = (ring->ring_size / 4) - ring->ring_free_dw; 776 count = (ring->ring_size / 4) - ring->ring_free_dw;
776 seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg)); 777 tmp = RREG32(ring->wptr_reg) >> ring->ptr_reg_shift;
777 seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg)); 778 seq_printf(m, "wptr(0x%04x): 0x%08x [%5d]\n", ring->wptr_reg, tmp, tmp);
779 tmp = RREG32(ring->rptr_reg) >> ring->ptr_reg_shift;
780 seq_printf(m, "rptr(0x%04x): 0x%08x [%5d]\n", ring->rptr_reg, tmp, tmp);
778 if (ring->rptr_save_reg) { 781 if (ring->rptr_save_reg) {
779 seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg, 782 seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg,
780 RREG32(ring->rptr_save_reg)); 783 RREG32(ring->rptr_save_reg));
781 } 784 }
782 seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr); 785 seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr);
783 seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr); 786 seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", ring->rptr, ring->rptr);
787 seq_printf(m, "last semaphore signal addr : 0x%016llx\n", ring->last_semaphore_signal_addr);
788 seq_printf(m, "last semaphore wait addr : 0x%016llx\n", ring->last_semaphore_wait_addr);
784 seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); 789 seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
785 seq_printf(m, "%u dwords in ring\n", count); 790 seq_printf(m, "%u dwords in ring\n", count);
786 i = ring->rptr; 791 /* print 8 dw before current rptr as often it's the last executed
787 for (j = 0; j <= count; j++) { 792 * packet that is the root issue
788 seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]); 793 */
794 i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
795 for (j = 0; j <= (count + 32); j++) {
796 seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
789 i = (i + 1) & ring->ptr_mask; 797 i = (i + 1) & ring->ptr_mask;
790 } 798 }
791 return 0; 799 return 0;
@@ -794,11 +802,15 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
794static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX; 802static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
795static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX; 803static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
796static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX; 804static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
805static int radeon_ring_type_dma1_index = R600_RING_TYPE_DMA_INDEX;
806static int radeon_ring_type_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX;
797 807
798static struct drm_info_list radeon_debugfs_ring_info_list[] = { 808static struct drm_info_list radeon_debugfs_ring_info_list[] = {
799 {"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index}, 809 {"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
800 {"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index}, 810 {"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
801 {"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index}, 811 {"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
812 {"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_ring_type_dma1_index},
813 {"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_ring_type_dma2_index},
802}; 814};
803 815
804static int radeon_debugfs_sa_info(struct seq_file *m, void *data) 816static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
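
The debugfs dump above now starts 32 dwords before the current read pointer, computing the start index as (rptr + ptr_mask + 1 - 32) & ptr_mask so it wraps correctly in a power-of-two ring. A small sketch of that wraparound arithmetic with an arbitrary ring size and read pointer:

#include <stdio.h>

int main(void)
{
	unsigned ring_size = 256;		/* entries, power of two */
	unsigned ptr_mask = ring_size - 1;
	unsigned rptr = 5;			/* near the start so the window wraps */
	unsigned i, j;

	/* start 32 entries before rptr, modulo the ring size */
	i = (rptr + ptr_mask + 1 - 32) & ptr_mask;
	for (j = 0; j < 8; j++) {		/* first few indices of the window */
		printf("r[%5u]\n", i);
		i = (i + 1) & ptr_mask;
	}
	return 0;
}
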
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 97f3ece81cd2..8dcc20f53d73 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -95,6 +95,10 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
95 /* we assume caller has already allocated space on waiters ring */ 95 /* we assume caller has already allocated space on waiters ring */
96 radeon_semaphore_emit_wait(rdev, waiter, semaphore); 96 radeon_semaphore_emit_wait(rdev, waiter, semaphore);
97 97
98 /* for debugging lockup only, used by sysfs debug files */
99 rdev->ring[signaler].last_semaphore_signal_addr = semaphore->gpu_addr;
100 rdev->ring[waiter].last_semaphore_wait_addr = semaphore->gpu_addr;
101
98 return 0; 102 return 0;
99} 103}
100 104
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515
index 911a8fbd32bb..78d5e99d759d 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rv515
+++ b/drivers/gpu/drm/radeon/reg_srcs/rv515
@@ -324,6 +324,8 @@ rv515 0x6d40
3240x46AC US_OUT_FMT_2 3240x46AC US_OUT_FMT_2
3250x46B0 US_OUT_FMT_3 3250x46B0 US_OUT_FMT_3
3260x46B4 US_W_FMT 3260x46B4 US_W_FMT
3270x46C0 RB3D_COLOR_CLEAR_VALUE_AR
3280x46C4 RB3D_COLOR_CLEAR_VALUE_GB
3270x4BC0 FG_FOG_BLEND 3290x4BC0 FG_FOG_BLEND
3280x4BC4 FG_FOG_FACTOR 3300x4BC4 FG_FOG_FACTOR
3290x4BC8 FG_FOG_COLOR_R 3310x4BC8 FG_FOG_COLOR_R
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 87c979c4f721..1b2444f4d8f4 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -887,6 +887,80 @@ static int rv770_mc_init(struct radeon_device *rdev)
887 return 0; 887 return 0;
888} 888}
889 889
890/**
891 * rv770_copy_dma - copy pages using the DMA engine
892 *
893 * @rdev: radeon_device pointer
894 * @src_offset: src GPU address
895 * @dst_offset: dst GPU address
896 * @num_gpu_pages: number of GPU pages to xfer
897 * @fence: radeon fence object
898 *
899 * Copy GPU paging using the DMA engine (r7xx).
900 * Used by the radeon ttm implementation to move pages if
901 * registered as the asic copy callback.
902 */
903int rv770_copy_dma(struct radeon_device *rdev,
904 uint64_t src_offset, uint64_t dst_offset,
905 unsigned num_gpu_pages,
906 struct radeon_fence **fence)
907{
908 struct radeon_semaphore *sem = NULL;
909 int ring_index = rdev->asic->copy.dma_ring_index;
910 struct radeon_ring *ring = &rdev->ring[ring_index];
911 u32 size_in_dw, cur_size_in_dw;
912 int i, num_loops;
913 int r = 0;
914
915 r = radeon_semaphore_create(rdev, &sem);
916 if (r) {
917 DRM_ERROR("radeon: moving bo (%d).\n", r);
918 return r;
919 }
920
921 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
922 num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
923 r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
924 if (r) {
925 DRM_ERROR("radeon: moving bo (%d).\n", r);
926 radeon_semaphore_free(rdev, &sem, NULL);
927 return r;
928 }
929
930 if (radeon_fence_need_sync(*fence, ring->idx)) {
931 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
932 ring->idx);
933 radeon_fence_note_sync(*fence, ring->idx);
934 } else {
935 radeon_semaphore_free(rdev, &sem, NULL);
936 }
937
938 for (i = 0; i < num_loops; i++) {
939 cur_size_in_dw = size_in_dw;
940 if (cur_size_in_dw > 0xFFFF)
941 cur_size_in_dw = 0xFFFF;
942 size_in_dw -= cur_size_in_dw;
943 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
944 radeon_ring_write(ring, dst_offset & 0xfffffffc);
945 radeon_ring_write(ring, src_offset & 0xfffffffc);
946 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
947 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
948 src_offset += cur_size_in_dw * 4;
949 dst_offset += cur_size_in_dw * 4;
950 }
951
952 r = radeon_fence_emit(rdev, fence, ring->idx);
953 if (r) {
954 radeon_ring_unlock_undo(rdev, ring);
955 return r;
956 }
957
958 radeon_ring_unlock_commit(rdev, ring);
959 radeon_semaphore_free(rdev, &sem, *fence);
960
961 return r;
962}
963
890static int rv770_startup(struct radeon_device *rdev) 964static int rv770_startup(struct radeon_device *rdev)
891{ 965{
892 struct radeon_ring *ring; 966 struct radeon_ring *ring;
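
rv770_copy_dma() above splits the transfer into packets of at most 0xFFFF dwords and reserves num_loops * 5 + 8 ring dwords before emitting them. A short arithmetic sketch of that chunking; the constants mirror the values used in the function, everything else is made up:

#include <stdio.h>

#define GPU_PAGE_SHIFT 12		/* 4 KiB GPU pages */
#define MAX_DW_PER_PACKET 0xFFFF	/* copy packet payload limit in dwords */

int main(void)
{
	unsigned num_pages = 1024;	/* a 4 MiB copy */
	unsigned size_in_dw = (num_pages << GPU_PAGE_SHIFT) / 4;
	unsigned num_loops = (size_in_dw + MAX_DW_PER_PACKET - 1) / MAX_DW_PER_PACKET;
	unsigned ring_dw = num_loops * 5 + 8;	/* 5 dwords per packet plus sync/fence room */
	unsigned remaining = size_in_dw, cur, i;

	printf("%u dwords total -> %u packets, %u ring dwords reserved\n",
	       size_in_dw, num_loops, ring_dw);
	for (i = 0; i < num_loops; i++) {
		cur = remaining > MAX_DW_PER_PACKET ? MAX_DW_PER_PACKET : remaining;
		remaining -= cur;	/* each loop emits one copy packet */
	}
	return 0;
}
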
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index ef683653f0b7..ae8b48205a6c 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2126,15 +2126,13 @@ bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
2126 return radeon_ring_test_lockup(rdev, ring); 2126 return radeon_ring_test_lockup(rdev, ring);
2127} 2127}
2128 2128
2129static int si_gpu_soft_reset(struct radeon_device *rdev) 2129static void si_gpu_soft_reset_gfx(struct radeon_device *rdev)
2130{ 2130{
2131 struct evergreen_mc_save save;
2132 u32 grbm_reset = 0; 2131 u32 grbm_reset = 0;
2133 2132
2134 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) 2133 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
2135 return 0; 2134 return;
2136 2135
2137 dev_info(rdev->dev, "GPU softreset \n");
2138 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", 2136 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
2139 RREG32(GRBM_STATUS)); 2137 RREG32(GRBM_STATUS));
2140 dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n", 2138 dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
@@ -2145,10 +2143,7 @@ static int si_gpu_soft_reset(struct radeon_device *rdev)
2145 RREG32(GRBM_STATUS_SE1)); 2143 RREG32(GRBM_STATUS_SE1));
2146 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", 2144 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
2147 RREG32(SRBM_STATUS)); 2145 RREG32(SRBM_STATUS));
2148 evergreen_mc_stop(rdev, &save); 2146
2149 if (radeon_mc_wait_for_idle(rdev)) {
2150 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
2151 }
2152 /* Disable CP parsing/prefetching */ 2147 /* Disable CP parsing/prefetching */
2153 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT); 2148 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
2154 2149
@@ -2173,8 +2168,7 @@ static int si_gpu_soft_reset(struct radeon_device *rdev)
2173 udelay(50); 2168 udelay(50);
2174 WREG32(GRBM_SOFT_RESET, 0); 2169 WREG32(GRBM_SOFT_RESET, 0);
2175 (void)RREG32(GRBM_SOFT_RESET); 2170 (void)RREG32(GRBM_SOFT_RESET);
2176 /* Wait a little for things to settle down */ 2171
2177 udelay(50);
2178 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", 2172 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
2179 RREG32(GRBM_STATUS)); 2173 RREG32(GRBM_STATUS));
2180 dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n", 2174 dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
@@ -2185,13 +2179,81 @@ static int si_gpu_soft_reset(struct radeon_device *rdev)
2185 RREG32(GRBM_STATUS_SE1)); 2179 RREG32(GRBM_STATUS_SE1));
2186 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", 2180 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
2187 RREG32(SRBM_STATUS)); 2181 RREG32(SRBM_STATUS));
2182}
2183
2184static void si_gpu_soft_reset_dma(struct radeon_device *rdev)
2185{
2186 u32 tmp;
2187
2188 if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
2189 return;
2190
2191 dev_info(rdev->dev, " DMA_STATUS_REG = 0x%08X\n",
2192 RREG32(DMA_STATUS_REG));
2193
2194 /* dma0 */
2195 tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
2196 tmp &= ~DMA_RB_ENABLE;
2197 WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
2198
2199 /* dma1 */
2200 tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
2201 tmp &= ~DMA_RB_ENABLE;
2202 WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
2203
2204 /* Reset dma */
2205 WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
2206 RREG32(SRBM_SOFT_RESET);
2207 udelay(50);
2208 WREG32(SRBM_SOFT_RESET, 0);
2209
2210 dev_info(rdev->dev, " DMA_STATUS_REG = 0x%08X\n",
2211 RREG32(DMA_STATUS_REG));
2212}
2213
2214static int si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
2215{
2216 struct evergreen_mc_save save;
2217
2218 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
2219 reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
2220
2221 if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
2222 reset_mask &= ~RADEON_RESET_DMA;
2223
2224 if (reset_mask == 0)
2225 return 0;
2226
2227 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
2228
2229 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
2230 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
2231 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
2232 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
2233
2234 evergreen_mc_stop(rdev, &save);
2235 if (radeon_mc_wait_for_idle(rdev)) {
2236 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
2237 }
2238
2239 if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
2240 si_gpu_soft_reset_gfx(rdev);
2241
2242 if (reset_mask & RADEON_RESET_DMA)
2243 si_gpu_soft_reset_dma(rdev);
2244
2245 /* Wait a little for things to settle down */
2246 udelay(50);
2247
2188 evergreen_mc_resume(rdev, &save); 2248 evergreen_mc_resume(rdev, &save);
2189 return 0; 2249 return 0;
2190} 2250}
2191 2251
2192int si_asic_reset(struct radeon_device *rdev) 2252int si_asic_reset(struct radeon_device *rdev)
2193{ 2253{
2194 return si_gpu_soft_reset(rdev); 2254 return si_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
2255 RADEON_RESET_COMPUTE |
2256 RADEON_RESET_DMA));
2195} 2257}
2196 2258
2197/* MC */ 2259/* MC */
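
The reworked SI reset path takes a mask of engines to reset and drops the bits for engines that are already idle before touching anything. A tiny sketch of that mask filtering, reusing the RADEON_RESET_* values added to radeon.h above, with stubbed idle checks in place of the GRBM_STATUS/DMA_STATUS_REG reads:

#include <stdio.h>

#define RADEON_RESET_GFX     (1 << 0)
#define RADEON_RESET_COMPUTE (1 << 1)
#define RADEON_RESET_DMA     (1 << 2)

/* stubs standing in for the hardware status register reads */
static int gfx_busy(void) { return 1; }
static int dma_busy(void) { return 0; }

int main(void)
{
	unsigned reset_mask = RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_DMA;

	if (!gfx_busy())
		reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
	if (!dma_busy())
		reset_mask &= ~RADEON_RESET_DMA;

	if (reset_mask == 0) {
		printf("nothing to reset\n");
		return 0;
	}
	printf("GPU softreset: 0x%08X\n", reset_mask);
	return 0;
}
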
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 62b46215d423..c056aae814f0 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -62,6 +62,22 @@
62 62
63#define SRBM_STATUS 0xE50 63#define SRBM_STATUS 0xE50
64 64
65#define SRBM_SOFT_RESET 0x0E60
66#define SOFT_RESET_BIF (1 << 1)
67#define SOFT_RESET_DC (1 << 5)
68#define SOFT_RESET_DMA1 (1 << 6)
69#define SOFT_RESET_GRBM (1 << 8)
70#define SOFT_RESET_HDP (1 << 9)
71#define SOFT_RESET_IH (1 << 10)
72#define SOFT_RESET_MC (1 << 11)
73#define SOFT_RESET_ROM (1 << 14)
74#define SOFT_RESET_SEM (1 << 15)
75#define SOFT_RESET_VMC (1 << 17)
76#define SOFT_RESET_DMA (1 << 20)
77#define SOFT_RESET_TST (1 << 21)
78#define SOFT_RESET_REGBB (1 << 22)
79#define SOFT_RESET_ORB (1 << 23)
80
65#define CC_SYS_RB_BACKEND_DISABLE 0xe80 81#define CC_SYS_RB_BACKEND_DISABLE 0xe80
66#define GC_USER_SYS_RB_BACKEND_DISABLE 0xe84 82#define GC_USER_SYS_RB_BACKEND_DISABLE 0xe84
67 83
@@ -1013,6 +1029,8 @@
1013# define DATA_SWAP_ENABLE (1 << 3) 1029# define DATA_SWAP_ENABLE (1 << 3)
1014# define FENCE_SWAP_ENABLE (1 << 4) 1030# define FENCE_SWAP_ENABLE (1 << 4)
1015# define CTXEMPTY_INT_ENABLE (1 << 28) 1031# define CTXEMPTY_INT_ENABLE (1 << 28)
1032#define DMA_STATUS_REG 0xd034
1033# define DMA_IDLE (1 << 0)
1016#define DMA_TILING_CONFIG 0xd0b8 1034#define DMA_TILING_CONFIG 0xd0b8
1017 1035
1018#define DMA_PACKET(cmd, b, t, s, n) ((((cmd) & 0xF) << 28) | \ 1036#define DMA_PACKET(cmd, b, t, s, n) ((((cmd) & 0xF) << 28) | \
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 1c350fc4e449..d1d5306ebf24 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -33,7 +33,7 @@
33 * Hardware initialization 33 * Hardware initialization
34 */ 34 */
35 35
36static int __devinit shmob_drm_init_interface(struct shmob_drm_device *sdev) 36static int shmob_drm_init_interface(struct shmob_drm_device *sdev)
37{ 37{
38 static const u32 ldmt1r[] = { 38 static const u32 ldmt1r[] = {
39 [SHMOB_DRM_IFACE_RGB8] = LDMT1R_MIFTYP_RGB8, 39 [SHMOB_DRM_IFACE_RGB8] = LDMT1R_MIFTYP_RGB8,
@@ -67,7 +67,7 @@ static int __devinit shmob_drm_init_interface(struct shmob_drm_device *sdev)
67 return 0; 67 return 0;
68} 68}
69 69
70static int __devinit shmob_drm_setup_clocks(struct shmob_drm_device *sdev, 70static int shmob_drm_setup_clocks(struct shmob_drm_device *sdev,
71 enum shmob_drm_clk_source clksrc) 71 enum shmob_drm_clk_source clksrc)
72{ 72{
73 struct clk *clk; 73 struct clk *clk;
@@ -330,12 +330,12 @@ static const struct dev_pm_ops shmob_drm_pm_ops = {
330 * Platform driver 330 * Platform driver
331 */ 331 */
332 332
333static int __devinit shmob_drm_probe(struct platform_device *pdev) 333static int shmob_drm_probe(struct platform_device *pdev)
334{ 334{
335 return drm_platform_init(&shmob_drm_driver, pdev); 335 return drm_platform_init(&shmob_drm_driver, pdev);
336} 336}
337 337
338static int __devexit shmob_drm_remove(struct platform_device *pdev) 338static int shmob_drm_remove(struct platform_device *pdev)
339{ 339{
340 drm_platform_exit(&shmob_drm_driver, pdev); 340 drm_platform_exit(&shmob_drm_driver, pdev);
341 341
@@ -344,7 +344,7 @@ static int __devexit shmob_drm_remove(struct platform_device *pdev)
344 344
345static struct platform_driver shmob_drm_platform_driver = { 345static struct platform_driver shmob_drm_platform_driver = {
346 .probe = shmob_drm_probe, 346 .probe = shmob_drm_probe,
347 .remove = __devexit_p(shmob_drm_remove), 347 .remove = shmob_drm_remove,
348 .driver = { 348 .driver = {
349 .owner = THIS_MODULE, 349 .owner = THIS_MODULE,
350 .name = "shmob-drm", 350 .name = "shmob-drm",
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 074410371e2a..656b2e3334a6 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -102,12 +102,12 @@ static int tegra_dc_set_timings(struct tegra_dc *dc,
102 ((mode->hsync_end - mode->hsync_start) << 0); 102 ((mode->hsync_end - mode->hsync_start) << 0);
103 tegra_dc_writel(dc, value, DC_DISP_SYNC_WIDTH); 103 tegra_dc_writel(dc, value, DC_DISP_SYNC_WIDTH);
104 104
105 value = ((mode->vsync_start - mode->vdisplay) << 16) |
106 ((mode->hsync_start - mode->hdisplay) << 0);
107 tegra_dc_writel(dc, value, DC_DISP_BACK_PORCH);
108
109 value = ((mode->vtotal - mode->vsync_end) << 16) | 105 value = ((mode->vtotal - mode->vsync_end) << 16) |
110 ((mode->htotal - mode->hsync_end) << 0); 106 ((mode->htotal - mode->hsync_end) << 0);
107 tegra_dc_writel(dc, value, DC_DISP_BACK_PORCH);
108
109 value = ((mode->vsync_start - mode->vdisplay) << 16) |
110 ((mode->hsync_start - mode->hdisplay) << 0);
111 tegra_dc_writel(dc, value, DC_DISP_FRONT_PORCH); 111 tegra_dc_writel(dc, value, DC_DISP_FRONT_PORCH);
112 112
113 value = (mode->vdisplay << 16) | mode->hdisplay; 113 value = (mode->vdisplay << 16) | mode->hdisplay;
@@ -221,8 +221,7 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc,
221 win.stride = crtc->fb->pitches[0]; 221 win.stride = crtc->fb->pitches[0];
222 222
223 /* program window registers */ 223 /* program window registers */
224 value = tegra_dc_readl(dc, DC_CMD_DISPLAY_WINDOW_HEADER); 224 value = WINDOW_A_SELECT;
225 value |= WINDOW_A_SELECT;
226 tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER); 225 tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
227 226
228 tegra_dc_writel(dc, win.fmt, DC_WIN_COLOR_DEPTH); 227 tegra_dc_writel(dc, win.fmt, DC_WIN_COLOR_DEPTH);
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 3a843a77ddc7..741b5dc2742c 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -204,24 +204,6 @@ extern int tegra_output_parse_dt(struct tegra_output *output);
204extern int tegra_output_init(struct drm_device *drm, struct tegra_output *output); 204extern int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
205extern int tegra_output_exit(struct tegra_output *output); 205extern int tegra_output_exit(struct tegra_output *output);
206 206
207/* from gem.c */
208extern struct tegra_gem_object *tegra_gem_alloc(struct drm_device *drm,
209 size_t size);
210extern int tegra_gem_handle_create(struct drm_device *drm,
211 struct drm_file *file, size_t size,
212 unsigned long flags, uint32_t *handle);
213extern int tegra_gem_dumb_create(struct drm_file *file, struct drm_device *drm,
214 struct drm_mode_create_dumb *args);
215extern int tegra_gem_dumb_map_offset(struct drm_file *file,
216 struct drm_device *drm, uint32_t handle,
217 uint64_t *offset);
218extern int tegra_gem_dumb_destroy(struct drm_file *file,
219 struct drm_device *drm, uint32_t handle);
220extern int tegra_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
221extern int tegra_gem_init_object(struct drm_gem_object *obj);
222extern void tegra_gem_free_object(struct drm_gem_object *obj);
223extern struct vm_operations_struct tegra_gem_vm_ops;
224
225/* from fb.c */ 207/* from fb.c */
226extern int tegra_drm_fb_init(struct drm_device *drm); 208extern int tegra_drm_fb_init(struct drm_device *drm);
227extern void tegra_drm_fb_exit(struct drm_device *drm); 209extern void tegra_drm_fb_exit(struct drm_device *drm);
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index ab4016412bbf..e060c7e6434d 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -149,7 +149,7 @@ struct tmds_config {
149}; 149};
150 150
151static const struct tmds_config tegra2_tmds_config[] = { 151static const struct tmds_config tegra2_tmds_config[] = {
152 { /* 480p modes */ 152 { /* slow pixel clock modes */
153 .pclk = 27000000, 153 .pclk = 27000000,
154 .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | 154 .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
155 SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(0) | 155 SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(0) |
@@ -163,21 +163,8 @@ static const struct tmds_config tegra2_tmds_config[] = {
163 DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) | 163 DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
164 DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) | 164 DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
165 DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA), 165 DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
166 }, { /* 720p modes */ 166 },
167 .pclk = 74250000, 167 { /* high pixel clock modes */
168 .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
169 SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) |
170 SOR_PLL_TX_REG_LOAD(3),
171 .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
172 .pe_current = PE_CURRENT0(PE_CURRENT_6_0_mA) |
173 PE_CURRENT1(PE_CURRENT_6_0_mA) |
174 PE_CURRENT2(PE_CURRENT_6_0_mA) |
175 PE_CURRENT3(PE_CURRENT_6_0_mA),
176 .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
177 DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
178 DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
179 DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
180 }, { /* 1080p modes */
181 .pclk = UINT_MAX, 168 .pclk = UINT_MAX,
182 .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | 169 .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
183 SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) | 170 SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) |
@@ -479,7 +466,7 @@ static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
479 return; 466 return;
480 } 467 }
481 468
482 h_front_porch = mode->htotal - mode->hsync_end; 469 h_front_porch = mode->hsync_start - mode->hdisplay;
483 memset(&frame, 0, sizeof(frame)); 470 memset(&frame, 0, sizeof(frame));
484 frame.r = HDMI_AVI_R_SAME; 471 frame.r = HDMI_AVI_R_SAME;
485 472
@@ -634,8 +621,8 @@ static int tegra_output_hdmi_enable(struct tegra_output *output)
634 621
635 pclk = mode->clock * 1000; 622 pclk = mode->clock * 1000;
636 h_sync_width = mode->hsync_end - mode->hsync_start; 623 h_sync_width = mode->hsync_end - mode->hsync_start;
637 h_front_porch = mode->htotal - mode->hsync_end; 624 h_back_porch = mode->htotal - mode->hsync_end;
638 h_back_porch = mode->hsync_start - mode->hdisplay; 625 h_front_porch = mode->hsync_start - mode->hdisplay;
639 626
640 err = regulator_enable(hdmi->vdd); 627 err = regulator_enable(hdmi->vdd);
641 if (err < 0) { 628 if (err < 0) {
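
The tegra dc.c and hdmi.c hunks both correct swapped porch arithmetic: the front porch is the gap between the end of active video and the start of sync, the back porch the gap between the end of sync and the line total. A quick numeric check with a standard 1080p60 CEA timing; the timing values are generic, not taken from the driver:

#include <stdio.h>

int main(void)
{
	/* 1920x1080@60 CEA-861 horizontal timing */
	unsigned hdisplay = 1920, hsync_start = 2008, hsync_end = 2052, htotal = 2200;

	unsigned h_front_porch = hsync_start - hdisplay;	/* 88  */
	unsigned h_sync_width  = hsync_end - hsync_start;	/* 44  */
	unsigned h_back_porch  = htotal - hsync_end;		/* 148 */

	printf("front porch %u, sync %u, back porch %u\n",
	       h_front_porch, h_sync_width, h_back_porch);
	return 0;
}
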
diff --git a/drivers/gpu/drm/tegra/host1x.c b/drivers/gpu/drm/tegra/host1x.c
index bdb97a564d82..5d17b113a6fc 100644
--- a/drivers/gpu/drm/tegra/host1x.c
+++ b/drivers/gpu/drm/tegra/host1x.c
@@ -239,6 +239,8 @@ int host1x_register_client(struct host1x *host1x, struct host1x_client *client)
239 } 239 }
240 } 240 }
241 241
242 client->host1x = host1x;
243
242 return 0; 244 return 0;
243} 245}
244 246
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 33d20be87db5..52b20b12c83a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -434,6 +434,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
434 bo->mem = tmp_mem; 434 bo->mem = tmp_mem;
435 bdev->driver->move_notify(bo, mem); 435 bdev->driver->move_notify(bo, mem);
436 bo->mem = *mem; 436 bo->mem = *mem;
437 *mem = tmp_mem;
437 } 438 }
438 439
439 goto out_err; 440 goto out_err;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 9e9c5d2a5c74..44420fca7dfa 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -344,8 +344,12 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
344 344
345 if (ttm->state == tt_unpopulated) { 345 if (ttm->state == tt_unpopulated) {
346 ret = ttm->bdev->driver->ttm_tt_populate(ttm); 346 ret = ttm->bdev->driver->ttm_tt_populate(ttm);
347 if (ret) 347 if (ret) {
348 /* if we fail here don't nuke the mm node
349 * as the bo still owns it */
350 old_copy.mm_node = NULL;
348 goto out1; 351 goto out1;
352 }
349 } 353 }
350 354
351 add = 0; 355 add = 0;
@@ -371,8 +375,11 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
371 prot); 375 prot);
372 } else 376 } else
373 ret = ttm_copy_io_page(new_iomap, old_iomap, page); 377 ret = ttm_copy_io_page(new_iomap, old_iomap, page);
374 if (ret) 378 if (ret) {
 379 /* failing here means keep the old copy as-is */
380 old_copy.mm_node = NULL;
375 goto out1; 381 goto out1;
382 }
376 } 383 }
377 mb(); 384 mb();
378out2: 385out2:
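Both error-path changes above follow one pattern: the shared cleanup label releases whatever old_copy still references, so an error taken while the buffer object itself still owns that node has to clear the copy's pointer first, otherwise the node is released twice. A generic sketch of the pattern with hypothetical names (release_node(), struct placement), not the actual TTM helpers:

#include <stdlib.h>

struct placement {
	void *mm_node;   /* owned by whoever holds a non-NULL pointer */
};

static void release_node(struct placement *p)
{
	free(p->mm_node);   /* free(NULL) is a no-op, so a cleared copy is safe */
	p->mm_node = NULL;
}

/* Returns nonzero on failure; on failure the caller keeps ownership of
 * mem->mm_node, just as the bo keeps its node in the hunks above. */
static int move_buffer(struct placement *mem)
{
	struct placement old_copy = *mem;   /* temporarily aliases the node  */
	int ret = -1;                       /* pretend the copy step failed  */

	if (ret) {
		/* Drop our alias before jumping to the shared cleanup path,
		 * so it does not free memory the caller still owns. */
		old_copy.mm_node = NULL;
		goto out;
	}
out:
	release_node(&old_copy);
	return ret;
}

int main(void)
{
	struct placement mem = { .mm_node = malloc(16) };

	move_buffer(&mem);   /* fails, but does not free mem.mm_node */
	free(mem.mm_node);   /* caller still owns and frees the node */
	return 0;
}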
@@ -654,11 +661,13 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
654 */ 661 */
655 662
656 set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); 663 set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
664
665 /* ttm_buffer_object_transfer accesses bo->sync_obj */
666 ret = ttm_buffer_object_transfer(bo, &ghost_obj);
657 spin_unlock(&bdev->fence_lock); 667 spin_unlock(&bdev->fence_lock);
658 if (tmp_obj) 668 if (tmp_obj)
659 driver->sync_obj_unref(&tmp_obj); 669 driver->sync_obj_unref(&tmp_obj);
660 670
661 ret = ttm_buffer_object_transfer(bo, &ghost_obj);
662 if (ret) 671 if (ret)
663 return ret; 672 return ret;
664 673
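The last hunk reorders the accel-cleanup path so that ttm_buffer_object_transfer() runs before fence_lock is dropped, because the transfer reads bo->sync_obj and that field is only stable while the lock is held. The general rule it restores, sketched with an ordinary pthread mutex and hypothetical names rather than the TTM API:

#include <pthread.h>

struct fenced_object {
	pthread_mutex_t fence_lock;
	void *sync_obj;            /* protected by fence_lock */
};

/* Snapshot sync_obj for a detached "ghost" copy of the object. The read has
 * to sit inside the critical section: once fence_lock is released, another
 * thread may replace or free sync_obj, and a late read becomes a race. */
static void *snapshot_sync_obj(struct fenced_object *bo)
{
	void *snapshot;

	pthread_mutex_lock(&bo->fence_lock);
	snapshot = bo->sync_obj;            /* safe: lock still held */
	pthread_mutex_unlock(&bo->fence_lock);

	return snapshot;
}

In the patch this means the transfer call moves above spin_unlock(&bdev->fence_lock), while unreferencing the old tmp_obj can safely stay after the unlock.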
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 512f44add89f..fe5cdbcf2636 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -22,13 +22,17 @@
22static u8 *udl_get_edid(struct udl_device *udl) 22static u8 *udl_get_edid(struct udl_device *udl)
23{ 23{
24 u8 *block; 24 u8 *block;
25 char rbuf[3]; 25 char *rbuf;
26 int ret, i; 26 int ret, i;
27 27
28 block = kmalloc(EDID_LENGTH, GFP_KERNEL); 28 block = kmalloc(EDID_LENGTH, GFP_KERNEL);
29 if (block == NULL) 29 if (block == NULL)
30 return NULL; 30 return NULL;
31 31
32 rbuf = kmalloc(2, GFP_KERNEL);
33 if (rbuf == NULL)
34 goto error;
35
32 for (i = 0; i < EDID_LENGTH; i++) { 36 for (i = 0; i < EDID_LENGTH; i++) {
33 ret = usb_control_msg(udl->ddev->usbdev, 37 ret = usb_control_msg(udl->ddev->usbdev,
34 usb_rcvctrlpipe(udl->ddev->usbdev, 0), (0x02), 38 usb_rcvctrlpipe(udl->ddev->usbdev, 0), (0x02),
@@ -36,16 +40,17 @@ static u8 *udl_get_edid(struct udl_device *udl)
36 HZ); 40 HZ);
37 if (ret < 1) { 41 if (ret < 1) {
38 DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret); 42 DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
39 i--;
40 goto error; 43 goto error;
41 } 44 }
42 block[i] = rbuf[1]; 45 block[i] = rbuf[1];
43 } 46 }
44 47
48 kfree(rbuf);
45 return block; 49 return block;
46 50
47error: 51error:
48 kfree(block); 52 kfree(block);
53 kfree(rbuf);
49 return NULL; 54 return NULL;
50} 55}
51 56
@@ -57,6 +62,14 @@ static int udl_get_modes(struct drm_connector *connector)
57 62
58 edid = (struct edid *)udl_get_edid(udl); 63 edid = (struct edid *)udl_get_edid(udl);
59 64
65 /*
66 * We only read the main block, but if the monitor reports extension
67 * blocks then the drm edid code expects them to be present, so patch
68 * the extension count to 0.
69 */
70 edid->checksum += edid->extensions;
71 edid->extensions = 0;
72
60 drm_mode_connector_update_edid_property(connector, edid); 73 drm_mode_connector_update_edid_property(connector, edid);
61 ret = drm_add_edid_modes(connector, edid); 74 ret = drm_add_edid_modes(connector, edid);
62 kfree(edid); 75 kfree(edid);
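The extension-count patching above works because an EDID base block is only accepted when its 128 bytes sum to 0 modulo 256: clearing the extension-count byte (offset 126) lowers that sum by the old count, so adding the same value into the checksum byte (offset 127) keeps the block valid. A small standalone check of that arithmetic, not driver code:

#include <stdint.h>
#include <stdio.h>

#define EDID_LENGTH      128
#define EDID_EXTENSIONS  126  /* extension-count byte */
#define EDID_CHECKSUM    127  /* checksum byte        */

/* An EDID block is valid when its byte sum is 0 modulo 256. */
static int edid_block_valid(const uint8_t *block)
{
	uint8_t sum = 0;
	int i;

	for (i = 0; i < EDID_LENGTH; i++)
		sum += block[i];

	return sum == 0;
}

int main(void)
{
	uint8_t block[EDID_LENGTH] = { 0 };

	/* Fake a block reporting 1 extension, with a matching checksum. */
	block[EDID_EXTENSIONS] = 1;
	block[EDID_CHECKSUM] = (uint8_t)(0 - 1);
	printf("before: %d\n", edid_block_valid(block));   /* 1 (valid) */

	/* Same adjustment as the patch: claim 0 extensions, repay checksum. */
	block[EDID_CHECKSUM] += block[EDID_EXTENSIONS];
	block[EDID_EXTENSIONS] = 0;
	printf("after:  %d\n", edid_block_valid(block));   /* still 1   */

	return 0;
}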