author     Linus Torvalds <torvalds@linux-foundation.org>  2015-03-06 19:54:22 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-03-06 19:54:22 -0500
commit     af13e867133a084aea536870ce39843e862c8aaa (patch)
tree       bb24ca627719be6d39ba07f9d1050a250197040e
parent     84399bb075a6fe320d4221970dc36314e46229fe (diff)
parent     f6bec6ea6816ece9d229c4d37c44e3222de14044 (diff)
Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes from Dave Airlie:
"Radeon, imx, msm, and i915 fixes.
The msm, imx and i915 ones are fairly run of the mill.
Radeon had some DP audio fixes and posting reads for the irq paths,
along with a fix for 32-bit kernels with new cards: we were using
unsigned long to represent GPU-side memory space, but since that
changes size on 32-bit vs 64-bit, cards with lots of VRAM failed. The
change has no effect on x86-64; it just moves to using uint64_t
instead"
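
For context on the 32-bit issue described above: unsigned long is only 32 bits
wide on a 32-bit kernel, so GPU offsets at or beyond 4 GiB get silently
truncated. Below is a minimal, hypothetical userspace sketch of that failure
mode and of the fixed-width replacement; it is plain C for illustration, not
the kernel code changed in this pull.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A GPU-side offset just above 4 GiB, as seen on boards with lots of VRAM. */
	uint64_t gpu_offset = 0x100001000ULL;

	/* On a 32-bit build, unsigned long is 32 bits and the high bits are dropped. */
	unsigned long truncated = (unsigned long)gpu_offset;

	/* A fixed-width type keeps the full offset on every architecture,
	 * which is what the drm_mm/ttm conversion in this pull moves to. */
	uint64_t kept = gpu_offset;

	printf("unsigned long: %#lx\n", truncated);                  /* 0x1000 on 32-bit */
	printf("uint64_t:      %#llx\n", (unsigned long long)kept);  /* 0x100001000 everywhere */
	return 0;
}
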
* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (35 commits)
drm/msm: kexec fixes
drm/msm/mdp5: fix cursor blending
drm/msm/mdp5: fix cursor ROI
drm/msm/atomic: Don't leak atomic commit object when commit fails
drm/msm/mdp5: Avoid flushing registers when CRTC is disabled
drm/msm: update generated headers (add 6th lm.base entry)
drm/msm/mdp5: fixup "drm/msm: fix fallout of atomic dpms changes"
drm/ttm: device address space != CPU address space
drm/mm: Support 4 GiB and larger ranges
drm/i915: gen4: work around hang during hibernation
drm/i915: Check for driver readyness before handling an underrun interrupt
drm/radeon: fix interlaced modes on DCE8
drm/radeon: fix DRM_IOCTL_RADEON_CS oops
drm/radeon: do a posting read in cik_set_irq
drm/radeon: do a posting read in si_set_irq
drm/radeon: do a posting read in evergreen_set_irq
drm/radeon: do a posting read in r600_set_irq
drm/radeon: do a posting read in rs600_set_irq
drm/radeon: do a posting read in r100_set_irq
radeon/audio: fix DP audio on DCE6
...
34 files changed, 399 insertions, 325 deletions
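
Several of the radeon patches above simply add a "posting read" after writing
the interrupt-enable registers: MMIO writes are posted, so reading back from
the same device forces the write out of any write buffers before the function
returns. A generic sketch of the pattern against a fake in-memory register
file (hypothetical register names and helpers, not the radeon code):

#include <stdint.h>

/* Stand-in for an ioremap()ed MMIO window; purely illustrative. */
static volatile uint32_t fake_regs[0x100];

#define IRQ_ENABLE_REG 0x40 /* hypothetical register index */

static void write_reg(uint32_t reg, uint32_t val)
{
	fake_regs[reg] = val;
}

static uint32_t read_reg(uint32_t reg)
{
	return fake_regs[reg];
}

static void set_irq_enable(uint32_t mask)
{
	write_reg(IRQ_ENABLE_REG, mask);

	/*
	 * Posting read: reading any register of the same device forces the
	 * posted write above to reach the hardware before we return and,
	 * for example, start waiting for the interrupt it was meant to enable.
	 */
	(void)read_reg(IRQ_ENABLE_REG);
}

int main(void)
{
	set_irq_enable(0x1);
	return 0;
}
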
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 04a209e2b66d..7fc6f8bd4821 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -91,29 +91,29 @@ | |||
91 | */ | 91 | */ |
92 | 92 | ||
93 | static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, | 93 | static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, |
94 | unsigned long size, | 94 | u64 size, |
95 | unsigned alignment, | 95 | unsigned alignment, |
96 | unsigned long color, | 96 | unsigned long color, |
97 | enum drm_mm_search_flags flags); | 97 | enum drm_mm_search_flags flags); |
98 | static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm, | 98 | static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm, |
99 | unsigned long size, | 99 | u64 size, |
100 | unsigned alignment, | 100 | unsigned alignment, |
101 | unsigned long color, | 101 | unsigned long color, |
102 | unsigned long start, | 102 | u64 start, |
103 | unsigned long end, | 103 | u64 end, |
104 | enum drm_mm_search_flags flags); | 104 | enum drm_mm_search_flags flags); |
105 | 105 | ||
106 | static void drm_mm_insert_helper(struct drm_mm_node *hole_node, | 106 | static void drm_mm_insert_helper(struct drm_mm_node *hole_node, |
107 | struct drm_mm_node *node, | 107 | struct drm_mm_node *node, |
108 | unsigned long size, unsigned alignment, | 108 | u64 size, unsigned alignment, |
109 | unsigned long color, | 109 | unsigned long color, |
110 | enum drm_mm_allocator_flags flags) | 110 | enum drm_mm_allocator_flags flags) |
111 | { | 111 | { |
112 | struct drm_mm *mm = hole_node->mm; | 112 | struct drm_mm *mm = hole_node->mm; |
113 | unsigned long hole_start = drm_mm_hole_node_start(hole_node); | 113 | u64 hole_start = drm_mm_hole_node_start(hole_node); |
114 | unsigned long hole_end = drm_mm_hole_node_end(hole_node); | 114 | u64 hole_end = drm_mm_hole_node_end(hole_node); |
115 | unsigned long adj_start = hole_start; | 115 | u64 adj_start = hole_start; |
116 | unsigned long adj_end = hole_end; | 116 | u64 adj_end = hole_end; |
117 | 117 | ||
118 | BUG_ON(node->allocated); | 118 | BUG_ON(node->allocated); |
119 | 119 | ||
@@ -124,12 +124,15 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node, | |||
124 | adj_start = adj_end - size; | 124 | adj_start = adj_end - size; |
125 | 125 | ||
126 | if (alignment) { | 126 | if (alignment) { |
127 | unsigned tmp = adj_start % alignment; | 127 | u64 tmp = adj_start; |
128 | if (tmp) { | 128 | unsigned rem; |
129 | |||
130 | rem = do_div(tmp, alignment); | ||
131 | if (rem) { | ||
129 | if (flags & DRM_MM_CREATE_TOP) | 132 | if (flags & DRM_MM_CREATE_TOP) |
130 | adj_start -= tmp; | 133 | adj_start -= rem; |
131 | else | 134 | else |
132 | adj_start += alignment - tmp; | 135 | adj_start += alignment - rem; |
133 | } | 136 | } |
134 | } | 137 | } |
135 | 138 | ||
@@ -176,9 +179,9 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node, | |||
176 | int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) | 179 | int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) |
177 | { | 180 | { |
178 | struct drm_mm_node *hole; | 181 | struct drm_mm_node *hole; |
179 | unsigned long end = node->start + node->size; | 182 | u64 end = node->start + node->size; |
180 | unsigned long hole_start; | 183 | u64 hole_start; |
181 | unsigned long hole_end; | 184 | u64 hole_end; |
182 | 185 | ||
183 | BUG_ON(node == NULL); | 186 | BUG_ON(node == NULL); |
184 | 187 | ||
@@ -227,7 +230,7 @@ EXPORT_SYMBOL(drm_mm_reserve_node); | |||
227 | * 0 on success, -ENOSPC if there's no suitable hole. | 230 | * 0 on success, -ENOSPC if there's no suitable hole. |
228 | */ | 231 | */ |
229 | int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, | 232 | int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, |
230 | unsigned long size, unsigned alignment, | 233 | u64 size, unsigned alignment, |
231 | unsigned long color, | 234 | unsigned long color, |
232 | enum drm_mm_search_flags sflags, | 235 | enum drm_mm_search_flags sflags, |
233 | enum drm_mm_allocator_flags aflags) | 236 | enum drm_mm_allocator_flags aflags) |
@@ -246,16 +249,16 @@ EXPORT_SYMBOL(drm_mm_insert_node_generic); | |||
246 | 249 | ||
247 | static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node, | 250 | static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node, |
248 | struct drm_mm_node *node, | 251 | struct drm_mm_node *node, |
249 | unsigned long size, unsigned alignment, | 252 | u64 size, unsigned alignment, |
250 | unsigned long color, | 253 | unsigned long color, |
251 | unsigned long start, unsigned long end, | 254 | u64 start, u64 end, |
252 | enum drm_mm_allocator_flags flags) | 255 | enum drm_mm_allocator_flags flags) |
253 | { | 256 | { |
254 | struct drm_mm *mm = hole_node->mm; | 257 | struct drm_mm *mm = hole_node->mm; |
255 | unsigned long hole_start = drm_mm_hole_node_start(hole_node); | 258 | u64 hole_start = drm_mm_hole_node_start(hole_node); |
256 | unsigned long hole_end = drm_mm_hole_node_end(hole_node); | 259 | u64 hole_end = drm_mm_hole_node_end(hole_node); |
257 | unsigned long adj_start = hole_start; | 260 | u64 adj_start = hole_start; |
258 | unsigned long adj_end = hole_end; | 261 | u64 adj_end = hole_end; |
259 | 262 | ||
260 | BUG_ON(!hole_node->hole_follows || node->allocated); | 263 | BUG_ON(!hole_node->hole_follows || node->allocated); |
261 | 264 | ||
@@ -271,12 +274,15 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node, | |||
271 | mm->color_adjust(hole_node, color, &adj_start, &adj_end); | 274 | mm->color_adjust(hole_node, color, &adj_start, &adj_end); |
272 | 275 | ||
273 | if (alignment) { | 276 | if (alignment) { |
274 | unsigned tmp = adj_start % alignment; | 277 | u64 tmp = adj_start; |
275 | if (tmp) { | 278 | unsigned rem; |
279 | |||
280 | rem = do_div(tmp, alignment); | ||
281 | if (rem) { | ||
276 | if (flags & DRM_MM_CREATE_TOP) | 282 | if (flags & DRM_MM_CREATE_TOP) |
277 | adj_start -= tmp; | 283 | adj_start -= rem; |
278 | else | 284 | else |
279 | adj_start += alignment - tmp; | 285 | adj_start += alignment - rem; |
280 | } | 286 | } |
281 | } | 287 | } |
282 | 288 | ||
@@ -324,9 +330,9 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node, | |||
324 | * 0 on success, -ENOSPC if there's no suitable hole. | 330 | * 0 on success, -ENOSPC if there's no suitable hole. |
325 | */ | 331 | */ |
326 | int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node, | 332 | int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node, |
327 | unsigned long size, unsigned alignment, | 333 | u64 size, unsigned alignment, |
328 | unsigned long color, | 334 | unsigned long color, |
329 | unsigned long start, unsigned long end, | 335 | u64 start, u64 end, |
330 | enum drm_mm_search_flags sflags, | 336 | enum drm_mm_search_flags sflags, |
331 | enum drm_mm_allocator_flags aflags) | 337 | enum drm_mm_allocator_flags aflags) |
332 | { | 338 | { |
@@ -387,32 +393,34 @@ void drm_mm_remove_node(struct drm_mm_node *node) | |||
387 | } | 393 | } |
388 | EXPORT_SYMBOL(drm_mm_remove_node); | 394 | EXPORT_SYMBOL(drm_mm_remove_node); |
389 | 395 | ||
390 | static int check_free_hole(unsigned long start, unsigned long end, | 396 | static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment) |
391 | unsigned long size, unsigned alignment) | ||
392 | { | 397 | { |
393 | if (end - start < size) | 398 | if (end - start < size) |
394 | return 0; | 399 | return 0; |
395 | 400 | ||
396 | if (alignment) { | 401 | if (alignment) { |
397 | unsigned tmp = start % alignment; | 402 | u64 tmp = start; |
403 | unsigned rem; | ||
404 | |||
405 | rem = do_div(tmp, alignment); | ||
398 | if (tmp) | 406 | if (tmp) |
399 | start += alignment - tmp; | 407 | start += alignment - rem; |
400 | } | 408 | } |
401 | 409 | ||
402 | return end >= start + size; | 410 | return end >= start + size; |
403 | } | 411 | } |
404 | 412 | ||
405 | static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, | 413 | static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, |
406 | unsigned long size, | 414 | u64 size, |
407 | unsigned alignment, | 415 | unsigned alignment, |
408 | unsigned long color, | 416 | unsigned long color, |
409 | enum drm_mm_search_flags flags) | 417 | enum drm_mm_search_flags flags) |
410 | { | 418 | { |
411 | struct drm_mm_node *entry; | 419 | struct drm_mm_node *entry; |
412 | struct drm_mm_node *best; | 420 | struct drm_mm_node *best; |
413 | unsigned long adj_start; | 421 | u64 adj_start; |
414 | unsigned long adj_end; | 422 | u64 adj_end; |
415 | unsigned long best_size; | 423 | u64 best_size; |
416 | 424 | ||
417 | BUG_ON(mm->scanned_blocks); | 425 | BUG_ON(mm->scanned_blocks); |
418 | 426 | ||
@@ -421,7 +429,7 @@ static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, | |||
421 | 429 | ||
422 | __drm_mm_for_each_hole(entry, mm, adj_start, adj_end, | 430 | __drm_mm_for_each_hole(entry, mm, adj_start, adj_end, |
423 | flags & DRM_MM_SEARCH_BELOW) { | 431 | flags & DRM_MM_SEARCH_BELOW) { |
424 | unsigned long hole_size = adj_end - adj_start; | 432 | u64 hole_size = adj_end - adj_start; |
425 | 433 | ||
426 | if (mm->color_adjust) { | 434 | if (mm->color_adjust) { |
427 | mm->color_adjust(entry, color, &adj_start, &adj_end); | 435 | mm->color_adjust(entry, color, &adj_start, &adj_end); |
@@ -445,18 +453,18 @@ static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, | |||
445 | } | 453 | } |
446 | 454 | ||
447 | static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm, | 455 | static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm, |
448 | unsigned long size, | 456 | u64 size, |
449 | unsigned alignment, | 457 | unsigned alignment, |
450 | unsigned long color, | 458 | unsigned long color, |
451 | unsigned long start, | 459 | u64 start, |
452 | unsigned long end, | 460 | u64 end, |
453 | enum drm_mm_search_flags flags) | 461 | enum drm_mm_search_flags flags) |
454 | { | 462 | { |
455 | struct drm_mm_node *entry; | 463 | struct drm_mm_node *entry; |
456 | struct drm_mm_node *best; | 464 | struct drm_mm_node *best; |
457 | unsigned long adj_start; | 465 | u64 adj_start; |
458 | unsigned long adj_end; | 466 | u64 adj_end; |
459 | unsigned long best_size; | 467 | u64 best_size; |
460 | 468 | ||
461 | BUG_ON(mm->scanned_blocks); | 469 | BUG_ON(mm->scanned_blocks); |
462 | 470 | ||
@@ -465,7 +473,7 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_ | |||
465 | 473 | ||
466 | __drm_mm_for_each_hole(entry, mm, adj_start, adj_end, | 474 | __drm_mm_for_each_hole(entry, mm, adj_start, adj_end, |
467 | flags & DRM_MM_SEARCH_BELOW) { | 475 | flags & DRM_MM_SEARCH_BELOW) { |
468 | unsigned long hole_size = adj_end - adj_start; | 476 | u64 hole_size = adj_end - adj_start; |
469 | 477 | ||
470 | if (adj_start < start) | 478 | if (adj_start < start) |
471 | adj_start = start; | 479 | adj_start = start; |
@@ -561,7 +569,7 @@ EXPORT_SYMBOL(drm_mm_replace_node); | |||
561 | * adding/removing nodes to/from the scan list are allowed. | 569 | * adding/removing nodes to/from the scan list are allowed. |
562 | */ | 570 | */ |
563 | void drm_mm_init_scan(struct drm_mm *mm, | 571 | void drm_mm_init_scan(struct drm_mm *mm, |
564 | unsigned long size, | 572 | u64 size, |
565 | unsigned alignment, | 573 | unsigned alignment, |
566 | unsigned long color) | 574 | unsigned long color) |
567 | { | 575 | { |
@@ -594,11 +602,11 @@ EXPORT_SYMBOL(drm_mm_init_scan); | |||
594 | * adding/removing nodes to/from the scan list are allowed. | 602 | * adding/removing nodes to/from the scan list are allowed. |
595 | */ | 603 | */ |
596 | void drm_mm_init_scan_with_range(struct drm_mm *mm, | 604 | void drm_mm_init_scan_with_range(struct drm_mm *mm, |
597 | unsigned long size, | 605 | u64 size, |
598 | unsigned alignment, | 606 | unsigned alignment, |
599 | unsigned long color, | 607 | unsigned long color, |
600 | unsigned long start, | 608 | u64 start, |
601 | unsigned long end) | 609 | u64 end) |
602 | { | 610 | { |
603 | mm->scan_color = color; | 611 | mm->scan_color = color; |
604 | mm->scan_alignment = alignment; | 612 | mm->scan_alignment = alignment; |
@@ -627,8 +635,8 @@ bool drm_mm_scan_add_block(struct drm_mm_node *node) | |||
627 | { | 635 | { |
628 | struct drm_mm *mm = node->mm; | 636 | struct drm_mm *mm = node->mm; |
629 | struct drm_mm_node *prev_node; | 637 | struct drm_mm_node *prev_node; |
630 | unsigned long hole_start, hole_end; | 638 | u64 hole_start, hole_end; |
631 | unsigned long adj_start, adj_end; | 639 | u64 adj_start, adj_end; |
632 | 640 | ||
633 | mm->scanned_blocks++; | 641 | mm->scanned_blocks++; |
634 | 642 | ||
@@ -731,7 +739,7 @@ EXPORT_SYMBOL(drm_mm_clean); | |||
731 | * | 739 | * |
732 | * Note that @mm must be cleared to 0 before calling this function. | 740 | * Note that @mm must be cleared to 0 before calling this function. |
733 | */ | 741 | */ |
734 | void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) | 742 | void drm_mm_init(struct drm_mm * mm, u64 start, u64 size) |
735 | { | 743 | { |
736 | INIT_LIST_HEAD(&mm->hole_stack); | 744 | INIT_LIST_HEAD(&mm->hole_stack); |
737 | mm->scanned_blocks = 0; | 745 | mm->scanned_blocks = 0; |
@@ -766,18 +774,17 @@ void drm_mm_takedown(struct drm_mm * mm) | |||
766 | } | 774 | } |
767 | EXPORT_SYMBOL(drm_mm_takedown); | 775 | EXPORT_SYMBOL(drm_mm_takedown); |
768 | 776 | ||
769 | static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry, | 777 | static u64 drm_mm_debug_hole(struct drm_mm_node *entry, |
770 | const char *prefix) | 778 | const char *prefix) |
771 | { | 779 | { |
772 | unsigned long hole_start, hole_end, hole_size; | 780 | u64 hole_start, hole_end, hole_size; |
773 | 781 | ||
774 | if (entry->hole_follows) { | 782 | if (entry->hole_follows) { |
775 | hole_start = drm_mm_hole_node_start(entry); | 783 | hole_start = drm_mm_hole_node_start(entry); |
776 | hole_end = drm_mm_hole_node_end(entry); | 784 | hole_end = drm_mm_hole_node_end(entry); |
777 | hole_size = hole_end - hole_start; | 785 | hole_size = hole_end - hole_start; |
778 | printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n", | 786 | pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start, |
779 | prefix, hole_start, hole_end, | 787 | hole_end, hole_size); |
780 | hole_size); | ||
781 | return hole_size; | 788 | return hole_size; |
782 | } | 789 | } |
783 | 790 | ||
@@ -792,35 +799,34 @@ static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry, | |||
792 | void drm_mm_debug_table(struct drm_mm *mm, const char *prefix) | 799 | void drm_mm_debug_table(struct drm_mm *mm, const char *prefix) |
793 | { | 800 | { |
794 | struct drm_mm_node *entry; | 801 | struct drm_mm_node *entry; |
795 | unsigned long total_used = 0, total_free = 0, total = 0; | 802 | u64 total_used = 0, total_free = 0, total = 0; |
796 | 803 | ||
797 | total_free += drm_mm_debug_hole(&mm->head_node, prefix); | 804 | total_free += drm_mm_debug_hole(&mm->head_node, prefix); |
798 | 805 | ||
799 | drm_mm_for_each_node(entry, mm) { | 806 | drm_mm_for_each_node(entry, mm) { |
800 | printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n", | 807 | pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start, |
801 | prefix, entry->start, entry->start + entry->size, | 808 | entry->start + entry->size, entry->size); |
802 | entry->size); | ||
803 | total_used += entry->size; | 809 | total_used += entry->size; |
804 | total_free += drm_mm_debug_hole(entry, prefix); | 810 | total_free += drm_mm_debug_hole(entry, prefix); |
805 | } | 811 | } |
806 | total = total_free + total_used; | 812 | total = total_free + total_used; |
807 | 813 | ||
808 | printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total, | 814 | pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total, |
809 | total_used, total_free); | 815 | total_used, total_free); |
810 | } | 816 | } |
811 | EXPORT_SYMBOL(drm_mm_debug_table); | 817 | EXPORT_SYMBOL(drm_mm_debug_table); |
812 | 818 | ||
813 | #if defined(CONFIG_DEBUG_FS) | 819 | #if defined(CONFIG_DEBUG_FS) |
814 | static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry) | 820 | static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry) |
815 | { | 821 | { |
816 | unsigned long hole_start, hole_end, hole_size; | 822 | u64 hole_start, hole_end, hole_size; |
817 | 823 | ||
818 | if (entry->hole_follows) { | 824 | if (entry->hole_follows) { |
819 | hole_start = drm_mm_hole_node_start(entry); | 825 | hole_start = drm_mm_hole_node_start(entry); |
820 | hole_end = drm_mm_hole_node_end(entry); | 826 | hole_end = drm_mm_hole_node_end(entry); |
821 | hole_size = hole_end - hole_start; | 827 | hole_size = hole_end - hole_start; |
822 | seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n", | 828 | seq_printf(m, "%#llx-%#llx: %llu: free\n", hole_start, |
823 | hole_start, hole_end, hole_size); | 829 | hole_end, hole_size); |
824 | return hole_size; | 830 | return hole_size; |
825 | } | 831 | } |
826 | 832 | ||
@@ -835,20 +841,20 @@ static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *en | |||
835 | int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) | 841 | int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) |
836 | { | 842 | { |
837 | struct drm_mm_node *entry; | 843 | struct drm_mm_node *entry; |
838 | unsigned long total_used = 0, total_free = 0, total = 0; | 844 | u64 total_used = 0, total_free = 0, total = 0; |
839 | 845 | ||
840 | total_free += drm_mm_dump_hole(m, &mm->head_node); | 846 | total_free += drm_mm_dump_hole(m, &mm->head_node); |
841 | 847 | ||
842 | drm_mm_for_each_node(entry, mm) { | 848 | drm_mm_for_each_node(entry, mm) { |
843 | seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n", | 849 | seq_printf(m, "%#016llx-%#016llx: %llu: used\n", entry->start, |
844 | entry->start, entry->start + entry->size, | 850 | entry->start + entry->size, entry->size); |
845 | entry->size); | ||
846 | total_used += entry->size; | 851 | total_used += entry->size; |
847 | total_free += drm_mm_dump_hole(m, entry); | 852 | total_free += drm_mm_dump_hole(m, entry); |
848 | } | 853 | } |
849 | total = total_free + total_used; | 854 | total = total_free + total_used; |
850 | 855 | ||
851 | seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free); | 856 | seq_printf(m, "total: %llu, used %llu free %llu\n", total, |
857 | total_used, total_free); | ||
852 | return 0; | 858 | return 0; |
853 | } | 859 | } |
854 | EXPORT_SYMBOL(drm_mm_dump_table); | 860 | EXPORT_SYMBOL(drm_mm_dump_table); |
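
The repeated do_div() hunks above are needed because a plain '%' on a u64 is
typically compiled into a call to a libgcc helper (e.g. __umoddi3) on 32-bit
kernels, which the kernel does not link against; do_div(tmp, alignment)
divides the 64-bit value in place and returns the 32-bit remainder. A small
userspace sketch of the same alignment step, with do_div_sketch() standing in
for the kernel macro:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's do_div(): divide *n in place, return the remainder. */
static uint32_t do_div_sketch(uint64_t *n, uint32_t base)
{
	uint32_t rem = (uint32_t)(*n % base);

	*n /= base;
	return rem;
}

/* Round a hole start up to the requested alignment, mirroring the hunks above. */
static uint64_t align_up(uint64_t start, uint32_t alignment)
{
	if (alignment) {
		uint64_t tmp = start;
		uint32_t rem = do_div_sketch(&tmp, alignment);

		if (rem)
			start += alignment - rem;
	}
	return start;
}

int main(void)
{
	/* 0x100000123 rounded up to a 4 KiB boundary -> 0x100001000 */
	printf("%#llx\n", (unsigned long long)align_up(0x100000123ULL, 0x1000));
	return 0;
}
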
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 96e811fe24ca..e8b18e542da4 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -152,12 +152,12 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) | |||
152 | seq_puts(m, " (pp"); | 152 | seq_puts(m, " (pp"); |
153 | else | 153 | else |
154 | seq_puts(m, " (g"); | 154 | seq_puts(m, " (g"); |
155 | seq_printf(m, "gtt offset: %08lx, size: %08lx, type: %u)", | 155 | seq_printf(m, "gtt offset: %08llx, size: %08llx, type: %u)", |
156 | vma->node.start, vma->node.size, | 156 | vma->node.start, vma->node.size, |
157 | vma->ggtt_view.type); | 157 | vma->ggtt_view.type); |
158 | } | 158 | } |
159 | if (obj->stolen) | 159 | if (obj->stolen) |
160 | seq_printf(m, " (stolen: %08lx)", obj->stolen->start); | 160 | seq_printf(m, " (stolen: %08llx)", obj->stolen->start); |
161 | if (obj->pin_mappable || obj->fault_mappable) { | 161 | if (obj->pin_mappable || obj->fault_mappable) { |
162 | char s[3], *t = s; | 162 | char s[3], *t = s; |
163 | if (obj->pin_mappable) | 163 | if (obj->pin_mappable) |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 8039cec71fc2..cc6ea53d2b81 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -622,7 +622,7 @@ static int i915_drm_suspend(struct drm_device *dev) | |||
622 | return 0; | 622 | return 0; |
623 | } | 623 | } |
624 | 624 | ||
625 | static int i915_drm_suspend_late(struct drm_device *drm_dev) | 625 | static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation) |
626 | { | 626 | { |
627 | struct drm_i915_private *dev_priv = drm_dev->dev_private; | 627 | struct drm_i915_private *dev_priv = drm_dev->dev_private; |
628 | int ret; | 628 | int ret; |
@@ -636,7 +636,17 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev) | |||
636 | } | 636 | } |
637 | 637 | ||
638 | pci_disable_device(drm_dev->pdev); | 638 | pci_disable_device(drm_dev->pdev); |
639 | pci_set_power_state(drm_dev->pdev, PCI_D3hot); | 639 | /* |
640 | * During hibernation on some GEN4 platforms the BIOS may try to access | ||
641 | * the device even though it's already in D3 and hang the machine. So | ||
642 | * leave the device in D0 on those platforms and hope the BIOS will | ||
643 | * power down the device properly. Platforms where this was seen: | ||
644 | * Lenovo Thinkpad X301, X61s | ||
645 | */ | ||
646 | if (!(hibernation && | ||
647 | drm_dev->pdev->subsystem_vendor == PCI_VENDOR_ID_LENOVO && | ||
648 | INTEL_INFO(dev_priv)->gen == 4)) | ||
649 | pci_set_power_state(drm_dev->pdev, PCI_D3hot); | ||
640 | 650 | ||
641 | return 0; | 651 | return 0; |
642 | } | 652 | } |
@@ -662,7 +672,7 @@ int i915_suspend_legacy(struct drm_device *dev, pm_message_t state) | |||
662 | if (error) | 672 | if (error) |
663 | return error; | 673 | return error; |
664 | 674 | ||
665 | return i915_drm_suspend_late(dev); | 675 | return i915_drm_suspend_late(dev, false); |
666 | } | 676 | } |
667 | 677 | ||
668 | static int i915_drm_resume(struct drm_device *dev) | 678 | static int i915_drm_resume(struct drm_device *dev) |
@@ -950,7 +960,17 @@ static int i915_pm_suspend_late(struct device *dev) | |||
950 | if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) | 960 | if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
951 | return 0; | 961 | return 0; |
952 | 962 | ||
953 | return i915_drm_suspend_late(drm_dev); | 963 | return i915_drm_suspend_late(drm_dev, false); |
964 | } | ||
965 | |||
966 | static int i915_pm_poweroff_late(struct device *dev) | ||
967 | { | ||
968 | struct drm_device *drm_dev = dev_to_i915(dev)->dev; | ||
969 | |||
970 | if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) | ||
971 | return 0; | ||
972 | |||
973 | return i915_drm_suspend_late(drm_dev, true); | ||
954 | } | 974 | } |
955 | 975 | ||
956 | static int i915_pm_resume_early(struct device *dev) | 976 | static int i915_pm_resume_early(struct device *dev) |
@@ -1520,7 +1540,7 @@ static const struct dev_pm_ops i915_pm_ops = { | |||
1520 | .thaw_early = i915_pm_resume_early, | 1540 | .thaw_early = i915_pm_resume_early, |
1521 | .thaw = i915_pm_resume, | 1541 | .thaw = i915_pm_resume, |
1522 | .poweroff = i915_pm_suspend, | 1542 | .poweroff = i915_pm_suspend, |
1523 | .poweroff_late = i915_pm_suspend_late, | 1543 | .poweroff_late = i915_pm_poweroff_late, |
1524 | .restore_early = i915_pm_resume_early, | 1544 | .restore_early = i915_pm_resume_early, |
1525 | .restore = i915_pm_resume, | 1545 | .restore = i915_pm_resume, |
1526 | 1546 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 746f77fb57a3..dccdc8aad2e2 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1145,7 +1145,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) | |||
1145 | 1145 | ||
1146 | ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true); | 1146 | ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true); |
1147 | 1147 | ||
1148 | DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n", | 1148 | DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n", |
1149 | ppgtt->node.size >> 20, | 1149 | ppgtt->node.size >> 20, |
1150 | ppgtt->node.start / PAGE_SIZE); | 1150 | ppgtt->node.start / PAGE_SIZE); |
1151 | 1151 | ||
@@ -1713,8 +1713,8 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj) | |||
1713 | 1713 | ||
1714 | static void i915_gtt_color_adjust(struct drm_mm_node *node, | 1714 | static void i915_gtt_color_adjust(struct drm_mm_node *node, |
1715 | unsigned long color, | 1715 | unsigned long color, |
1716 | unsigned long *start, | 1716 | u64 *start, |
1717 | unsigned long *end) | 1717 | u64 *end) |
1718 | { | 1718 | { |
1719 | if (node->color != color) | 1719 | if (node->color != color) |
1720 | *start += 4096; | 1720 | *start += 4096; |
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index 04e248dd2259..54daa66c6970 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -282,16 +282,6 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv, | |||
282 | return ret; | 282 | return ret; |
283 | } | 283 | } |
284 | 284 | ||
285 | static bool | ||
286 | __cpu_fifo_underrun_reporting_enabled(struct drm_i915_private *dev_priv, | ||
287 | enum pipe pipe) | ||
288 | { | ||
289 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | ||
290 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
291 | |||
292 | return !intel_crtc->cpu_fifo_underrun_disabled; | ||
293 | } | ||
294 | |||
295 | /** | 285 | /** |
296 | * intel_set_pch_fifo_underrun_reporting - set PCH fifo underrun reporting state | 286 | * intel_set_pch_fifo_underrun_reporting - set PCH fifo underrun reporting state |
297 | * @dev_priv: i915 device instance | 287 | * @dev_priv: i915 device instance |
@@ -352,9 +342,15 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv, | |||
352 | void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, | 342 | void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, |
353 | enum pipe pipe) | 343 | enum pipe pipe) |
354 | { | 344 | { |
345 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | ||
346 | |||
347 | /* We may be called too early in init, thanks BIOS! */ | ||
348 | if (crtc == NULL) | ||
349 | return; | ||
350 | |||
355 | /* GMCH can't disable fifo underruns, filter them. */ | 351 | /* GMCH can't disable fifo underruns, filter them. */ |
356 | if (HAS_GMCH_DISPLAY(dev_priv->dev) && | 352 | if (HAS_GMCH_DISPLAY(dev_priv->dev) && |
357 | !__cpu_fifo_underrun_reporting_enabled(dev_priv, pipe)) | 353 | to_intel_crtc(crtc)->cpu_fifo_underrun_disabled) |
358 | return; | 354 | return; |
359 | 355 | ||
360 | if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false)) | 356 | if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false)) |
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index 121d30ca2d44..87fe8ed92ebe 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -70,7 +70,9 @@ static const struct dw_hdmi_curr_ctrl imx_cur_ctr[] = { | |||
70 | 118800000, { 0x091c, 0x091c, 0x06dc }, | 70 | 118800000, { 0x091c, 0x091c, 0x06dc }, |
71 | }, { | 71 | }, { |
72 | 216000000, { 0x06dc, 0x0b5c, 0x091c }, | 72 | 216000000, { 0x06dc, 0x0b5c, 0x091c }, |
73 | } | 73 | }, { |
74 | ~0UL, { 0x0000, 0x0000, 0x0000 }, | ||
75 | }, | ||
74 | }; | 76 | }; |
75 | 77 | ||
76 | static const struct dw_hdmi_sym_term imx_sym_term[] = { | 78 | static const struct dw_hdmi_sym_term imx_sym_term[] = { |
@@ -136,11 +138,34 @@ static struct drm_encoder_funcs dw_hdmi_imx_encoder_funcs = { | |||
136 | .destroy = drm_encoder_cleanup, | 138 | .destroy = drm_encoder_cleanup, |
137 | }; | 139 | }; |
138 | 140 | ||
141 | static enum drm_mode_status imx6q_hdmi_mode_valid(struct drm_connector *con, | ||
142 | struct drm_display_mode *mode) | ||
143 | { | ||
144 | if (mode->clock < 13500) | ||
145 | return MODE_CLOCK_LOW; | ||
146 | if (mode->clock > 266000) | ||
147 | return MODE_CLOCK_HIGH; | ||
148 | |||
149 | return MODE_OK; | ||
150 | } | ||
151 | |||
152 | static enum drm_mode_status imx6dl_hdmi_mode_valid(struct drm_connector *con, | ||
153 | struct drm_display_mode *mode) | ||
154 | { | ||
155 | if (mode->clock < 13500) | ||
156 | return MODE_CLOCK_LOW; | ||
157 | if (mode->clock > 270000) | ||
158 | return MODE_CLOCK_HIGH; | ||
159 | |||
160 | return MODE_OK; | ||
161 | } | ||
162 | |||
139 | static struct dw_hdmi_plat_data imx6q_hdmi_drv_data = { | 163 | static struct dw_hdmi_plat_data imx6q_hdmi_drv_data = { |
140 | .mpll_cfg = imx_mpll_cfg, | 164 | .mpll_cfg = imx_mpll_cfg, |
141 | .cur_ctr = imx_cur_ctr, | 165 | .cur_ctr = imx_cur_ctr, |
142 | .sym_term = imx_sym_term, | 166 | .sym_term = imx_sym_term, |
143 | .dev_type = IMX6Q_HDMI, | 167 | .dev_type = IMX6Q_HDMI, |
168 | .mode_valid = imx6q_hdmi_mode_valid, | ||
144 | }; | 169 | }; |
145 | 170 | ||
146 | static struct dw_hdmi_plat_data imx6dl_hdmi_drv_data = { | 171 | static struct dw_hdmi_plat_data imx6dl_hdmi_drv_data = { |
@@ -148,6 +173,7 @@ static struct dw_hdmi_plat_data imx6dl_hdmi_drv_data = { | |||
148 | .cur_ctr = imx_cur_ctr, | 173 | .cur_ctr = imx_cur_ctr, |
149 | .sym_term = imx_sym_term, | 174 | .sym_term = imx_sym_term, |
150 | .dev_type = IMX6DL_HDMI, | 175 | .dev_type = IMX6DL_HDMI, |
176 | .mode_valid = imx6dl_hdmi_mode_valid, | ||
151 | }; | 177 | }; |
152 | 178 | ||
153 | static const struct of_device_id dw_hdmi_imx_dt_ids[] = { | 179 | static const struct of_device_id dw_hdmi_imx_dt_ids[] = { |
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 1b86aac0b341..2d6dc94e1e64 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -163,22 +163,7 @@ static void imx_ldb_encoder_prepare(struct drm_encoder *encoder) | |||
163 | { | 163 | { |
164 | struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); | 164 | struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); |
165 | struct imx_ldb *ldb = imx_ldb_ch->ldb; | 165 | struct imx_ldb *ldb = imx_ldb_ch->ldb; |
166 | struct drm_display_mode *mode = &encoder->crtc->hwmode; | ||
167 | u32 pixel_fmt; | 166 | u32 pixel_fmt; |
168 | unsigned long serial_clk; | ||
169 | unsigned long di_clk = mode->clock * 1000; | ||
170 | int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder); | ||
171 | |||
172 | if (ldb->ldb_ctrl & LDB_SPLIT_MODE_EN) { | ||
173 | /* dual channel LVDS mode */ | ||
174 | serial_clk = 3500UL * mode->clock; | ||
175 | imx_ldb_set_clock(ldb, mux, 0, serial_clk, di_clk); | ||
176 | imx_ldb_set_clock(ldb, mux, 1, serial_clk, di_clk); | ||
177 | } else { | ||
178 | serial_clk = 7000UL * mode->clock; | ||
179 | imx_ldb_set_clock(ldb, mux, imx_ldb_ch->chno, serial_clk, | ||
180 | di_clk); | ||
181 | } | ||
182 | 167 | ||
183 | switch (imx_ldb_ch->chno) { | 168 | switch (imx_ldb_ch->chno) { |
184 | case 0: | 169 | case 0: |
@@ -247,6 +232,9 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder, | |||
247 | struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); | 232 | struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); |
248 | struct imx_ldb *ldb = imx_ldb_ch->ldb; | 233 | struct imx_ldb *ldb = imx_ldb_ch->ldb; |
249 | int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; | 234 | int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; |
235 | unsigned long serial_clk; | ||
236 | unsigned long di_clk = mode->clock * 1000; | ||
237 | int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder); | ||
250 | 238 | ||
251 | if (mode->clock > 170000) { | 239 | if (mode->clock > 170000) { |
252 | dev_warn(ldb->dev, | 240 | dev_warn(ldb->dev, |
@@ -257,6 +245,16 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder, | |||
257 | "%s: mode exceeds 85 MHz pixel clock\n", __func__); | 245 | "%s: mode exceeds 85 MHz pixel clock\n", __func__); |
258 | } | 246 | } |
259 | 247 | ||
248 | if (dual) { | ||
249 | serial_clk = 3500UL * mode->clock; | ||
250 | imx_ldb_set_clock(ldb, mux, 0, serial_clk, di_clk); | ||
251 | imx_ldb_set_clock(ldb, mux, 1, serial_clk, di_clk); | ||
252 | } else { | ||
253 | serial_clk = 7000UL * mode->clock; | ||
254 | imx_ldb_set_clock(ldb, mux, imx_ldb_ch->chno, serial_clk, | ||
255 | di_clk); | ||
256 | } | ||
257 | |||
260 | /* FIXME - assumes straight connections DI0 --> CH0, DI1 --> CH1 */ | 258 | /* FIXME - assumes straight connections DI0 --> CH0, DI1 --> CH1 */ |
261 | if (imx_ldb_ch == &ldb->channel[0]) { | 259 | if (imx_ldb_ch == &ldb->channel[0]) { |
262 | if (mode->flags & DRM_MODE_FLAG_NVSYNC) | 260 | if (mode->flags & DRM_MODE_FLAG_NVSYNC) |
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 5e83e007080f..900dda6a8e71 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -236,8 +236,11 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) | |||
236 | } | 236 | } |
237 | 237 | ||
238 | panel_node = of_parse_phandle(np, "fsl,panel", 0); | 238 | panel_node = of_parse_phandle(np, "fsl,panel", 0); |
239 | if (panel_node) | 239 | if (panel_node) { |
240 | imxpd->panel = of_drm_find_panel(panel_node); | 240 | imxpd->panel = of_drm_find_panel(panel_node); |
241 | if (!imxpd->panel) | ||
242 | return -EPROBE_DEFER; | ||
243 | } | ||
241 | 244 | ||
242 | imxpd->dev = dev; | 245 | imxpd->dev = dev; |
243 | 246 | ||
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
index 8edd531cb621..7369ee7f0c55 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
@@ -32,7 +32,10 @@ static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus) | |||
32 | void mdp4_irq_preinstall(struct msm_kms *kms) | 32 | void mdp4_irq_preinstall(struct msm_kms *kms) |
33 | { | 33 | { |
34 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); | 34 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); |
35 | mdp4_enable(mdp4_kms); | ||
35 | mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff); | 36 | mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff); |
37 | mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000); | ||
38 | mdp4_disable(mdp4_kms); | ||
36 | } | 39 | } |
37 | 40 | ||
38 | int mdp4_irq_postinstall(struct msm_kms *kms) | 41 | int mdp4_irq_postinstall(struct msm_kms *kms) |
@@ -53,7 +56,9 @@ int mdp4_irq_postinstall(struct msm_kms *kms) | |||
53 | void mdp4_irq_uninstall(struct msm_kms *kms) | 56 | void mdp4_irq_uninstall(struct msm_kms *kms) |
54 | { | 57 | { |
55 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); | 58 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); |
59 | mdp4_enable(mdp4_kms); | ||
56 | mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000); | 60 | mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000); |
61 | mdp4_disable(mdp4_kms); | ||
57 | } | 62 | } |
58 | 63 | ||
59 | irqreturn_t mdp4_irq(struct msm_kms *kms) | 64 | irqreturn_t mdp4_irq(struct msm_kms *kms) |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index 09b4a25eb553..c276624290af 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -8,17 +8,9 @@ http://github.com/freedreno/envytools/ | |||
8 | git clone https://github.com/freedreno/envytools.git | 8 | git clone https://github.com/freedreno/envytools.git |
9 | 9 | ||
10 | The rules-ng-ng source files this header was generated from are: | 10 | The rules-ng-ng source files this header was generated from are: |
11 | - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) | 11 | - /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp5.xml ( 27229 bytes, from 2015-02-10 17:00:41) |
12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) | 12 | - /local/mnt2/workspace2/sviau/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2014-06-02 18:31:15) |
13 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00) | 13 | - /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2015-01-23 16:20:19) |
14 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00) | ||
15 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11) | ||
16 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) | ||
17 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) | ||
18 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) | ||
19 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) | ||
20 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57) | ||
21 | - /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00) | ||
22 | 14 | ||
23 | Copyright (C) 2013-2015 by the following authors: | 15 | Copyright (C) 2013-2015 by the following authors: |
24 | - Rob Clark <robdclark@gmail.com> (robclark) | 16 | - Rob Clark <robdclark@gmail.com> (robclark) |
@@ -910,6 +902,7 @@ static inline uint32_t __offset_LM(uint32_t idx) | |||
910 | case 2: return (mdp5_cfg->lm.base[2]); | 902 | case 2: return (mdp5_cfg->lm.base[2]); |
911 | case 3: return (mdp5_cfg->lm.base[3]); | 903 | case 3: return (mdp5_cfg->lm.base[3]); |
912 | case 4: return (mdp5_cfg->lm.base[4]); | 904 | case 4: return (mdp5_cfg->lm.base[4]); |
905 | case 5: return (mdp5_cfg->lm.base[5]); | ||
913 | default: return INVALID_IDX(idx); | 906 | default: return INVALID_IDX(idx); |
914 | } | 907 | } |
915 | } | 908 | } |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 46fac545dc2b..2f2863cf8b45 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -62,8 +62,8 @@ struct mdp5_crtc { | |||
62 | 62 | ||
63 | /* current cursor being scanned out: */ | 63 | /* current cursor being scanned out: */ |
64 | struct drm_gem_object *scanout_bo; | 64 | struct drm_gem_object *scanout_bo; |
65 | uint32_t width; | 65 | uint32_t width, height; |
66 | uint32_t height; | 66 | uint32_t x, y; |
67 | } cursor; | 67 | } cursor; |
68 | }; | 68 | }; |
69 | #define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base) | 69 | #define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base) |
@@ -103,8 +103,8 @@ static void crtc_flush_all(struct drm_crtc *crtc) | |||
103 | struct drm_plane *plane; | 103 | struct drm_plane *plane; |
104 | uint32_t flush_mask = 0; | 104 | uint32_t flush_mask = 0; |
105 | 105 | ||
106 | /* we could have already released CTL in the disable path: */ | 106 | /* this should not happen: */ |
107 | if (!mdp5_crtc->ctl) | 107 | if (WARN_ON(!mdp5_crtc->ctl)) |
108 | return; | 108 | return; |
109 | 109 | ||
110 | drm_atomic_crtc_for_each_plane(plane, crtc) { | 110 | drm_atomic_crtc_for_each_plane(plane, crtc) { |
@@ -143,6 +143,11 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) | |||
143 | drm_atomic_crtc_for_each_plane(plane, crtc) { | 143 | drm_atomic_crtc_for_each_plane(plane, crtc) { |
144 | mdp5_plane_complete_flip(plane); | 144 | mdp5_plane_complete_flip(plane); |
145 | } | 145 | } |
146 | |||
147 | if (mdp5_crtc->ctl && !crtc->state->enable) { | ||
148 | mdp5_ctl_release(mdp5_crtc->ctl); | ||
149 | mdp5_crtc->ctl = NULL; | ||
150 | } | ||
146 | } | 151 | } |
147 | 152 | ||
148 | static void unref_cursor_worker(struct drm_flip_work *work, void *val) | 153 | static void unref_cursor_worker(struct drm_flip_work *work, void *val) |
@@ -386,14 +391,17 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc) | |||
386 | mdp5_crtc->event = crtc->state->event; | 391 | mdp5_crtc->event = crtc->state->event; |
387 | spin_unlock_irqrestore(&dev->event_lock, flags); | 392 | spin_unlock_irqrestore(&dev->event_lock, flags); |
388 | 393 | ||
394 | /* | ||
395 | * If no CTL has been allocated in mdp5_crtc_atomic_check(), | ||
396 | * it means we are trying to flush a CRTC whose state is disabled: | ||
397 | * nothing else needs to be done. | ||
398 | */ | ||
399 | if (unlikely(!mdp5_crtc->ctl)) | ||
400 | return; | ||
401 | |||
389 | blend_setup(crtc); | 402 | blend_setup(crtc); |
390 | crtc_flush_all(crtc); | 403 | crtc_flush_all(crtc); |
391 | request_pending(crtc, PENDING_FLIP); | 404 | request_pending(crtc, PENDING_FLIP); |
392 | |||
393 | if (mdp5_crtc->ctl && !crtc->state->enable) { | ||
394 | mdp5_ctl_release(mdp5_crtc->ctl); | ||
395 | mdp5_crtc->ctl = NULL; | ||
396 | } | ||
397 | } | 405 | } |
398 | 406 | ||
399 | static int mdp5_crtc_set_property(struct drm_crtc *crtc, | 407 | static int mdp5_crtc_set_property(struct drm_crtc *crtc, |
@@ -403,6 +411,32 @@ static int mdp5_crtc_set_property(struct drm_crtc *crtc, | |||
403 | return -EINVAL; | 411 | return -EINVAL; |
404 | } | 412 | } |
405 | 413 | ||
414 | static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h) | ||
415 | { | ||
416 | struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); | ||
417 | uint32_t xres = crtc->mode.hdisplay; | ||
418 | uint32_t yres = crtc->mode.vdisplay; | ||
419 | |||
420 | /* | ||
421 | * Cursor Region Of Interest (ROI) is a plane read from cursor | ||
422 | * buffer to render. The ROI region is determined by the visibility of | ||
423 | * the cursor point. In the default Cursor image the cursor point will | ||
424 | * be at the top left of the cursor image, unless it is specified | ||
425 | * otherwise using hotspot feature. | ||
426 | * | ||
427 | * If the cursor point reaches the right (xres - x < cursor.width) or | ||
428 | * bottom (yres - y < cursor.height) boundary of the screen, then ROI | ||
429 | * width and ROI height need to be evaluated to crop the cursor image | ||
430 | * accordingly. | ||
431 | * (xres-x) will be new cursor width when x > (xres - cursor.width) | ||
432 | * (yres-y) will be new cursor height when y > (yres - cursor.height) | ||
433 | */ | ||
434 | *roi_w = min(mdp5_crtc->cursor.width, xres - | ||
435 | mdp5_crtc->cursor.x); | ||
436 | *roi_h = min(mdp5_crtc->cursor.height, yres - | ||
437 | mdp5_crtc->cursor.y); | ||
438 | } | ||
439 | |||
406 | static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, | 440 | static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, |
407 | struct drm_file *file, uint32_t handle, | 441 | struct drm_file *file, uint32_t handle, |
408 | uint32_t width, uint32_t height) | 442 | uint32_t width, uint32_t height) |
@@ -416,6 +450,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, | |||
416 | unsigned int depth; | 450 | unsigned int depth; |
417 | enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL; | 451 | enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL; |
418 | uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); | 452 | uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); |
453 | uint32_t roi_w, roi_h; | ||
419 | unsigned long flags; | 454 | unsigned long flags; |
420 | 455 | ||
421 | if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) { | 456 | if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) { |
@@ -446,6 +481,12 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, | |||
446 | spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); | 481 | spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); |
447 | old_bo = mdp5_crtc->cursor.scanout_bo; | 482 | old_bo = mdp5_crtc->cursor.scanout_bo; |
448 | 483 | ||
484 | mdp5_crtc->cursor.scanout_bo = cursor_bo; | ||
485 | mdp5_crtc->cursor.width = width; | ||
486 | mdp5_crtc->cursor.height = height; | ||
487 | |||
488 | get_roi(crtc, &roi_w, &roi_h); | ||
489 | |||
449 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride); | 490 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride); |
450 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm), | 491 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm), |
451 | MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888)); | 492 | MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888)); |
@@ -453,19 +494,14 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, | |||
453 | MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) | | 494 | MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) | |
454 | MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width)); | 495 | MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width)); |
455 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm), | 496 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm), |
456 | MDP5_LM_CURSOR_SIZE_ROI_H(height) | | 497 | MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) | |
457 | MDP5_LM_CURSOR_SIZE_ROI_W(width)); | 498 | MDP5_LM_CURSOR_SIZE_ROI_W(roi_w)); |
458 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr); | 499 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr); |
459 | 500 | ||
460 | |||
461 | blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN; | 501 | blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN; |
462 | blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_TRANSP_EN; | ||
463 | blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha); | 502 | blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha); |
464 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg); | 503 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg); |
465 | 504 | ||
466 | mdp5_crtc->cursor.scanout_bo = cursor_bo; | ||
467 | mdp5_crtc->cursor.width = width; | ||
468 | mdp5_crtc->cursor.height = height; | ||
469 | spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); | 505 | spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); |
470 | 506 | ||
471 | ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, true); | 507 | ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, true); |
@@ -489,31 +525,18 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) | |||
489 | struct mdp5_kms *mdp5_kms = get_kms(crtc); | 525 | struct mdp5_kms *mdp5_kms = get_kms(crtc); |
490 | struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); | 526 | struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); |
491 | uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); | 527 | uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); |
492 | uint32_t xres = crtc->mode.hdisplay; | ||
493 | uint32_t yres = crtc->mode.vdisplay; | ||
494 | uint32_t roi_w; | 528 | uint32_t roi_w; |
495 | uint32_t roi_h; | 529 | uint32_t roi_h; |
496 | unsigned long flags; | 530 | unsigned long flags; |
497 | 531 | ||
498 | x = (x > 0) ? x : 0; | 532 | /* In case the CRTC is disabled, just drop the cursor update */ |
499 | y = (y > 0) ? y : 0; | 533 | if (unlikely(!crtc->state->enable)) |
534 | return 0; | ||
500 | 535 | ||
501 | /* | 536 | mdp5_crtc->cursor.x = x = max(x, 0); |
502 | * Cursor Region Of Interest (ROI) is a plane read from cursor | 537 | mdp5_crtc->cursor.y = y = max(y, 0); |
503 | * buffer to render. The ROI region is determined by the visiblity of | 538 | |
504 | * the cursor point. In the default Cursor image the cursor point will | 539 | get_roi(crtc, &roi_w, &roi_h); |
505 | * be at the top left of the cursor image, unless it is specified | ||
506 | * otherwise using hotspot feature. | ||
507 | * | ||
508 | * If the cursor point reaches the right (xres - x < cursor.width) or | ||
509 | * bottom (yres - y < cursor.height) boundary of the screen, then ROI | ||
510 | * width and ROI height need to be evaluated to crop the cursor image | ||
511 | * accordingly. | ||
512 | * (xres-x) will be new cursor width when x > (xres - cursor.width) | ||
513 | * (yres-y) will be new cursor height when y > (yres - cursor.height) | ||
514 | */ | ||
515 | roi_w = min(mdp5_crtc->cursor.width, xres - x); | ||
516 | roi_h = min(mdp5_crtc->cursor.height, yres - y); | ||
517 | 540 | ||
518 | spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); | 541 | spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); |
519 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm), | 542 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm), |
@@ -544,8 +567,8 @@ static const struct drm_crtc_funcs mdp5_crtc_funcs = { | |||
544 | static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = { | 567 | static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = { |
545 | .mode_fixup = mdp5_crtc_mode_fixup, | 568 | .mode_fixup = mdp5_crtc_mode_fixup, |
546 | .mode_set_nofb = mdp5_crtc_mode_set_nofb, | 569 | .mode_set_nofb = mdp5_crtc_mode_set_nofb, |
547 | .prepare = mdp5_crtc_disable, | 570 | .disable = mdp5_crtc_disable, |
548 | .commit = mdp5_crtc_enable, | 571 | .enable = mdp5_crtc_enable, |
549 | .atomic_check = mdp5_crtc_atomic_check, | 572 | .atomic_check = mdp5_crtc_atomic_check, |
550 | .atomic_begin = mdp5_crtc_atomic_begin, | 573 | .atomic_begin = mdp5_crtc_atomic_begin, |
551 | .atomic_flush = mdp5_crtc_atomic_flush, | 574 | .atomic_flush = mdp5_crtc_atomic_flush, |
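
To make the cursor ROI clamping introduced above concrete, here is a tiny
standalone check with made-up numbers (a 1920x1080 mode and a 64x64 cursor);
it only reproduces the min() arithmetic from get_roi(), nothing
driver-specific:

#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	uint32_t xres = 1920, yres = 1080; /* crtc->mode.hdisplay / vdisplay */
	uint32_t cw = 64, ch = 64;         /* cursor.width / cursor.height */
	uint32_t x = 1900, y = 500;        /* cursor position */

	/* Crop the cursor at the screen edges, as get_roi() does. */
	uint32_t roi_w = MIN(cw, xres - x); /* 20: only 20 px remain before the right edge */
	uint32_t roi_h = MIN(ch, yres - y); /* 64: fully visible vertically */

	printf("roi_w=%u roi_h=%u\n", roi_w, roi_h);
	return 0;
}
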
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index d6a14bb99988..af0e02fa4f48 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -267,14 +267,14 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder) | |||
267 | mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1); | 267 | mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1); |
268 | spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); | 268 | spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); |
269 | 269 | ||
270 | mdp5_encoder->enabled = false; | 270 | mdp5_encoder->enabled = true; |
271 | } | 271 | } |
272 | 272 | ||
273 | static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = { | 273 | static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = { |
274 | .mode_fixup = mdp5_encoder_mode_fixup, | 274 | .mode_fixup = mdp5_encoder_mode_fixup, |
275 | .mode_set = mdp5_encoder_mode_set, | 275 | .mode_set = mdp5_encoder_mode_set, |
276 | .prepare = mdp5_encoder_disable, | 276 | .disable = mdp5_encoder_disable, |
277 | .commit = mdp5_encoder_enable, | 277 | .enable = mdp5_encoder_enable, |
278 | }; | 278 | }; |
279 | 279 | ||
280 | /* initialize encoder */ | 280 | /* initialize encoder */ |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index 70ac81edd40f..a9407105b9b7 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -34,7 +34,10 @@ static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus) | |||
34 | void mdp5_irq_preinstall(struct msm_kms *kms) | 34 | void mdp5_irq_preinstall(struct msm_kms *kms) |
35 | { | 35 | { |
36 | struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); | 36 | struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); |
37 | mdp5_enable(mdp5_kms); | ||
37 | mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff); | 38 | mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff); |
39 | mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000); | ||
40 | mdp5_disable(mdp5_kms); | ||
38 | } | 41 | } |
39 | 42 | ||
40 | int mdp5_irq_postinstall(struct msm_kms *kms) | 43 | int mdp5_irq_postinstall(struct msm_kms *kms) |
@@ -57,7 +60,9 @@ int mdp5_irq_postinstall(struct msm_kms *kms) | |||
57 | void mdp5_irq_uninstall(struct msm_kms *kms) | 60 | void mdp5_irq_uninstall(struct msm_kms *kms) |
58 | { | 61 | { |
59 | struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); | 62 | struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); |
63 | mdp5_enable(mdp5_kms); | ||
60 | mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000); | 64 | mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000); |
65 | mdp5_disable(mdp5_kms); | ||
61 | } | 66 | } |
62 | 67 | ||
63 | static void mdp5_irq_mdp(struct mdp_kms *mdp_kms) | 68 | static void mdp5_irq_mdp(struct mdp_kms *mdp_kms) |
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 871aa2108dc6..18fd643b6e69 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -219,8 +219,10 @@ int msm_atomic_commit(struct drm_device *dev, | |||
219 | * mark our set of crtc's as busy: | 219 | * mark our set of crtc's as busy: |
220 | */ | 220 | */ |
221 | ret = start_atomic(dev->dev_private, c->crtc_mask); | 221 | ret = start_atomic(dev->dev_private, c->crtc_mask); |
222 | if (ret) | 222 | if (ret) { |
223 | kfree(c); | ||
223 | return ret; | 224 | return ret; |
225 | } | ||
224 | 226 | ||
225 | /* | 227 | /* |
226 | * This is the point of no return - everything below never fails except | 228 | * This is the point of no return - everything below never fails except |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 79924e4b1b49..6751553abe4a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -418,7 +418,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper, | |||
418 | nouveau_fbcon_zfill(dev, fbcon); | 418 | nouveau_fbcon_zfill(dev, fbcon); |
419 | 419 | ||
420 | /* To allow resizeing without swapping buffers */ | 420 | /* To allow resizeing without swapping buffers */ |
421 | NV_INFO(drm, "allocated %dx%d fb: 0x%lx, bo %p\n", | 421 | NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n", |
422 | nouveau_fb->base.width, nouveau_fb->base.height, | 422 | nouveau_fb->base.width, nouveau_fb->base.height, |
423 | nvbo->bo.offset, nvbo); | 423 | nvbo->bo.offset, nvbo); |
424 | 424 | ||
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index ed644a4f6f57..86807ee91bd1 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1405,6 +1405,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, | |||
1405 | (x << 16) | y); | 1405 | (x << 16) | y); |
1406 | viewport_w = crtc->mode.hdisplay; | 1406 | viewport_w = crtc->mode.hdisplay; |
1407 | viewport_h = (crtc->mode.vdisplay + 1) & ~1; | 1407 | viewport_h = (crtc->mode.vdisplay + 1) & ~1; |
1408 | if ((rdev->family >= CHIP_BONAIRE) && | ||
1409 | (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)) | ||
1410 | viewport_h *= 2; | ||
1408 | WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset, | 1411 | WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset, |
1409 | (viewport_w << 16) | viewport_h); | 1412 | (viewport_w << 16) | viewport_h); |
1410 | 1413 | ||
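The DCE8 interlaced fix doubles the already even-rounded viewport height. A quick arithmetic check of the "(v + 1) & ~1" round-up-to-even idiom and the doubling, using a few made-up mode heights.

#include <stdio.h>

int main(void)
{
        int vdisplay[] = { 480, 483, 1080, 1081 };

        for (int i = 0; i < 4; i++) {
                int even = (vdisplay[i] + 1) & ~1;          /* round up to even */
                printf("vdisplay %4d -> viewport_h %4d, interlaced on DCE8: %4d\n",
                       vdisplay[i], even, even * 2);
        }
        return 0;
}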
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index 7fe7b749e182..c39c1d0d9d4e 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c | |||
@@ -1626,7 +1626,6 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) | |||
1626 | struct radeon_connector *radeon_connector = NULL; | 1626 | struct radeon_connector *radeon_connector = NULL; |
1627 | struct radeon_connector_atom_dig *radeon_dig_connector = NULL; | 1627 | struct radeon_connector_atom_dig *radeon_dig_connector = NULL; |
1628 | bool travis_quirk = false; | 1628 | bool travis_quirk = false; |
1629 | int encoder_mode; | ||
1630 | 1629 | ||
1631 | if (connector) { | 1630 | if (connector) { |
1632 | radeon_connector = to_radeon_connector(connector); | 1631 | radeon_connector = to_radeon_connector(connector); |
@@ -1722,13 +1721,6 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) | |||
1722 | } | 1721 | } |
1723 | break; | 1722 | break; |
1724 | } | 1723 | } |
1725 | |||
1726 | encoder_mode = atombios_get_encoder_mode(encoder); | ||
1727 | if (connector && (radeon_audio != 0) && | ||
1728 | ((encoder_mode == ATOM_ENCODER_MODE_HDMI) || | ||
1729 | (ENCODER_MODE_IS_DP(encoder_mode) && | ||
1730 | drm_detect_monitor_audio(radeon_connector_edid(connector))))) | ||
1731 | radeon_audio_dpms(encoder, mode); | ||
1732 | } | 1724 | } |
1733 | 1725 | ||
1734 | static void | 1726 | static void |
@@ -1737,10 +1729,19 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
1737 | struct drm_device *dev = encoder->dev; | 1729 | struct drm_device *dev = encoder->dev; |
1738 | struct radeon_device *rdev = dev->dev_private; | 1730 | struct radeon_device *rdev = dev->dev_private; |
1739 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 1731 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
1732 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | ||
1733 | int encoder_mode = atombios_get_encoder_mode(encoder); | ||
1740 | 1734 | ||
1741 | DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n", | 1735 | DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n", |
1742 | radeon_encoder->encoder_id, mode, radeon_encoder->devices, | 1736 | radeon_encoder->encoder_id, mode, radeon_encoder->devices, |
1743 | radeon_encoder->active_device); | 1737 | radeon_encoder->active_device); |
1738 | |||
1739 | if (connector && (radeon_audio != 0) && | ||
1740 | ((encoder_mode == ATOM_ENCODER_MODE_HDMI) || | ||
1741 | (ENCODER_MODE_IS_DP(encoder_mode) && | ||
1742 | drm_detect_monitor_audio(radeon_connector_edid(connector))))) | ||
1743 | radeon_audio_dpms(encoder, mode); | ||
1744 | |||
1744 | switch (radeon_encoder->encoder_id) { | 1745 | switch (radeon_encoder->encoder_id) { |
1745 | case ENCODER_OBJECT_ID_INTERNAL_TMDS1: | 1746 | case ENCODER_OBJECT_ID_INTERNAL_TMDS1: |
1746 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: | 1747 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: |
@@ -2170,12 +2171,6 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
2170 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: | 2171 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: |
2171 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | 2172 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: |
2172 | /* handled in dpms */ | 2173 | /* handled in dpms */ |
2173 | encoder_mode = atombios_get_encoder_mode(encoder); | ||
2174 | if (connector && (radeon_audio != 0) && | ||
2175 | ((encoder_mode == ATOM_ENCODER_MODE_HDMI) || | ||
2176 | (ENCODER_MODE_IS_DP(encoder_mode) && | ||
2177 | drm_detect_monitor_audio(radeon_connector_edid(connector))))) | ||
2178 | radeon_audio_mode_set(encoder, adjusted_mode); | ||
2179 | break; | 2174 | break; |
2180 | case ENCODER_OBJECT_ID_INTERNAL_DDI: | 2175 | case ENCODER_OBJECT_ID_INTERNAL_DDI: |
2181 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: | 2176 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: |
@@ -2197,6 +2192,13 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
2197 | } | 2192 | } |
2198 | 2193 | ||
2199 | atombios_apply_encoder_quirks(encoder, adjusted_mode); | 2194 | atombios_apply_encoder_quirks(encoder, adjusted_mode); |
2195 | |||
2196 | encoder_mode = atombios_get_encoder_mode(encoder); | ||
2197 | if (connector && (radeon_audio != 0) && | ||
2198 | ((encoder_mode == ATOM_ENCODER_MODE_HDMI) || | ||
2199 | (ENCODER_MODE_IS_DP(encoder_mode) && | ||
2200 | drm_detect_monitor_audio(radeon_connector_edid(connector))))) | ||
2201 | radeon_audio_mode_set(encoder, adjusted_mode); | ||
2200 | } | 2202 | } |
2201 | 2203 | ||
2202 | static bool | 2204 | static bool |
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 0c993da9c8fb..3e670d344a20 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c | |||
@@ -7555,6 +7555,9 @@ int cik_irq_set(struct radeon_device *rdev) | |||
7555 | WREG32(DC_HPD5_INT_CONTROL, hpd5); | 7555 | WREG32(DC_HPD5_INT_CONTROL, hpd5); |
7556 | WREG32(DC_HPD6_INT_CONTROL, hpd6); | 7556 | WREG32(DC_HPD6_INT_CONTROL, hpd6); |
7557 | 7557 | ||
7558 | /* posting read */ | ||
7559 | RREG32(SRBM_STATUS); | ||
7560 | |||
7558 | return 0; | 7561 | return 0; |
7559 | } | 7562 | } |
7560 | 7563 | ||
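This and the other *_irq_set hunks below all add the same thing: a read-back ("posting read") of a status register right after the interrupt-enable writes, so the posted MMIO writes are flushed to the device before the function returns. A generic sketch of the idiom follows; the register offsets and helper names are invented, not radeon's.

#include <stdint.h>

#define IRQ_ENABLE  0x0040            /* hypothetical offsets */
#define DEV_STATUS  0x0e50

static void mmio_write(volatile uint32_t *mmio, uint32_t reg, uint32_t val)
{
        mmio[reg / 4] = val;
}

static uint32_t mmio_read(volatile uint32_t *mmio, uint32_t reg)
{
        return mmio[reg / 4];
}

static void irq_set(volatile uint32_t *mmio, uint32_t mask)
{
        mmio_write(mmio, IRQ_ENABLE, mask);

        /* posting read: the value is thrown away, but on a real bus the read
         * cannot complete until the writes queued ahead of it have landed */
        (void)mmio_read(mmio, DEV_STATUS);
}

int main(void)
{
        static uint32_t fake_bar[0x1000 / 4];   /* stand-in for an ioremapped BAR */

        irq_set(fake_bar, 0xf);
        return fake_bar[IRQ_ENABLE / 4] == 0xf ? 0 : 1;
}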
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c index 192c80389151..3adc2afe32aa 100644 --- a/drivers/gpu/drm/radeon/dce6_afmt.c +++ b/drivers/gpu/drm/radeon/dce6_afmt.c | |||
@@ -26,6 +26,9 @@ | |||
26 | #include "radeon_audio.h" | 26 | #include "radeon_audio.h" |
27 | #include "sid.h" | 27 | #include "sid.h" |
28 | 28 | ||
29 | #define DCE8_DCCG_AUDIO_DTO1_PHASE 0x05b8 | ||
30 | #define DCE8_DCCG_AUDIO_DTO1_MODULE 0x05bc | ||
31 | |||
29 | u32 dce6_endpoint_rreg(struct radeon_device *rdev, | 32 | u32 dce6_endpoint_rreg(struct radeon_device *rdev, |
30 | u32 block_offset, u32 reg) | 33 | u32 block_offset, u32 reg) |
31 | { | 34 | { |
@@ -252,72 +255,67 @@ void dce6_audio_enable(struct radeon_device *rdev, | |||
252 | void dce6_hdmi_audio_set_dto(struct radeon_device *rdev, | 255 | void dce6_hdmi_audio_set_dto(struct radeon_device *rdev, |
253 | struct radeon_crtc *crtc, unsigned int clock) | 256 | struct radeon_crtc *crtc, unsigned int clock) |
254 | { | 257 | { |
255 | /* Two dtos; generally use dto0 for HDMI */ | 258 | /* Two dtos; generally use dto0 for HDMI */ |
256 | u32 value = 0; | 259 | u32 value = 0; |
257 | 260 | ||
258 | if (crtc) | 261 | if (crtc) |
259 | value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id); | 262 | value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id); |
260 | 263 | ||
261 | WREG32(DCCG_AUDIO_DTO_SOURCE, value); | 264 | WREG32(DCCG_AUDIO_DTO_SOURCE, value); |
262 | 265 | ||
263 | /* Express [24MHz / target pixel clock] as an exact rational | 266 | /* Express [24MHz / target pixel clock] as an exact rational |
264 | * number (ratio of two integers): DCCG_AUDIO_DTOx_PHASE | 267 | * number (ratio of two integers): DCCG_AUDIO_DTOx_PHASE |
265 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator | 268 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator |
266 | */ | 269 | */ |
267 | WREG32(DCCG_AUDIO_DTO0_PHASE, 24000); | 270 | WREG32(DCCG_AUDIO_DTO0_PHASE, 24000); |
268 | WREG32(DCCG_AUDIO_DTO0_MODULE, clock); | 271 | WREG32(DCCG_AUDIO_DTO0_MODULE, clock); |
269 | } | 272 | } |
270 | 273 | ||
271 | void dce6_dp_audio_set_dto(struct radeon_device *rdev, | 274 | void dce6_dp_audio_set_dto(struct radeon_device *rdev, |
272 | struct radeon_crtc *crtc, unsigned int clock) | 275 | struct radeon_crtc *crtc, unsigned int clock) |
273 | { | 276 | { |
274 | /* Two dtos; generally use dto1 for DP */ | 277 | /* Two dtos; generally use dto1 for DP */ |
275 | u32 value = 0; | 278 | u32 value = 0; |
276 | value |= DCCG_AUDIO_DTO_SEL; | 279 | value |= DCCG_AUDIO_DTO_SEL; |
277 | 280 | ||
278 | if (crtc) | 281 | if (crtc) |
279 | value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id); | 282 | value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id); |
280 | 283 | ||
281 | WREG32(DCCG_AUDIO_DTO_SOURCE, value); | 284 | WREG32(DCCG_AUDIO_DTO_SOURCE, value); |
282 | 285 | ||
283 | /* Express [24MHz / target pixel clock] as an exact rational | 286 | /* Express [24MHz / target pixel clock] as an exact rational |
284 | * number (ratio of two integers): DCCG_AUDIO_DTOx_PHASE | 287 | * number (ratio of two integers): DCCG_AUDIO_DTOx_PHASE |
285 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator | 288 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator |
286 | */ | 289 | */ |
287 | WREG32(DCCG_AUDIO_DTO1_PHASE, 24000); | 290 | if (ASIC_IS_DCE8(rdev)) { |
288 | WREG32(DCCG_AUDIO_DTO1_MODULE, clock); | 291 | WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000); |
292 | WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock); | ||
293 | } else { | ||
294 | WREG32(DCCG_AUDIO_DTO1_PHASE, 24000); | ||
295 | WREG32(DCCG_AUDIO_DTO1_MODULE, clock); | ||
296 | } | ||
289 | } | 297 | } |
290 | 298 | ||
291 | void dce6_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable) | 299 | void dce6_dp_enable(struct drm_encoder *encoder, bool enable) |
292 | { | 300 | { |
293 | struct drm_device *dev = encoder->dev; | 301 | struct drm_device *dev = encoder->dev; |
294 | struct radeon_device *rdev = dev->dev_private; | 302 | struct radeon_device *rdev = dev->dev_private; |
295 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 303 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
296 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 304 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
297 | uint32_t offset; | ||
298 | 305 | ||
299 | if (!dig || !dig->afmt) | 306 | if (!dig || !dig->afmt) |
300 | return; | 307 | return; |
301 | 308 | ||
302 | offset = dig->afmt->offset; | ||
303 | |||
304 | if (enable) { | 309 | if (enable) { |
305 | if (dig->afmt->enabled) | 310 | WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset, |
306 | return; | 311 | EVERGREEN_DP_SEC_TIMESTAMP_MODE(1)); |
307 | 312 | WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, | |
308 | WREG32(EVERGREEN_DP_SEC_TIMESTAMP + offset, EVERGREEN_DP_SEC_TIMESTAMP_MODE(1)); | 313 | EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */ |
309 | WREG32(EVERGREEN_DP_SEC_CNTL + offset, | 314 | EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */ |
310 | EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */ | 315 | EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */ |
311 | EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */ | 316 | EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */ |
312 | EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */ | ||
313 | EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */ | ||
314 | radeon_audio_enable(rdev, dig->afmt->pin, true); | ||
315 | } else { | 317 | } else { |
316 | if (!dig->afmt->enabled) | 318 | WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0); |
317 | return; | ||
318 | |||
319 | WREG32(EVERGREEN_DP_SEC_CNTL + offset, 0); | ||
320 | radeon_audio_enable(rdev, dig->afmt->pin, false); | ||
321 | } | 319 | } |
322 | 320 | ||
323 | dig->afmt->enabled = enable; | 321 | dig->afmt->enabled = enable; |
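The DTO comments in this file express 24 MHz over the target pixel clock as a ratio of two integers (PHASE over MODULE); the DCE8 branch only changes which register offsets receive those two numbers. A quick worked check of the ratio for a few common pixel clocks, with values in kHz chosen for illustration.

#include <stdio.h>

int main(void)
{
        unsigned int phase_khz = 24000;                       /* 24 MHz */
        unsigned int pixel_khz[] = { 74250, 148500, 297000 }; /* 720p60, 1080p60, 2160p30 */

        for (int i = 0; i < 3; i++)
                printf("pixel clock %6u kHz -> DTO = %u / %u = %.6f\n",
                       pixel_khz[i], phase_khz, pixel_khz[i],
                       (double)phase_khz / pixel_khz[i]);
        return 0;
}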
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 4c0e24b3bb90..973df064c14f 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -4593,6 +4593,9 @@ int evergreen_irq_set(struct radeon_device *rdev) | |||
4593 | WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5); | 4593 | WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5); |
4594 | WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6); | 4594 | WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6); |
4595 | 4595 | ||
4596 | /* posting read */ | ||
4597 | RREG32(SRBM_STATUS); | ||
4598 | |||
4596 | return 0; | 4599 | return 0; |
4597 | } | 4600 | } |
4598 | 4601 | ||
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index 1d9aebc79595..c18d4ecbd95d 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c | |||
@@ -272,7 +272,7 @@ void dce4_hdmi_audio_set_dto(struct radeon_device *rdev, | |||
272 | } | 272 | } |
273 | 273 | ||
274 | void dce4_dp_audio_set_dto(struct radeon_device *rdev, | 274 | void dce4_dp_audio_set_dto(struct radeon_device *rdev, |
275 | struct radeon_crtc *crtc, unsigned int clock) | 275 | struct radeon_crtc *crtc, unsigned int clock) |
276 | { | 276 | { |
277 | u32 value; | 277 | u32 value; |
278 | 278 | ||
@@ -294,7 +294,7 @@ void dce4_dp_audio_set_dto(struct radeon_device *rdev, | |||
294 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator | 294 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator |
295 | */ | 295 | */ |
296 | WREG32(DCCG_AUDIO_DTO1_PHASE, 24000); | 296 | WREG32(DCCG_AUDIO_DTO1_PHASE, 24000); |
297 | WREG32(DCCG_AUDIO_DTO1_MODULE, rdev->clock.max_pixel_clock * 10); | 297 | WREG32(DCCG_AUDIO_DTO1_MODULE, clock); |
298 | } | 298 | } |
299 | 299 | ||
300 | void dce4_set_vbi_packet(struct drm_encoder *encoder, u32 offset) | 300 | void dce4_set_vbi_packet(struct drm_encoder *encoder, u32 offset) |
@@ -350,20 +350,9 @@ void dce4_set_audio_packet(struct drm_encoder *encoder, u32 offset) | |||
350 | struct drm_device *dev = encoder->dev; | 350 | struct drm_device *dev = encoder->dev; |
351 | struct radeon_device *rdev = dev->dev_private; | 351 | struct radeon_device *rdev = dev->dev_private; |
352 | 352 | ||
353 | WREG32(HDMI_INFOFRAME_CONTROL0 + offset, | ||
354 | HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */ | ||
355 | HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */ | ||
356 | |||
357 | WREG32(AFMT_INFOFRAME_CONTROL0 + offset, | 353 | WREG32(AFMT_INFOFRAME_CONTROL0 + offset, |
358 | AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */ | 354 | AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */ |
359 | 355 | ||
360 | WREG32(HDMI_INFOFRAME_CONTROL1 + offset, | ||
361 | HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */ | ||
362 | |||
363 | WREG32(HDMI_AUDIO_PACKET_CONTROL + offset, | ||
364 | HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */ | ||
365 | HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */ | ||
366 | |||
367 | WREG32(AFMT_60958_0 + offset, | 356 | WREG32(AFMT_60958_0 + offset, |
368 | AFMT_60958_CS_CHANNEL_NUMBER_L(1)); | 357 | AFMT_60958_CS_CHANNEL_NUMBER_L(1)); |
369 | 358 | ||
@@ -408,15 +397,19 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable) | |||
408 | if (!dig || !dig->afmt) | 397 | if (!dig || !dig->afmt) |
409 | return; | 398 | return; |
410 | 399 | ||
411 | /* Silent, r600_hdmi_enable will raise WARN for us */ | 400 | if (enable) { |
412 | if (enable && dig->afmt->enabled) | 401 | WREG32(HDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, |
413 | return; | 402 | HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */ |
414 | if (!enable && !dig->afmt->enabled) | 403 | |
415 | return; | 404 | WREG32(HDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, |
405 | HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */ | ||
406 | HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */ | ||
416 | 407 | ||
417 | if (!enable && dig->afmt->pin) { | 408 | WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, |
418 | radeon_audio_enable(rdev, dig->afmt->pin, 0); | 409 | HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */ |
419 | dig->afmt->pin = NULL; | 410 | HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */ |
411 | } else { | ||
412 | WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, 0); | ||
420 | } | 413 | } |
421 | 414 | ||
422 | dig->afmt->enabled = enable; | 415 | dig->afmt->enabled = enable; |
@@ -425,33 +418,28 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable) | |||
425 | enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id); | 418 | enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id); |
426 | } | 419 | } |
427 | 420 | ||
428 | void evergreen_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable) | 421 | void evergreen_dp_enable(struct drm_encoder *encoder, bool enable) |
429 | { | 422 | { |
430 | struct drm_device *dev = encoder->dev; | 423 | struct drm_device *dev = encoder->dev; |
431 | struct radeon_device *rdev = dev->dev_private; | 424 | struct radeon_device *rdev = dev->dev_private; |
432 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 425 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
433 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 426 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
434 | uint32_t offset; | ||
435 | 427 | ||
436 | if (!dig || !dig->afmt) | 428 | if (!dig || !dig->afmt) |
437 | return; | 429 | return; |
438 | 430 | ||
439 | offset = dig->afmt->offset; | ||
440 | |||
441 | if (enable) { | 431 | if (enable) { |
442 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | 432 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
443 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 433 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
444 | struct radeon_connector_atom_dig *dig_connector; | 434 | struct radeon_connector_atom_dig *dig_connector; |
445 | uint32_t val; | 435 | uint32_t val; |
446 | 436 | ||
447 | if (dig->afmt->enabled) | 437 | WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset, |
448 | return; | 438 | EVERGREEN_DP_SEC_TIMESTAMP_MODE(1)); |
449 | |||
450 | WREG32(EVERGREEN_DP_SEC_TIMESTAMP + offset, EVERGREEN_DP_SEC_TIMESTAMP_MODE(1)); | ||
451 | 439 | ||
452 | if (radeon_connector->con_priv) { | 440 | if (radeon_connector->con_priv) { |
453 | dig_connector = radeon_connector->con_priv; | 441 | dig_connector = radeon_connector->con_priv; |
454 | val = RREG32(EVERGREEN_DP_SEC_AUD_N + offset); | 442 | val = RREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset); |
455 | val &= ~EVERGREEN_DP_SEC_N_BASE_MULTIPLE(0xf); | 443 | val &= ~EVERGREEN_DP_SEC_N_BASE_MULTIPLE(0xf); |
456 | 444 | ||
457 | if (dig_connector->dp_clock == 162000) | 445 | if (dig_connector->dp_clock == 162000) |
@@ -459,21 +447,16 @@ void evergreen_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable) | |||
459 | else | 447 | else |
460 | val |= EVERGREEN_DP_SEC_N_BASE_MULTIPLE(5); | 448 | val |= EVERGREEN_DP_SEC_N_BASE_MULTIPLE(5); |
461 | 449 | ||
462 | WREG32(EVERGREEN_DP_SEC_AUD_N + offset, val); | 450 | WREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset, val); |
463 | } | 451 | } |
464 | 452 | ||
465 | WREG32(EVERGREEN_DP_SEC_CNTL + offset, | 453 | WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, |
466 | EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */ | 454 | EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */ |
467 | EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */ | 455 | EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */ |
468 | EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */ | 456 | EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */ |
469 | EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */ | 457 | EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */ |
470 | radeon_audio_enable(rdev, dig->afmt->pin, 0xf); | ||
471 | } else { | 458 | } else { |
472 | if (!dig->afmt->enabled) | 459 | WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0); |
473 | return; | ||
474 | |||
475 | WREG32(EVERGREEN_DP_SEC_CNTL + offset, 0); | ||
476 | radeon_audio_enable(rdev, dig->afmt->pin, 0); | ||
477 | } | 460 | } |
478 | 461 | ||
479 | dig->afmt->enabled = enable; | 462 | dig->afmt->enabled = enable; |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 279801ca5110..04f2514f7564 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -728,6 +728,10 @@ int r100_irq_set(struct radeon_device *rdev) | |||
728 | tmp |= RADEON_FP2_DETECT_MASK; | 728 | tmp |= RADEON_FP2_DETECT_MASK; |
729 | } | 729 | } |
730 | WREG32(RADEON_GEN_INT_CNTL, tmp); | 730 | WREG32(RADEON_GEN_INT_CNTL, tmp); |
731 | |||
732 | /* read back to post the write */ | ||
733 | RREG32(RADEON_GEN_INT_CNTL); | ||
734 | |||
731 | return 0; | 735 | return 0; |
732 | } | 736 | } |
733 | 737 | ||
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 07a71a2488c9..2fcad344492f 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -3784,6 +3784,9 @@ int r600_irq_set(struct radeon_device *rdev) | |||
3784 | WREG32(RV770_CG_THERMAL_INT, thermal_int); | 3784 | WREG32(RV770_CG_THERMAL_INT, thermal_int); |
3785 | } | 3785 | } |
3786 | 3786 | ||
3787 | /* posting read */ | ||
3788 | RREG32(R_000E50_SRBM_STATUS); | ||
3789 | |||
3787 | return 0; | 3790 | return 0; |
3788 | } | 3791 | } |
3789 | 3792 | ||
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index 62c91ed669ce..dd6606b8e23c 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c | |||
@@ -476,17 +476,6 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable) | |||
476 | if (!dig || !dig->afmt) | 476 | if (!dig || !dig->afmt) |
477 | return; | 477 | return; |
478 | 478 | ||
479 | /* Silent, r600_hdmi_enable will raise WARN for us */ | ||
480 | if (enable && dig->afmt->enabled) | ||
481 | return; | ||
482 | if (!enable && !dig->afmt->enabled) | ||
483 | return; | ||
484 | |||
485 | if (!enable && dig->afmt->pin) { | ||
486 | radeon_audio_enable(rdev, dig->afmt->pin, 0); | ||
487 | dig->afmt->pin = NULL; | ||
488 | } | ||
489 | |||
490 | /* Older chipsets require setting HDMI and routing manually */ | 479 | /* Older chipsets require setting HDMI and routing manually */ |
491 | if (!ASIC_IS_DCE3(rdev)) { | 480 | if (!ASIC_IS_DCE3(rdev)) { |
492 | if (enable) | 481 | if (enable) |
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c index a3ceef6d9632..b21ef69a34ac 100644 --- a/drivers/gpu/drm/radeon/radeon_audio.c +++ b/drivers/gpu/drm/radeon/radeon_audio.c | |||
@@ -101,8 +101,8 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder, | |||
101 | struct drm_display_mode *mode); | 101 | struct drm_display_mode *mode); |
102 | void r600_hdmi_enable(struct drm_encoder *encoder, bool enable); | 102 | void r600_hdmi_enable(struct drm_encoder *encoder, bool enable); |
103 | void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable); | 103 | void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable); |
104 | void evergreen_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable); | 104 | void evergreen_dp_enable(struct drm_encoder *encoder, bool enable); |
105 | void dce6_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable); | 105 | void dce6_dp_enable(struct drm_encoder *encoder, bool enable); |
106 | 106 | ||
107 | static const u32 pin_offsets[7] = | 107 | static const u32 pin_offsets[7] = |
108 | { | 108 | { |
@@ -210,7 +210,7 @@ static struct radeon_audio_funcs dce4_dp_funcs = { | |||
210 | .set_avi_packet = evergreen_set_avi_packet, | 210 | .set_avi_packet = evergreen_set_avi_packet, |
211 | .set_audio_packet = dce4_set_audio_packet, | 211 | .set_audio_packet = dce4_set_audio_packet, |
212 | .mode_set = radeon_audio_dp_mode_set, | 212 | .mode_set = radeon_audio_dp_mode_set, |
213 | .dpms = evergreen_enable_dp_audio_packets, | 213 | .dpms = evergreen_dp_enable, |
214 | }; | 214 | }; |
215 | 215 | ||
216 | static struct radeon_audio_funcs dce6_hdmi_funcs = { | 216 | static struct radeon_audio_funcs dce6_hdmi_funcs = { |
@@ -240,7 +240,7 @@ static struct radeon_audio_funcs dce6_dp_funcs = { | |||
240 | .set_avi_packet = evergreen_set_avi_packet, | 240 | .set_avi_packet = evergreen_set_avi_packet, |
241 | .set_audio_packet = dce4_set_audio_packet, | 241 | .set_audio_packet = dce4_set_audio_packet, |
242 | .mode_set = radeon_audio_dp_mode_set, | 242 | .mode_set = radeon_audio_dp_mode_set, |
243 | .dpms = dce6_enable_dp_audio_packets, | 243 | .dpms = dce6_dp_enable, |
244 | }; | 244 | }; |
245 | 245 | ||
246 | static void radeon_audio_interface_init(struct radeon_device *rdev) | 246 | static void radeon_audio_interface_init(struct radeon_device *rdev) |
@@ -452,7 +452,7 @@ void radeon_audio_enable(struct radeon_device *rdev, | |||
452 | } | 452 | } |
453 | 453 | ||
454 | void radeon_audio_detect(struct drm_connector *connector, | 454 | void radeon_audio_detect(struct drm_connector *connector, |
455 | enum drm_connector_status status) | 455 | enum drm_connector_status status) |
456 | { | 456 | { |
457 | struct radeon_device *rdev; | 457 | struct radeon_device *rdev; |
458 | struct radeon_encoder *radeon_encoder; | 458 | struct radeon_encoder *radeon_encoder; |
@@ -483,14 +483,11 @@ void radeon_audio_detect(struct drm_connector *connector, | |||
483 | else | 483 | else |
484 | radeon_encoder->audio = rdev->audio.hdmi_funcs; | 484 | radeon_encoder->audio = rdev->audio.hdmi_funcs; |
485 | 485 | ||
486 | radeon_audio_write_speaker_allocation(connector->encoder); | 486 | dig->afmt->pin = radeon_audio_get_pin(connector->encoder); |
487 | radeon_audio_write_sad_regs(connector->encoder); | ||
488 | if (connector->encoder->crtc) | ||
489 | radeon_audio_write_latency_fields(connector->encoder, | ||
490 | &connector->encoder->crtc->mode); | ||
491 | radeon_audio_enable(rdev, dig->afmt->pin, 0xf); | 487 | radeon_audio_enable(rdev, dig->afmt->pin, 0xf); |
492 | } else { | 488 | } else { |
493 | radeon_audio_enable(rdev, dig->afmt->pin, 0); | 489 | radeon_audio_enable(rdev, dig->afmt->pin, 0); |
490 | dig->afmt->pin = NULL; | ||
494 | } | 491 | } |
495 | } | 492 | } |
496 | 493 | ||
@@ -694,23 +691,22 @@ static void radeon_audio_set_mute(struct drm_encoder *encoder, bool mute) | |||
694 | * update the info frames with the data from the current display mode | 691 | * update the info frames with the data from the current display mode |
695 | */ | 692 | */ |
696 | static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder, | 693 | static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder, |
697 | struct drm_display_mode *mode) | 694 | struct drm_display_mode *mode) |
698 | { | 695 | { |
699 | struct radeon_device *rdev = encoder->dev->dev_private; | ||
700 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 696 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
701 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 697 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
702 | 698 | ||
703 | if (!dig || !dig->afmt) | 699 | if (!dig || !dig->afmt) |
704 | return; | 700 | return; |
705 | 701 | ||
706 | /* disable audio prior to setting up hw */ | 702 | radeon_audio_set_mute(encoder, true); |
707 | dig->afmt->pin = radeon_audio_get_pin(encoder); | ||
708 | radeon_audio_enable(rdev, dig->afmt->pin, 0); | ||
709 | 703 | ||
704 | radeon_audio_write_speaker_allocation(encoder); | ||
705 | radeon_audio_write_sad_regs(encoder); | ||
706 | radeon_audio_write_latency_fields(encoder, mode); | ||
710 | radeon_audio_set_dto(encoder, mode->clock); | 707 | radeon_audio_set_dto(encoder, mode->clock); |
711 | radeon_audio_set_vbi_packet(encoder); | 708 | radeon_audio_set_vbi_packet(encoder); |
712 | radeon_hdmi_set_color_depth(encoder); | 709 | radeon_hdmi_set_color_depth(encoder); |
713 | radeon_audio_set_mute(encoder, false); | ||
714 | radeon_audio_update_acr(encoder, mode->clock); | 710 | radeon_audio_update_acr(encoder, mode->clock); |
715 | radeon_audio_set_audio_packet(encoder); | 711 | radeon_audio_set_audio_packet(encoder); |
716 | radeon_audio_select_pin(encoder); | 712 | radeon_audio_select_pin(encoder); |
@@ -718,8 +714,7 @@ static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder, | |||
718 | if (radeon_audio_set_avi_packet(encoder, mode) < 0) | 714 | if (radeon_audio_set_avi_packet(encoder, mode) < 0) |
719 | return; | 715 | return; |
720 | 716 | ||
721 | /* enable audio after to setting up hw */ | 717 | radeon_audio_set_mute(encoder, false); |
722 | radeon_audio_enable(rdev, dig->afmt->pin, 0xf); | ||
723 | } | 718 | } |
724 | 719 | ||
725 | static void radeon_audio_dp_mode_set(struct drm_encoder *encoder, | 720 | static void radeon_audio_dp_mode_set(struct drm_encoder *encoder, |
@@ -729,23 +724,26 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder, | |||
729 | struct radeon_device *rdev = dev->dev_private; | 724 | struct radeon_device *rdev = dev->dev_private; |
730 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 725 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
731 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 726 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
727 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | ||
728 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
729 | struct radeon_connector_atom_dig *dig_connector = | ||
730 | radeon_connector->con_priv; | ||
732 | 731 | ||
733 | if (!dig || !dig->afmt) | 732 | if (!dig || !dig->afmt) |
734 | return; | 733 | return; |
735 | 734 | ||
736 | /* disable audio prior to setting up hw */ | 735 | radeon_audio_write_speaker_allocation(encoder); |
737 | dig->afmt->pin = radeon_audio_get_pin(encoder); | 736 | radeon_audio_write_sad_regs(encoder); |
738 | radeon_audio_enable(rdev, dig->afmt->pin, 0); | 737 | radeon_audio_write_latency_fields(encoder, mode); |
739 | 738 | if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev)) | |
740 | radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10); | 739 | radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10); |
740 | else | ||
741 | radeon_audio_set_dto(encoder, dig_connector->dp_clock); | ||
741 | radeon_audio_set_audio_packet(encoder); | 742 | radeon_audio_set_audio_packet(encoder); |
742 | radeon_audio_select_pin(encoder); | 743 | radeon_audio_select_pin(encoder); |
743 | 744 | ||
744 | if (radeon_audio_set_avi_packet(encoder, mode) < 0) | 745 | if (radeon_audio_set_avi_packet(encoder, mode) < 0) |
745 | return; | 746 | return; |
746 | |||
747 | /* enable audio after to setting up hw */ | ||
748 | radeon_audio_enable(rdev, dig->afmt->pin, 0xf); | ||
749 | } | 747 | } |
750 | 748 | ||
751 | void radeon_audio_mode_set(struct drm_encoder *encoder, | 749 | void radeon_audio_mode_set(struct drm_encoder *encoder, |
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index a579ed379f20..4d0f96cc3da4 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
@@ -256,11 +256,13 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) | |||
256 | u32 ring = RADEON_CS_RING_GFX; | 256 | u32 ring = RADEON_CS_RING_GFX; |
257 | s32 priority = 0; | 257 | s32 priority = 0; |
258 | 258 | ||
259 | INIT_LIST_HEAD(&p->validated); | ||
260 | |||
259 | if (!cs->num_chunks) { | 261 | if (!cs->num_chunks) { |
260 | return 0; | 262 | return 0; |
261 | } | 263 | } |
264 | |||
262 | /* get chunks */ | 265 | /* get chunks */ |
263 | INIT_LIST_HEAD(&p->validated); | ||
264 | p->idx = 0; | 266 | p->idx = 0; |
265 | p->ib.sa_bo = NULL; | 267 | p->ib.sa_bo = NULL; |
266 | p->const_ib.sa_bo = NULL; | 268 | p->const_ib.sa_bo = NULL; |
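Moving INIT_LIST_HEAD() ahead of the num_chunks early return matches the "fix DRM_IOCTL_RADEON_CS oops" entry in this pull: cleanup code that walks p->validated must never see an uninitialized list head. A standalone sketch of the same init-before-any-return rule, with a toy doubly linked list standing in for the kernel's list_head.

#include <stdio.h>

struct node { struct node *next, *prev; };

static void list_init(struct node *head) { head->next = head->prev = head; }

static int list_empty(const struct node *head) { return head->next == head; }

struct parser { struct node validated; };

static int parser_init(struct parser *p, int num_chunks)
{
        list_init(&p->validated);       /* must happen before any early return */

        if (!num_chunks)
                return 0;               /* early out: the list head is still valid */

        /* ... fill p->validated ... */
        return 0;
}

static void parser_fini(struct parser *p)
{
        /* safe even on the early-return path only because the head was initialized;
         * with a garbage head this walk would dereference random pointers */
        while (!list_empty(&p->validated)) {
                struct node *n = p->validated.next;
                n->prev->next = n->next;
                n->next->prev = n->prev;
        }
}

int main(void)
{
        struct parser p;

        parser_init(&p, 0);
        parser_fini(&p);
        puts("ok");
        return 0;
}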
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index d81182ad53ec..97a904835759 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -694,6 +694,10 @@ int rs600_irq_set(struct radeon_device *rdev) | |||
694 | WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2); | 694 | WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2); |
695 | if (ASIC_IS_DCE2(rdev)) | 695 | if (ASIC_IS_DCE2(rdev)) |
696 | WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0); | 696 | WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0); |
697 | |||
698 | /* posting read */ | ||
699 | RREG32(R_000040_GEN_INT_CNTL); | ||
700 | |||
697 | return 0; | 701 | return 0; |
698 | } | 702 | } |
699 | 703 | ||
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index bcf516a8a2f1..e088e5558da0 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
@@ -6203,6 +6203,9 @@ int si_irq_set(struct radeon_device *rdev) | |||
6203 | 6203 | ||
6204 | WREG32(CG_THERMAL_INT, thermal_int); | 6204 | WREG32(CG_THERMAL_INT, thermal_int); |
6205 | 6205 | ||
6206 | /* posting read */ | ||
6207 | RREG32(SRBM_STATUS); | ||
6208 | |||
6206 | return 0; | 6209 | return 0; |
6207 | } | 6210 | } |
6208 | 6211 | ||
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index c27118cab16a..99a9835c9f61 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h | |||
@@ -912,8 +912,8 @@ | |||
912 | 912 | ||
913 | #define DCCG_AUDIO_DTO0_PHASE 0x05b0 | 913 | #define DCCG_AUDIO_DTO0_PHASE 0x05b0 |
914 | #define DCCG_AUDIO_DTO0_MODULE 0x05b4 | 914 | #define DCCG_AUDIO_DTO0_MODULE 0x05b4 |
915 | #define DCCG_AUDIO_DTO1_PHASE 0x05b8 | 915 | #define DCCG_AUDIO_DTO1_PHASE 0x05c0 |
916 | #define DCCG_AUDIO_DTO1_MODULE 0x05bc | 916 | #define DCCG_AUDIO_DTO1_MODULE 0x05c4 |
917 | 917 | ||
918 | #define AFMT_AUDIO_SRC_CONTROL 0x713c | 918 | #define AFMT_AUDIO_SRC_CONTROL 0x713c |
919 | #define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0) | 919 | #define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0) |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index d395b0bef73b..8d9b7de25613 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -74,7 +74,7 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type) | |||
74 | pr_err(" has_type: %d\n", man->has_type); | 74 | pr_err(" has_type: %d\n", man->has_type); |
75 | pr_err(" use_type: %d\n", man->use_type); | 75 | pr_err(" use_type: %d\n", man->use_type); |
76 | pr_err(" flags: 0x%08X\n", man->flags); | 76 | pr_err(" flags: 0x%08X\n", man->flags); |
77 | pr_err(" gpu_offset: 0x%08lX\n", man->gpu_offset); | 77 | pr_err(" gpu_offset: 0x%08llX\n", man->gpu_offset); |
78 | pr_err(" size: %llu\n", man->size); | 78 | pr_err(" size: %llu\n", man->size); |
79 | pr_err(" available_caching: 0x%08X\n", man->available_caching); | 79 | pr_err(" available_caching: 0x%08X\n", man->available_caching); |
80 | pr_err(" default_caching: 0x%08X\n", man->default_caching); | 80 | pr_err(" default_caching: 0x%08X\n", man->default_caching); |
diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c index b61d6be97602..3ddfb3d0b64d 100644 --- a/drivers/gpu/ipu-v3/ipu-di.c +++ b/drivers/gpu/ipu-v3/ipu-di.c | |||
@@ -459,6 +459,8 @@ static void ipu_di_config_clock(struct ipu_di *di, | |||
459 | 459 | ||
460 | clkrate = clk_get_rate(di->clk_ipu); | 460 | clkrate = clk_get_rate(di->clk_ipu); |
461 | div = DIV_ROUND_CLOSEST(clkrate, sig->mode.pixelclock); | 461 | div = DIV_ROUND_CLOSEST(clkrate, sig->mode.pixelclock); |
462 | if (div == 0) | ||
463 | div = 1; | ||
462 | rate = clkrate / div; | 464 | rate = clkrate / div; |
463 | 465 | ||
464 | error = rate / (sig->mode.pixelclock / 1000); | 466 | error = rate / (sig->mode.pixelclock / 1000); |
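The ipu-di guard exists because DIV_ROUND_CLOSEST() rounds down to zero whenever the requested pixel clock is more than twice the IPU clock, and the next line then divides by that result. A small demo of the failure case; the clock values are made up.

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
        unsigned long clkrate = 100000000UL;    /* 100 MHz IPU clock (example) */
        unsigned long pixelclock = 250000000UL; /* requested 250 MHz */
        unsigned long div = DIV_ROUND_CLOSEST(clkrate, pixelclock);

        printf("raw div = %lu\n", div);         /* 0: clkrate / div would divide by zero */
        if (div == 0)
                div = 1;
        printf("clamped rate = %lu Hz\n", clkrate / div);
        return 0;
}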
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h index a24addfdfcec..0de6290df4da 100644 --- a/include/drm/drm_mm.h +++ b/include/drm/drm_mm.h | |||
@@ -68,8 +68,8 @@ struct drm_mm_node { | |||
68 | unsigned scanned_preceeds_hole : 1; | 68 | unsigned scanned_preceeds_hole : 1; |
69 | unsigned allocated : 1; | 69 | unsigned allocated : 1; |
70 | unsigned long color; | 70 | unsigned long color; |
71 | unsigned long start; | 71 | u64 start; |
72 | unsigned long size; | 72 | u64 size; |
73 | struct drm_mm *mm; | 73 | struct drm_mm *mm; |
74 | }; | 74 | }; |
75 | 75 | ||
@@ -82,16 +82,16 @@ struct drm_mm { | |||
82 | unsigned int scan_check_range : 1; | 82 | unsigned int scan_check_range : 1; |
83 | unsigned scan_alignment; | 83 | unsigned scan_alignment; |
84 | unsigned long scan_color; | 84 | unsigned long scan_color; |
85 | unsigned long scan_size; | 85 | u64 scan_size; |
86 | unsigned long scan_hit_start; | 86 | u64 scan_hit_start; |
87 | unsigned long scan_hit_end; | 87 | u64 scan_hit_end; |
88 | unsigned scanned_blocks; | 88 | unsigned scanned_blocks; |
89 | unsigned long scan_start; | 89 | u64 scan_start; |
90 | unsigned long scan_end; | 90 | u64 scan_end; |
91 | struct drm_mm_node *prev_scanned_node; | 91 | struct drm_mm_node *prev_scanned_node; |
92 | 92 | ||
93 | void (*color_adjust)(struct drm_mm_node *node, unsigned long color, | 93 | void (*color_adjust)(struct drm_mm_node *node, unsigned long color, |
94 | unsigned long *start, unsigned long *end); | 94 | u64 *start, u64 *end); |
95 | }; | 95 | }; |
96 | 96 | ||
97 | /** | 97 | /** |
@@ -124,7 +124,7 @@ static inline bool drm_mm_initialized(struct drm_mm *mm) | |||
124 | return mm->hole_stack.next; | 124 | return mm->hole_stack.next; |
125 | } | 125 | } |
126 | 126 | ||
127 | static inline unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_node) | 127 | static inline u64 __drm_mm_hole_node_start(struct drm_mm_node *hole_node) |
128 | { | 128 | { |
129 | return hole_node->start + hole_node->size; | 129 | return hole_node->start + hole_node->size; |
130 | } | 130 | } |
@@ -140,13 +140,13 @@ static inline unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_no | |||
140 | * Returns: | 140 | * Returns: |
141 | * Start of the subsequent hole. | 141 | * Start of the subsequent hole. |
142 | */ | 142 | */ |
143 | static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node) | 143 | static inline u64 drm_mm_hole_node_start(struct drm_mm_node *hole_node) |
144 | { | 144 | { |
145 | BUG_ON(!hole_node->hole_follows); | 145 | BUG_ON(!hole_node->hole_follows); |
146 | return __drm_mm_hole_node_start(hole_node); | 146 | return __drm_mm_hole_node_start(hole_node); |
147 | } | 147 | } |
148 | 148 | ||
149 | static inline unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node) | 149 | static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node) |
150 | { | 150 | { |
151 | return list_entry(hole_node->node_list.next, | 151 | return list_entry(hole_node->node_list.next, |
152 | struct drm_mm_node, node_list)->start; | 152 | struct drm_mm_node, node_list)->start; |
@@ -163,7 +163,7 @@ static inline unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node | |||
163 | * Returns: | 163 | * Returns: |
164 | * End of the subsequent hole. | 164 | * End of the subsequent hole. |
165 | */ | 165 | */ |
166 | static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node) | 166 | static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node) |
167 | { | 167 | { |
168 | return __drm_mm_hole_node_end(hole_node); | 168 | return __drm_mm_hole_node_end(hole_node); |
169 | } | 169 | } |
@@ -222,7 +222,7 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node); | |||
222 | 222 | ||
223 | int drm_mm_insert_node_generic(struct drm_mm *mm, | 223 | int drm_mm_insert_node_generic(struct drm_mm *mm, |
224 | struct drm_mm_node *node, | 224 | struct drm_mm_node *node, |
225 | unsigned long size, | 225 | u64 size, |
226 | unsigned alignment, | 226 | unsigned alignment, |
227 | unsigned long color, | 227 | unsigned long color, |
228 | enum drm_mm_search_flags sflags, | 228 | enum drm_mm_search_flags sflags, |
@@ -245,7 +245,7 @@ int drm_mm_insert_node_generic(struct drm_mm *mm, | |||
245 | */ | 245 | */ |
246 | static inline int drm_mm_insert_node(struct drm_mm *mm, | 246 | static inline int drm_mm_insert_node(struct drm_mm *mm, |
247 | struct drm_mm_node *node, | 247 | struct drm_mm_node *node, |
248 | unsigned long size, | 248 | u64 size, |
249 | unsigned alignment, | 249 | unsigned alignment, |
250 | enum drm_mm_search_flags flags) | 250 | enum drm_mm_search_flags flags) |
251 | { | 251 | { |
@@ -255,11 +255,11 @@ static inline int drm_mm_insert_node(struct drm_mm *mm, | |||
255 | 255 | ||
256 | int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, | 256 | int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, |
257 | struct drm_mm_node *node, | 257 | struct drm_mm_node *node, |
258 | unsigned long size, | 258 | u64 size, |
259 | unsigned alignment, | 259 | unsigned alignment, |
260 | unsigned long color, | 260 | unsigned long color, |
261 | unsigned long start, | 261 | u64 start, |
262 | unsigned long end, | 262 | u64 end, |
263 | enum drm_mm_search_flags sflags, | 263 | enum drm_mm_search_flags sflags, |
264 | enum drm_mm_allocator_flags aflags); | 264 | enum drm_mm_allocator_flags aflags); |
265 | /** | 265 | /** |
@@ -282,10 +282,10 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, | |||
282 | */ | 282 | */ |
283 | static inline int drm_mm_insert_node_in_range(struct drm_mm *mm, | 283 | static inline int drm_mm_insert_node_in_range(struct drm_mm *mm, |
284 | struct drm_mm_node *node, | 284 | struct drm_mm_node *node, |
285 | unsigned long size, | 285 | u64 size, |
286 | unsigned alignment, | 286 | unsigned alignment, |
287 | unsigned long start, | 287 | u64 start, |
288 | unsigned long end, | 288 | u64 end, |
289 | enum drm_mm_search_flags flags) | 289 | enum drm_mm_search_flags flags) |
290 | { | 290 | { |
291 | return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, | 291 | return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, |
@@ -296,21 +296,21 @@ static inline int drm_mm_insert_node_in_range(struct drm_mm *mm, | |||
296 | void drm_mm_remove_node(struct drm_mm_node *node); | 296 | void drm_mm_remove_node(struct drm_mm_node *node); |
297 | void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new); | 297 | void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new); |
298 | void drm_mm_init(struct drm_mm *mm, | 298 | void drm_mm_init(struct drm_mm *mm, |
299 | unsigned long start, | 299 | u64 start, |
300 | unsigned long size); | 300 | u64 size); |
301 | void drm_mm_takedown(struct drm_mm *mm); | 301 | void drm_mm_takedown(struct drm_mm *mm); |
302 | bool drm_mm_clean(struct drm_mm *mm); | 302 | bool drm_mm_clean(struct drm_mm *mm); |
303 | 303 | ||
304 | void drm_mm_init_scan(struct drm_mm *mm, | 304 | void drm_mm_init_scan(struct drm_mm *mm, |
305 | unsigned long size, | 305 | u64 size, |
306 | unsigned alignment, | 306 | unsigned alignment, |
307 | unsigned long color); | 307 | unsigned long color); |
308 | void drm_mm_init_scan_with_range(struct drm_mm *mm, | 308 | void drm_mm_init_scan_with_range(struct drm_mm *mm, |
309 | unsigned long size, | 309 | u64 size, |
310 | unsigned alignment, | 310 | unsigned alignment, |
311 | unsigned long color, | 311 | unsigned long color, |
312 | unsigned long start, | 312 | u64 start, |
313 | unsigned long end); | 313 | u64 end); |
314 | bool drm_mm_scan_add_block(struct drm_mm_node *node); | 314 | bool drm_mm_scan_add_block(struct drm_mm_node *node); |
315 | bool drm_mm_scan_remove_block(struct drm_mm_node *node); | 315 | bool drm_mm_scan_remove_block(struct drm_mm_node *node); |
316 | 316 | ||
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 0ccf7f267ff9..c768ddfbe53c 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h | |||
@@ -249,7 +249,7 @@ struct ttm_buffer_object { | |||
249 | * either of these locks held. | 249 | * either of these locks held. |
250 | */ | 250 | */ |
251 | 251 | ||
252 | unsigned long offset; | 252 | uint64_t offset; /* GPU address space is independent of CPU word size */ |
253 | uint32_t cur_placement; | 253 | uint32_t cur_placement; |
254 | 254 | ||
255 | struct sg_table *sg; | 255 | struct sg_table *sg; |
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 142d752fc450..813042cede57 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h | |||
@@ -277,7 +277,7 @@ struct ttm_mem_type_manager { | |||
277 | bool has_type; | 277 | bool has_type; |
278 | bool use_type; | 278 | bool use_type; |
279 | uint32_t flags; | 279 | uint32_t flags; |
280 | unsigned long gpu_offset; | 280 | uint64_t gpu_offset; /* GPU address space is independent of CPU word size */ |
281 | uint64_t size; | 281 | uint64_t size; |
282 | uint32_t available_caching; | 282 | uint32_t available_caching; |
283 | uint32_t default_caching; | 283 | uint32_t default_caching; |
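The drm_mm and ttm hunks above all follow from the same observation the pull message makes: unsigned long is 32 bits on 32-bit kernels, so GPU addresses at or beyond 4 GiB silently truncate unless a fixed 64-bit type is used. A small demonstration of that truncation; the offset value is illustrative, and you need a 32-bit build (e.g. -m32) to see the narrow case.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t gpu_offset = 0x140000000ULL;         /* 5 GiB into the GPU's space */
        unsigned long as_ulong = (unsigned long)gpu_offset;

        printf("sizeof(unsigned long) = %zu bytes\n", sizeof(unsigned long));
        printf("u64 offset       = 0x%llx\n", (unsigned long long)gpu_offset);
        printf("as unsigned long = 0x%lx\n", as_ulong); /* 0x40000000 when long is 32-bit */
        return 0;
}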