author	Eric Bernstein <eric.bernstein@amd.com>	2018-04-05 17:09:20 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2018-05-15 14:43:25 -0400
commit	5ebfb7a5996ea1dceeb2a392d7e46357042e4506 (patch)
tree	9c01d56d76ee65df98c2c163ad8f897476e0729a /drivers/gpu
parent	c4b0faae71f33377a11fe19dadcce6deb86f5037 (diff)
drm/amd/display: Move DCC support functions into dchubbub
Added dchubbub.h header file for common enum/struct definitions.
Added new interface functions get_dcc_compression_cap,
dcc_support_swizzle and dcc_support_pixel_format.

Signed-off-by: Eric Bernstein <eric.bernstein@amd.com>
Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--	drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c	221
-rw-r--r--	drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h	7
-rw-r--r--	drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c	231
-rw-r--r--	drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h	64
4 files changed, 291 insertions(+), 232 deletions(-)
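
Before the diff itself, a minimal standalone C sketch of the indirection this commit introduces: DCC capability queries go through a per-hubbub function-pointer table instead of file-local helpers in dcn10_resource.c. The struct layouts and the fake_dcn10_* names below are simplified stand-ins for illustration only, not the real DC types; the real wiring appears in the hunks that follow (hubbub1_funcs gains three new hooks and dcn10_get_dcc_compression_cap becomes a thin forwarder).

/* Minimal sketch of the hubbub_funcs vtable pattern (stand-in types). */
#include <stdbool.h>
#include <stdio.h>

struct dcc_cap {                     /* stand-in for dc_surface_dcc_cap */
	bool capable;
	unsigned int max_uncompressed_blk_size;
};

struct hubbub;                       /* forward declaration, as in the driver */

struct hubbub_funcs {                /* mirrors the shape of the new dchubbub.h table */
	bool (*get_dcc_compression_cap)(struct hubbub *hubbub,
					unsigned int bytes_per_element,
					struct dcc_cap *output);
};

struct hubbub {
	const struct hubbub_funcs *funcs;
};

/* A per-ASIC implementation would live next to the hardware code. */
static bool fake_dcn10_get_dcc_cap(struct hubbub *hubbub,
				   unsigned int bytes_per_element,
				   struct dcc_cap *output)
{
	(void)hubbub;
	output->capable = (bytes_per_element == 4 || bytes_per_element == 8);
	output->max_uncompressed_blk_size = 256;
	return output->capable;
}

static const struct hubbub_funcs fake_dcn10_funcs = {
	.get_dcc_compression_cap = fake_dcn10_get_dcc_cap,
};

int main(void)
{
	struct hubbub hubbub = { .funcs = &fake_dcn10_funcs };
	struct dcc_cap cap = { 0 };

	/* Callers dispatch through funcs, as the reworked
	 * dcn10_get_dcc_compression_cap() now does. */
	if (hubbub.funcs->get_dcc_compression_cap(&hubbub, 4, &cap))
		printf("DCC capable, max uncompressed block %u bytes\n",
		       cap.max_uncompressed_blk_size);
	return 0;
}
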
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index 738f67ffd1b4..b9fb14a3224b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -476,8 +476,227 @@ void hubbub1_toggle_watermark_change_req(struct hubbub *hubbub)
 			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, watermark_change_req);
 }
 
+static bool hubbub1_dcc_support_swizzle(
+		enum swizzle_mode_values swizzle,
+		unsigned int bytes_per_element,
+		enum segment_order *segment_order_horz,
+		enum segment_order *segment_order_vert)
+{
+	bool standard_swizzle = false;
+	bool display_swizzle = false;
+
+	switch (swizzle) {
+	case DC_SW_4KB_S:
+	case DC_SW_64KB_S:
+	case DC_SW_VAR_S:
+	case DC_SW_4KB_S_X:
+	case DC_SW_64KB_S_X:
+	case DC_SW_VAR_S_X:
+		standard_swizzle = true;
+		break;
+	case DC_SW_4KB_D:
+	case DC_SW_64KB_D:
+	case DC_SW_VAR_D:
+	case DC_SW_4KB_D_X:
+	case DC_SW_64KB_D_X:
+	case DC_SW_VAR_D_X:
+		display_swizzle = true;
+		break;
+	default:
+		break;
+	}
+
+	if (bytes_per_element == 1 && standard_swizzle) {
+		*segment_order_horz = segment_order__contiguous;
+		*segment_order_vert = segment_order__na;
+		return true;
+	}
+	if (bytes_per_element == 2 && standard_swizzle) {
+		*segment_order_horz = segment_order__non_contiguous;
+		*segment_order_vert = segment_order__contiguous;
+		return true;
+	}
+	if (bytes_per_element == 4 && standard_swizzle) {
+		*segment_order_horz = segment_order__non_contiguous;
+		*segment_order_vert = segment_order__contiguous;
+		return true;
+	}
+	if (bytes_per_element == 8 && standard_swizzle) {
+		*segment_order_horz = segment_order__na;
+		*segment_order_vert = segment_order__contiguous;
+		return true;
+	}
+	if (bytes_per_element == 8 && display_swizzle) {
+		*segment_order_horz = segment_order__contiguous;
+		*segment_order_vert = segment_order__non_contiguous;
+		return true;
+	}
+
+	return false;
+}
+
+static bool hubbub1_dcc_support_pixel_format(
+		enum surface_pixel_format format,
+		unsigned int *bytes_per_element)
+{
+	/* DML: get_bytes_per_element */
+	switch (format) {
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+		*bytes_per_element = 2;
+		return true;
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+		*bytes_per_element = 4;
+		return true;
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+		*bytes_per_element = 8;
+		return true;
+	default:
+		return false;
+	}
+}
+
+static void hubbub1_get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
+		unsigned int bytes_per_element)
+{
+	/* copied from DML. might want to refactor DML to leverage from DML */
+	/* DML : get_blk256_size */
+	if (bytes_per_element == 1) {
+		*blk256_width = 16;
+		*blk256_height = 16;
+	} else if (bytes_per_element == 2) {
+		*blk256_width = 16;
+		*blk256_height = 8;
+	} else if (bytes_per_element == 4) {
+		*blk256_width = 8;
+		*blk256_height = 8;
+	} else if (bytes_per_element == 8) {
+		*blk256_width = 8;
+		*blk256_height = 4;
+	}
+}
+
+static void hubbub1_det_request_size(
+		unsigned int height,
+		unsigned int width,
+		unsigned int bpe,
+		bool *req128_horz_wc,
+		bool *req128_vert_wc)
+{
+	unsigned int detile_buf_size = 164 * 1024; /* 164KB for DCN1.0 */
+
+	unsigned int blk256_height = 0;
+	unsigned int blk256_width = 0;
+	unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;
+
+	hubbub1_get_blk256_size(&blk256_width, &blk256_height, bpe);
+
+	swath_bytes_horz_wc = height * blk256_height * bpe;
+	swath_bytes_vert_wc = width * blk256_width * bpe;
+
+	*req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
+			false : /* full 256B request */
+			true; /* half 128b request */
+
+	*req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ?
+			false : /* full 256B request */
+			true; /* half 128b request */
+}
+
+static bool hubbub1_get_dcc_compression_cap(struct hubbub *hubbub,
+		const struct dc_dcc_surface_param *input,
+		struct dc_surface_dcc_cap *output)
+{
+	struct dc *dc = hubbub->ctx->dc;
+	/* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */
+	enum dcc_control dcc_control;
+	unsigned int bpe;
+	enum segment_order segment_order_horz, segment_order_vert;
+	bool req128_horz_wc, req128_vert_wc;
+
+	memset(output, 0, sizeof(*output));
+
+	if (dc->debug.disable_dcc == DCC_DISABLE)
+		return false;
+
+	if (!hubbub->funcs->dcc_support_pixel_format(input->format, &bpe))
+		return false;
+
+	if (!hubbub->funcs->dcc_support_swizzle(input->swizzle_mode, bpe,
+			&segment_order_horz, &segment_order_vert))
+		return false;
+
+	hubbub1_det_request_size(input->surface_size.height, input->surface_size.width,
+			bpe, &req128_horz_wc, &req128_vert_wc);
+
+	if (!req128_horz_wc && !req128_vert_wc) {
+		dcc_control = dcc_control__256_256_xxx;
+	} else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
+		if (!req128_horz_wc)
+			dcc_control = dcc_control__256_256_xxx;
+		else if (segment_order_horz == segment_order__contiguous)
+			dcc_control = dcc_control__128_128_xxx;
+		else
+			dcc_control = dcc_control__256_64_64;
+	} else if (input->scan == SCAN_DIRECTION_VERTICAL) {
+		if (!req128_vert_wc)
+			dcc_control = dcc_control__256_256_xxx;
+		else if (segment_order_vert == segment_order__contiguous)
+			dcc_control = dcc_control__128_128_xxx;
+		else
+			dcc_control = dcc_control__256_64_64;
+	} else {
+		if ((req128_horz_wc &&
+			segment_order_horz == segment_order__non_contiguous) ||
+			(req128_vert_wc &&
+			segment_order_vert == segment_order__non_contiguous))
+			/* access_dir not known, must use most constraining */
+			dcc_control = dcc_control__256_64_64;
+		else
+			/* reg128 is true for either horz and vert
+			 * but segment_order is contiguous
+			 */
+			dcc_control = dcc_control__128_128_xxx;
+	}
+
+	if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
+		dcc_control != dcc_control__256_256_xxx)
+		return false;
+
+	switch (dcc_control) {
+	case dcc_control__256_256_xxx:
+		output->grph.rgb.max_uncompressed_blk_size = 256;
+		output->grph.rgb.max_compressed_blk_size = 256;
+		output->grph.rgb.independent_64b_blks = false;
+		break;
+	case dcc_control__128_128_xxx:
+		output->grph.rgb.max_uncompressed_blk_size = 128;
+		output->grph.rgb.max_compressed_blk_size = 128;
+		output->grph.rgb.independent_64b_blks = false;
+		break;
+	case dcc_control__256_64_64:
+		output->grph.rgb.max_uncompressed_blk_size = 256;
+		output->grph.rgb.max_compressed_blk_size = 64;
+		output->grph.rgb.independent_64b_blks = true;
+		break;
+	}
+
+	output->capable = true;
+	output->const_color_support = false;
+
+	return true;
+}
+
 static const struct hubbub_funcs hubbub1_funcs = {
-	.update_dchub = hubbub1_update_dchub
+	.update_dchub = hubbub1_update_dchub,
+	.dcc_support_swizzle = hubbub1_dcc_support_swizzle,
+	.dcc_support_pixel_format = hubbub1_dcc_support_pixel_format,
+	.get_dcc_compression_cap = hubbub1_get_dcc_compression_cap,
 };
 
 void hubbub1_construct(struct hubbub *hubbub,
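
To make the request-size decision in the hunk above concrete, here is a standalone sketch that repeats the hubbub1_det_request_size() arithmetic for a hypothetical 1920x1080 surface at 4 bytes per element; the surface dimensions are an illustrative assumption, not values taken from the driver.

/* Standalone re-statement of the det_request_size() math from the hunk above. */
#include <stdbool.h>
#include <stdio.h>

static void get_blk256_size(unsigned int *blk256_width,
			    unsigned int *blk256_height,
			    unsigned int bytes_per_element)
{
	/* Same lookup table as hubbub1_get_blk256_size(). */
	if (bytes_per_element == 1) {
		*blk256_width = 16;
		*blk256_height = 16;
	} else if (bytes_per_element == 2) {
		*blk256_width = 16;
		*blk256_height = 8;
	} else if (bytes_per_element == 4) {
		*blk256_width = 8;
		*blk256_height = 8;
	} else if (bytes_per_element == 8) {
		*blk256_width = 8;
		*blk256_height = 4;
	}
}

int main(void)
{
	const unsigned int detile_buf_size = 164 * 1024;	/* DCN1.0 detile buffer */
	const unsigned int height = 1080, width = 1920, bpe = 4; /* assumed surface */
	unsigned int blk256_width = 0, blk256_height = 0;

	get_blk256_size(&blk256_width, &blk256_height, bpe);

	unsigned int swath_bytes_horz_wc = height * blk256_height * bpe;
	unsigned int swath_bytes_vert_wc = width * blk256_width * bpe;

	/* Half-sized 128B requests are only needed when two swaths no longer
	 * fit in the detile buffer. */
	bool req128_horz_wc = !(2 * swath_bytes_horz_wc <= detile_buf_size);
	bool req128_vert_wc = !(2 * swath_bytes_vert_wc <= detile_buf_size);

	printf("horz swath %u B, vert swath %u B -> req128 horz=%d vert=%d\n",
	       swath_bytes_horz_wc, swath_bytes_vert_wc,
	       req128_horz_wc, req128_vert_wc);
	return 0;
}

For this assumed surface the swaths are 34560 B and 61440 B, both of which fit twice into the 164 KB detile buffer, so neither direction needs half-sized requests and hubbub1_get_dcc_compression_cap() would select dcc_control__256_256_xxx.
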
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
index a16e908821a0..f479f54e5bb2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
@@ -27,6 +27,7 @@
 #define __DC_HUBBUB_DCN10_H__
 
 #include "core_types.h"
+#include "dchubbub.h"
 
 #define HUBHUB_REG_LIST_DCN()\
 	SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A),\
@@ -173,12 +174,6 @@ struct dcn_hubbub_wm {
 	struct dcn_hubbub_wm_set sets[4];
 };
 
-struct hubbub_funcs {
-	void (*update_dchub)(
-		struct hubbub *hubbub,
-		struct dchub_init_data *dh_data);
-};
-
 struct hubbub {
 	const struct hubbub_funcs *funcs;
 	struct dc_context *ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index f305f65675d8..2c0a3150bf2d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -937,235 +937,16 @@ static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer(
 	return idle_pipe;
 }
 
-enum dcc_control {
-	dcc_control__256_256_xxx,
-	dcc_control__128_128_xxx,
-	dcc_control__256_64_64,
-};
-
-enum segment_order {
-	segment_order__na,
-	segment_order__contiguous,
-	segment_order__non_contiguous,
-};
-
-static bool dcc_support_pixel_format(
-	enum surface_pixel_format format,
-	unsigned int *bytes_per_element)
-{
-	/* DML: get_bytes_per_element */
-	switch (format) {
-	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
-	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
-		*bytes_per_element = 2;
-		return true;
-	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
-	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
-	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
-	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
-		*bytes_per_element = 4;
-		return true;
-	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
-	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
-	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
-		*bytes_per_element = 8;
-		return true;
-	default:
-		return false;
-	}
-}
-
-static bool dcc_support_swizzle(
-	enum swizzle_mode_values swizzle,
-	unsigned int bytes_per_element,
-	enum segment_order *segment_order_horz,
-	enum segment_order *segment_order_vert)
-{
-	bool standard_swizzle = false;
-	bool display_swizzle = false;
-
-	switch (swizzle) {
-	case DC_SW_4KB_S:
-	case DC_SW_64KB_S:
-	case DC_SW_VAR_S:
-	case DC_SW_4KB_S_X:
-	case DC_SW_64KB_S_X:
-	case DC_SW_VAR_S_X:
-		standard_swizzle = true;
-		break;
-	case DC_SW_4KB_D:
-	case DC_SW_64KB_D:
-	case DC_SW_VAR_D:
-	case DC_SW_4KB_D_X:
-	case DC_SW_64KB_D_X:
-	case DC_SW_VAR_D_X:
-		display_swizzle = true;
-		break;
-	default:
-		break;
-	}
-
-	if (bytes_per_element == 1 && standard_swizzle) {
-		*segment_order_horz = segment_order__contiguous;
-		*segment_order_vert = segment_order__na;
-		return true;
-	}
-	if (bytes_per_element == 2 && standard_swizzle) {
-		*segment_order_horz = segment_order__non_contiguous;
-		*segment_order_vert = segment_order__contiguous;
-		return true;
-	}
-	if (bytes_per_element == 4 && standard_swizzle) {
-		*segment_order_horz = segment_order__non_contiguous;
-		*segment_order_vert = segment_order__contiguous;
-		return true;
-	}
-	if (bytes_per_element == 8 && standard_swizzle) {
-		*segment_order_horz = segment_order__na;
-		*segment_order_vert = segment_order__contiguous;
-		return true;
-	}
-	if (bytes_per_element == 8 && display_swizzle) {
-		*segment_order_horz = segment_order__contiguous;
-		*segment_order_vert = segment_order__non_contiguous;
-		return true;
-	}
-
-	return false;
-}
-
-static void get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
-	unsigned int bytes_per_element)
-{
-	/* copied from DML. might want to refactor DML to leverage from DML */
-	/* DML : get_blk256_size */
-	if (bytes_per_element == 1) {
-		*blk256_width = 16;
-		*blk256_height = 16;
-	} else if (bytes_per_element == 2) {
-		*blk256_width = 16;
-		*blk256_height = 8;
-	} else if (bytes_per_element == 4) {
-		*blk256_width = 8;
-		*blk256_height = 8;
-	} else if (bytes_per_element == 8) {
-		*blk256_width = 8;
-		*blk256_height = 4;
-	}
-}
-
-static void det_request_size(
-	unsigned int height,
-	unsigned int width,
-	unsigned int bpe,
-	bool *req128_horz_wc,
-	bool *req128_vert_wc)
-{
-	unsigned int detile_buf_size = 164 * 1024; /* 164KB for DCN1.0 */
-
-	unsigned int blk256_height = 0;
-	unsigned int blk256_width = 0;
-	unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;
-
-	get_blk256_size(&blk256_width, &blk256_height, bpe);
-
-	swath_bytes_horz_wc = height * blk256_height * bpe;
-	swath_bytes_vert_wc = width * blk256_width * bpe;
-
-	*req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
-		false : /* full 256B request */
-		true; /* half 128b request */
-
-	*req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ?
-		false : /* full 256B request */
-		true; /* half 128b request */
-}
-
-static bool get_dcc_compression_cap(const struct dc *dc,
+static bool dcn10_get_dcc_compression_cap(const struct dc *dc,
 		const struct dc_dcc_surface_param *input,
 		struct dc_surface_dcc_cap *output)
 {
-	/* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */
-	enum dcc_control dcc_control;
-	unsigned int bpe;
-	enum segment_order segment_order_horz, segment_order_vert;
-	bool req128_horz_wc, req128_vert_wc;
-
-	memset(output, 0, sizeof(*output));
-
-	if (dc->debug.disable_dcc == DCC_DISABLE)
-		return false;
-
-	if (!dcc_support_pixel_format(input->format,
-		&bpe))
-		return false;
-
-	if (!dcc_support_swizzle(input->swizzle_mode, bpe,
-		&segment_order_horz, &segment_order_vert))
-		return false;
-
-	det_request_size(input->surface_size.height, input->surface_size.width,
-		bpe, &req128_horz_wc, &req128_vert_wc);
-
-	if (!req128_horz_wc && !req128_vert_wc) {
-		dcc_control = dcc_control__256_256_xxx;
-	} else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
-		if (!req128_horz_wc)
-			dcc_control = dcc_control__256_256_xxx;
-		else if (segment_order_horz == segment_order__contiguous)
-			dcc_control = dcc_control__128_128_xxx;
-		else
-			dcc_control = dcc_control__256_64_64;
-	} else if (input->scan == SCAN_DIRECTION_VERTICAL) {
-		if (!req128_vert_wc)
-			dcc_control = dcc_control__256_256_xxx;
-		else if (segment_order_vert == segment_order__contiguous)
-			dcc_control = dcc_control__128_128_xxx;
-		else
-			dcc_control = dcc_control__256_64_64;
-	} else {
-		if ((req128_horz_wc &&
-			segment_order_horz == segment_order__non_contiguous) ||
-			(req128_vert_wc &&
-			segment_order_vert == segment_order__non_contiguous))
-			/* access_dir not known, must use most constraining */
-			dcc_control = dcc_control__256_64_64;
-		else
-			/* reg128 is true for either horz and vert
-			 * but segment_order is contiguous
-			 */
-			dcc_control = dcc_control__128_128_xxx;
-	}
-
-	if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
-		dcc_control != dcc_control__256_256_xxx)
-		return false;
-
-	switch (dcc_control) {
-	case dcc_control__256_256_xxx:
-		output->grph.rgb.max_uncompressed_blk_size = 256;
-		output->grph.rgb.max_compressed_blk_size = 256;
-		output->grph.rgb.independent_64b_blks = false;
-		break;
-	case dcc_control__128_128_xxx:
-		output->grph.rgb.max_uncompressed_blk_size = 128;
-		output->grph.rgb.max_compressed_blk_size = 128;
-		output->grph.rgb.independent_64b_blks = false;
-		break;
-	case dcc_control__256_64_64:
-		output->grph.rgb.max_uncompressed_blk_size = 256;
-		output->grph.rgb.max_compressed_blk_size = 64;
-		output->grph.rgb.independent_64b_blks = true;
-		break;
-	}
-
-	output->capable = true;
-	output->const_color_support = false;
-
-	return true;
+	return dc->res_pool->hubbub->funcs->get_dcc_compression_cap(
+		dc->res_pool->hubbub,
+		input,
+		output);
 }
 
-
 static void dcn10_destroy_resource_pool(struct resource_pool **pool)
 {
 	struct dcn10_resource_pool *dcn10_pool = TO_DCN10_RES_POOL(*pool);
@@ -1186,7 +967,7 @@ static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_st
 }
 
 static struct dc_cap_funcs cap_funcs = {
-	.get_dcc_compression_cap = get_dcc_compression_cap
+	.get_dcc_compression_cap = dcn10_get_dcc_compression_cap
 };
 
 static struct resource_funcs dcn10_res_pool_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
new file mode 100644
index 000000000000..02f757dd70d4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DCHUBBUB_H__
+#define __DAL_DCHUBBUB_H__
+
+
+enum dcc_control {
+	dcc_control__256_256_xxx,
+	dcc_control__128_128_xxx,
+	dcc_control__256_64_64,
+};
+
+enum segment_order {
+	segment_order__na,
+	segment_order__contiguous,
+	segment_order__non_contiguous,
+};
+
+
+struct hubbub_funcs {
+	void (*update_dchub)(
+			struct hubbub *hubbub,
+			struct dchub_init_data *dh_data);
+
+	bool (*get_dcc_compression_cap)(struct hubbub *hubbub,
+			const struct dc_dcc_surface_param *input,
+			struct dc_surface_dcc_cap *output);
+
+	bool (*dcc_support_swizzle)(
+			enum swizzle_mode_values swizzle,
+			unsigned int bytes_per_element,
+			enum segment_order *segment_order_horz,
+			enum segment_order *segment_order_vert);
+
+	bool (*dcc_support_pixel_format)(
+			enum surface_pixel_format format,
+			unsigned int *bytes_per_element);
+};
+
+
+#endif
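
To close, a standalone sketch of the swizzle classification that produces the segment_order values declared in this new header. It condenses the mapping from hubbub1_dcc_support_swizzle() above; the local enum and helper names (SWIZZLE_STANDARD, classify_swizzle, order_name) are stand-ins so the example compiles outside the driver.

/* Condensed restatement of the swizzle -> segment_order mapping (stand-in names). */
#include <stdbool.h>
#include <stdio.h>

enum segment_order {
	segment_order__na,
	segment_order__contiguous,
	segment_order__non_contiguous,
};

/* Stand-in for the DC_SW_*_S and DC_SW_*_D swizzle groups. */
enum swizzle_kind { SWIZZLE_STANDARD, SWIZZLE_DISPLAY };

static const char *const order_name[] = { "n/a", "contiguous", "non-contiguous" };

static bool classify_swizzle(enum swizzle_kind kind,
			     unsigned int bytes_per_element,
			     enum segment_order *horz,
			     enum segment_order *vert)
{
	/* Same bytes-per-element table as the driver function. */
	if (kind == SWIZZLE_STANDARD) {
		switch (bytes_per_element) {
		case 1:
			*horz = segment_order__contiguous;
			*vert = segment_order__na;
			return true;
		case 2:
		case 4:
			*horz = segment_order__non_contiguous;
			*vert = segment_order__contiguous;
			return true;
		case 8:
			*horz = segment_order__na;
			*vert = segment_order__contiguous;
			return true;
		}
	} else if (kind == SWIZZLE_DISPLAY && bytes_per_element == 8) {
		*horz = segment_order__contiguous;
		*vert = segment_order__non_contiguous;
		return true;
	}
	return false;
}

int main(void)
{
	enum segment_order horz, vert;

	/* 4 bytes per element, standard swizzle: horizontal order is
	 * non-contiguous, so a horizontal scan that still needs 128-byte
	 * requests falls back to dcc_control__256_64_64. */
	if (classify_swizzle(SWIZZLE_STANDARD, 4, &horz, &vert))
		printf("horz %s, vert %s\n", order_name[horz], order_name[vert]);
	return 0;
}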