author    Sami Kiminki <skiminki@nvidia.com>    2015-08-18 11:16:46 -0400
committer Deepak Nibade <dnibade@nvidia.com>   2016-12-27 04:52:07 -0500
commit    48cbfac597021ce163d4df997cdbff2f2a73eb88 (patch)
tree      7b329a1934cd8428dfd4e02df43c1ce22bada072 /drivers/gpu/nvgpu/gp10b/cde_gp10b.c
parent    5df1bc28b3708427db3f3dabe78ea864ff085183 (diff)
gpu: nvgpu: Add CDE scatter buffer code for GP10B
Add GP10B-specific code for populating the scatter buffer. Essentially, this enables the use of SMMU bypass mode with 4-kB page compression.

Bug 1604102

Change-Id: Ic586e2f93827b9aa1c7b73b53b8f65d518588c26
Signed-off-by: Sami Kiminki <skiminki@nvidia.com>
Reviewed-on: http://git-master/r/789434
Reviewed-on: http://git-master/r/806184
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>
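The scatter buffer added by this change carries one bit per 4-kB page of the compressible surface, so its required size is the page count rounded up to whole bytes. A minimal sketch of that sizing rule in C; the helper name scatter_buffer_bytes is illustrative and not part of this commit:

#include <stddef.h>

/* Illustrative helper (not in the commit): bytes needed for a GP10B
 * scatter buffer. One scatter bit is stored per 4-kB page, eight bits
 * per byte, so a surface of surface_size bytes needs ceil(pages / 8)
 * bytes; e.g. a 64 MB surface has 16384 pages -> 2048 bytes. */
static size_t scatter_buffer_bytes(size_t surface_size)
{
	const size_t page_size = 4096;
	size_t pages = (surface_size + page_size - 1) / page_size;

	return (pages + 7) / 8;
}

Note that gp10b_populate_scatter_buffer() in the diff below checks (pages_left >> 3) > scatter_buffer_size, i.e. it rounds the byte count down in the check and writes any trailing partial byte separately after the main loop.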
Diffstat (limited to 'drivers/gpu/nvgpu/gp10b/cde_gp10b.c')
-rw-r--r--    drivers/gpu/nvgpu/gp10b/cde_gp10b.c    84
1 file changed, 84 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/gp10b/cde_gp10b.c b/drivers/gpu/nvgpu/gp10b/cde_gp10b.c
index acb8aee3..dadec4d2 100644
--- a/drivers/gpu/nvgpu/gp10b/cde_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/cde_gp10b.c
@@ -58,7 +58,91 @@ static void gp10b_cde_get_program_numbers(struct gk20a *g,
 	*vprog_out = vprog;
 }
 
+static bool gp10b_need_scatter_buffer(struct gk20a *g)
+{
+	return g->mm.bypass_smmu;
+}
+
+static u8 parity(u32 a)
+{
+	a ^= a >> 16u;
+	a ^= a >> 8u;
+	a ^= a >> 4u;
+	a &= 0xfu;
+	return (0x6996u >> a) & 1u;
+}
+
+static int gp10b_populate_scatter_buffer(struct gk20a *g,
+					 struct sg_table *sgt,
+					 size_t surface_size,
+					 void *scatter_buffer_ptr,
+					 size_t scatter_buffer_size)
+{
+	/* map scatter buffer to CPU VA and fill it */
+	const u32 page_size_log2 = 12;
+	const u32 page_size = 1 << page_size_log2;
+	const u32 page_size_shift = page_size_log2 - 7u;
+
+	/* 0011 1111 1111 1111 1111 1110 0100 1000 */
+	const u32 getSliceMaskGP10B = 0x3ffffe48;
+	u8 *scatter_buffer = scatter_buffer_ptr;
+
+	size_t i;
+	struct scatterlist *sg = NULL;
+	u8 d = 0;
+	size_t page = 0;
+	size_t pages_left;
+
+	surface_size = round_up(surface_size, page_size);
+
+	pages_left = surface_size >> page_size_log2;
+	if ((pages_left >> 3) > scatter_buffer_size)
+		return -ENOMEM;
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		unsigned int j;
+		u64 surf_pa = sg_phys(sg);
+		unsigned int n = (unsigned int)(sg->length >> page_size_log2);
+
+		gk20a_dbg(gpu_dbg_cde, "surfPA=0x%llx + %d pages", surf_pa, n);
+
+		for (j = 0; j < n && pages_left > 0; j++, surf_pa += page_size) {
+			u32 addr = (((u32)(surf_pa >> 7)) & getSliceMaskGP10B) >> page_size_shift;
+			u8 scatter_bit = parity(addr);
+			u8 bit = page & 7;
+
+			d |= scatter_bit << bit;
+			if (bit == 7) {
+				scatter_buffer[page >> 3] = d;
+				d = 0;
+			}
+
+			++page;
+			--pages_left;
+		}
+
+		if (pages_left == 0)
+			break;
+	}
+
+	/* write the last byte in case the number of pages is not divisible by 8 */
+	if ((page & 7) != 0)
+		scatter_buffer[page >> 3] = d;
+
+#if defined(GK20A_DEBUG)
+	if (unlikely(gpu_dbg_cde & gk20a_dbg_mask)) {
+		gk20a_dbg(gpu_dbg_cde, "scatterBuffer content:");
+		for (i = 0; i < page >> 3; i++) {
+			gk20a_dbg(gpu_dbg_cde, " %x", scatter_buffer[i]);
+		}
+	}
+#endif
+	return 0;
+}
+
 void gp10b_init_cde_ops(struct gpu_ops *gops)
 {
 	gops->cde.get_program_numbers = gp10b_cde_get_program_numbers;
+	gops->cde.need_scatter_buffer = gp10b_need_scatter_buffer;
+	gops->cde.populate_scatter_buffer = gp10b_populate_scatter_buffer;
 }
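The parity() helper in this change uses the classic XOR-folding trick: each shift-and-XOR halves the number of significant bits until only a nibble remains, and the constant 0x6996 (binary 0110 1001 1001 0110) acts as a 16-entry lookup table whose bit i holds the parity of i. A standalone sketch of the same technique with a brute-force cross-check; the test harness is illustrative, relies on the GCC/Clang builtin __builtin_popcount, and is not part of the commit:

#include <stdint.h>
#include <stdio.h>

/* Same folding technique as the commit's parity(): fold 32 bits down
 * to 4, then index the constant 0x6996, whose bit i is the parity of
 * the nibble value i. */
static uint8_t parity32(uint32_t a)
{
	a ^= a >> 16;
	a ^= a >> 8;
	a ^= a >> 4;

	return (0x6996u >> (a & 0xfu)) & 1u;
}

int main(void)
{
	uint32_t v;

	/* cross-check against a naive popcount-based parity */
	for (v = 0; v < 1000000u; v++) {
		if (parity32(v) != (uint8_t)(__builtin_popcount(v) & 1)) {
			printf("mismatch at 0x%x\n", v);
			return 1;
		}
	}
	printf("parity32 OK\n");
	return 0;
}

In gp10b_populate_scatter_buffer(), each page's scatter bit is derived from its physical address: the address is shifted right by 7, masked with getSliceMaskGP10B to keep only the bits that feed GP10B's slice hash, shifted down by page_size_shift, and reduced to a single bit with parity(); the resulting bits are then packed eight per byte into the scatter buffer.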