Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/cde_gp10b.c')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/cde_gp10b.c  161
1 file changed, 0 insertions(+), 161 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/cde_gp10b.c b/drivers/gpu/nvgpu/common/linux/cde_gp10b.c
deleted file mode 100644
index 5c0e79a7..00000000
--- a/drivers/gpu/nvgpu/common/linux/cde_gp10b.c
+++ /dev/null
@@ -1,161 +0,0 @@
/*
 * GP10B CDE
 *
 * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "gk20a/gk20a.h"
#include "cde_gp10b.h"

#include <nvgpu/log.h>
#include <nvgpu/dma.h>

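/*
 * Shader program slots in the GP10B CDE firmware. The horizontal (H)
 * and vertical (V) pass programs come in pairs: bit 0 of the ID
 * selects the 4K-page variant, which is what lets
 * gp10b_cde_get_program_numbers() below switch to the 4K programs by
 * simply ORing in 1. (Pairing inferred from the enum values and the
 * |= 1 below; the firmware layout itself is not described in this
 * file.)
 */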
enum gp10b_programs {
	GP10B_PROG_HPASS = 0,
	GP10B_PROG_HPASS_4K = 1,
	GP10B_PROG_VPASS = 2,
	GP10B_PROG_VPASS_4K = 3,
	GP10B_PROG_HPASS_DEBUG = 4,
	GP10B_PROG_HPASS_4K_DEBUG = 5,
	GP10B_PROG_VPASS_DEBUG = 6,
	GP10B_PROG_VPASS_4K_DEBUG = 7,
	GP10B_PROG_PASSTHROUGH = 8,
};

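/*
 * Pick the H/V pass programs for a CDE launch. shader_parameter 1
 * selects the passthrough program, 2 the debug variants; without an
 * IOMMU the 4K-page variants are forced, since big pages cannot be
 * used in that case (see the warning below).
 */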
void gp10b_cde_get_program_numbers(struct gk20a *g,
				   u32 block_height_log2,
				   u32 shader_parameter,
				   int *hprog_out, int *vprog_out)
{
	int hprog, vprog;

	if (shader_parameter == 1) {
		hprog = GP10B_PROG_PASSTHROUGH;
		vprog = GP10B_PROG_PASSTHROUGH;
	} else {
		hprog = GP10B_PROG_HPASS;
		vprog = GP10B_PROG_VPASS;
		if (shader_parameter == 2) {
			hprog = GP10B_PROG_HPASS_DEBUG;
			vprog = GP10B_PROG_VPASS_DEBUG;
		}
		if (!nvgpu_iommuable(g)) {
			if (!g->mm.disable_bigpage) {
				nvgpu_warn(g,
					"big pages cannot be used without an IOMMU");
			}
			/* bit 0 selects the 4K-page program variant */
			hprog |= 1;
			vprog |= 1;
		}
	}

	*hprog_out = hprog;
	*vprog_out = vprog;
}

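/*
 * A scatter buffer is only needed when the GPU is not behind an IOMMU;
 * gp10b_populate_scatter_buffer() below then describes the physical
 * pages of the surface, presumably for consumption by the CDE shaders.
 */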
bool gp10b_need_scatter_buffer(struct gk20a *g)
{
	return !nvgpu_iommuable(g);
}

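/*
 * Parity (XOR of all bits) of a 32-bit word, via the classic
 * fold-and-lookup trick: XOR-fold the word down to a 4-bit index, then
 * use the constant 0x6996 (binary 0110 1001 1001 0110) as a 16-entry,
 * one-bit-per-entry parity table.
 */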
static u8 parity(u32 a)
{
	a ^= a >> 16u;
	a ^= a >> 8u;
	a ^= a >> 4u;
	a &= 0xfu;
	return (0x6996u >> a) & 1u;
}

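/*
 * Fill the scatter buffer with one bit per 4K page of the surface,
 * packed LSB-first, eight pages per byte. Each bit is the parity of
 * the slice-selecting physical address bits (getSliceMaskGP10B) of its
 * page; presumably the CDE shaders use this to account for memory
 * slice swizzling when the pages are not remapped by an IOMMU.
 */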
int gp10b_populate_scatter_buffer(struct gk20a *g,
				  struct sg_table *sgt,
				  size_t surface_size,
				  void *scatter_buffer_ptr,
				  size_t scatter_buffer_size)
{
	/* map scatter buffer to CPU VA and fill it */
	const u32 page_size_log2 = 12;
	const u32 page_size = 1 << page_size_log2;
	const u32 page_size_shift = page_size_log2 - 7u;

	/* 0011 1111 1111 1111 1111 1110 0100 1000 */
	const u32 getSliceMaskGP10B = 0x3ffffe48;
	u8 *scatter_buffer = scatter_buffer_ptr;

	size_t i;
	struct scatterlist *sg = NULL;
	u8 d = 0;
	size_t page = 0;
	size_t pages_left;

	surface_size = round_up(surface_size, page_size);

	/* one bit per page; round up so a partial final byte has storage */
	pages_left = surface_size >> page_size_log2;
	if (((pages_left + 7) >> 3) > scatter_buffer_size)
		return -ENOMEM;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		unsigned int j;
		u64 surf_pa = sg_phys(sg);
		unsigned int n = sg->length >> page_size_log2;

		nvgpu_log(g, gpu_dbg_cde, "surfPA=0x%llx + %u pages", surf_pa, n);

		for (j = 0; j < n && pages_left > 0; j++, surf_pa += page_size) {
			u32 addr = (((u32)(surf_pa >> 7)) & getSliceMaskGP10B)
					>> page_size_shift;
			u8 scatter_bit = parity(addr);
			u8 bit = page & 7;

			d |= scatter_bit << bit;
			if (bit == 7) {
				scatter_buffer[page >> 3] = d;
				d = 0;
			}

			++page;
			--pages_left;
		}

		if (pages_left == 0)
			break;
	}

	/* write the last byte in case the number of pages is not divisible by 8 */
	if ((page & 7) != 0)
		scatter_buffer[page >> 3] = d;

	if (nvgpu_log_mask_enabled(g, gpu_dbg_cde)) {
		nvgpu_log(g, gpu_dbg_cde, "scatterBuffer content:");
		for (i = 0; i < page >> 3; i++) {
			nvgpu_log(g, gpu_dbg_cde, " %x", scatter_buffer[i]);
		}
	}

	return 0;
}

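/*
 * GP10B CDE callbacks, exported through the Linux-specific ops table;
 * presumably the common Linux CDE code dispatches through these
 * per-chip hooks.
 */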
struct nvgpu_os_linux_ops gp10b_cde_ops = {
	.cde = {
		.get_program_numbers = gp10b_cde_get_program_numbers,
		.need_scatter_buffer = gp10b_need_scatter_buffer,
		.populate_scatter_buffer = gp10b_populate_scatter_buffer,
	},
};