path: root/drivers/gpu/nvgpu/common/linux/platform_gk20a_tegra.c
Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/platform_gk20a_tegra.c')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/platform_gk20a_tegra.c | 107
1 file changed, 44 insertions(+), 63 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/platform_gk20a_tegra.c b/drivers/gpu/nvgpu/common/linux/platform_gk20a_tegra.c
index 127a8ce9..219dcd40 100644
--- a/drivers/gpu/nvgpu/common/linux/platform_gk20a_tegra.c
+++ b/drivers/gpu/nvgpu/common/linux/platform_gk20a_tegra.c
@@ -103,103 +103,61 @@ static void gk20a_tegra_secure_page_destroy(struct gk20a *g,
 	DEFINE_DMA_ATTRS(attrs);
 	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, __DMA_ATTR(attrs));
 	dma_free_attrs(&tegra_vpr_dev, secure_buffer->size,
-			(void *)(uintptr_t)secure_buffer->iova,
-			secure_buffer->iova, __DMA_ATTR(attrs));
+			(void *)(uintptr_t)secure_buffer->phys,
+			secure_buffer->phys, __DMA_ATTR(attrs));
 
 	secure_buffer->destroy = NULL;
 }
 
-int gk20a_tegra_secure_page_alloc(struct device *dev)
-{
-	struct gk20a_platform *platform = dev_get_drvdata(dev);
-	struct gk20a *g = get_gk20a(dev);
-	struct secure_page_buffer *secure_buffer = &platform->secure_buffer;
-	DEFINE_DMA_ATTRS(attrs);
-	dma_addr_t iova;
-	size_t size = PAGE_SIZE;
-
-	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL))
-		return -EINVAL;
-
-	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, __DMA_ATTR(attrs));
-	(void)dma_alloc_attrs(&tegra_vpr_dev, size, &iova,
-			GFP_KERNEL, __DMA_ATTR(attrs));
-	if (dma_mapping_error(&tegra_vpr_dev, iova))
-		return -ENOMEM;
-
-	secure_buffer->size = size;
-	secure_buffer->iova = iova;
-	secure_buffer->destroy = gk20a_tegra_secure_page_destroy;
-
-	return 0;
-}
-
-static void gk20a_tegra_secure_destroy(struct gk20a *g,
-			struct gr_ctx_buffer_desc *desc)
-{
-	DEFINE_DMA_ATTRS(attrs);
-
-	if (desc->mem.priv.sgt) {
-		u64 pa = nvgpu_mem_get_phys_addr(g, &desc->mem);
-
-		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, __DMA_ATTR(attrs));
-		dma_free_attrs(&tegra_vpr_dev, desc->mem.size,
-				(void *)(uintptr_t)pa,
-				pa, __DMA_ATTR(attrs));
-		nvgpu_free_sgtable(g, &desc->mem.priv.sgt);
-		desc->mem.priv.sgt = NULL;
-	}
-}
-
 static int gk20a_tegra_secure_alloc(struct gk20a *g,
 			struct gr_ctx_buffer_desc *desc,
 			size_t size)
 {
 	struct device *dev = dev_from_gk20a(g);
 	struct gk20a_platform *platform = dev_get_drvdata(dev);
-	DEFINE_DMA_ATTRS(attrs);
-	dma_addr_t iova;
+	struct secure_page_buffer *secure_buffer = &platform->secure_buffer;
+	dma_addr_t phys;
 	struct sg_table *sgt;
 	struct page *page;
 	int err = 0;
+	size_t aligned_size = PAGE_ALIGN(size);
 
-	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, __DMA_ATTR(attrs));
-	(void)dma_alloc_attrs(&tegra_vpr_dev, size, &iova,
-			GFP_KERNEL, __DMA_ATTR(attrs));
-	if (dma_mapping_error(&tegra_vpr_dev, iova))
+	/* We ran out of preallocated memory */
+	if (secure_buffer->used + aligned_size > secure_buffer->size) {
+		nvgpu_err(platform->g, "failed to alloc %zu bytes of VPR, %zu/%zu used",
+				size, secure_buffer->used, secure_buffer->size);
 		return -ENOMEM;
+	}
+
+	phys = secure_buffer->phys + secure_buffer->used;
 
 	sgt = nvgpu_kzalloc(platform->g, sizeof(*sgt));
 	if (!sgt) {
 		nvgpu_err(platform->g, "failed to allocate memory");
-		goto fail;
+		return -ENOMEM;
 	}
 	err = sg_alloc_table(sgt, 1, GFP_KERNEL);
 	if (err) {
 		nvgpu_err(platform->g, "failed to allocate sg_table");
 		goto fail_sgt;
 	}
-	page = phys_to_page(iova);
+	page = phys_to_page(phys);
 	sg_set_page(sgt->sgl, page, size, 0);
 	/* This bypasses SMMU for VPR during gmmu_map. */
 	sg_dma_address(sgt->sgl) = 0;
 
-	desc->destroy = gk20a_tegra_secure_destroy;
+	desc->destroy = NULL;
 
 	desc->mem.priv.sgt = sgt;
 	desc->mem.size = size;
 	desc->mem.aperture = APERTURE_SYSMEM;
 
-	if (platform->secure_buffer.destroy)
-		platform->secure_buffer.destroy(g, &platform->secure_buffer);
+	secure_buffer->used += aligned_size;
 
 	return err;
 
 fail_sgt:
 	nvgpu_kfree(platform->g, sgt);
-fail:
-	dma_free_attrs(&tegra_vpr_dev, desc->mem.size,
-			(void *)(uintptr_t)iova, iova, __DMA_ATTR(attrs));
 	return err;
 }
 
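The heart of this hunk is gk20a_tegra_secure_alloc(): instead of calling dma_alloc_attrs() against tegra_vpr_dev for every graphics context buffer, it now carves page-aligned chunks out of the secure_page_buffer that was reserved once at init, tracking a running 'used' offset and returning -ENOMEM when the preallocated region is exhausted. Below is a minimal, illustrative sketch of that carve-out pattern in plain C, not driver code; carve_buffer and carve_alloc are made-up names, and the real driver additionally wraps the returned physical address in a one-entry sg_table with sg_dma_address() forced to 0 so gmmu_map bypasses the SMMU for VPR.

/* Illustrative sketch only, not driver code. */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE	4096u
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(size_t)(PAGE_SIZE - 1))

struct carve_buffer {
	uintptr_t phys;		/* base of the region reserved up front */
	size_t size;		/* total bytes reserved */
	size_t used;		/* bytes handed out so far */
};

/* Hand out the next page-aligned chunk; there is no free or reuse. */
static int carve_alloc(struct carve_buffer *b, size_t size, uintptr_t *out)
{
	size_t aligned = PAGE_ALIGN(size);

	if (b->used + aligned > b->size)
		return -1;	/* ran out of preallocated memory */

	*out = b->phys + b->used;
	b->used += aligned;
	return 0;
}

int main(void)
{
	struct carve_buffer b = { .phys = 0x80000000u, .size = 8 * PAGE_SIZE };
	uintptr_t addr;

	while (carve_alloc(&b, 5000, &addr) == 0)	/* 5000 rounds up to 2 pages */
		printf("chunk at %#lx, %zu/%zu bytes used\n",
		       (unsigned long)addr, b.used, b.size);
	return 0;
}

Note that nothing is ever returned to the pool, which matches the diff: desc->destroy is now set to NULL and gk20a_tegra_secure_destroy() is deleted, so individual context buffers are no longer freed back to VPR; only the whole region can be released via gk20a_tegra_secure_page_destroy().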
@@ -664,10 +622,32 @@ void gk20a_tegra_idle(struct device *dev)
 #endif
 }
 
-void gk20a_tegra_init_secure_alloc(struct gk20a *g)
+int gk20a_tegra_init_secure_alloc(struct gk20a_platform *platform)
 {
+	struct gk20a *g = platform->g;
+	struct secure_page_buffer *secure_buffer = &platform->secure_buffer;
+	DEFINE_DMA_ATTRS(attrs);
+	dma_addr_t iova;
+
+	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL))
+		return 0;
+
+	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, __DMA_ATTR(attrs));
+	(void)dma_alloc_attrs(&tegra_vpr_dev, platform->secure_buffer_size, &iova,
+			GFP_KERNEL, __DMA_ATTR(attrs));
+	/* Some platforms disable VPR. In that case VPR allocations always
+	 * fail. Just disable VPR usage in nvgpu in that case. */
+	if (dma_mapping_error(&tegra_vpr_dev, iova))
+		return 0;
+
+	secure_buffer->size = platform->secure_buffer_size;
+	secure_buffer->phys = iova;
+	secure_buffer->destroy = gk20a_tegra_secure_page_destroy;
+
 	g->ops.secure_alloc = gk20a_tegra_secure_alloc;
 	__nvgpu_set_enabled(g, NVGPU_SUPPORT_VPR, true);
+
+	return 0;
 }
 
 #ifdef CONFIG_COMMON_CLK
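gk20a_tegra_init_secure_alloc() now takes the gk20a_platform, makes a single dma_alloc_attrs() call for the whole platform->secure_buffer_size region, and, per its own comment, treats a mapping error as "VPR is disabled on this platform" rather than as a probe failure: it returns 0 but leaves g->ops.secure_alloc unset and NVGPU_SUPPORT_VPR false. A rough stand-alone sketch of that control flow follows; fake_platform, reserve_vpr and init_secure_alloc are invented stand-ins for the real structures and DMA calls, not the driver API.

/* Illustrative sketch only, not driver code. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fake_platform {
	size_t secure_buffer_size;
	bool vpr_supported;			/* stands in for NVGPU_SUPPORT_VPR */
	int (*secure_alloc)(size_t size);	/* stands in for g->ops.secure_alloc */
};

/* Pretend reservation; non-zero means "VPR is disabled on this platform". */
static int reserve_vpr(size_t size)
{
	return size != 0 ? 0 : -1;
}

static int secure_alloc_stub(size_t size)
{
	(void)size;
	return 0;
}

static int init_secure_alloc(struct fake_platform *p)
{
	if (reserve_vpr(p->secure_buffer_size) != 0)
		return 0;	/* VPR disabled: not an error, just no VPR */

	p->secure_alloc = secure_alloc_stub;
	p->vpr_supported = true;
	return 0;
}

int main(void)
{
	struct fake_platform p = { .secure_buffer_size = 335872 };

	if (init_secure_alloc(&p) == 0)
		printf("probe continues, VPR %s\n",
		       p.vpr_supported ? "enabled" : "disabled");
	return 0;
}

In the version shown in this diff every path of gk20a_tegra_init_secure_alloc() returns 0, so the return-value check added in gk20a_tegra_probe() below appears to be there for future error paths rather than any current one.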
@@ -836,7 +816,9 @@ static int gk20a_tegra_probe(struct device *dev)
 
 	gk20a_tegra_get_clocks(dev);
 	nvgpu_linux_init_clk_support(platform->g);
-	gk20a_tegra_init_secure_alloc(platform->g);
+	ret = gk20a_tegra_init_secure_alloc(platform);
+	if (ret)
+		return ret;
 
 	if (platform->clk_register) {
 		ret = platform->clk_register(platform->g);
@@ -851,9 +833,6 @@ static int gk20a_tegra_probe(struct device *dev)
 
 static int gk20a_tegra_late_probe(struct device *dev)
 {
-	/* Cause early VPR resize */
-	gk20a_tegra_secure_page_alloc(dev);
-
 	return 0;
 }
 
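The late-probe call to gk20a_tegra_secure_page_alloc() existed only to allocate a single secure page and thereby trigger an early VPR resize; with the whole secure buffer now reserved in gk20a_tegra_init_secure_alloc() during probe, that function and its caller are removed.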
@@ -974,4 +953,6 @@ struct gk20a_platform gm20b_tegra_platform = {
 	.soc_name = "tegra21x",
 
 	.unified_memory = true,
+
+	.secure_buffer_size = 335872,
 };
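For gm20b the new .secure_buffer_size field is 335872 bytes, i.e. 82 pages of 4 KiB (82 * 4096 = 335872). Since gk20a_tegra_secure_alloc() rounds every request up with PAGE_ALIGN(), at most 82 page-granular chunks can be carved out before the "failed to alloc ... of VPR" path fires; the diff itself does not state how this figure was chosen.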