author		Alex Waterman <alexw@nvidia.com>	2017-06-22 20:08:35 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-06-27 16:15:58 -0400
commit		cadd5120d33e9ed5b70c620c7a54b2c9e338c1e4
tree		f4b09b4b4723d1ea45aebd6cfa5a34035b28d2da /drivers/gpu/nvgpu/common
parent		0dc80244eea4c7e504976d8028a3ddb72ba60b0e
gpu: nvgpu: Remove fmodel GMMU allocation
Remove the special cases for fmodel in the GMMU allocation code. There is
no reason to treat fmodel any differently from regular DMA memory: if
there is no IOMMU, the DMA API handles that perfectly acceptably.

JIRA NVGPU-30

Change-Id: Icceb832735a98b601b9f41064dd73a6edee29002
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master/r/1507562
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
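The claim that the DMA API copes equally well with or without an IOMMU can be illustrated with a minimal sketch against the stock Linux DMA mapping API. The helper names and the struct device pointer below are illustrative assumptions, not code from this driver or from this change:

#include <linux/dma-mapping.h>

/*
 * Illustrative sketch only. dma_alloc_coherent() goes through whatever
 * dma_map_ops the device ends up with: behind an IOMMU the returned
 * dma_addr_t is an IOVA, without one it is simply the bus/physical
 * address of the backing pages. Either way the caller gets a CPU
 * mapping plus a DMA address and never has to special-case the
 * no-IOMMU (fmodel-like) configuration, which is why a dedicated
 * physical-page path is unnecessary.
 */
static void *example_alloc_gmmu_backing(struct device *dev, size_t len,
					dma_addr_t *dma_addr)
{
	return dma_alloc_coherent(dev, len, dma_addr, GFP_KERNEL);
}

static void example_free_gmmu_backing(struct device *dev, size_t len,
				      void *cpu_va, dma_addr_t dma_addr)
{
	dma_free_coherent(dev, len, cpu_va, dma_addr);
}

In nvgpu the same idea is wrapped by nvgpu_dma_alloc()/nvgpu_dma_free(), which is what the diff below switches the GMMU page-table allocation over to.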
Diffstat (limited to 'drivers/gpu/nvgpu/common')
-rw-r--r--	drivers/gpu/nvgpu/common/mm/gmmu.c	136
1 file changed, 3 insertions(+), 133 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu.c b/drivers/gpu/nvgpu/common/mm/gmmu.c
index e63155f2..06291600 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu.c
@@ -30,108 +30,14 @@
 #define gmmu_dbg_v(g, fmt, args...)			\
 	nvgpu_log(g, gpu_dbg_map_v, fmt, ##args)
 
-static int map_gmmu_phys_pages(struct gk20a_mm_entry *entry)
-{
-	FLUSH_CPU_DCACHE(entry->mem.cpu_va,
-			 sg_phys(entry->mem.priv.sgt->sgl),
-			 entry->mem.priv.sgt->sgl->length);
-	return 0;
-}
-
-static void unmap_gmmu_phys_pages(struct gk20a_mm_entry *entry)
-{
-	FLUSH_CPU_DCACHE(entry->mem.cpu_va,
-			 sg_phys(entry->mem.priv.sgt->sgl),
-			 entry->mem.priv.sgt->sgl->length);
-}
-
 static int map_gmmu_pages(struct gk20a *g, struct gk20a_mm_entry *entry)
 {
-	gk20a_dbg_fn("");
-
-	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL))
-		return map_gmmu_phys_pages(entry);
-
-	if (IS_ENABLED(CONFIG_ARM64)) {
-		if (entry->mem.aperture == APERTURE_VIDMEM)
-			return 0;
-
-		FLUSH_CPU_DCACHE(entry->mem.cpu_va,
-				 sg_phys(entry->mem.priv.sgt->sgl),
-				 entry->mem.size);
-	} else {
-		int err = nvgpu_mem_begin(g, &entry->mem);
-
-		if (err)
-			return err;
-	}
-
-	return 0;
+	return nvgpu_mem_begin(g, &entry->mem);
 }
 
 static void unmap_gmmu_pages(struct gk20a *g, struct gk20a_mm_entry *entry)
 {
-	gk20a_dbg_fn("");
-
-	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
-		unmap_gmmu_phys_pages(entry);
-		return;
-	}
-
-	if (IS_ENABLED(CONFIG_ARM64)) {
-		if (entry->mem.aperture == APERTURE_VIDMEM)
-			return;
-
-		FLUSH_CPU_DCACHE(entry->mem.cpu_va,
-				 sg_phys(entry->mem.priv.sgt->sgl),
-				 entry->mem.size);
-	} else {
-		nvgpu_mem_end(g, &entry->mem);
-	}
-}
-
-static int alloc_gmmu_phys_pages(struct vm_gk20a *vm, u32 order,
-				 struct gk20a_mm_entry *entry)
-{
-	u32 num_pages = 1 << order;
-	u32 len = num_pages * PAGE_SIZE;
-	int err;
-	struct page *pages;
-	struct gk20a *g = vm->mm->g;
-
-	/* note: mem_desc slightly abused (wrt. alloc_gmmu_pages) */
-
-	pages = alloc_pages(GFP_KERNEL, order);
-	if (!pages) {
-		nvgpu_log(g, gpu_dbg_pte, "alloc_pages failed");
-		goto err_out;
-	}
-	entry->mem.priv.sgt = nvgpu_kzalloc(g, sizeof(*entry->mem.priv.sgt));
-	if (!entry->mem.priv.sgt) {
-		nvgpu_log(g, gpu_dbg_pte, "cannot allocate sg table");
-		goto err_alloced;
-	}
-	err = sg_alloc_table(entry->mem.priv.sgt, 1, GFP_KERNEL);
-	if (err) {
-		nvgpu_log(g, gpu_dbg_pte, "sg_alloc_table failed");
-		goto err_sg_table;
-	}
-	sg_set_page(entry->mem.priv.sgt->sgl, pages, len, 0);
-	entry->mem.cpu_va = page_address(pages);
-	memset(entry->mem.cpu_va, 0, len);
-	entry->mem.size = len;
-	entry->mem.aperture = APERTURE_SYSMEM;
-	FLUSH_CPU_DCACHE(entry->mem.cpu_va,
-			 sg_phys(entry->mem.priv.sgt->sgl), len);
-
-	return 0;
-
-err_sg_table:
-	nvgpu_kfree(vm->mm->g, entry->mem.priv.sgt);
-err_alloced:
-	__free_pages(pages, order);
-err_out:
-	return -ENOMEM;
-}
+	nvgpu_mem_end(g, &entry->mem);
 }
 
 static int nvgpu_alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
@@ -142,19 +48,7 @@ static int nvgpu_alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
 	u32 len = num_pages * PAGE_SIZE;
 	int err;
 
-	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL))
-		return alloc_gmmu_phys_pages(vm, order, entry);
-
-	/*
-	 * On arm32 we're limited by vmalloc space, so we do not map pages by
-	 * default.
-	 */
-	if (IS_ENABLED(CONFIG_ARM64))
-		err = nvgpu_dma_alloc(g, len, &entry->mem);
-	else
-		err = nvgpu_dma_alloc_flags(g, NVGPU_DMA_NO_KERNEL_MAPPING,
-					    len, &entry->mem);
-
+	err = nvgpu_dma_alloc(g, len, &entry->mem);
 
 	if (err) {
 		nvgpu_err(g, "memory allocation failed");
@@ -164,41 +58,17 @@ static int nvgpu_alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
 	return 0;
 }
 
-static void free_gmmu_phys_pages(struct vm_gk20a *vm,
-				 struct gk20a_mm_entry *entry)
-{
-	gk20a_dbg_fn("");
-
-	/* note: mem_desc slightly abused (wrt. nvgpu_free_gmmu_pages) */
-
-	free_pages((unsigned long)entry->mem.cpu_va, get_order(entry->mem.size));
-	entry->mem.cpu_va = NULL;
-
-	sg_free_table(entry->mem.priv.sgt);
-	nvgpu_kfree(vm->mm->g, entry->mem.priv.sgt);
-	entry->mem.priv.sgt = NULL;
-	entry->mem.size = 0;
-	entry->mem.aperture = APERTURE_INVALID;
-}
-
 void nvgpu_free_gmmu_pages(struct vm_gk20a *vm,
 			   struct gk20a_mm_entry *entry)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 
-	gk20a_dbg_fn("");
-
 	if (!entry->mem.size)
 		return;
 
 	if (entry->woffset) /* fake shadow mem */
 		return;
 
-	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
-		free_gmmu_phys_pages(vm, entry);
-		return;
-	}
-
 	nvgpu_dma_free(g, &entry->mem);
 }
 