path: root/drivers/gpu/nvgpu/common/mm/vm_area.c
author     Alex Waterman <alexw@nvidia.com>                     2018-07-02 20:14:27 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>  2018-09-09 20:20:43 -0400
commit     3cf92ec89ba8deac77d726f02d79cba7c0e73e4d (patch)
tree       9b20d30e81ab45d85eddbdf1cdd36901fd5a4ad2 /drivers/gpu/nvgpu/common/mm/vm_area.c
parent     2dd9bb03dd56ca86b0e61b89fab38d38a58ecddf (diff)
gpu: nvgpu: Fix several issues with the buddy allocator
The issues are:

1. Non-fixed allocs must take into account explicit PTE size requests.
   Previously the PTE size was determined from the allocation size, which
   was incorrect. To do this, the PTE size is now plumbed through all GPU
   VA allocations. This is what the new alloc_pte() op does.

2. Fix buddy PTE size assignment. This changes a '<=' into a '<' in the
   buddy allocation logic. Effectively this now leaves the PTE size of
   buddy blocks equal to the PDE block size as 'ANY', which prevents a
   buddy block of PDE size that has yet to be allocated from having a
   specific PTE size.

   Without this it is possible for a fixed alloc to fail unexpectedly due
   to mismatching PTE sizes. Consider two PDE-block-sized fixed allocs
   that are contained in one buddy twice the size of a PDE block. Let's
   call these fixed allocs S and B (small and big), and assume two fixed
   allocs are done, targeting S and B, in that order. With the current
   logic the first alloc, when we create the two buddies S and B, causes
   both S and B to have a PTE size of SMALL. When the second alloc
   happens we attempt to find a buddy B with a PTE size of either BIG or
   ANY, but we cannot because B already has size SMALL. This makes it
   appear as though there is a conflicting fixed alloc despite that not
   being the case.

3. Misc cleanups & bug fixes:
   - Clean up some MISRA issues.
   - Delete an extraneous unlock that could have caused a deadlock.

Bug 200105199

Change-Id: Ib5447ec6705a5a289ac0cf3d5e90c79b5d67582d
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1768582
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
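The false-conflict scenario in item 2 is easier to see with a toy model. The sketch below is not the nvgpu buddy allocator; the types, constants and helpers (PDE_BLOCK_SIZE, split_old, split_new, fixed_alloc_ok) are hypothetical stand-ins. It only illustrates why leaving the PTE size of a not-yet-allocated, PDE-block-sized buddy as 'ANY' (the '<' comparison) lets the later BIG fixed alloc in B succeed, while pinning it at split time (the old '<=' comparison) makes it look like a conflicting alloc:

/* Toy model only -- hypothetical names, not the nvgpu allocator. */
#include <stdbool.h>
#include <stdio.h>

enum pte_size { PTE_ANY, PTE_SMALL, PTE_BIG };

struct buddy {
	unsigned long size;   /* size of this buddy block         */
	enum pte_size pte;    /* PTE size recorded for this block */
};

#define PDE_BLOCK_SIZE 0x200000UL  /* hypothetical PDE block size */

/* Old behaviour: '<=' pins even PDE-block-sized buddies at split time. */
static void split_old(struct buddy *b, enum pte_size req)
{
	if (b->size <= PDE_BLOCK_SIZE)
		b->pte = req;
}

/* Fixed behaviour: '<' leaves PDE-block-sized buddies as ANY. */
static void split_new(struct buddy *b, enum pte_size req)
{
	if (b->size < PDE_BLOCK_SIZE)
		b->pte = req;
}

/* A fixed alloc fits only if the buddy's PTE size is ANY or matches. */
static bool fixed_alloc_ok(const struct buddy *b, enum pte_size req)
{
	return b->pte == PTE_ANY || b->pte == req;
}

int main(void)
{
	/* One buddy of 2x PDE block size split into S and B. */
	struct buddy s = { PDE_BLOCK_SIZE, PTE_ANY };
	struct buddy b = { PDE_BLOCK_SIZE, PTE_ANY };

	/* First fixed alloc targets S with SMALL pages (old logic). */
	split_old(&s, PTE_SMALL);
	split_old(&b, PTE_SMALL);          /* B gets pinned to SMALL too */
	printf("old '<=': BIG fixed alloc in B ok? %s\n",
	       fixed_alloc_ok(&b, PTE_BIG) ? "yes" : "no (false conflict)");

	/* Same sequence with the fixed comparison. */
	s = (struct buddy){ PDE_BLOCK_SIZE, PTE_ANY };
	b = (struct buddy){ PDE_BLOCK_SIZE, PTE_ANY };
	split_new(&s, PTE_SMALL);
	split_new(&b, PTE_SMALL);          /* B stays ANY until allocated */
	s.pte = PTE_SMALL;                 /* S is pinned only when actually allocated */
	printf("new '<':  BIG fixed alloc in B ok? %s\n",
	       fixed_alloc_ok(&b, PTE_BIG) ? "yes" : "no");

	return 0;
}

Built with any C99 compiler, the first printf reports the false conflict under the old comparison and the second reports success under the fixed one.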
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/vm_area.c')
-rw-r--r--  drivers/gpu/nvgpu/common/mm/vm_area.c  24
1 file changed, 18 insertions, 6 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/vm_area.c b/drivers/gpu/nvgpu/common/mm/vm_area.c
index c2c0d569..d096de5d 100644
--- a/drivers/gpu/nvgpu/common/mm/vm_area.c
+++ b/drivers/gpu/nvgpu/common/mm/vm_area.c
@@ -99,11 +99,22 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size,
 	struct nvgpu_allocator *vma;
 	struct nvgpu_vm_area *vm_area;
 	u64 vaddr_start = 0;
+	u64 our_addr = *addr;
 	u32 pgsz_idx = GMMU_PAGE_SIZE_SMALL;
 
+	/*
+	 * If we have a fixed address then use the passed address in *addr. This
+	 * corresponds to the o_a field in the IOCTL. But since we do not
+	 * support specific alignments in the buddy allocator we ignore the
+	 * field if it isn't a fixed offset.
+	 */
+	if ((flags & NVGPU_VM_AREA_ALLOC_FIXED_OFFSET) != 0U) {
+		our_addr = *addr;
+	}
+
 	nvgpu_log(g, gpu_dbg_map,
-		  "ADD vm_area: pgsz=%#-8x pages=%-9u addr=%#-14llx flags=0x%x",
-		  page_size, pages, *addr, flags);
+		  "ADD vm_area: pgsz=%#-8x pages=%-9u a/o=%#-14llx flags=0x%x",
+		  page_size, pages, our_addr, flags);
 
 	for (; pgsz_idx < GMMU_NR_PAGE_SIZES; pgsz_idx++) {
 		if (vm->gmmu_page_sizes[pgsz_idx] == page_size) {
@@ -133,14 +144,15 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size,
 
 	vma = vm->vma[pgsz_idx];
 	if (flags & NVGPU_VM_AREA_ALLOC_FIXED_OFFSET) {
-		vaddr_start = nvgpu_alloc_fixed(vma, *addr,
+		vaddr_start = nvgpu_alloc_fixed(vma, our_addr,
 						(u64)pages *
 						(u64)page_size,
 						page_size);
 	} else {
-		vaddr_start = nvgpu_alloc(vma,
+		vaddr_start = nvgpu_alloc_pte(vma,
 					  (u64)pages *
-					  (u64)page_size);
+					  (u64)page_size,
+					  page_size);
 	}
 
 	if (!vaddr_start) {