author     Alex Waterman <alexw@nvidia.com>  2017-10-17 20:12:28 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>  2017-11-01 22:06:30 -0400
commit     88ee812d56333375f7ae44e28b483c1a161d75da (patch)
tree       00b4ff8d2c0aa21721a9e8756df7dd9b1aad95ed /drivers/gpu/nvgpu/include
parent     a8bd154f7907c7054f8668c5995c1b5a7f748edc (diff)
gpu: nvgpu: Remove buffer_attrs struct
Remove the buffer_attrs struct and replace it with a more streamlined
nvgpu_ctag_buffer_info struct. This struct allows several related fields
to be passed together, by a single pointer, to the various
kind/compression functions in the VM map path.

This patch also moves several comptag/kind related functions to the core
vm.c code, since these functions can be reused by other OSes.

Change-Id: I2a0f0a1c4b554ce4c8f2acdbe3161392e717d3bf
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1583984
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
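The new flow can be pictured with a short, hypothetical caller. Only the
struct fields and the two prototypes come from the headers changed below;
the wrapper function, its parameters, and its error handling are
illustrative assumptions, not code from this patch.

/*
 * Hypothetical caller -- for illustration only. The struct and the two
 * helpers are declared in <nvgpu/vm.h> after this patch; everything else
 * here is an assumption.
 */
#include <nvgpu/vm.h>

static int example_prepare_kind_info(struct vm_gk20a *vm, u64 map_size,
				     s16 compr_kind, s16 incompr_kind)
{
	struct nvgpu_ctag_buffer_info binfo = { 0 };
	int err;

	binfo.size = map_size;

	/* Record which compressible/incompressible kinds were requested. */
	err = nvgpu_vm_init_kind_info(&binfo, compr_kind, incompr_kind);
	if (err)
		return err;

	/* Decide whether comptags can actually back this mapping. */
	err = nvgpu_vm_compute_kind_and_compression(vm, &binfo);
	if (err)
		return err;

	/*
	 * binfo.kind_v, binfo.uc_kind_v and binfo.ctag_lines are now ready
	 * to feed the GMMU map call.
	 */
	return 0;
}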
Diffstat (limited to 'drivers/gpu/nvgpu/include')
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/linux/vm.h  19
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/vm.h        18
2 files changed, 18 insertions, 19 deletions
diff --git a/drivers/gpu/nvgpu/include/nvgpu/linux/vm.h b/drivers/gpu/nvgpu/include/nvgpu/linux/vm.h
index 3794706c..596a3b62 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/linux/vm.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/linux/vm.h
@@ -38,21 +38,6 @@ struct vm_gk20a;
 struct vm_gk20a_mapping_batch;
 struct nvgpu_vm_area;
 
-struct buffer_attrs {
-	struct sg_table *sgt;
-	u64 size;
-	u64 align;
-	u32 ctag_offset;
-	u32 ctag_lines;
-	u32 ctag_allocated_lines;
-	int pgsz_idx;
-	u8 kind_v;
-	bool use_kind_v;
-	u8 uc_kind_v;
-	bool use_uc_kind_v;
-	bool ctag_user_mappable;
-};
-
 u64 nvgpu_vm_map_linux(struct vm_gk20a *vm,
 		       struct dma_buf *dmabuf,
 		       u64 offset_align,
@@ -104,9 +89,5 @@ int nvgpu_vm_find_buf(struct vm_gk20a *vm, u64 gpu_va,
 
 enum nvgpu_aperture gk20a_dmabuf_aperture(struct gk20a *g,
 					  struct dma_buf *dmabuf);
-int validate_fixed_buffer(struct vm_gk20a *vm,
-			  struct buffer_attrs *bfr,
-			  u64 map_offset, u64 map_size,
-			  struct nvgpu_vm_area **pva_node);
 
 #endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/vm.h b/drivers/gpu/nvgpu/include/nvgpu/vm.h
index 7370a8e3..801fb8ed 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/vm.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/vm.h
@@ -122,6 +122,19 @@ mapped_buffer_from_rbtree_node(struct nvgpu_rbtree_node *node)
 		((uintptr_t)node - offsetof(struct nvgpu_mapped_buf, node));
 }
 
+struct nvgpu_ctag_buffer_info {
+	u64 size;
+	enum gmmu_pgsz_gk20a pgsz_idx;
+	u32 flags;
+
+	u8 kind_v;
+	u8 uc_kind_v;
+	bool use_kind_v;
+	bool use_uc_kind_v;
+
+	u32 ctag_lines;
+};
+
 struct vm_gk20a {
 	struct mm_gk20a *mm;
 	struct gk20a_as_share *as_share; /* as_share this represents */
@@ -189,6 +202,11 @@ void nvgpu_vm_put(struct vm_gk20a *vm);
 int vm_aspace_id(struct vm_gk20a *vm);
 int nvgpu_big_pages_possible(struct vm_gk20a *vm, u64 base, u64 size);
 
+int nvgpu_vm_init_kind_info(struct nvgpu_ctag_buffer_info *binfo,
+			    s16 compr_kind, s16 incompr_kind);
+int nvgpu_vm_compute_kind_and_compression(struct vm_gk20a *vm,
+					  struct nvgpu_ctag_buffer_info *binfo);
+
 /* batching eliminates redundant cache flushes and invalidates */
 void nvgpu_vm_mapping_batch_start(struct vm_gk20a_mapping_batch *batch);
 void nvgpu_vm_mapping_batch_finish(
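For background on the two s16 parameters above: compr_kind appears to be
the compressible kind requested for the mapping, and incompr_kind the
uncompressed fallback used when comptags are unavailable. Below is a
purely illustrative sketch of how nvgpu_vm_init_kind_info might translate
them into the binfo fields; the real implementation lives in the common
vm.c and is not reproduced here, and the "negative means not requested"
convention is an assumption.

/* Illustrative sketch only; assumes a negative kind means "not requested". */
static void sketch_init_kind_info(struct nvgpu_ctag_buffer_info *binfo,
				  s16 compr_kind, s16 incompr_kind)
{
	/* Compressible kind, if the caller asked for one. */
	binfo->use_kind_v = (compr_kind >= 0);
	if (binfo->use_kind_v)
		binfo->kind_v = (u8)compr_kind;

	/* Uncompressed fallback kind, used when comptags cannot be had. */
	binfo->use_uc_kind_v = (incompr_kind >= 0);
	if (binfo->use_uc_kind_v)
		binfo->uc_kind_v = (u8)incompr_kind;
}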