diff options
author | Alex Waterman <alexw@nvidia.com> | 2017-03-21 15:55:35 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2017-04-06 21:14:53 -0400 |
commit | c9665079d7b12f22a847c62587724b4ee120ca6e (patch) | |
tree | 7882bd08193db4c34b3b8ad7df7013339da2fba1 /drivers/gpu/nvgpu/gk20a/mm_gk20a.h | |
parent | b69020bff5dfa69cad926c9374cdbe9a62509ffd (diff) |
gpu: nvgpu: rename mem_desc to nvgpu_mem
Renaming was done with the following command:
$ find -type f | \
xargs sed -i 's/struct mem_desc/struct nvgpu_mem/g'
Also rename mem_desc.[ch] to nvgpu_mem.[ch].
JIRA NVGPU-12
Change-Id: I69395758c22a56aa01e3dffbcded70a729bf559a
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1325547
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.h')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/mm_gk20a.h | 76 |
1 file changed, 38 insertions, 38 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h index 3c701907..db72ca79 100644 --- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h | |||
@@ -24,7 +24,7 @@ | |||
24 | #include <asm/dma-iommu.h> | 24 | #include <asm/dma-iommu.h> |
25 | #include <asm/cacheflush.h> | 25 | #include <asm/cacheflush.h> |
26 | 26 | ||
27 | #include <nvgpu/mem_desc.h> | 27 | #include <nvgpu/nvgpu_mem.h> |
28 | #include <nvgpu/allocator.h> | 28 | #include <nvgpu/allocator.h> |
29 | #include <nvgpu/list.h> | 29 | #include <nvgpu/list.h> |
30 | #include <nvgpu/rbtree.h> | 30 | #include <nvgpu/rbtree.h> |
@@ -47,7 +47,7 @@ enum gk20a_mem_rw_flag { | |||
47 | }; | 47 | }; |
48 | 48 | ||
49 | struct gpfifo_desc { | 49 | struct gpfifo_desc { |
50 | struct mem_desc mem; | 50 | struct nvgpu_mem mem; |
51 | u32 entry_num; | 51 | u32 entry_num; |
52 | 52 | ||
53 | u32 get; | 53 | u32 get; |
@@ -61,7 +61,7 @@ struct gpfifo_desc { | |||
61 | }; | 61 | }; |
62 | 62 | ||
63 | struct patch_desc { | 63 | struct patch_desc { |
64 | struct mem_desc mem; | 64 | struct nvgpu_mem mem; |
65 | u32 data_count; | 65 | u32 data_count; |
66 | }; | 66 | }; |
67 | 67 | ||
@@ -72,14 +72,14 @@ struct zcull_ctx_desc { | |||
72 | }; | 72 | }; |
73 | 73 | ||
74 | struct pm_ctx_desc { | 74 | struct pm_ctx_desc { |
75 | struct mem_desc mem; | 75 | struct nvgpu_mem mem; |
76 | u32 pm_mode; | 76 | u32 pm_mode; |
77 | }; | 77 | }; |
78 | 78 | ||
79 | struct gk20a; | 79 | struct gk20a; |
80 | 80 | ||
81 | struct compbit_store_desc { | 81 | struct compbit_store_desc { |
82 | struct mem_desc mem; | 82 | struct nvgpu_mem mem; |
83 | 83 | ||
84 | /* The value that is written to the hardware. This depends on | 84 | /* The value that is written to the hardware. This depends on |
85 | * on the number of ltcs and is not an address. */ | 85 | * on the number of ltcs and is not an address. */ |
@@ -124,7 +124,7 @@ struct gk20a_comptags { | |||
124 | 124 | ||
125 | struct gk20a_mm_entry { | 125 | struct gk20a_mm_entry { |
126 | /* backing for */ | 126 | /* backing for */ |
127 | struct mem_desc mem; | 127 | struct nvgpu_mem mem; |
128 | u32 woffset; /* if >0, mem is a shadow copy, owned by another entry */ | 128 | u32 woffset; /* if >0, mem is a shadow copy, owned by another entry */ |
129 | int pgsz; | 129 | int pgsz; |
130 | struct gk20a_mm_entry *entries; | 130 | struct gk20a_mm_entry *entries; |
@@ -132,7 +132,7 @@ struct gk20a_mm_entry { | |||
132 | }; | 132 | }; |
133 | 133 | ||
134 | struct priv_cmd_queue { | 134 | struct priv_cmd_queue { |
135 | struct mem_desc mem; | 135 | struct nvgpu_mem mem; |
136 | u32 size; /* num of entries in words */ | 136 | u32 size; /* num of entries in words */ |
137 | u32 put; /* put for priv cmd queue */ | 137 | u32 put; /* put for priv cmd queue */ |
138 | u32 get; /* get for priv cmd queue */ | 138 | u32 get; /* get for priv cmd queue */ |
@@ -140,7 +140,7 @@ struct priv_cmd_queue { | |||
140 | 140 | ||
141 | struct priv_cmd_entry { | 141 | struct priv_cmd_entry { |
142 | bool valid; | 142 | bool valid; |
143 | struct mem_desc *mem; | 143 | struct nvgpu_mem *mem; |
144 | u32 off; /* offset in mem, in u32 entries */ | 144 | u32 off; /* offset in mem, in u32 entries */ |
145 | u64 gva; | 145 | u64 gva; |
146 | u32 get; /* start of entry in queue */ | 146 | u32 get; /* start of entry in queue */ |
@@ -335,24 +335,24 @@ struct mm_gk20a { | |||
335 | struct { | 335 | struct { |
336 | u32 aperture_size; | 336 | u32 aperture_size; |
337 | struct vm_gk20a vm; | 337 | struct vm_gk20a vm; |
338 | struct mem_desc inst_block; | 338 | struct nvgpu_mem inst_block; |
339 | } bar1; | 339 | } bar1; |
340 | 340 | ||
341 | struct { | 341 | struct { |
342 | u32 aperture_size; | 342 | u32 aperture_size; |
343 | struct vm_gk20a vm; | 343 | struct vm_gk20a vm; |
344 | struct mem_desc inst_block; | 344 | struct nvgpu_mem inst_block; |
345 | } bar2; | 345 | } bar2; |
346 | 346 | ||
347 | struct { | 347 | struct { |
348 | u32 aperture_size; | 348 | u32 aperture_size; |
349 | struct vm_gk20a vm; | 349 | struct vm_gk20a vm; |
350 | struct mem_desc inst_block; | 350 | struct nvgpu_mem inst_block; |
351 | } pmu; | 351 | } pmu; |
352 | 352 | ||
353 | struct { | 353 | struct { |
354 | /* using pmu vm currently */ | 354 | /* using pmu vm currently */ |
355 | struct mem_desc inst_block; | 355 | struct nvgpu_mem inst_block; |
356 | } hwpm; | 356 | } hwpm; |
357 | 357 | ||
358 | struct { | 358 | struct { |
@@ -367,7 +367,7 @@ struct mm_gk20a { | |||
367 | struct nvgpu_mutex tlb_lock; | 367 | struct nvgpu_mutex tlb_lock; |
368 | struct nvgpu_mutex priv_lock; | 368 | struct nvgpu_mutex priv_lock; |
369 | #ifdef CONFIG_ARCH_TEGRA_18x_SOC | 369 | #ifdef CONFIG_ARCH_TEGRA_18x_SOC |
370 | struct mem_desc bar2_desc; | 370 | struct nvgpu_mem bar2_desc; |
371 | #endif | 371 | #endif |
372 | /* | 372 | /* |
373 | * Separate function to cleanup the CE since it requires a channel to | 373 | * Separate function to cleanup the CE since it requires a channel to |
@@ -397,7 +397,7 @@ struct mm_gk20a { | |||
397 | /* false if vidmem aperture actually points to sysmem */ | 397 | /* false if vidmem aperture actually points to sysmem */ |
398 | bool vidmem_is_vidmem; | 398 | bool vidmem_is_vidmem; |
399 | 399 | ||
400 | struct mem_desc sysmem_flush; | 400 | struct nvgpu_mem sysmem_flush; |
401 | 401 | ||
402 | u32 pramin_window; | 402 | u32 pramin_window; |
403 | struct nvgpu_spinlock pramin_window_lock; | 403 | struct nvgpu_spinlock pramin_window_lock; |
@@ -475,11 +475,11 @@ struct nvgpu_page_alloc *get_vidmem_page_alloc(struct scatterlist *sgl); | |||
475 | #define bar1_instance_block_shift_gk20a() bus_bar1_block_ptr_shift_v() | 475 | #define bar1_instance_block_shift_gk20a() bus_bar1_block_ptr_shift_v() |
476 | #endif | 476 | #endif |
477 | 477 | ||
478 | int gk20a_alloc_inst_block(struct gk20a *g, struct mem_desc *inst_block); | 478 | int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block); |
479 | void gk20a_free_inst_block(struct gk20a *g, struct mem_desc *inst_block); | 479 | void gk20a_free_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block); |
480 | void gk20a_init_inst_block(struct mem_desc *inst_block, struct vm_gk20a *vm, | 480 | void gk20a_init_inst_block(struct nvgpu_mem *inst_block, struct vm_gk20a *vm, |
481 | u32 big_page_size); | 481 | u32 big_page_size); |
482 | u64 gk20a_mm_inst_block_addr(struct gk20a *g, struct mem_desc *mem); | 482 | u64 gk20a_mm_inst_block_addr(struct gk20a *g, struct nvgpu_mem *mem); |
483 | 483 | ||
484 | void gk20a_mm_dump_vm(struct vm_gk20a *vm, | 484 | void gk20a_mm_dump_vm(struct vm_gk20a *vm, |
485 | u64 va_begin, u64 va_end, char *label); | 485 | u64 va_begin, u64 va_end, char *label); |
@@ -499,7 +499,7 @@ void gk20a_free_sgtable(struct gk20a *g, struct sg_table **sgt); | |||
499 | u64 gk20a_mm_iova_addr(struct gk20a *g, struct scatterlist *sgl, | 499 | u64 gk20a_mm_iova_addr(struct gk20a *g, struct scatterlist *sgl, |
500 | u32 flags); | 500 | u32 flags); |
501 | u64 gk20a_mm_smmu_vaddr_translate(struct gk20a *g, dma_addr_t iova); | 501 | u64 gk20a_mm_smmu_vaddr_translate(struct gk20a *g, dma_addr_t iova); |
502 | u64 gk20a_mem_get_base_addr(struct gk20a *g, struct mem_desc *mem, | 502 | u64 gk20a_mem_get_base_addr(struct gk20a *g, struct nvgpu_mem *mem, |
503 | u32 flags); | 503 | u32 flags); |
504 | 504 | ||
505 | void gk20a_mm_ltc_isr(struct gk20a *g); | 505 | void gk20a_mm_ltc_isr(struct gk20a *g); |
@@ -542,39 +542,39 @@ u64 gk20a_gmmu_fixed_map(struct vm_gk20a *vm, | |||
542 | #define NVGPU_DMA_READ_ONLY (1 << 2) | 542 | #define NVGPU_DMA_READ_ONLY (1 << 2) |
543 | 543 | ||
544 | int gk20a_gmmu_alloc_map(struct vm_gk20a *vm, size_t size, | 544 | int gk20a_gmmu_alloc_map(struct vm_gk20a *vm, size_t size, |
545 | struct mem_desc *mem); | 545 | struct nvgpu_mem *mem); |
546 | int gk20a_gmmu_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags, | 546 | int gk20a_gmmu_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags, |
547 | size_t size, struct mem_desc *mem); | 547 | size_t size, struct nvgpu_mem *mem); |
548 | 548 | ||
549 | int gk20a_gmmu_alloc_map_sys(struct vm_gk20a *vm, size_t size, | 549 | int gk20a_gmmu_alloc_map_sys(struct vm_gk20a *vm, size_t size, |
550 | struct mem_desc *mem); | 550 | struct nvgpu_mem *mem); |
551 | int gk20a_gmmu_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags, | 551 | int gk20a_gmmu_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags, |
552 | size_t size, struct mem_desc *mem); | 552 | size_t size, struct nvgpu_mem *mem); |
553 | 553 | ||
554 | int gk20a_gmmu_alloc_map_vid(struct vm_gk20a *vm, size_t size, | 554 | int gk20a_gmmu_alloc_map_vid(struct vm_gk20a *vm, size_t size, |
555 | struct mem_desc *mem); | 555 | struct nvgpu_mem *mem); |
556 | int gk20a_gmmu_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags, | 556 | int gk20a_gmmu_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags, |
557 | size_t size, struct mem_desc *mem); | 557 | size_t size, struct nvgpu_mem *mem); |
558 | 558 | ||
559 | void gk20a_gmmu_unmap_free(struct vm_gk20a *vm, struct mem_desc *mem); | 559 | void gk20a_gmmu_unmap_free(struct vm_gk20a *vm, struct nvgpu_mem *mem); |
560 | 560 | ||
561 | int gk20a_gmmu_alloc(struct gk20a *g, size_t size, struct mem_desc *mem); | 561 | int gk20a_gmmu_alloc(struct gk20a *g, size_t size, struct nvgpu_mem *mem); |
562 | int gk20a_gmmu_alloc_flags(struct gk20a *g, unsigned long flags, size_t size, | 562 | int gk20a_gmmu_alloc_flags(struct gk20a *g, unsigned long flags, size_t size, |
563 | struct mem_desc *mem); | 563 | struct nvgpu_mem *mem); |
564 | 564 | ||
565 | int gk20a_gmmu_alloc_sys(struct gk20a *g, size_t size, struct mem_desc *mem); | 565 | int gk20a_gmmu_alloc_sys(struct gk20a *g, size_t size, struct nvgpu_mem *mem); |
566 | int gk20a_gmmu_alloc_flags_sys(struct gk20a *g, unsigned long flags, | 566 | int gk20a_gmmu_alloc_flags_sys(struct gk20a *g, unsigned long flags, |
567 | size_t size, struct mem_desc *mem); | 567 | size_t size, struct nvgpu_mem *mem); |
568 | 568 | ||
569 | int gk20a_gmmu_alloc_vid(struct gk20a *g, size_t size, struct mem_desc *mem); | 569 | int gk20a_gmmu_alloc_vid(struct gk20a *g, size_t size, struct nvgpu_mem *mem); |
570 | int gk20a_gmmu_alloc_flags_vid(struct gk20a *g, unsigned long flags, | 570 | int gk20a_gmmu_alloc_flags_vid(struct gk20a *g, unsigned long flags, |
571 | size_t size, struct mem_desc *mem); | 571 | size_t size, struct nvgpu_mem *mem); |
572 | int gk20a_gmmu_alloc_flags_vid_at(struct gk20a *g, unsigned long flags, | 572 | int gk20a_gmmu_alloc_flags_vid_at(struct gk20a *g, unsigned long flags, |
573 | size_t size, struct mem_desc *mem, dma_addr_t at); | 573 | size_t size, struct nvgpu_mem *mem, dma_addr_t at); |
574 | 574 | ||
575 | void gk20a_gmmu_free(struct gk20a *g, struct mem_desc *mem); | 575 | void gk20a_gmmu_free(struct gk20a *g, struct nvgpu_mem *mem); |
576 | 576 | ||
577 | static inline phys_addr_t gk20a_mem_phys(struct mem_desc *mem) | 577 | static inline phys_addr_t gk20a_mem_phys(struct nvgpu_mem *mem) |
578 | { | 578 | { |
579 | /* FIXME: the sgt/sgl may get null if this is accessed e.g. in an isr | 579 | /* FIXME: the sgt/sgl may get null if this is accessed e.g. in an isr |
580 | * during channel deletion - attempt to fix at least null derefs */ | 580 | * during channel deletion - attempt to fix at least null derefs */ |
@@ -591,7 +591,7 @@ static inline phys_addr_t gk20a_mem_phys(struct mem_desc *mem) | |||
591 | 591 | ||
592 | u32 __nvgpu_aperture_mask(struct gk20a *g, enum nvgpu_aperture aperture, | 592 | u32 __nvgpu_aperture_mask(struct gk20a *g, enum nvgpu_aperture aperture, |
593 | u32 sysmem_mask, u32 vidmem_mask); | 593 | u32 sysmem_mask, u32 vidmem_mask); |
594 | u32 nvgpu_aperture_mask(struct gk20a *g, struct mem_desc *mem, | 594 | u32 nvgpu_aperture_mask(struct gk20a *g, struct nvgpu_mem *mem, |
595 | u32 sysmem_mask, u32 vidmem_mask); | 595 | u32 sysmem_mask, u32 vidmem_mask); |
596 | 596 | ||
597 | void gk20a_pde_wr32(struct gk20a *g, struct gk20a_mm_entry *entry, | 597 | void gk20a_pde_wr32(struct gk20a *g, struct gk20a_mm_entry *entry, |
@@ -769,10 +769,10 @@ struct gpu_ops; | |||
769 | void gk20a_init_mm(struct gpu_ops *gops); | 769 | void gk20a_init_mm(struct gpu_ops *gops); |
770 | const struct gk20a_mmu_level *gk20a_mm_get_mmu_levels(struct gk20a *g, | 770 | const struct gk20a_mmu_level *gk20a_mm_get_mmu_levels(struct gk20a *g, |
771 | u32 big_page_size); | 771 | u32 big_page_size); |
772 | void gk20a_mm_init_pdb(struct gk20a *g, struct mem_desc *mem, | 772 | void gk20a_mm_init_pdb(struct gk20a *g, struct nvgpu_mem *mem, |
773 | struct vm_gk20a *vm); | 773 | struct vm_gk20a *vm); |
774 | 774 | ||
775 | void gk20a_remove_vm(struct vm_gk20a *vm, struct mem_desc *inst_block); | 775 | void gk20a_remove_vm(struct vm_gk20a *vm, struct nvgpu_mem *inst_block); |
776 | 776 | ||
777 | int gk20a_big_pages_possible(struct vm_gk20a *vm, u64 base, u64 size); | 777 | int gk20a_big_pages_possible(struct vm_gk20a *vm, u64 base, u64 size); |
778 | 778 | ||