/*
* include/linux/nvmap.h
*
* structure declarations for nvmem and nvmap user-space ioctls
*
* Copyright (c) 2009-2019, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#ifndef _LINUX_NVMAP_H
#define _LINUX_NVMAP_H
#include <linux/rbtree.h>
#include <linux/file.h>
#include <linux/dma-buf.h>
#include <linux/device.h>
#include <uapi/linux/nvmap.h>
/* Heap selection bits: IOVMM occupies bit 30; carveout heaps use the bits below it. */
#define NVMAP_HEAP_IOVMM (1ul<<30)
/* common carveout heaps */
#define NVMAP_HEAP_CARVEOUT_IRAM (1ul<<29)
#define NVMAP_HEAP_CARVEOUT_VPR (1ul<<28)
#define NVMAP_HEAP_CARVEOUT_TSEC (1ul<<27)
#define NVMAP_HEAP_CARVEOUT_VIDMEM (1ul<<26)
#define NVMAP_HEAP_CARVEOUT_IVM (1ul<<1)
#define NVMAP_HEAP_CARVEOUT_GENERIC (1ul<<0)
/* every bit below NVMAP_HEAP_IOVMM, i.e. all carveout heaps */
#define NVMAP_HEAP_CARVEOUT_MASK (NVMAP_HEAP_IOVMM - 1)
/* allocation flags */
/* bits [1:0] encode the cache mode; NVMAP_HANDLE_CACHE_FLAG masks the field */
#define NVMAP_HANDLE_UNCACHEABLE (0x0ul << 0)
#define NVMAP_HANDLE_WRITE_COMBINE (0x1ul << 0)
#define NVMAP_HANDLE_INNER_CACHEABLE (0x2ul << 0)
#define NVMAP_HANDLE_CACHEABLE (0x3ul << 0)
#define NVMAP_HANDLE_CACHE_FLAG (0x3ul << 0)
/* bits 2+ are independent single-bit attributes of a handle */
#define NVMAP_HANDLE_SECURE (0x1ul << 2)
#define NVMAP_HANDLE_KIND_SPECIFIED (0x1ul << 3)
#define NVMAP_HANDLE_COMPR_SPECIFIED (0x1ul << 4)
#define NVMAP_HANDLE_ZEROED_PAGES (0x1ul << 5)
#define NVMAP_HANDLE_PHYS_CONTIG (0x1ul << 6)
#define NVMAP_HANDLE_CACHE_SYNC (0x1ul << 7)
#define NVMAP_HANDLE_CACHE_SYNC_AT_RESERVE (0x1ul << 8)
#define NVMAP_HANDLE_RO (0x1ul << 9)
/*
 * nvmap_page_pool_get_unused_pages - number of pages currently sitting
 * unused in nvmap's page pools (presumably cached for fast reallocation;
 * NOTE(review): confirm against the pool implementation).
 * Stubbed to return 0 when page pools are compiled out.
 */
#ifdef CONFIG_NVMAP_PAGE_POOLS
ulong nvmap_page_pool_get_unused_pages(void);
#else
static inline ulong nvmap_page_pool_get_unused_pages(void)
{
return 0;
}
#endif
ulong nvmap_iovmm_get_used_pages(void);
int nvmap_register_vidmem_carveout(struct device *dma_dev,
phys_addr_t base, size_t size);
/* The following two functions are to help enable DRAM
 * overprovisioning via paging. These functions allow the
 * physical frames backing an nvmap dma_buf to be freed
 * (for example, after their contents have been saved
 * elsewhere by paging logic), and to be reallocated
 * (such as when other code is ready to repopulate them).
 * Both functions preserve any open nvmap handles.
 */
int nvmap_dealloc_dmabuf(struct dma_buf *dmabuf);
int nvmap_realloc_dmabuf(struct dma_buf *dmabuf);
/* Some drivers (such as nvgpu) store parallel structures
* for each dmabuf to track internal state. To allow these
* drivers to quickly access their state from a *dmabuf or
* FD, we allow them access to a per-dmabuf list_head.
*/
struct list_head* nvmap_get_priv_list(struct dma_buf *dmabuf);
/*
 * A heap can be mapped to memory other than DRAM.
 * The HW that controls that memory can be power gated/ungated
 * based upon the clients using the memory:
 * if no client/alloc happens from the memory, the HW needs
 * to be power gated. Similarly it should be power ungated if
 * an alloc happens from the memory.
 * int (*busy)(void) - trigger runtime power ungate
 * int (*idle)(void) - trigger runtime power gate
 */
struct nvmap_pm_ops {
int (*busy)(void); /* trigger runtime power ungate */
int (*idle)(void); /* trigger runtime power gate */
};
/*
 * Describes one platform carveout region handed to nvmap.
 * Fields without a visible consumer here are annotated with review
 * assumptions; confirm against the nvmap driver before relying on them.
 */
struct nvmap_platform_carveout {
const char *name; /* human-readable carveout name */
unsigned int usage_mask; /* NOTE(review): presumably NVMAP_HEAP_* bits — confirm */
phys_addr_t base; /* physical base address of the region */
size_t size; /* region size in bytes */
struct device *cma_dev;
bool resize;
struct device *dma_dev;
struct device dev;
struct dma_declare_info *dma_info;
bool is_ivm; /* NOTE(review): presumably inter-VM carveout, cf. NVMAP_HEAP_CARVEOUT_IVM */
int peer;
int vmid;
int can_alloc;
bool enable_static_dma_map;
bool disable_dynamic_dma_map;
bool no_cpu_access; /* carveout can't be accessed from cpu at all */
bool init_done; /* FIXME: remove once all carveouts use reserved-memory */
struct nvmap_pm_ops pm_ops; /* runtime power gate/ungate hooks for this carveout */
};
/* Platform-supplied set of carveout regions for the nvmap driver. */
struct nvmap_platform_data {
const struct nvmap_platform_carveout *carveouts; /* array of carveout descriptors */
unsigned int nr_carveouts; /* number of entries in carveouts[] */
};
#endif /* _LINUX_NVMAP_H */