diff options
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c')
-rw-r--r-- | drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c | 162 |
1 files changed, 162 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c b/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c new file mode 100644 index 00000000..7a4e7705 --- /dev/null +++ b/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c | |||
@@ -0,0 +1,162 @@ | |||
1 | /* | ||
2 | * gk20a allocator | ||
3 | * | ||
4 | * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice shall be included in | ||
14 | * all copies or substantial portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
21 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
22 | * DEALINGS IN THE SOFTWARE. | ||
23 | */ | ||
24 | |||
25 | #include <nvgpu/allocator.h> | ||
26 | |||
27 | #include "gk20a/gk20a.h" | ||
28 | #include "gk20a/mm_gk20a.h" | ||
29 | |||
30 | u64 nvgpu_alloc_length(struct nvgpu_allocator *a) | ||
31 | { | ||
32 | if (a->ops->length) | ||
33 | return a->ops->length(a); | ||
34 | |||
35 | return 0; | ||
36 | } | ||
37 | |||
38 | u64 nvgpu_alloc_base(struct nvgpu_allocator *a) | ||
39 | { | ||
40 | if (a->ops->base) | ||
41 | return a->ops->base(a); | ||
42 | |||
43 | return 0; | ||
44 | } | ||
45 | |||
46 | u64 nvgpu_alloc_initialized(struct nvgpu_allocator *a) | ||
47 | { | ||
48 | if (!a->ops || !a->ops->inited) | ||
49 | return 0; | ||
50 | |||
51 | return a->ops->inited(a); | ||
52 | } | ||
53 | |||
54 | u64 nvgpu_alloc_end(struct nvgpu_allocator *a) | ||
55 | { | ||
56 | if (a->ops->end) | ||
57 | return a->ops->end(a); | ||
58 | |||
59 | return 0; | ||
60 | } | ||
61 | |||
62 | u64 nvgpu_alloc_space(struct nvgpu_allocator *a) | ||
63 | { | ||
64 | if (a->ops->space) | ||
65 | return a->ops->space(a); | ||
66 | |||
67 | return 0; | ||
68 | } | ||
69 | |||
70 | u64 nvgpu_alloc(struct nvgpu_allocator *a, u64 len) | ||
71 | { | ||
72 | return a->ops->alloc(a, len); | ||
73 | } | ||
74 | |||
75 | void nvgpu_free(struct nvgpu_allocator *a, u64 addr) | ||
76 | { | ||
77 | a->ops->free(a, addr); | ||
78 | } | ||
79 | |||
80 | u64 nvgpu_alloc_fixed(struct nvgpu_allocator *a, u64 base, u64 len, | ||
81 | u32 page_size) | ||
82 | { | ||
83 | if (a->ops->alloc_fixed) | ||
84 | return a->ops->alloc_fixed(a, base, len, page_size); | ||
85 | |||
86 | return 0; | ||
87 | } | ||
88 | |||
89 | void nvgpu_free_fixed(struct nvgpu_allocator *a, u64 base, u64 len) | ||
90 | { | ||
91 | /* | ||
92 | * If this operation is not defined for the allocator then just do | ||
93 | * nothing. The alternative would be to fall back on the regular | ||
94 | * free but that may be harmful in unexpected ways. | ||
95 | */ | ||
96 | if (a->ops->free_fixed) | ||
97 | a->ops->free_fixed(a, base, len); | ||
98 | } | ||
99 | |||
100 | int nvgpu_alloc_reserve_carveout(struct nvgpu_allocator *a, | ||
101 | struct nvgpu_alloc_carveout *co) | ||
102 | { | ||
103 | if (a->ops->reserve_carveout) | ||
104 | return a->ops->reserve_carveout(a, co); | ||
105 | |||
106 | return -ENODEV; | ||
107 | } | ||
108 | |||
109 | void nvgpu_alloc_release_carveout(struct nvgpu_allocator *a, | ||
110 | struct nvgpu_alloc_carveout *co) | ||
111 | { | ||
112 | if (a->ops->release_carveout) | ||
113 | a->ops->release_carveout(a, co); | ||
114 | } | ||
115 | |||
/*
 * Tear down allocator @a: run the allocator-specific fini op (mandatory,
 * enforced by __nvgpu_alloc_common_init()), destroy the lock created
 * there, then zero the whole struct so any stale use of @a afterwards
 * fails loudly rather than operating on dead state.
 *
 * NOTE(review): a->ops is dereferenced unconditionally — callers must
 * only pass an allocator that was successfully initialized; verify no
 * caller can reach this with a zeroed/failed allocator.
 */
void nvgpu_alloc_destroy(struct nvgpu_allocator *a)
{
	a->ops->fini(a);
	nvgpu_mutex_destroy(&a->lock);
	memset(a, 0, sizeof(*a));
}
122 | |||
#ifdef __KERNEL__
/*
 * Dump allocator statistics for @__a into the seq_file @s. @lock is
 * passed through to the allocator's print_stats op.
 *
 * Kernel-only: seq_file does not exist outside the Linux kernel build,
 * hence the __KERNEL__ guard. print_stats is called unconditionally —
 * presumably every in-kernel allocator implementation provides it;
 * TODO(review) confirm, since it is not in the mandatory-ops check in
 * __nvgpu_alloc_common_init().
 */
void nvgpu_alloc_print_stats(struct nvgpu_allocator *__a,
			     struct seq_file *s, int lock)
{
	__a->ops->print_stats(__a, s, lock);
}
#endif
130 | |||
131 | /* | ||
132 | * Handle the common init stuff for a nvgpu_allocator. | ||
133 | */ | ||
134 | int __nvgpu_alloc_common_init(struct nvgpu_allocator *a, struct gk20a *g, | ||
135 | const char *name, void *priv, bool dbg, | ||
136 | const struct nvgpu_allocator_ops *ops) | ||
137 | { | ||
138 | int err; | ||
139 | |||
140 | if (!ops) | ||
141 | return -EINVAL; | ||
142 | |||
143 | /* | ||
144 | * This is the bare minimum operations required for a sensible | ||
145 | * allocator. | ||
146 | */ | ||
147 | if (!ops->alloc || !ops->free || !ops->fini) | ||
148 | return -EINVAL; | ||
149 | |||
150 | err = nvgpu_mutex_init(&a->lock); | ||
151 | if (err) | ||
152 | return err; | ||
153 | |||
154 | a->g = g; | ||
155 | a->ops = ops; | ||
156 | a->priv = priv; | ||
157 | a->debug = dbg; | ||
158 | |||
159 | strlcpy(a->name, name, sizeof(a->name)); | ||
160 | |||
161 | return 0; | ||
162 | } | ||