Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c')
-rw-r--r--  drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c | 38 +++++++++++++++++++++++++-------------
1 file changed, 25 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c b/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c
index c749c729..a0b9013f 100644
--- a/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c
@@ -1,7 +1,7 @@
 /*
  * gk20a allocator
  *
- * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -29,40 +29,45 @@
 
 u64 nvgpu_alloc_length(struct nvgpu_allocator *a)
 {
-	if (a->ops->length)
+	if (a->ops->length) {
 		return a->ops->length(a);
+	}
 
 	return 0;
 }
 
 u64 nvgpu_alloc_base(struct nvgpu_allocator *a)
 {
-	if (a->ops->base)
+	if (a->ops->base) {
 		return a->ops->base(a);
+	}
 
 	return 0;
 }
 
 u64 nvgpu_alloc_initialized(struct nvgpu_allocator *a)
 {
-	if (!a->ops || !a->ops->inited)
+	if (!a->ops || !a->ops->inited) {
 		return 0;
+	}
 
 	return a->ops->inited(a);
 }
 
 u64 nvgpu_alloc_end(struct nvgpu_allocator *a)
 {
-	if (a->ops->end)
+	if (a->ops->end) {
 		return a->ops->end(a);
+	}
 
 	return 0;
 }
 
 u64 nvgpu_alloc_space(struct nvgpu_allocator *a)
 {
-	if (a->ops->space)
+	if (a->ops->space) {
 		return a->ops->space(a);
+	}
 
 	return 0;
 }
@@ -80,8 +85,9 @@ void nvgpu_free(struct nvgpu_allocator *a, u64 addr)
 u64 nvgpu_alloc_fixed(struct nvgpu_allocator *a, u64 base, u64 len,
 		      u32 page_size)
 {
-	if (a->ops->alloc_fixed)
+	if (a->ops->alloc_fixed) {
 		return a->ops->alloc_fixed(a, base, len, page_size);
+	}
 
 	return 0;
 }
@@ -93,15 +99,17 @@ void nvgpu_free_fixed(struct nvgpu_allocator *a, u64 base, u64 len)
 	 * nothing. The alternative would be to fall back on the regular
 	 * free but that may be harmful in unexpected ways.
 	 */
-	if (a->ops->free_fixed)
+	if (a->ops->free_fixed) {
 		a->ops->free_fixed(a, base, len);
+	}
 }
 
 int nvgpu_alloc_reserve_carveout(struct nvgpu_allocator *a,
 				 struct nvgpu_alloc_carveout *co)
 {
-	if (a->ops->reserve_carveout)
+	if (a->ops->reserve_carveout) {
 		return a->ops->reserve_carveout(a, co);
+	}
 
 	return -ENODEV;
 }
@@ -109,8 +117,9 @@ int nvgpu_alloc_reserve_carveout(struct nvgpu_allocator *a,
 void nvgpu_alloc_release_carveout(struct nvgpu_allocator *a,
 				  struct nvgpu_alloc_carveout *co)
 {
-	if (a->ops->release_carveout)
+	if (a->ops->release_carveout) {
 		a->ops->release_carveout(a, co);
+	}
 }
 
 void nvgpu_alloc_destroy(struct nvgpu_allocator *a)
@@ -137,19 +146,22 @@ int __nvgpu_alloc_common_init(struct nvgpu_allocator *a, struct gk20a *g,
 {
 	int err;
 
-	if (!ops)
+	if (!ops) {
 		return -EINVAL;
+	}
 
 	/*
 	 * This is the bare minimum operations required for a sensible
 	 * allocator.
 	 */
-	if (!ops->alloc || !ops->free || !ops->fini)
+	if (!ops->alloc || !ops->free || !ops->fini) {
 		return -EINVAL;
+	}
 
 	err = nvgpu_mutex_init(&a->lock);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	a->g = g;
 	a->ops = ops;
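
Every wrapper touched by this patch follows the same dispatch pattern: optional operations in the ops table are NULL-checked before being called (falling back to 0 or -ENODEV), while __nvgpu_alloc_common_init() rejects an ops table that lacks the mandatory alloc/free/fini entries. The standalone sketch below illustrates that pattern under simplified assumptions; it is not the nvgpu code itself, and the names (allocator, allocator_ops, dummy_*) are hypothetical stand-ins rather than real nvgpu identifiers.

/*
 * Sketch of the NULL-checked ops-dispatch pattern used by
 * nvgpu_allocator.c, with simplified stand-in types.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct allocator;

struct allocator_ops {
	/* Mandatory operations; init fails if any is missing. */
	uint64_t (*alloc)(struct allocator *a, uint64_t len);
	void     (*free)(struct allocator *a, uint64_t addr);
	void     (*fini)(struct allocator *a);
	/* Optional operation; may be left NULL by a backend. */
	uint64_t (*length)(struct allocator *a);
};

struct allocator {
	const struct allocator_ops *ops;
};

/* Mirrors nvgpu_alloc_length(): check the optional op, else return 0. */
static uint64_t allocator_length(struct allocator *a)
{
	if (a->ops->length) {
		return a->ops->length(a);
	}

	return 0;
}

/* Mirrors __nvgpu_alloc_common_init(): validate the mandatory ops. */
static int allocator_common_init(struct allocator *a,
				 const struct allocator_ops *ops)
{
	if (!ops) {
		return -EINVAL;
	}

	if (!ops->alloc || !ops->free || !ops->fini) {
		return -EINVAL;
	}

	a->ops = ops;
	return 0;
}

/* Trivial backend that provides only the mandatory operations. */
static uint64_t dummy_alloc(struct allocator *a, uint64_t len)
{
	(void)a;
	return len ? 0x1000 : 0;
}

static void dummy_free(struct allocator *a, uint64_t addr)
{
	(void)a;
	(void)addr;
}

static void dummy_fini(struct allocator *a)
{
	(void)a;
}

static const struct allocator_ops dummy_ops = {
	.alloc = dummy_alloc,
	.free  = dummy_free,
	.fini  = dummy_fini,
	/* .length left NULL: the wrapper returns 0 instead of crashing. */
};

int main(void)
{
	struct allocator a;

	if (allocator_common_init(&a, &dummy_ops) != 0) {
		return 1;
	}

	/* The optional op is absent, so this prints 0. */
	printf("length = %llu\n", (unsigned long long)allocator_length(&a));

	a.ops->fini(&a);
	return 0;
}

Keeping the NULL checks in the wrappers means a backend only implements the operations it supports, and callers never need to know which backend sits behind the interface.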