diff options
Diffstat (limited to 'drivers/gpu/nvgpu/common/posix/bitmap.c')
-rw-r--r-- | drivers/gpu/nvgpu/common/posix/bitmap.c | 216 |
1 files changed, 0 insertions, 216 deletions
diff --git a/drivers/gpu/nvgpu/common/posix/bitmap.c b/drivers/gpu/nvgpu/common/posix/bitmap.c deleted file mode 100644 index f25f6e64..00000000 --- a/drivers/gpu/nvgpu/common/posix/bitmap.c +++ /dev/null | |||
@@ -1,216 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
20 | * DEALINGS IN THE SOFTWARE. | ||
21 | */ | ||
22 | |||
23 | #include <stdio.h> | ||
24 | #include <stdlib.h> | ||
25 | |||
26 | #include <nvgpu/posix/bitops.h> | ||
27 | #include <nvgpu/posix/atomic.h> | ||
28 | |||
/*
 * Linux-style bit helpers. BITS_PER_LONG comes from <nvgpu/posix/bitops.h>.
 */
/* Mask selecting bit @nr within its containing word. */
#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
/* Index of the unsigned long word that holds bit @nr. */
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
31 | |||
/*
 * Return the 0-based index of the least-significant set bit in @word.
 *
 * For @word == 0, __builtin_ffsl() returns 0, so the (ffsl - 1) result of
 * -1 is masked down to the word-width - 1; callers should not rely on the
 * zero-input value.
 */
unsigned long __nvgpu_posix_ffs(unsigned long word)
{
	const unsigned long index_mask = (sizeof(unsigned long) * 8UL) - 1UL;
	long bit = (long)__builtin_ffsl((long)word) - 1L;

	return ((unsigned long)bit) & index_mask;
}
37 | |||
/*
 * Return the 0-based index of the most-significant set bit in @word.
 *
 * NOTE(review): __builtin_clzl(0) is undefined behavior, so @word must be
 * non-zero — confirm callers never pass 0.
 */
unsigned long __nvgpu_posix_fls(unsigned long word)
{
	const unsigned long msb_index = (sizeof(unsigned long) * 8UL) - 1UL;

	return msb_index - (unsigned long)__builtin_clzl(word);
}
42 | |||
/*
 * Scan @addr for the first set bit (or, with @invert, the first clear bit)
 * in the range [@start, @n). Returns the bit's index, or @n when no such
 * bit exists.
 *
 * Bug fix: the word-scan loop previously had no bound check, so a bitmap
 * with no matching bit past @start caused a read beyond the end of @addr.
 * The loop now bails out with @n once the next word lies past the bitmap.
 */
static unsigned long __find_next_bit(const unsigned long *addr,
				     unsigned long n,
				     unsigned long start,
				     bool invert)
{
	const unsigned long bits_per_word = sizeof(unsigned long) * 8UL;
	unsigned long idx;
	unsigned long w;
	unsigned long start_mask;
	unsigned long bit;
	unsigned long pos;

	/*
	 * We make a mask we can XOR into the word so that we can invert the
	 * word without requiring a branch. I.e instead of doing:
	 *
	 *   w = invert ? ~addr[idx] : addr[idx]
	 *
	 * We can do:
	 *
	 *   w = addr[idx] ^ invert_mask
	 *
	 * This saves us a branch every iteration through the loop. Now we can
	 * always just look for 1s.
	 */
	unsigned long invert_mask = invert ? ~0UL : 0UL;

	if (start >= n)
		return n;

	/* Discard bits below @start within the first word. */
	start_mask = ~0UL << (start & (bits_per_word - 1UL));

	idx = start / bits_per_word;
	w = (addr[idx] ^ invert_mask) & start_mask;

	/*
	 * Find the first non-zero word taking into account start and
	 * invert.
	 */
	while (w == 0UL) {
		idx++;

		/*
		 * No matching bit before the end of the bitmap: stop instead
		 * of reading past the end of @addr.
		 */
		if ((idx * bits_per_word) >= n)
			return n;

		w = addr[idx] ^ invert_mask;
	}

	/* 0-based index of the found bit, clamped to @n. */
	bit = (unsigned long)(__builtin_ffsl((long)w) - 1);
	pos = bit + (idx * bits_per_word);

	return (pos < n) ? pos : n;
}
90 | |||
/*
 * Return the index of the first set bit in the @size-bit bitmap @addr,
 * or @size if no bit is set.
 */
unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
{
	const unsigned long begin = 0UL;

	return __find_next_bit(addr, size, begin, false);
}
95 | |||
/*
 * Return the index of the first clear bit in the @size-bit bitmap @addr,
 * or @size if every bit is set.
 */
unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
	const unsigned long begin = 0UL;

	return __find_next_bit(addr, size, begin, true);
}
100 | |||
/*
 * Return the index of the first set bit at or after @offset in the
 * @size-bit bitmap @addr, or @size if there is none.
 */
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
			    unsigned long offset)
{
	const bool want_zero = false;

	return __find_next_bit(addr, size, offset, want_zero);
}
106 | |||
/*
 * Return the index of the first clear bit at or after @offset in the
 * @size-bit bitmap @addr, or @size if there is none.
 */
static unsigned long find_next_zero_bit(const unsigned long *addr,
					unsigned long size,
					unsigned long offset)
{
	const bool want_zero = true;

	return __find_next_bit(addr, size, offset, want_zero);
}
113 | |||
/*
 * Set @len consecutive bits in @map, starting at bit @start.
 *
 * Super slow naive one-bit-at-a-time loop via set_bit(); speed is
 * deliberately not a concern here.
 */
void bitmap_set(unsigned long *map, unsigned int start, int len)
{
	const unsigned int end = start + (unsigned int)len;
	unsigned int bit;

	for (bit = start; bit < end; bit++)
		set_bit((int)bit, map);
}
124 | |||
/*
 * Clear @len consecutive bits in @map, starting at bit @start.
 *
 * Naive one-bit-at-a-time loop via clear_bit(), mirroring bitmap_set().
 */
void bitmap_clear(unsigned long *map, unsigned int start, int len)
{
	const unsigned int end = start + (unsigned int)len;
	unsigned int bit;

	for (bit = start; bit < end; bit++)
		clear_bit((int)bit, map);
}
132 | |||
/*
 * First-fit search for a run of @nr clear bits in @map.
 *
 * Starting at @start, find the lowest region of @nr consecutive zero bits
 * whose (offset-adjusted) base satisfies @align_mask. Returns the index of
 * the region's first bit, or @size when no such region exists.
 *
 * This is not a very smart allocator, but it is fast relative to one that
 * goes looking for an optimal location.
 */
unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
					     unsigned long size,
					     unsigned long start,
					     unsigned int nr,
					     unsigned long align_mask,
					     unsigned long align_offset)
{
	unsigned long next_set;

	while ((start + nr) <= size) {
		/* Candidate base: next clear bit, pushed up to alignment. */
		start = find_next_zero_bit(map, size, start);
		start = ALIGN_MASK(start + align_offset, align_mask) -
			align_offset;

		/* Alignment may have pushed the region past the end. */
		if ((start + nr) > size)
			return size;

		/* Region fits if the next set bit is at least @nr away. */
		next_set = find_next_bit(map, size, start);
		if ((next_set - start) >= nr)
			return start;

		/* Too small: resume scanning just past the blocking bit. */
		start = next_set + 1UL;
	}

	return size;
}
170 | |||
/*
 * Convenience wrapper around bitmap_find_next_zero_area_off() for callers
 * that need no alignment offset.
 */
unsigned long bitmap_find_next_zero_area(unsigned long *map,
					 unsigned long size,
					 unsigned long start,
					 unsigned int nr,
					 unsigned long align_mask)
{
	const unsigned long no_offset = 0UL;

	return bitmap_find_next_zero_area_off(map, size, start, nr,
					      align_mask, no_offset);
}
180 | |||
/*
 * Return true if bit @nr is set in @addr. Plain (non-atomic) read.
 */
bool test_bit(int nr, const volatile unsigned long *addr)
{
	return (addr[BIT_WORD(nr)] & BIT_MASK(nr)) != 0UL;
}
185 | |||
/*
 * Atomically set bit @nr in @addr and return its previous value.
 */
bool test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long bit = BIT_MASK(nr);
	volatile unsigned long *word = addr + BIT_WORD(nr);
	unsigned long old = __sync_fetch_and_or(word, bit);

	return (old & bit) != 0UL;
}
193 | |||
/*
 * Atomically clear bit @nr in @addr and return its previous value.
 *
 * Fix: operate through a volatile-qualified pointer instead of casting the
 * volatile qualifier away, matching test_and_set_bit() in this file.
 */
bool test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	volatile unsigned long *p = addr + BIT_WORD(nr);

	return !!(__sync_fetch_and_and(p, ~mask) & mask);
}
201 | |||
/*
 * Atomically set bit @nr in @addr.
 *
 * Fix: call the GCC __sync_fetch_and_or() builtin directly, matching
 * test_and_set_bit() in this file, rather than going through the
 * __atomic_or() wrapper — names in the __atomic_* namespace are reserved
 * for compiler builtins. Also keep the volatile qualifier instead of
 * casting it away. (NOTE(review): assumes __atomic_or() was a plain
 * fetch-or wrapper — confirm against <nvgpu/posix/atomic.h>.)
 */
void set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	volatile unsigned long *p = addr + BIT_WORD(nr);

	(void)__sync_fetch_and_or(p, mask);
}
209 | |||
/*
 * Atomically clear bit @nr in @addr.
 *
 * Fix: call the GCC __sync_fetch_and_and() builtin directly, matching
 * test_and_clear_bit() in this file, rather than going through the
 * __atomic_and() wrapper — names in the __atomic_* namespace are reserved
 * for compiler builtins. Also keep the volatile qualifier instead of
 * casting it away. (NOTE(review): assumes __atomic_and() was a plain
 * fetch-and wrapper — confirm against <nvgpu/posix/atomic.h>.)
 */
void clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	volatile unsigned long *p = addr + BIT_WORD(nr);

	(void)__sync_fetch_and_and(p, ~mask);
}