about summary refs log tree commit diff stats
path: root/include/os/posix/bitmap.c
diff options
context:
space:
mode:
authorJoshua Bakita <bakitajoshua@gmail.com>2024-09-25 16:09:09 -0400
committerJoshua Bakita <bakitajoshua@gmail.com>2024-09-25 16:09:09 -0400
commitf347fde22f1297e4f022600d201780d5ead78114 (patch)
tree76be305d6187003a1e0486ff6e91efb1062ae118 /include/os/posix/bitmap.c
parent8340d234d78a7d0f46c11a584de538148b78b7cb (diff)
Delete no-longer-needed nvgpu headers (HEAD, master, jbakita-wip)
The dependency on these was removed in commit 8340d234.
Diffstat (limited to 'include/os/posix/bitmap.c')
-rw-r--r--include/os/posix/bitmap.c227
1 file changed, 0 insertions, 227 deletions
diff --git a/include/os/posix/bitmap.c b/include/os/posix/bitmap.c
deleted file mode 100644
index 99ba62c..0000000
--- a/include/os/posix/bitmap.c
+++ /dev/null
@@ -1,227 +0,0 @@
1/*
2 * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include <stdio.h>
24#include <stdlib.h>
25
26#include <nvgpu/posix/bitops.h>
27#include <nvgpu/posix/atomic.h>
28
29#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
30#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
31
32unsigned long __nvgpu_posix_ffs(unsigned long word)
33{
34 return (__builtin_ffsl(word) - 1) &
35 ((sizeof(unsigned long) * 8UL) - 1UL);
36}
37
38unsigned long __nvgpu_posix_fls(unsigned long word)
39{
40 unsigned long ret;
41
42 if (word == 0UL) {
43 /* __builtin_clzl() below is undefined for 0, so we have
44 * to handle that as a special case.
45 */
46 ret = 0UL;
47 } else {
48 ret = (sizeof(unsigned long) * 8UL) - __builtin_clzl(word);
49 }
50
51 return ret;
52}
53
54static unsigned long __find_next_bit(const unsigned long *addr,
55 unsigned long n,
56 unsigned long start,
57 bool invert)
58{
59 unsigned long idx;
60 unsigned long w;
61 unsigned long start_mask;
62
63 /*
64 * We make a mask we can XOR into the word so that we can invert the
65 * word without requiring a branch. I.e instead of doing:
66 *
67 * w = invert ? ~addr[idx] : addr[idx]
68 *
69 * We can do:
70 *
71 * w = addr[idx] ^= invert_mask
72 *
73 * This saves us a branch every iteration through the loop. Now we can
74 * always just look for 1s.
75 */
76 unsigned long invert_mask = invert ? ~0UL : 0UL;
77
78 if (start >= n)
79 return n;
80
81 start_mask = ~0UL << (start & (BITS_PER_LONG - 1));
82
83 idx = start / BITS_PER_LONG;
84 w = (addr[idx] ^ invert_mask) & start_mask;
85
86 start = round_up(start, BITS_PER_LONG);
87
88 /*
89 * Find the first non-zero word taking into account start and
90 * invert.
91 */
92 while (!w) {
93 idx++;
94 start += BITS_PER_LONG;
95
96 w = addr[idx] ^ invert_mask;
97 }
98
99 return min(n, ffs(w) + idx * BITS_PER_LONG);
100}
101
102unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
103{
104 return __find_next_bit(addr, size, 0, false);
105}
106
107unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
108{
109 return __find_next_bit(addr, size, 0, true);
110}
111
112unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
113 unsigned long offset)
114{
115 return __find_next_bit(addr, size, offset, false);
116}
117
118static unsigned long find_next_zero_bit(const unsigned long *addr,
119 unsigned long size,
120 unsigned long offset)
121{
122 return __find_next_bit(addr, size, offset, true);
123}
124
125void bitmap_set(unsigned long *map, unsigned int start, int len)
126{
127 unsigned int end = start + len;
128
129 /*
130 * Super slow naive implementation. But speed isn't what matters here.
131 */
132 while (start < end)
133 set_bit(start++, map);
134}
135
136void bitmap_clear(unsigned long *map, unsigned int start, int len)
137{
138 unsigned int end = start + len;
139
140 while (start < end)
141 clear_bit(start++, map);
142}
143
144/*
145 * This is essentially a find-first-fit allocator: this searches a bitmap for
146 * the first space that is large enough to satisfy the requested size of bits.
147 * That means that this is not a vary smart allocator. But it is fast relative
148 * to an allocator that goes looking for an optimal location.
149 */
150unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
151 unsigned long size,
152 unsigned long start,
153 unsigned int nr,
154 unsigned long align_mask,
155 unsigned long align_offset)
156{
157 unsigned long offs;
158
159 while (start + nr <= size) {
160 start = find_next_zero_bit(map, size, start);
161
162 start = ALIGN_MASK(start + align_offset, align_mask) -
163 align_offset;
164
165 /*
166 * Not enough space left to satisfy the requested area.
167 */
168 if ((start + nr) > size)
169 return size;
170
171 offs = find_next_bit(map, size, start);
172
173 if ((offs - start) >= nr)
174 return start;
175
176 start = offs + 1;
177 }
178
179 return size;
180}
181
182unsigned long bitmap_find_next_zero_area(unsigned long *map,
183 unsigned long size,
184 unsigned long start,
185 unsigned int nr,
186 unsigned long align_mask)
187{
188 return bitmap_find_next_zero_area_off(map, size, start, nr,
189 align_mask, 0);
190}
191
192bool test_bit(int nr, const volatile unsigned long *addr)
193{
194 return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
195}
196
197bool test_and_set_bit(int nr, volatile unsigned long *addr)
198{
199 unsigned long mask = BIT_MASK(nr);
200 volatile unsigned long *p = addr + BIT_WORD(nr);
201
202 return !!(__sync_fetch_and_or(p, mask) & mask);
203}
204
205bool test_and_clear_bit(int nr, volatile unsigned long *addr)
206{
207 unsigned long mask = BIT_MASK(nr);
208 unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
209
210 return !!(__sync_fetch_and_and(p, ~mask) & mask);
211}
212
213void set_bit(int nr, volatile unsigned long *addr)
214{
215 unsigned long mask = BIT_MASK(nr);
216 unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
217
218 __atomic_or(p, mask);
219}
220
221void clear_bit(int nr, volatile unsigned long *addr)
222{
223 unsigned long mask = BIT_MASK(nr);
224 unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
225
226 __atomic_and(p, ~mask);
227}