aboutsummaryrefslogtreecommitdiffstats
path: root/arch/sparc/lib/bitext.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/sparc/lib/bitext.c')
-rw-r--r--arch/sparc/lib/bitext.c132
1 file changed, 132 insertions(+), 0 deletions(-)
diff --git a/arch/sparc/lib/bitext.c b/arch/sparc/lib/bitext.c
new file mode 100644
index 000000000000..94b05e8c906c
--- /dev/null
+++ b/arch/sparc/lib/bitext.c
@@ -0,0 +1,132 @@
1/*
2 * bitext.c: kernel little helper (of bit shuffling variety).
3 *
4 * Copyright (C) 2002 Pete Zaitcev <zaitcev@yahoo.com>
5 *
6 * The algorithm to search a zero bit string is geared towards its application.
7 * We expect a couple of fixed sizes of requests, so a rotating counter, reset
8 * by align size, should provide fast enough search while maintaining low
9 * fragmentation.
10 */
11
#include <linux/smp_lock.h>
#include <linux/bitmap.h>	/* bitmap_zero() */
#include <linux/bitops.h>
#include <linux/string.h>	/* memset() */

#include <asm/bitext.h>
16
/**
 * bit_map_string_get - find and set a bit string in bit map.
 * @t: the bit map.
 * @len: requested string length
 * @align: requested alignment (must be a power of two); when @t uses
 *         page coloring (@t->num_colors != 0) this argument is instead
 *         the requested color, and the alignment becomes num_colors.
 *
 * Returns offset in the map or -1 if out of space.
 *
 * Not safe to call from an interrupt (uses spin_lock).
 */
int bit_map_string_get(struct bit_map *t, int len, int align)
{
	int offset, count;	/* siamese twins */
	int off_new;
	int align1;
	int i, color;

	if (t->num_colors) {
		/* align is overloaded to be the page color */
		color = align;
		align = t->num_colors;
	} else {
		color = 0;
		if (align == 0)
			align = 1;	/* degenerate request: no alignment */
	}
	align1 = align - 1;
	/* Power-of-two check: a power of two shares no bits with its
	 * predecessor. Bogus requests are a caller bug, hence BUG(). */
	if ((align & align1) != 0)
		BUG();
	if (align < 0 || align >= t->size)
		BUG();
	if (len <= 0 || len > t->size)
		BUG();
	/* The color is an offset within one alignment period. */
	color &= align1;

	spin_lock(&t->lock);
	/* Start heuristic: small requests scan from the lowest known free
	 * bit; requests at least as big as the previous one resume (rounded
	 * down to alignment) where the last allocation left off, which keeps
	 * the rotating counter behavior described in the file header. */
	if (len < t->last_size)
		offset = t->first_free;
	else
		offset = t->last_off & ~align1;
	/* count tracks the total number of bits stepped over; once it shows
	 * we have swept more than the whole map, the map is full. */
	count = 0;
	for (;;) {
		off_new = find_next_zero_bit(t->map, t->size, offset);
		/* Round up to the alignment, then add the color offset. */
		off_new = ((off_new + align1) & ~align1) + color;
		count += off_new - offset;
		offset = off_new;
		if (offset >= t->size)
			offset = 0;	/* wrap the rotating scan */
		if (count + len > t->size) {
			/* Swept the entire map without finding room. */
			spin_unlock(&t->lock);
/* P3: debug aid left in on purpose -- exhaustion should be loud. */
			printk(KERN_ERR
		"bitmap out: size %d used %d off %d len %d align %d count %d\n",
			t->size, t->used, offset, len, align, count);
			return -1;
		}

		/* A string may not cross the end of the map; charge the
		 * skipped tail to count and restart from the beginning. */
		if (offset + len > t->size) {
			count += t->size - offset;
			offset = 0;
			continue;
		}

		/* Probe the candidate window bit by bit. */
		i = 0;
		while (test_bit(offset + i, t->map) == 0) {
			i++;
			if (i == len) {
				/* Whole window free: claim it. */
				for (i = 0; i < len; i++)
					__set_bit(offset + i, t->map);
				/* Maintain the low-water mark used by
				 * small requests above. */
				if (offset == t->first_free)
					t->first_free = find_next_zero_bit
						(t->map, t->size,
						 t->first_free + len);
				/* Advance the rotating counter, wrapping. */
				if ((t->last_off = offset + len) >= t->size)
					t->last_off = 0;
				t->used += len;
				t->last_size = len;
				spin_unlock(&t->lock);
				return offset;
			}
		}
		/* Hit a set bit at offset + i: skip past it and keep going. */
		count += i + 1;
		if ((offset += i + 1) >= t->size)
			offset = 0;
	}
}
102
103void bit_map_clear(struct bit_map *t, int offset, int len)
104{
105 int i;
106
107 if (t->used < len)
108 BUG(); /* Much too late to do any good, but alas... */
109 spin_lock(&t->lock);
110 for (i = 0; i < len; i++) {
111 if (test_bit(offset + i, t->map) == 0)
112 BUG();
113 __clear_bit(offset + i, t->map);
114 }
115 if (offset < t->first_free)
116 t->first_free = offset;
117 t->used -= len;
118 spin_unlock(&t->lock);
119}
120
121void bit_map_init(struct bit_map *t, unsigned long *map, int size)
122{
123
124 if ((size & 07) != 0)
125 BUG();
126 memset(map, 0, size>>3);
127
128 memset(t, 0, sizeof *t);
129 spin_lock_init(&t->lock);
130 t->map = map;
131 t->size = size;
132}