Diffstat (limited to 'net/tipc/ref.c')
 -rw-r--r--  net/tipc/ref.c | 211
 1 file changed, 155 insertions(+), 56 deletions(-)
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index c38744c96ed1..89cbab24d08f 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -2,7 +2,7 @@
  * net/tipc/ref.c: TIPC object registry code
  *
  * Copyright (c) 1991-2006, Ericsson AB
- * Copyright (c) 2004-2005, Wind River Systems
+ * Copyright (c) 2004-2007, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -36,32 +36,60 @@
 
 #include "core.h"
 #include "ref.h"
-#include "port.h"
-#include "subscr.h"
-#include "name_distr.h"
-#include "name_table.h"
-#include "config.h"
-#include "discover.h"
-#include "bearer.h"
-#include "node.h"
-#include "bcast.h"
+
+/**
+ * struct reference - TIPC object reference entry
+ * @object: pointer to object associated with reference entry
+ * @lock: spinlock controlling access to object
+ * @ref: reference value for object (combines instance & array index info)
+ */
+
+struct reference {
+	void *object;
+	spinlock_t lock;
+	u32 ref;
+};
+
+/**
+ * struct tipc_ref_table - table of TIPC object reference entries
+ * @entries: pointer to array of reference entries
+ * @capacity: array index of first unusable entry
+ * @init_point: array index of first uninitialized entry
+ * @first_free: array index of first unused object reference entry
+ * @last_free: array index of last unused object reference entry
+ * @index_mask: bitmask for array index portion of reference values
+ * @start_mask: initial value for instance value portion of reference values
+ */
+
+struct ref_table {
+	struct reference *entries;
+	u32 capacity;
+	u32 init_point;
+	u32 first_free;
+	u32 last_free;
+	u32 index_mask;
+	u32 start_mask;
+};
 
 /*
  * Object reference table consists of 2**N entries.
  *
- * A used entry has object ptr != 0, reference == XXXX|own index
- *				     (XXXX changes each time entry is acquired)
- * A free entry has object ptr == 0, reference == YYYY|next free index
- *				     (YYYY is one more than last used XXXX)
+ * State	Object ptr	Reference
+ * -----	----------	---------
+ * In use	non-NULL	XXXX|own index
+ *				(XXXX changes each time entry is acquired)
+ * Free		NULL		YYYY|next free index
+ *				(YYYY is one more than last used XXXX)
+ * Uninitialized NULL		0
  *
- * Free list is initially chained from entry (2**N)-1 to entry 1.
- * Entry 0 is not used to allow index 0 to indicate the end of the free list.
+ * Entry 0 is not used; this allows index 0 to denote the end of the free list.
  *
- * Note: Any accidental reference of the form XXXX|0--0 won't match entry 0
- * because entry 0's reference field has the form XXXX|1--1.
+ * Note that a reference value of 0 does not necessarily indicate that an
+ * entry is uninitialized, since the last entry in the free list could also
+ * have a reference value of 0 (although this is unlikely).
 */
 
-struct ref_table tipc_ref_table = { NULL };
+static struct ref_table tipc_ref_table = { NULL };
 
 static DEFINE_RWLOCK(ref_table_lock);
 
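The state table in the comment above encodes every reference as instance bits (upper) plus array index bits (lower). As a small standalone sketch of that encoding, assuming a 16-entry table and an arbitrary sample value (neither comes from the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t index_mask = 16 - 1;	/* table of 2**4 entries */
	uint32_t ref = 0x12345673;	/* sample: instance bits 0x1234567_, index 3 */

	printf("index    = %u\n", ref & index_mask);	/* prints 3 */
	printf("instance = 0x%x\n", ref & ~index_mask);	/* prints 0x12345670 */

	/* discarding an entry bumps the instance part, so the old ref
	 * value can never match this entry again */
	printf("after discard: 0x%x\n",
	       (ref & ~index_mask) + (index_mask + 1));	/* prints 0x12345680 */
	return 0;
}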
@@ -72,29 +100,29 @@ static DEFINE_RWLOCK(ref_table_lock);
 int tipc_ref_table_init(u32 requested_size, u32 start)
 {
 	struct reference *table;
-	u32 sz = 1 << 4;
-	u32 index_mask;
-	int i;
+	u32 actual_size;
 
-	while (sz < requested_size) {
-		sz <<= 1;
-	}
-	table = vmalloc(sz * sizeof(*table));
+	/* account for unused entry, then round up size to a power of 2 */
+
+	requested_size++;
+	for (actual_size = 16; actual_size < requested_size; actual_size <<= 1)
+		/* do nothing */ ;
+
+	/* allocate table & mark all entries as uninitialized */
+
+	table = __vmalloc(actual_size * sizeof(struct reference),
+			  GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
 	if (table == NULL)
 		return -ENOMEM;
 
-	write_lock_bh(&ref_table_lock);
-	index_mask = sz - 1;
-	for (i = sz - 1; i >= 0; i--) {
-		table[i].object = NULL;
-		spin_lock_init(&table[i].lock);
-		table[i].data.next_plus_upper = (start & ~index_mask) + i - 1;
-	}
 	tipc_ref_table.entries = table;
-	tipc_ref_table.index_mask = index_mask;
-	tipc_ref_table.first_free = sz - 1;
-	tipc_ref_table.last_free = 1;
-	write_unlock_bh(&ref_table_lock);
+	tipc_ref_table.capacity = requested_size;
+	tipc_ref_table.init_point = 1;
+	tipc_ref_table.first_free = 0;
+	tipc_ref_table.last_free = 0;
+	tipc_ref_table.index_mask = actual_size - 1;
+	tipc_ref_table.start_mask = start & ~tipc_ref_table.index_mask;
+
 	return TIPC_OK;
 }
 
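The sizing rule above can be exercised in isolation: one entry is added for the unused entry 0, then the total is rounded up to a power of 2 with a floor of 16. This sketch replays it for a hypothetical requested_size of 100 (the value is invented for illustration):

#include <stdio.h>

int main(void)
{
	unsigned int requested_size = 100;	/* hypothetical caller request */
	unsigned int actual_size;

	requested_size++;	/* reserve the unused entry 0 */
	for (actual_size = 16; actual_size < requested_size; actual_size <<= 1)
		/* do nothing */ ;

	/* prints: actual_size = 128, index_mask = 0x7f */
	printf("actual_size = %u, index_mask = 0x%x\n",
	       actual_size, actual_size - 1);
	return 0;
}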
@@ -125,7 +153,7 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
 	u32 index;
 	u32 index_mask;
 	u32 next_plus_upper;
-	u32 reference = 0;
+	u32 ref;
 
 	if (!object) {
 		err("Attempt to acquire reference to non-existent object\n");
@@ -136,6 +164,8 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
 		return 0;
 	}
 
+	/* take a free entry, if available; otherwise initialize a new entry */
+
 	write_lock_bh(&ref_table_lock);
 	if (tipc_ref_table.first_free) {
 		index = tipc_ref_table.first_free;
@@ -143,17 +173,29 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
 		index_mask = tipc_ref_table.index_mask;
 		/* take lock in case a previous user of entry still holds it */
 		spin_lock_bh(&entry->lock);
-		next_plus_upper = entry->data.next_plus_upper;
+		next_plus_upper = entry->ref;
 		tipc_ref_table.first_free = next_plus_upper & index_mask;
-		reference = (next_plus_upper & ~index_mask) + index;
-		entry->data.reference = reference;
+		ref = (next_plus_upper & ~index_mask) + index;
+		entry->ref = ref;
 		entry->object = object;
-		if (lock != NULL)
-			*lock = &entry->lock;
 		spin_unlock_bh(&entry->lock);
+		*lock = &entry->lock;
+	}
+	else if (tipc_ref_table.init_point < tipc_ref_table.capacity) {
+		index = tipc_ref_table.init_point++;
+		entry = &(tipc_ref_table.entries[index]);
+		spin_lock_init(&entry->lock);
+		ref = tipc_ref_table.start_mask + index;
+		entry->ref = ref;
+		entry->object = object;
+		*lock = &entry->lock;
+	}
+	else {
+		ref = 0;
 	}
 	write_unlock_bh(&ref_table_lock);
-	return reference;
+
+	return ref;
 }
 
 /**
@@ -169,42 +211,99 @@ void tipc_ref_discard(u32 ref)
 	u32 index;
 	u32 index_mask;
 
-	if (!ref) {
-		err("Attempt to discard reference 0\n");
-		return;
-	}
 	if (!tipc_ref_table.entries) {
 		err("Reference table not found during discard attempt\n");
 		return;
 	}
 
-	write_lock_bh(&ref_table_lock);
 	index_mask = tipc_ref_table.index_mask;
 	index = ref & index_mask;
 	entry = &(tipc_ref_table.entries[index]);
 
+	write_lock_bh(&ref_table_lock);
+
 	if (!entry->object) {
 		err("Attempt to discard reference to non-existent object\n");
 		goto exit;
 	}
-	if (entry->data.reference != ref) {
+	if (entry->ref != ref) {
 		err("Attempt to discard non-existent reference\n");
 		goto exit;
 	}
 
-	/* mark entry as unused */
+	/*
+	 * mark entry as unused; increment instance part of entry's reference
+	 * to invalidate any subsequent references
+	 */
+
 	entry->object = NULL;
+	entry->ref = (ref & ~index_mask) + (index_mask + 1);
+
+	/* append entry to free entry list */
+
 	if (tipc_ref_table.first_free == 0)
 		tipc_ref_table.first_free = index;
 	else
-		/* next_plus_upper is always XXXX|0--0 for last free entry */
-		tipc_ref_table.entries[tipc_ref_table.last_free].data.next_plus_upper
-			|= index;
+		tipc_ref_table.entries[tipc_ref_table.last_free].ref |= index;
 	tipc_ref_table.last_free = index;
 
-	/* increment upper bits of entry to invalidate subsequent references */
-	entry->data.next_plus_upper = (ref & ~index_mask) + (index_mask + 1);
 exit:
 	write_unlock_bh(&ref_table_lock);
 }
 
+/**
+ * tipc_ref_lock - lock referenced object and return pointer to it
+ */
+
+void *tipc_ref_lock(u32 ref)
+{
+	if (likely(tipc_ref_table.entries)) {
+		struct reference *entry;
+
+		entry = &tipc_ref_table.entries[ref &
+						tipc_ref_table.index_mask];
+		if (likely(entry->ref != 0)) {
+			spin_lock_bh(&entry->lock);
+			if (likely((entry->ref == ref) && (entry->object)))
+				return entry->object;
+			spin_unlock_bh(&entry->lock);
+		}
+	}
+	return NULL;
+}
+
+/**
+ * tipc_ref_unlock - unlock referenced object
+ */
+
+void tipc_ref_unlock(u32 ref)
+{
+	if (likely(tipc_ref_table.entries)) {
+		struct reference *entry;
+
+		entry = &tipc_ref_table.entries[ref &
+						tipc_ref_table.index_mask];
+		if (likely((entry->ref == ref) && (entry->object)))
+			spin_unlock_bh(&entry->lock);
+		else
+			err("Attempt to unlock non-existent reference\n");
+	}
+}
+
+/**
+ * tipc_ref_deref - return pointer to referenced object (without locking it)
+ */
+
+void *tipc_ref_deref(u32 ref)
+{
+	if (likely(tipc_ref_table.entries)) {
+		struct reference *entry;
+
+		entry = &tipc_ref_table.entries[ref &
+						tipc_ref_table.index_mask];
+		if (likely(entry->ref == ref))
+			return entry->object;
+	}
+	return NULL;
+}
+
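A hedged usage sketch for the three new accessors, reusing the hypothetical struct my_obj from the earlier sketch (the caller is invented, not part of the patch): tipc_ref_lock() validates the reference and returns the object with its entry lock held, tipc_ref_unlock() releases that lock, and tipc_ref_deref() returns the object pointer without taking the lock, per its doc comment above:

static void my_obj_poke(u32 ref)
{
	struct my_obj *obj;

	obj = tipc_ref_lock(ref);	/* NULL if ref is stale or invalid */
	if (!obj)
		return;
	/* ... operate on obj while its entry lock is held ... */
	tipc_ref_unlock(ref);
}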