path: root/net/tipc/ref.c
Diffstat (limited to 'net/tipc/ref.c')
-rw-r--r--  net/tipc/ref.c  13
1 file changed, 0 insertions, 13 deletions
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index 9e37b7812c3..5cada0e38e0 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -43,7 +43,6 @@
  * @lock: spinlock controlling access to object
  * @ref: reference value for object (combines instance & array index info)
  */
-
 struct reference {
 	void *object;
 	spinlock_t lock;
@@ -60,7 +59,6 @@ struct reference {
  * @index_mask: bitmask for array index portion of reference values
  * @start_mask: initial value for instance value portion of reference values
  */
-
 struct ref_table {
 	struct reference *entries;
 	u32 capacity;
@@ -96,7 +94,6 @@ static DEFINE_RWLOCK(ref_table_lock);
 /**
  * tipc_ref_table_init - create reference table for objects
  */
-
 int tipc_ref_table_init(u32 requested_size, u32 start)
 {
 	struct reference *table;
@@ -109,7 +106,6 @@ int tipc_ref_table_init(u32 requested_size, u32 start)
 		/* do nothing */ ;
 
 	/* allocate table & mark all entries as uninitialized */
-
 	table = vzalloc(actual_size * sizeof(struct reference));
 	if (table == NULL)
 		return -ENOMEM;
@@ -128,7 +124,6 @@ int tipc_ref_table_init(u32 requested_size, u32 start)
 /**
  * tipc_ref_table_stop - destroy reference table for objects
  */
-
 void tipc_ref_table_stop(void)
 {
 	if (!tipc_ref_table.entries)
@@ -149,7 +144,6 @@ void tipc_ref_table_stop(void)
  * register a partially initialized object, without running the risk that
  * the object will be accessed before initialization is complete.
  */
-
 u32 tipc_ref_acquire(void *object, spinlock_t **lock)
 {
 	u32 index;
@@ -168,7 +162,6 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
 	}
 
 	/* take a free entry, if available; otherwise initialize a new entry */
-
 	write_lock_bh(&ref_table_lock);
 	if (tipc_ref_table.first_free) {
 		index = tipc_ref_table.first_free;
@@ -211,7 +204,6 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
  * Disallow future references to an object and free up the entry for re-use.
  * Note: The entry's spin_lock may still be busy after discard
  */
-
 void tipc_ref_discard(u32 ref)
 {
 	struct reference *entry;
@@ -242,12 +234,10 @@ void tipc_ref_discard(u32 ref)
 	 * mark entry as unused; increment instance part of entry's reference
 	 * to invalidate any subsequent references
 	 */
-
 	entry->object = NULL;
 	entry->ref = (ref & ~index_mask) + (index_mask + 1);
 
 	/* append entry to free entry list */
-
 	if (tipc_ref_table.first_free == 0)
 		tipc_ref_table.first_free = index;
 	else
@@ -261,7 +251,6 @@ exit:
 /**
  * tipc_ref_lock - lock referenced object and return pointer to it
  */
-
 void *tipc_ref_lock(u32 ref)
 {
 	if (likely(tipc_ref_table.entries)) {
@@ -283,7 +272,6 @@ void *tipc_ref_lock(u32 ref)
 /**
  * tipc_ref_deref - return pointer referenced object (without locking it)
  */
-
 void *tipc_ref_deref(u32 ref)
 {
 	if (likely(tipc_ref_table.entries)) {
@@ -296,4 +284,3 @@ void *tipc_ref_deref(u32 ref)
 	}
 	return NULL;
 }
-
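
For context on the API touched above, here is a minimal usage sketch of the reference-table lifecycle (acquire, publish, discard). It is an illustration only: my_object, my_object_create() and my_object_delete() are hypothetical, and it assumes tipc_ref_acquire() returns with the entry's spinlock held (as the comment about registering a partially initialized object suggests), so the caller unlocks once initialization is complete.

#include <linux/slab.h>
#include <linux/spinlock.h>
#include "ref.h"		/* assumed header exposing the tipc_ref_* API */

/* Hypothetical object; not part of net/tipc. */
struct my_object {
	u32 ref;		/* reference value from tipc_ref_acquire() */
	spinlock_t *lock;	/* entry lock returned via the **lock argument */
	int state;
};

static struct my_object *my_object_create(void)
{
	struct my_object *obj = kzalloc(sizeof(*obj), GFP_ATOMIC);

	if (!obj)
		return NULL;

	/* Register the object while it is only partially initialized;
	 * assuming the entry lock comes back held, lookups through the
	 * reference cannot reach the object until we unlock below. */
	obj->ref = tipc_ref_acquire(obj, &obj->lock);
	if (!obj->ref) {
		kfree(obj);
		return NULL;
	}

	obj->state = 1;			/* finish initialization */
	spin_unlock_bh(obj->lock);	/* object now visible via its reference */
	return obj;
}

static void my_object_delete(struct my_object *obj)
{
	/* Invalidate the reference first; per the comment above, the entry's
	 * spinlock may still be briefly busy after the discard. */
	tipc_ref_discard(obj->ref);
	kfree(obj);
}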