author     Allan Stephens <allan.stephens@windriver.com>   2008-04-16 21:21:47 -0400
committer  David S. Miller <davem@davemloft.net>           2008-04-16 21:21:47 -0400
commit     0089509826b4997c37f08dfbdfb96ee952096cc9 (patch)
tree       ddb4d65c6fe8665d5f2b6dcf256ccfa424a0b948 /net/tipc/ref.c
parent     4784b7c348779e957c82ba638ba2ada5ad8502d8 (diff)
[TIPC]: Optimized initialization of TIPC reference table
This patch modifies TIPC's reference table code to delay initializing
table entries until they are actually needed by applications.

Signed-off-by: Allan Stephens <allan.stephens@windriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/tipc/ref.c')
-rw-r--r--  net/tipc/ref.c | 123
1 file changed, 76 insertions(+), 47 deletions(-)
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index d0b240e86ccd..1853cca66c68 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -50,7 +50,7 @@
  * struct reference - TIPC object reference entry
  * @object: pointer to object associated with reference entry
  * @lock: spinlock controlling access to object
- * @data: reference value associated with object (or link to next unused entry)
+ * @data: reference value for object (combines instance & array index info)
  */
 
 struct reference {
@@ -65,31 +65,40 @@ struct reference {
 /**
  * struct tipc_ref_table - table of TIPC object reference entries
  * @entries: pointer to array of reference entries
- * @index_mask: bitmask for array index portion of reference values
+ * @capacity: array index of first unusable entry
+ * @init_point: array index of first uninitialized entry
  * @first_free: array index of first unused object reference entry
  * @last_free: array index of last unused object reference entry
+ * @index_mask: bitmask for array index portion of reference values
+ * @start_mask: initial value for instance value portion of reference values
  */
 
 struct ref_table {
         struct reference *entries;
-        u32 index_mask;
+        u32 capacity;
+        u32 init_point;
         u32 first_free;
         u32 last_free;
+        u32 index_mask;
+        u32 start_mask;
 };
 
 /*
  * Object reference table consists of 2**N entries.
  *
- * A used entry has object ptr != 0, reference == XXXX|own index
- *                                   (XXXX changes each time entry is acquired)
- * A free entry has object ptr == 0, reference == YYYY|next free index
- *                                   (YYYY is one more than last used XXXX)
+ * State          Object ptr   Reference
+ * -----          ----------   ---------
+ * In use         non-NULL     XXXX|own index
+ *                             (XXXX changes each time entry is acquired)
+ * Free           NULL         YYYY|next free index
+ *                             (YYYY is one more than last used XXXX)
+ * Uninitialized  NULL         0
  *
- * Free list is initially chained from entry (2**N)-1 to entry 1.
- * Entry 0 is not used to allow index 0 to indicate the end of the free list.
+ * Entry 0 is not used; this allows index 0 to denote the end of the free list.
  *
- * Note: Any accidental reference of the form XXXX|0--0 won't match entry 0
- * because entry 0's reference field has the form XXXX|1--1.
+ * Note that a reference value of 0 does not necessarily indicate that an
+ * entry is uninitialized, since the last entry in the free list could also
+ * have a reference value of 0 (although this is unlikely).
  */
 
 static struct ref_table tipc_ref_table = { NULL };
@@ -103,29 +112,29 @@ static DEFINE_RWLOCK(ref_table_lock);
 int tipc_ref_table_init(u32 requested_size, u32 start)
 {
         struct reference *table;
-        u32 sz = 1 << 4;
-        u32 index_mask;
-        int i;
+        u32 actual_size;
 
-        while (sz < requested_size) {
-                sz <<= 1;
-        }
-        table = vmalloc(sz * sizeof(*table));
+        /* account for unused entry, then round up size to a power of 2 */
+
+        requested_size++;
+        for (actual_size = 16; actual_size < requested_size; actual_size <<= 1)
+                /* do nothing */ ;
+
+        /* allocate table & mark all entries as uninitialized */
+
+        table = __vmalloc(actual_size * sizeof(struct reference),
+                          GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
         if (table == NULL)
                 return -ENOMEM;
 
-        write_lock_bh(&ref_table_lock);
-        index_mask = sz - 1;
-        for (i = sz - 1; i >= 0; i--) {
-                table[i].object = NULL;
-                spin_lock_init(&table[i].lock);
-                table[i].data.next_plus_upper = (start & ~index_mask) + i - 1;
-        }
         tipc_ref_table.entries = table;
-        tipc_ref_table.index_mask = index_mask;
-        tipc_ref_table.first_free = sz - 1;
-        tipc_ref_table.last_free = 1;
-        write_unlock_bh(&ref_table_lock);
+        tipc_ref_table.capacity = requested_size;
+        tipc_ref_table.init_point = 1;
+        tipc_ref_table.first_free = 0;
+        tipc_ref_table.last_free = 0;
+        tipc_ref_table.index_mask = actual_size - 1;
+        tipc_ref_table.start_mask = start & ~tipc_ref_table.index_mask;
+
         return TIPC_OK;
 }
 
@@ -156,7 +165,7 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
         u32 index;
         u32 index_mask;
         u32 next_plus_upper;
-        u32 reference = 0;
+        u32 reference;
 
         if (!object) {
                 err("Attempt to acquire reference to non-existent object\n");
@@ -167,6 +176,8 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
                 return 0;
         }
 
+        /* take a free entry, if available; otherwise initialize a new entry */
+
         write_lock_bh(&ref_table_lock);
         if (tipc_ref_table.first_free) {
                 index = tipc_ref_table.first_free;
@@ -179,11 +190,23 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
                 reference = (next_plus_upper & ~index_mask) + index;
                 entry->data.reference = reference;
                 entry->object = object;
-                if (lock != NULL)
-                        *lock = &entry->lock;
                 spin_unlock_bh(&entry->lock);
+                *lock = &entry->lock;
+        }
+        else if (tipc_ref_table.init_point < tipc_ref_table.capacity) {
+                index = tipc_ref_table.init_point++;
+                entry = &(tipc_ref_table.entries[index]);
+                spin_lock_init(&entry->lock);
+                reference = tipc_ref_table.start_mask + index;
+                entry->data.reference = reference;
+                entry->object = object;
+                *lock = &entry->lock;
+        }
+        else {
+                reference = 0;
         }
         write_unlock_bh(&ref_table_lock);
+
         return reference;
 }
 
@@ -200,20 +223,17 @@ void tipc_ref_discard(u32 ref)
         u32 index;
         u32 index_mask;
 
-        if (!ref) {
-                err("Attempt to discard reference 0\n");
-                return;
-        }
         if (!tipc_ref_table.entries) {
                 err("Reference table not found during discard attempt\n");
                 return;
         }
 
-        write_lock_bh(&ref_table_lock);
         index_mask = tipc_ref_table.index_mask;
         index = ref & index_mask;
         entry = &(tipc_ref_table.entries[index]);
 
+        write_lock_bh(&ref_table_lock);
+
         if (!entry->object) {
                 err("Attempt to discard reference to non-existent object\n");
                 goto exit;
@@ -223,18 +243,23 @@ void tipc_ref_discard(u32 ref)
                 goto exit;
         }
 
-        /* mark entry as unused */
+        /*
+         * mark entry as unused; increment upper bits of entry's data field
+         * to invalidate any subsequent references
+         */
+
         entry->object = NULL;
+        entry->data.next_plus_upper = (ref & ~index_mask) + (index_mask + 1);
+
+        /* append entry to free entry list */
+
         if (tipc_ref_table.first_free == 0)
                 tipc_ref_table.first_free = index;
         else
-                /* next_plus_upper is always XXXX|0--0 for last free entry */
                 tipc_ref_table.entries[tipc_ref_table.last_free].
                         data.next_plus_upper |= index;
         tipc_ref_table.last_free = index;
 
-        /* increment upper bits of entry to invalidate subsequent references */
-        entry->data.next_plus_upper = (ref & ~index_mask) + (index_mask + 1);
 exit:
         write_unlock_bh(&ref_table_lock);
 }
@@ -249,10 +274,13 @@ void *tipc_ref_lock(u32 ref)
                 struct reference *r;
 
                 r = &tipc_ref_table.entries[ref & tipc_ref_table.index_mask];
-                spin_lock_bh(&r->lock);
-                if (likely(r->data.reference == ref))
-                        return r->object;
-                spin_unlock_bh(&r->lock);
+
+                if (likely(r->data.reference != 0)) {
+                        spin_lock_bh(&r->lock);
+                        if (likely((r->data.reference == ref) && (r->object)))
+                                return r->object;
+                        spin_unlock_bh(&r->lock);
+                }
         }
         return NULL;
 }
@@ -267,11 +295,12 @@ void tipc_ref_unlock(u32 ref)
                 struct reference *r;
 
                 r = &tipc_ref_table.entries[ref & tipc_ref_table.index_mask];
-                if (likely(r->data.reference == ref))
+
+                if (likely((r->data.reference == ref) && (r->object)))
                         spin_unlock_bh(&r->lock);
                 else
                         err("tipc_ref_unlock() invoked using "
-                            "obsolete reference\n");
+                            "invalid reference\n");
         }
 }
 
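Editor's note: the following is a minimal user-space sketch of the lazy-initialization scheme this patch introduces, for readers who want to experiment with the free-list/init_point logic outside the kernel. It is not TIPC code: the names (ref_acquire, ref_discard), the fixed 16-entry table, and the start-mask value are illustrative assumptions, and the per-entry spinlocks and error reporting of the real implementation are omitted.

/*
 * Standalone model of the lazy-initialization reference table.
 * Entries past init_point are never touched until the free list is
 * exhausted; discarding an entry bumps the upper bits of its data so
 * a stale reference can no longer match the recycled slot.
 * Build with: cc -o ref_demo ref_demo.c
 */
#include <stdio.h>

#define TABLE_SIZE 16                    /* must be a power of 2 */
#define INDEX_MASK (TABLE_SIZE - 1)

struct entry {
        void *object;                    /* NULL when free or uninitialized */
        unsigned int data;               /* reference value, or link to next free entry */
};

static struct entry table[TABLE_SIZE];
static unsigned int init_point = 1;      /* entry 0 is never handed out */
static unsigned int first_free;          /* 0 means "free list is empty" */
static unsigned int last_free;
static unsigned int start_mask = 0x1000 & ~INDEX_MASK;  /* arbitrary instance seed */

/* reuse a freed entry if one exists; otherwise initialize the next fresh entry */
static unsigned int ref_acquire(void *object)
{
        unsigned int index, reference;
        struct entry *e;

        if (first_free) {
                index = first_free;
                e = &table[index];
                first_free = e->data & INDEX_MASK;
                reference = (e->data & ~INDEX_MASK) + index;
        } else if (init_point < TABLE_SIZE) {
                index = init_point++;
                e = &table[index];
                reference = start_mask + index;
        } else {
                return 0;                /* table exhausted */
        }
        e->data = reference;
        e->object = object;
        return reference;
}

/* return an entry to the free list; bump upper bits so stale references fail */
static void ref_discard(unsigned int ref)
{
        unsigned int index = ref & INDEX_MASK;
        struct entry *e = &table[index];

        e->object = NULL;
        e->data = (ref & ~INDEX_MASK) + (INDEX_MASK + 1);
        if (first_free == 0)
                first_free = index;
        else
                table[last_free].data |= index;   /* last free entry's low bits are 0 */
        last_free = index;
}

int main(void)
{
        int a = 1, b = 2;
        unsigned int r1 = ref_acquire(&a);
        unsigned int r2 = ref_acquire(&b);

        printf("r1=0x%x r2=0x%x\n", r1, r2);        /* 0x1001, 0x1002 */
        ref_discard(r1);
        printf("recycled=0x%x\n", ref_acquire(&a)); /* slot 1 reused with new upper bits: 0x1011 */
        return 0;
}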