Diffstat (limited to 'net/tipc/ref.c')
 -rw-r--r--  net/tipc/ref.c | 72
 1 file changed, 36 insertions(+), 36 deletions(-)
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index 944093fe246f..5a13c2defe4a 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -61,15 +61,15 @@
  * because entry 0's reference field has the form XXXX|1--1.
  */
 
-struct ref_table ref_table = { 0 };
+struct ref_table tipc_ref_table = { 0 };
 
-rwlock_t reftbl_lock = RW_LOCK_UNLOCKED;
+static rwlock_t ref_table_lock = RW_LOCK_UNLOCKED;
 
 /**
- * ref_table_init - create reference table for objects
+ * tipc_ref_table_init - create reference table for objects
  */
 
-int ref_table_init(u32 requested_size, u32 start)
+int tipc_ref_table_init(u32 requested_size, u32 start)
 {
 	struct reference *table;
 	u32 sz = 1 << 4;
@@ -83,43 +83,43 @@ int ref_table_init(u32 requested_size, u32 start)
 	if (table == NULL)
 		return -ENOMEM;
 
-	write_lock_bh(&reftbl_lock);
+	write_lock_bh(&ref_table_lock);
 	index_mask = sz - 1;
 	for (i = sz - 1; i >= 0; i--) {
 		table[i].object = 0;
 		table[i].lock = SPIN_LOCK_UNLOCKED;
 		table[i].data.next_plus_upper = (start & ~index_mask) + i - 1;
 	}
-	ref_table.entries = table;
-	ref_table.index_mask = index_mask;
-	ref_table.first_free = sz - 1;
-	ref_table.last_free = 1;
-	write_unlock_bh(&reftbl_lock);
+	tipc_ref_table.entries = table;
+	tipc_ref_table.index_mask = index_mask;
+	tipc_ref_table.first_free = sz - 1;
+	tipc_ref_table.last_free = 1;
+	write_unlock_bh(&ref_table_lock);
 	return TIPC_OK;
 }
 
 /**
- * ref_table_stop - destroy reference table for objects
+ * tipc_ref_table_stop - destroy reference table for objects
 */
 
-void ref_table_stop(void)
+void tipc_ref_table_stop(void)
 {
-	if (!ref_table.entries)
+	if (!tipc_ref_table.entries)
 		return;
 
-	vfree(ref_table.entries);
-	ref_table.entries = 0;
+	vfree(tipc_ref_table.entries);
+	tipc_ref_table.entries = 0;
 }
 
 /**
- * ref_acquire - create reference to an object
+ * tipc_ref_acquire - create reference to an object
  *
  * Return a unique reference value which can be translated back to the pointer
  * 'object' at a later time. Also, pass back a pointer to the lock protecting
  * the object, but without locking it.
  */
 
-u32 ref_acquire(void *object, spinlock_t **lock)
+u32 tipc_ref_acquire(void *object, spinlock_t **lock)
 {
 	struct reference *entry;
 	u32 index;
@@ -127,17 +127,17 @@ u32 ref_acquire(void *object, spinlock_t **lock)
 	u32 next_plus_upper;
 	u32 reference = 0;
 
-	assert(ref_table.entries && object);
+	assert(tipc_ref_table.entries && object);
 
-	write_lock_bh(&reftbl_lock);
-	if (ref_table.first_free) {
-		index = ref_table.first_free;
-		entry = &(ref_table.entries[index]);
-		index_mask = ref_table.index_mask;
+	write_lock_bh(&ref_table_lock);
+	if (tipc_ref_table.first_free) {
+		index = tipc_ref_table.first_free;
+		entry = &(tipc_ref_table.entries[index]);
+		index_mask = tipc_ref_table.index_mask;
 		/* take lock in case a previous user of entry still holds it */
 		spin_lock_bh(&entry->lock);
 		next_plus_upper = entry->data.next_plus_upper;
-		ref_table.first_free = next_plus_upper & index_mask;
+		tipc_ref_table.first_free = next_plus_upper & index_mask;
 		reference = (next_plus_upper & ~index_mask) + index;
 		entry->data.reference = reference;
 		entry->object = object;
@@ -145,45 +145,45 @@ u32 ref_acquire(void *object, spinlock_t **lock)
 		*lock = &entry->lock;
 		spin_unlock_bh(&entry->lock);
 	}
-	write_unlock_bh(&reftbl_lock);
+	write_unlock_bh(&ref_table_lock);
 	return reference;
 }
 
 /**
- * ref_discard - invalidate references to an object
+ * tipc_ref_discard - invalidate references to an object
  *
  * Disallow future references to an object and free up the entry for re-use.
  * Note: The entry's spin_lock may still be busy after discard
  */
 
-void ref_discard(u32 ref)
+void tipc_ref_discard(u32 ref)
 {
 	struct reference *entry;
 	u32 index;
 	u32 index_mask;
 
-	assert(ref_table.entries);
+	assert(tipc_ref_table.entries);
 	assert(ref != 0);
 
-	write_lock_bh(&reftbl_lock);
-	index_mask = ref_table.index_mask;
+	write_lock_bh(&ref_table_lock);
+	index_mask = tipc_ref_table.index_mask;
 	index = ref & index_mask;
-	entry = &(ref_table.entries[index]);
+	entry = &(tipc_ref_table.entries[index]);
 	assert(entry->object != 0);
 	assert(entry->data.reference == ref);
 
 	/* mark entry as unused */
 	entry->object = 0;
-	if (ref_table.first_free == 0)
-		ref_table.first_free = index;
+	if (tipc_ref_table.first_free == 0)
+		tipc_ref_table.first_free = index;
 	else
 		/* next_plus_upper is always XXXX|0--0 for last free entry */
-		ref_table.entries[ref_table.last_free].data.next_plus_upper
+		tipc_ref_table.entries[tipc_ref_table.last_free].data.next_plus_upper
 			|= index;
-	ref_table.last_free = index;
+	tipc_ref_table.last_free = index;
 
 	/* increment upper bits of entry to invalidate subsequent references */
 	entry->data.next_plus_upper = (ref & ~index_mask) + (index_mask + 1);
-	write_unlock_bh(&reftbl_lock);
+	write_unlock_bh(&ref_table_lock);
 }
 
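A note on the arithmetic this file depends on: a reference value packs the entry's table index into its low bits (selected by index_mask) and a per-entry instance count into the remaining upper bits. tipc_ref_discard() advances the upper bits by (index_mask + 1), so a stale copy of the old reference stops matching the entry->data.reference == ref check, and a later tipc_ref_acquire() builds a fresh reference for the slot from those advanced bits. (While an entry sits on the free list, the low bits of the same field double as the index of the next free entry, hence the name next_plus_upper.) The following standalone sketch walks through that encoding; it assumes a hypothetical 16-entry table, and INDEX_MASK and the sample values are illustrative, not taken from the kernel source.

/* ref_encoding_sketch.c - illustrative userspace sketch only; the
 * constant and values below are hypothetical, not kernel code. */
#include <assert.h>
#include <stdio.h>

#define INDEX_MASK 0xfu		/* 16-entry table: low 4 bits pick the slot */

int main(void)
{
	unsigned int ref = 0x30007;	/* upper bits 0x30000, slot 7 */
	unsigned int index = ref & INDEX_MASK;

	/* A lookup recovers the slot from the low bits alone. */
	assert(index == 7);

	/* Discard advances the upper bits by (INDEX_MASK + 1), as in
	 * tipc_ref_discard(), so stale copies of 'ref' stop matching. */
	unsigned int next_plus_upper = (ref & ~INDEX_MASK) + (INDEX_MASK + 1);

	/* The next acquire of this slot builds a fresh reference from
	 * the advanced upper bits, as in tipc_ref_acquire(). */
	unsigned int new_ref = (next_plus_upper & ~INDEX_MASK) + index;

	assert(new_ref != ref && (new_ref & INDEX_MASK) == index);
	printf("old ref 0x%x, new ref 0x%x\n", ref, new_ref); /* 0x30007, 0x30017 */
	return 0;
}

Since references are 32-bit, the instance count in the upper bits does wrap eventually, so the uniqueness of a reference is very long-lived rather than absolute.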