 net/tipc/port.h   |  20
 net/tipc/ref.c    | 158
 net/tipc/ref.h    |  15
 net/tipc/socket.c |  64
 4 files changed, 108 insertions(+), 149 deletions(-)
diff --git a/net/tipc/port.h b/net/tipc/port.h
index 33e52fe50e10..38bf8cb3df1a 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -37,7 +37,6 @@
 #ifndef _TIPC_PORT_H
 #define _TIPC_PORT_H
 
-#include "ref.h"
 #include "net.h"
 #include "msg.h"
 #include "node_subscr.h"
@@ -65,7 +64,6 @@
  * @timer_ref:
  */
 struct tipc_port {
-	spinlock_t *lock;
 	int connected;
 	u32 conn_type;
 	u32 conn_instance;
@@ -98,24 +96,6 @@ int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg);
 
 void tipc_port_reinit(void);
 
-/**
- * tipc_port_lock - lock port instance referred to and return its pointer
- */
-static inline struct tipc_sock *tipc_port_lock(u32 ref)
-{
-	return (struct tipc_sock *)tipc_ref_lock(ref);
-}
-
-/**
- * tipc_port_unlock - unlock a port instance
- *
- * Can use pointer instead of tipc_ref_unlock() since port is already locked.
- */
-static inline void tipc_port_unlock(struct tipc_port *p_ptr)
-{
-	spin_unlock_bh(p_ptr->lock);
-}
-
 static inline u32 tipc_port_peernode(struct tipc_port *p_ptr)
 {
 	return msg_destnode(&p_ptr->phdr);
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index 7fc2740846e3..ea981bed967b 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -1,7 +1,7 @@
 /*
- * net/tipc/ref.c: TIPC object registry code
+ * net/tipc/ref.c: TIPC socket registry code
  *
- * Copyright (c) 1991-2006, Ericsson AB
+ * Copyright (c) 1991-2006, 2014, Ericsson AB
  * Copyright (c) 2004-2007, Wind River Systems
  * All rights reserved.
  *
@@ -38,24 +38,22 @@
 #include "ref.h"
 
 /**
- * struct reference - TIPC object reference entry
- * @object: pointer to object associated with reference entry
- * @lock: spinlock controlling access to object
- * @ref: reference value for object (combines instance & array index info)
+ * struct reference - TIPC socket reference entry
+ * @tsk: pointer to socket associated with reference entry
+ * @ref: reference value for socket (combines instance & array index info)
  */
 struct reference {
-	void *object;
-	spinlock_t lock;
+	struct tipc_sock *tsk;
 	u32 ref;
 };
 
 /**
- * struct tipc_ref_table - table of TIPC object reference entries
+ * struct tipc_ref_table - table of TIPC socket reference entries
  * @entries: pointer to array of reference entries
  * @capacity: array index of first unusable entry
  * @init_point: array index of first uninitialized entry
- * @first_free: array index of first unused object reference entry
- * @last_free: array index of last unused object reference entry
+ * @first_free: array index of first unused socket reference entry
+ * @last_free: array index of last unused socket reference entry
  * @index_mask: bitmask for array index portion of reference values
  * @start_mask: initial value for instance value portion of reference values
  */
@@ -70,9 +68,9 @@ struct ref_table {
 };
 
 /*
- * Object reference table consists of 2**N entries.
+ * Socket reference table consists of 2**N entries.
  *
- * State		Object ptr	Reference
+ * State		Socket ptr	Reference
  * -----		----------	---------
  * In use		non-NULL	XXXX|own index
  *				(XXXX changes each time entry is acquired)
@@ -89,10 +87,10 @@ struct ref_table {
 
 static struct ref_table tipc_ref_table;
 
-static DEFINE_SPINLOCK(ref_table_lock);
+static DEFINE_RWLOCK(ref_table_lock);
 
 /**
- * tipc_ref_table_init - create reference table for objects
+ * tipc_ref_table_init - create reference table for sockets
  */
 int tipc_ref_table_init(u32 requested_size, u32 start)
 {
@@ -122,84 +120,69 @@ int tipc_ref_table_init(u32 requested_size, u32 start)
 }
 
 /**
- * tipc_ref_table_stop - destroy reference table for objects
+ * tipc_ref_table_stop - destroy reference table for sockets
  */
 void tipc_ref_table_stop(void)
 {
+	if (!tipc_ref_table.entries)
+		return;
 	vfree(tipc_ref_table.entries);
 	tipc_ref_table.entries = NULL;
 }
 
-/**
- * tipc_ref_acquire - create reference to an object
+/* tipc_ref_acquire - create reference to a socket
  *
- * Register an object pointer in reference table and lock the object.
+ * Register an socket pointer in the reference table.
  * Returns a unique reference value that is used from then on to retrieve the
- * object pointer, or to determine that the object has been deregistered.
- *
- * Note: The object is returned in the locked state so that the caller can
- * register a partially initialized object, without running the risk that
- * the object will be accessed before initialization is complete.
+ * socket pointer, or to determine if the socket has been deregistered.
  */
-u32 tipc_ref_acquire(void *object, spinlock_t **lock)
+u32 tipc_ref_acquire(struct tipc_sock *tsk)
 {
 	u32 index;
 	u32 index_mask;
 	u32 next_plus_upper;
-	u32 ref;
-	struct reference *entry = NULL;
+	u32 ref = 0;
+	struct reference *entry;
 
-	if (!object) {
+	if (unlikely(!tsk)) {
		pr_err("Attempt to acquire ref. to non-existent obj\n");
 		return 0;
 	}
-	if (!tipc_ref_table.entries) {
+	if (unlikely(!tipc_ref_table.entries)) {
 		pr_err("Ref. table not found in acquisition attempt\n");
 		return 0;
 	}
 
-	/* take a free entry, if available; otherwise initialize a new entry */
-	spin_lock_bh(&ref_table_lock);
-	if (tipc_ref_table.first_free) {
+	/* Take a free entry, if available; otherwise initialize a new one */
+	write_lock_bh(&ref_table_lock);
+	index = tipc_ref_table.first_free;
+	entry = &tipc_ref_table.entries[index];
+
+	if (likely(index)) {
 		index = tipc_ref_table.first_free;
 		entry = &(tipc_ref_table.entries[index]);
 		index_mask = tipc_ref_table.index_mask;
 		next_plus_upper = entry->ref;
 		tipc_ref_table.first_free = next_plus_upper & index_mask;
 		ref = (next_plus_upper & ~index_mask) + index;
+		entry->tsk = tsk;
 	} else if (tipc_ref_table.init_point < tipc_ref_table.capacity) {
 		index = tipc_ref_table.init_point++;
 		entry = &(tipc_ref_table.entries[index]);
-		spin_lock_init(&entry->lock);
 		ref = tipc_ref_table.start_mask + index;
-	} else {
-		ref = 0;
 	}
-	spin_unlock_bh(&ref_table_lock);
 
-	/*
-	 * Grab the lock so no one else can modify this entry
-	 * While we assign its ref value & object pointer
-	 */
-	if (entry) {
-		spin_lock_bh(&entry->lock);
+	if (ref) {
 		entry->ref = ref;
-		entry->object = object;
-		*lock = &entry->lock;
-		/*
-		 * keep it locked, the caller is responsible
-		 * for unlocking this when they're done with it
-		 */
 	}
-
+	write_unlock_bh(&ref_table_lock);
 	return ref;
 }
 
-/**
- * tipc_ref_discard - invalidate references to an object
+/* tipc_ref_discard - invalidate reference to an socket
  *
- * Disallow future references to an object and free up the entry for re-use.
- * Note: The entry's spin_lock may still be busy after discard
+ * Disallow future references to an socket and free up the entry for re-use.
  */
 void tipc_ref_discard(u32 ref)
 {
@@ -207,7 +190,7 @@ void tipc_ref_discard(u32 ref)
 	u32 index;
 	u32 index_mask;
 
-	if (!tipc_ref_table.entries) {
+	if (unlikely(!tipc_ref_table.entries)) {
 		pr_err("Ref. table not found during discard attempt\n");
 		return;
 	}
@@ -216,71 +199,72 @@ void tipc_ref_discard(u32 ref)
 	index = ref & index_mask;
 	entry = &(tipc_ref_table.entries[index]);
 
-	spin_lock_bh(&ref_table_lock);
+	write_lock_bh(&ref_table_lock);
 
-	if (!entry->object) {
-		pr_err("Attempt to discard ref. to non-existent obj\n");
+	if (unlikely(!entry->tsk)) {
+		pr_err("Attempt to discard ref. to non-existent socket\n");
 		goto exit;
 	}
-	if (entry->ref != ref) {
+	if (unlikely(entry->ref != ref)) {
 		pr_err("Attempt to discard non-existent reference\n");
 		goto exit;
 	}
 
 	/*
-	 * mark entry as unused; increment instance part of entry's reference
+	 * Mark entry as unused; increment instance part of entry's reference
 	 * to invalidate any subsequent references
 	 */
-	entry->object = NULL;
+	entry->tsk = NULL;
 	entry->ref = (ref & ~index_mask) + (index_mask + 1);
 
-	/* append entry to free entry list */
-	if (tipc_ref_table.first_free == 0)
+	/* Append entry to free entry list */
+	if (unlikely(tipc_ref_table.first_free == 0))
 		tipc_ref_table.first_free = index;
 	else
 		tipc_ref_table.entries[tipc_ref_table.last_free].ref |= index;
 	tipc_ref_table.last_free = index;
-
 exit:
-	spin_unlock_bh(&ref_table_lock);
+	write_unlock_bh(&ref_table_lock);
 }
 
-/**
- * tipc_ref_lock - lock referenced object and return pointer to it
+/* tipc_sk_get - find referenced socket and return pointer to it
  */
-void *tipc_ref_lock(u32 ref)
+struct tipc_sock *tipc_sk_get(u32 ref)
 {
-	if (likely(tipc_ref_table.entries)) {
-		struct reference *entry;
+	struct reference *entry;
+	struct tipc_sock *tsk;
 
-		entry = &tipc_ref_table.entries[ref &
-						tipc_ref_table.index_mask];
-		if (likely(entry->ref != 0)) {
-			spin_lock_bh(&entry->lock);
-			if (likely((entry->ref == ref) && (entry->object)))
-				return entry->object;
-			spin_unlock_bh(&entry->lock);
-		}
-	}
-	return NULL;
+	if (unlikely(!tipc_ref_table.entries))
+		return NULL;
+	read_lock_bh(&ref_table_lock);
+	entry = &tipc_ref_table.entries[ref & tipc_ref_table.index_mask];
+	tsk = entry->tsk;
+	if (likely(tsk && (entry->ref == ref)))
+		sock_hold(&tsk->sk);
+	else
+		tsk = NULL;
+	read_unlock_bh(&ref_table_lock);
+	return tsk;
 }
 
-/* tipc_ref_lock_next - lock & return next object after referenced one
+/* tipc_sk_get_next - lock & return next socket after referenced one
 */
-void *tipc_ref_lock_next(u32 *ref)
+struct tipc_sock *tipc_sk_get_next(u32 *ref)
 {
 	struct reference *entry;
+	struct tipc_sock *tsk = NULL;
 	uint index = *ref & tipc_ref_table.index_mask;
 
+	read_lock_bh(&ref_table_lock);
 	while (++index < tipc_ref_table.capacity) {
 		entry = &tipc_ref_table.entries[index];
-		if (!entry->object)
+		if (!entry->tsk)
 			continue;
-		spin_lock_bh(&entry->lock);
+		tsk = entry->tsk;
+		sock_hold(&tsk->sk);
 		*ref = entry->ref;
-		if (entry->object)
-			return entry->object;
-		spin_unlock_bh(&entry->lock);
+		break;
 	}
-	return NULL;
+	read_unlock_bh(&ref_table_lock);
+	return tsk;
 }
diff --git a/net/tipc/ref.h b/net/tipc/ref.h
index e236fa520a1d..2b75a892305a 100644
--- a/net/tipc/ref.h
+++ b/net/tipc/ref.h
@@ -1,7 +1,7 @@
 /*
  * net/tipc/ref.h: Include file for TIPC object registry code
  *
- * Copyright (c) 1991-2006, Ericsson AB
+ * Copyright (c) 1991-2006, 2014, Ericsson AB
  * Copyright (c) 2005-2006, Wind River Systems
  * All rights reserved.
  *
@@ -37,13 +37,20 @@
 #ifndef _TIPC_REF_H
 #define _TIPC_REF_H
 
+#include "socket.h"
+
 int tipc_ref_table_init(u32 requested_size, u32 start);
 void tipc_ref_table_stop(void);
 
-u32 tipc_ref_acquire(void *object, spinlock_t **lock);
+u32 tipc_ref_acquire(struct tipc_sock *tsk);
 void tipc_ref_discard(u32 ref);
 
-void *tipc_ref_lock(u32 ref);
-void *tipc_ref_lock_next(u32 *ref);
+struct tipc_sock *tipc_sk_get(u32 ref);
+struct tipc_sock *tipc_sk_get_next(u32 *ref);
+
+static inline void tipc_sk_put(struct tipc_sock *tsk)
+{
+	sock_put(&tsk->sk);
+}
 
 #endif
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 247f245ff596..7e6240e41e69 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -35,6 +35,7 @@
  */
 
 #include "core.h"
+#include "ref.h"
 #include "port.h"
 #include "name_table.h"
 #include "node.h"
@@ -111,13 +112,6 @@ static struct proto tipc_proto_kern;
 
 #include "socket.h"
 
-/* tipc_sk_lock_next: find & lock next socket in registry from given port number
-*/
-static struct tipc_sock *tipc_sk_lock_next(u32 *ref)
-{
-	return (struct tipc_sock *)tipc_ref_lock_next(ref);
-}
-
 /**
  * advance_rx_queue - discard first buffer in socket receive queue
  *
@@ -200,7 +194,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
 
 	tsk = tipc_sk(sk);
 	port = &tsk->port;
-	ref = tipc_ref_acquire(tsk, &port->lock);
+	ref = tipc_ref_acquire(tsk);
 	if (!ref) {
 		pr_warn("Socket create failed; reference table exhausted\n");
 		return -ENOMEM;
@@ -226,7 +220,6 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
 	tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
 	tsk->sent_unacked = 0;
 	atomic_set(&tsk->dupl_rcvcnt, 0);
-	tipc_port_unlock(port);
 
 	if (sock->state == SS_READY) {
 		tipc_port_set_unreturnable(port, true);
@@ -364,9 +357,7 @@ static int tipc_release(struct socket *sock)
 	}
 
 	tipc_withdraw(port, 0, NULL);
-	spin_lock_bh(port->lock);
 	tipc_ref_discard(port->ref);
-	spin_unlock_bh(port->lock);
 	k_cancel_timer(&port->timer);
 	if (port->connected) {
 		buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
@@ -1651,7 +1642,7 @@ int tipc_sk_rcv(struct sk_buff *buf)
 	u32 dnode;
 
 	/* Validate destination and message */
-	tsk = tipc_port_lock(dport);
+	tsk = tipc_sk_get(dport);
 	if (unlikely(!tsk)) {
 		rc = tipc_msg_eval(buf, &dnode);
 		goto exit;
@@ -1672,8 +1663,7 @@ int tipc_sk_rcv(struct sk_buff *buf)
 		rc = -TIPC_ERR_OVERLOAD;
 	}
 	bh_unlock_sock(sk);
-	tipc_port_unlock(port);
-
+	tipc_sk_put(tsk);
 	if (likely(!rc))
 		return 0;
 exit:
@@ -1997,23 +1987,23 @@ restart:
 
 static void tipc_sk_timeout(unsigned long ref)
 {
-	struct tipc_sock *tsk = tipc_port_lock(ref);
+	struct tipc_sock *tsk;
 	struct tipc_port *port;
 	struct sock *sk;
 	struct sk_buff *buf = NULL;
-	struct tipc_msg *msg = NULL;
 	u32 peer_port, peer_node;
 
+	tsk = tipc_sk_get(ref);
 	if (!tsk)
-		return;
-
+		goto exit;
+	sk = &tsk->sk;
 	port = &tsk->port;
+
+	bh_lock_sock(sk);
 	if (!port->connected) {
-		tipc_port_unlock(port);
-		return;
+		bh_unlock_sock(sk);
+		goto exit;
 	}
-	sk = &tsk->sk;
-	bh_lock_sock(sk);
 	peer_port = tipc_port_peerport(port);
 	peer_node = tipc_port_peernode(port);
 
@@ -2031,12 +2021,10 @@ static void tipc_sk_timeout(unsigned long ref)
 		k_start_timer(&port->timer, port->probing_interval);
 	}
 	bh_unlock_sock(sk);
-	tipc_port_unlock(port);
-	if (!buf)
-		return;
-
-	msg = buf_msg(buf);
-	tipc_link_xmit(buf, msg_destnode(msg), msg_link_selector(msg));
+	if (buf)
+		tipc_link_xmit(buf, peer_node, ref);
+exit:
+	tipc_sk_put(tsk);
 }
 
 static int tipc_sk_show(struct tipc_port *port, char *buf,
@@ -2100,13 +2088,13 @@ struct sk_buff *tipc_sk_socks_show(void)
 	pb = TLV_DATA(rep_tlv);
 	pb_len = ULTRA_STRING_MAX_LEN;
 
-	tsk = tipc_sk_lock_next(&ref);
-	for (; tsk; tsk = tipc_sk_lock_next(&ref)) {
-		bh_lock_sock(&tsk->sk);
+	tsk = tipc_sk_get_next(&ref);
+	for (; tsk; tsk = tipc_sk_get_next(&ref)) {
+		lock_sock(&tsk->sk);
 		str_len += tipc_sk_show(&tsk->port, pb + str_len,
 					pb_len - str_len, 0);
-		bh_unlock_sock(&tsk->sk);
-		tipc_port_unlock(&tsk->port);
+		release_sock(&tsk->sk);
+		tipc_sk_put(tsk);
 	}
 	str_len += 1; /* for "\0" */
 	skb_put(buf, TLV_SPACE(str_len));
@@ -2122,15 +2110,15 @@ void tipc_sk_reinit(void)
 {
 	struct tipc_msg *msg;
 	u32 ref = 0;
-	struct tipc_sock *tsk = tipc_sk_lock_next(&ref);
+	struct tipc_sock *tsk = tipc_sk_get_next(&ref);
 
-	for (; tsk; tsk = tipc_sk_lock_next(&ref)) {
-		bh_lock_sock(&tsk->sk);
+	for (; tsk; tsk = tipc_sk_get_next(&ref)) {
+		lock_sock(&tsk->sk);
 		msg = &tsk->port.phdr;
 		msg_set_prevnode(msg, tipc_own_addr);
 		msg_set_orignode(msg, tipc_own_addr);
-		bh_unlock_sock(&tsk->sk);
-		tipc_port_unlock(&tsk->port);
+		release_sock(&tsk->sk);
+		tipc_sk_put(tsk);
 	}
 }
 