Diffstat (limited to 'net/tipc/net.c')
-rw-r--r--  net/tipc/net.c | 74
1 files changed, 37 insertions, 37 deletions
diff --git a/net/tipc/net.c b/net/tipc/net.c
index a991bf8a7f74..c39c76201e8e 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -1,6 +1,6 @@
/*
 * net/tipc/net.c: TIPC network routing code
 *
 * Copyright (c) 1995-2006, Ericsson AB
 * Copyright (c) 2005, Wind River Systems
 * All rights reserved.
@@ -49,63 +49,63 @@
#include "discover.h"
#include "config.h"

/*
 * The TIPC locking policy is designed to ensure a very fine locking
 * granularity, permitting complete parallel access to individual
 * port and node/link instances. The code consists of three major
 * locking domains, each protected with their own disjunct set of locks.
 *
 * 1: The routing hierarchy.
 *    Comprises the structures 'zone', 'cluster', 'node', 'link'
 *    and 'bearer'. The whole hierarchy is protected by a big
 *    read/write lock, tipc_net_lock, to ensure that nothing is added
 *    or removed while code is accessing any of these structures.
 *    This layer must not be called from the two others while they
 *    hold any of their own locks.
 *    Neither must it itself do any upcalls to the other two before
 *    it has released tipc_net_lock and other protective locks.
 *
 *    Within the tipc_net_lock domain there are two sub-domains; 'node' and
 *    'bearer', where local write operations are permitted,
 *    provided that those are protected by individual spin_locks
 *    per instance. Code holding tipc_net_lock(read) and a node spin_lock
 *    is permitted to poke around in both the node itself and its
 *    subordinate links. I.e., it can update link counters and queues,
 *    change link state, send protocol messages, and alter the
 *    "active_links" array in the node; but it can _not_ remove a link
 *    or a node from the overall structure.
 *    Correspondingly, individual bearers may change status within a
 *    tipc_net_lock(read), protected by an individual spin_lock per bearer
 *    instance, but it needs tipc_net_lock(write) to remove/add any bearers.
 *
 *
 * 2: The transport level of the protocol.
 *    This consists of the structures port, (and its user level
 *    representations, such as user_port and tipc_sock), reference and
 *    tipc_user (port.c, reg.c, socket.c).
 *
 *    This layer has four different locks:
 *    - The tipc_port spin_lock. This is protecting each port instance
 *      from parallel data access and removal. Since we can not place
 *      this lock in the port itself, it has been placed in the
 *      corresponding reference table entry, which has the same life
 *      cycle as the module. This entry is difficult to access from
 *      outside the TIPC core, however, so a pointer to the lock has
 *      been added in the port instance, to be used for unlocking
 *      only.
 *    - A read/write lock to protect the reference table itself (ref.c).
 *      (Nobody is using read-only access to this, so it can just as
 *      well be changed to a spin_lock)
 *    - A spin lock to protect the registry of kernel/driver users (reg.c)
 *    - A global spin_lock (tipc_port_lock), whose only task is to ensure
 *      consistency where more than one port is involved in an operation,
 *      i.e., when a port is part of a linked list of ports.
 *      There are two such lists; 'port_list', which is used for management,
 *      and 'wait_list', which is used to queue ports during congestion.
 *
 * 3: The name table (name_table.c, name_distr.c, subscription.c)
 *    - There is one big read/write-lock (tipc_nametbl_lock) protecting the
 *      overall name table structure. Nothing must be added/removed to
 *      this structure without holding write access to it.
 *    - There is one local spin_lock per sub_sequence, which can be seen
 *      as a sub-domain to the tipc_nametbl_lock domain. It is used only
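
As a concrete illustration of domain 1 in the comment above: readers take tipc_net_lock for read so the hierarchy cannot change underneath them, then take the per-instance spin_lock before touching node or bearer state; only topology changes (adding or removing nodes, links or bearers) take the write side. The sketch below is not TIPC code; it is a minimal stand-alone example built on the standard kernel locking primitives, and the structure and field names (example_net_lock, example_node, link_cnt) are invented for illustration.

#include <linux/spinlock.h>

static DEFINE_RWLOCK(example_net_lock);		/* stand-in for tipc_net_lock */

struct example_node {
	spinlock_t lock;	/* per-instance lock: the 'node' sub-domain
				 * (assumed spin_lock_init()'ed at creation) */
	int link_cnt;		/* some per-node state (hypothetical field) */
};

/* Modify one node's state: the hierarchy may not change, its contents may. */
static void example_update_node(struct example_node *n)
{
	read_lock_bh(&example_net_lock);	/* node cannot be removed now */
	spin_lock_bh(&n->lock);			/* serialize writers of this node */
	n->link_cnt++;				/* e.g. link bookkeeping */
	spin_unlock_bh(&n->lock);
	read_unlock_bh(&example_net_lock);
}

/* Adding or removing nodes, links or bearers needs the write side instead. */
static void example_change_topology(void)
{
	write_lock_bh(&example_net_lock);
	/* safe to add or unlink nodes/bearers from the hierarchy here */
	write_unlock_bh(&example_net_lock);
}

The _bh variants mirror what this file itself uses further down: tipc_net_stop() takes write_lock_bh(&tipc_net_lock) before stopping the bearers.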
@@ -118,7 +118,7 @@
DEFINE_RWLOCK(tipc_net_lock);
struct network tipc_net = { NULL };

struct node *tipc_net_select_remote_node(u32 addr, u32 ref)
{
	return tipc_zone_select_remote_node(tipc_net.zones[tipc_zone(addr)], addr, ref);
}
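
The selection above works because a TIPC network address encodes its place in the routing hierarchy directly: tipc_zone(addr) is a plain bit-field extraction, so it can index tipc_net.zones[] without any lookup. The sketch below is not taken from the TIPC sources; it assumes the usual <zone.cluster.node> packing of 8/12/12 bits into a u32, and the helper names are invented for the example.

#include <stdio.h>
#include <stdint.h>

/* Assumed <Z.C.N> layout: 8-bit zone, 12-bit cluster, 12-bit node. */
static uint32_t ex_zone(uint32_t addr)    { return addr >> 24; }
static uint32_t ex_cluster(uint32_t addr) { return (addr >> 12) & 0xfff; }
static uint32_t ex_node(uint32_t addr)    { return addr & 0xfff; }

static uint32_t ex_addr(uint32_t z, uint32_t c, uint32_t n)
{
	return (z << 24) | (c << 12) | n;	/* build <z.c.n> */
}

int main(void)
{
	uint32_t addr = ex_addr(1, 1, 10);	/* destination <1.1.10> */

	/* ex_zone(addr) == 1 is the index a per-zone table such as
	 * tipc_net.zones[] would be consulted with for this address. */
	printf("<%u.%u.%u>\n", (unsigned)ex_zone(addr),
	       (unsigned)ex_cluster(addr), (unsigned)ex_node(addr));
	return 0;
}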
@@ -224,7 +224,7 @@ void tipc_net_route_msg(struct sk_buff *buf)
			buf_discard(buf);
		} else {
			msg_dbg(msg, "NET>REJ>:");
			tipc_reject_msg(buf, msg_destport(msg) ?
					TIPC_ERR_NO_PORT : TIPC_ERR_NO_NAME);
		}
		return;
@@ -236,7 +236,7 @@ void tipc_net_route_msg(struct sk_buff *buf)
	dnode = msg_short(msg) ? tipc_own_addr : msg_destnode(msg);
	if (in_scope(dnode, tipc_own_addr)) {
		if (msg_isdata(msg)) {
			if (msg_mcast(msg))
				tipc_port_recv_mcast(buf, NULL);
			else if (msg_destport(msg))
				tipc_port_recv_msg(buf);
@@ -284,7 +284,7 @@ int tipc_net_start(void)
	    (res = tipc_bclink_init())) {
		return res;
	}
	tipc_subscr_stop();
	tipc_cfg_stop();
	tipc_k_signal((Handler)tipc_subscr_start, 0);
	tipc_k_signal((Handler)tipc_cfg_init, 0);
@@ -298,12 +298,12 @@ void tipc_net_stop(void)
{
	if (tipc_mode != TIPC_NET_MODE)
		return;
	write_lock_bh(&tipc_net_lock);
	tipc_bearer_stop();
	tipc_mode = TIPC_NODE_MODE;
	tipc_bclink_stop();
	net_stop();
	write_unlock_bh(&tipc_net_lock);
	info("Left network mode \n");
}