| author | Ingo Molnar <mingo@elte.hu> | 2008-11-11 03:16:20 -0500 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-11-11 03:16:20 -0500 |
| commit | 45b86a96f17cb2900f291129b0e67287400e45b2 | |
| tree | f7968bf36e3947ee42251f7eebc6ea5f24aca202 /net/unix/garbage.c | |
| parent | 072ba49838b42c873c496d72c91bb237914cf3b6 | |
| parent | 4143c5cb36331155a1823af8b3a8c761a59fed71 | |
Merge branch 'devel' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/urgent
Diffstat (limited to 'net/unix/garbage.c')
| -rw-r--r-- | net/unix/garbage.c | 49 |
1 file changed, 37 insertions, 12 deletions
```diff
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 2a27b84f740b..6d4a9a8de5ef 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -186,8 +186,17 @@ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
 				 */
 				struct sock *sk = unix_get_socket(*fp++);
 				if (sk) {
-					hit = true;
-					func(unix_sk(sk));
+					struct unix_sock *u = unix_sk(sk);
+
+					/*
+					 * Ignore non-candidates, they could
+					 * have been added to the queues after
+					 * starting the garbage collection
+					 */
+					if (u->gc_candidate) {
+						hit = true;
+						func(u);
+					}
 				}
 			}
 			if (hit && hitlist != NULL) {
@@ -249,11 +258,11 @@ static void inc_inflight_move_tail(struct unix_sock *u)
 {
 	atomic_long_inc(&u->inflight);
 	/*
-	 * If this is still a candidate, move it to the end of the
-	 * list, so that it's checked even if it was already passed
-	 * over
+	 * If this still might be part of a cycle, move it to the end
+	 * of the list, so that it's checked even if it was already
+	 * passed over
 	 */
-	if (u->gc_candidate)
+	if (u->gc_maybe_cycle)
 		list_move_tail(&u->link, &gc_candidates);
 }
 
@@ -267,6 +276,7 @@ void unix_gc(void)
 	struct unix_sock *next;
 	struct sk_buff_head hitlist;
 	struct list_head cursor;
+	LIST_HEAD(not_cycle_list);
 
 	spin_lock(&unix_gc_lock);
 
@@ -282,10 +292,14 @@ void unix_gc(void)
 	 *
 	 * Holding unix_gc_lock will protect these candidates from
 	 * being detached, and hence from gaining an external
-	 * reference. This also means, that since there are no
-	 * possible receivers, the receive queues of these sockets are
-	 * static during the GC, even though the dequeue is done
-	 * before the detach without atomicity guarantees.
+	 * reference. Since there are no possible receivers, all
+	 * buffers currently on the candidates' queues stay there
+	 * during the garbage collection.
+	 *
+	 * We also know that no new candidate can be added onto the
+	 * receive queues. Other, non candidate sockets _can_ be
+	 * added to queue, so we must make sure only to touch
+	 * candidates.
 	 */
 	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
 		long total_refs;
@@ -299,6 +313,7 @@ void unix_gc(void)
 		if (total_refs == inflight_refs) {
 			list_move_tail(&u->link, &gc_candidates);
 			u->gc_candidate = 1;
+			u->gc_maybe_cycle = 1;
 		}
 	}
 
@@ -325,14 +340,24 @@ void unix_gc(void)
 		list_move(&cursor, &u->link);
 
 		if (atomic_long_read(&u->inflight) > 0) {
-			list_move_tail(&u->link, &gc_inflight_list);
-			u->gc_candidate = 0;
+			list_move_tail(&u->link, &not_cycle_list);
+			u->gc_maybe_cycle = 0;
 			scan_children(&u->sk, inc_inflight_move_tail, NULL);
 		}
 	}
 	list_del(&cursor);
 
 	/*
+	 * not_cycle_list contains those sockets which do not make up a
+	 * cycle. Restore these to the inflight list.
+	 */
+	while (!list_empty(&not_cycle_list)) {
+		u = list_entry(not_cycle_list.next, struct unix_sock, link);
+		u->gc_candidate = 0;
+		list_move_tail(&u->link, &gc_inflight_list);
+	}
+
+	/*
 	 * Now gc_candidates contains only garbage. Restore original
 	 * inflight counters for these as well, and remove the skbuffs
 	 * which are creating the cycle(s).
```
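The patch's core idea is a two-pass scan: first mark every socket whose references are all accounted for by in-flight descriptors as a GC candidate, then restore (via `not_cycle_list`) any candidate later found reachable from outside, so that only true unreachable cycles stay marked. Below is a minimal user-space sketch of that pattern, not kernel code: `toy_sock` and its fields are illustrative stand-ins, and only the `gc_candidate` flag and the total-vs-inflight test mirror the patch.

```c
/*
 * Toy sketch of the candidate/restore pattern used by unix_gc()
 * in the patch above.  Plain arrays replace the kernel lists.
 */
#include <stdio.h>

struct toy_sock {
	const char *name;
	int total_refs;    /* all references to the socket            */
	int inflight_refs; /* references held only by in-flight fds   */
	int gc_candidate;  /* mirrors u->gc_candidate in the patch    */
};

int main(void)
{
	struct toy_sock s[] = {
		{ "listener", 2, 1, 0 }, /* has an external ref        */
		{ "loop-a",   1, 1, 0 }, /* only in-flight references  */
		{ "loop-b",   1, 1, 0 }, /* only in-flight references  */
	};
	int i, n = sizeof(s) / sizeof(s[0]);

	/* Pass 1: a socket whose refs are all in-flight is suspect. */
	for (i = 0; i < n; i++)
		if (s[i].total_refs == s[i].inflight_refs)
			s[i].gc_candidate = 1;

	/*
	 * Pass 2 (stand-in for the cursor/scan_children loop): any
	 * candidate found reachable from a non-candidate would be
	 * moved aside and restored here, as unix_gc() does with
	 * not_cycle_list.  In this toy run nothing is restored, so
	 * loop-a and loop-b remain the unreachable cycle that the
	 * real collector would free.
	 */
	for (i = 0; i < n; i++)
		printf("%-8s -> %s\n", s[i].name,
		       s[i].gc_candidate ? "garbage (cycle)" : "kept");
	return 0;
}
```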
