 fs/eventpoll.c | 21 ++++++++++++---------
 1 files changed, 12 insertions(+), 9 deletions(-)

diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 5458e80fc558..085c5c063420 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -98,7 +98,7 @@ struct epoll_filefd {
 struct nested_call_node {
         struct list_head llink;
         void *cookie;
-        int cpu;
+        void *ctx;
 };
 
 /*
@@ -317,17 +317,17 @@ static void ep_nested_calls_init(struct nested_calls *ncalls)
  * @nproc: Nested call core function pointer.
  * @priv: Opaque data to be passed to the @nproc callback.
  * @cookie: Cookie to be used to identify this nested call.
+ * @ctx: This instance context.
  *
  * Returns: Returns the code returned by the @nproc callback, or -1 if
  *          the maximum recursion limit has been exceeded.
  */
 static int ep_call_nested(struct nested_calls *ncalls, int max_nests,
                           int (*nproc)(void *, void *, int), void *priv,
-                          void *cookie)
+                          void *cookie, void *ctx)
 {
         int error, call_nests = 0;
         unsigned long flags;
-        int this_cpu = get_cpu();
         struct list_head *lsthead = &ncalls->tasks_call_list;
         struct nested_call_node *tncur;
         struct nested_call_node tnode;
@@ -340,7 +340,7 @@ static int ep_call_nested(struct nested_calls *ncalls, int max_nests,
          * very much limited.
          */
         list_for_each_entry(tncur, lsthead, llink) {
-                if (tncur->cpu == this_cpu &&
+                if (tncur->ctx == ctx &&
                     (tncur->cookie == cookie || ++call_nests > max_nests)) {
                         /*
                          * Ops ... loop detected or maximum nest level reached.
@@ -352,7 +352,7 @@ static int ep_call_nested(struct nested_calls *ncalls, int max_nests,
         }
 
         /* Add the current task and cookie to the list */
-        tnode.cpu = this_cpu;
+        tnode.ctx = ctx;
         tnode.cookie = cookie;
         list_add(&tnode.llink, lsthead);
 
@@ -364,10 +364,9 @@ static int ep_call_nested(struct nested_calls *ncalls, int max_nests,
         /* Remove the current task from the list */
         spin_lock_irqsave(&ncalls->lock, flags);
         list_del(&tnode.llink);
out_unlock:
         spin_unlock_irqrestore(&ncalls->lock, flags);
 
-        put_cpu();
         return error;
 }
 
@@ -408,8 +407,12 @@ static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
  */
 static void ep_poll_safewake(wait_queue_head_t *wq)
 {
+        int this_cpu = get_cpu();
+
         ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS,
-                       ep_poll_wakeup_proc, NULL, wq);
+                       ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);
+
+        put_cpu();
 }
 
 /*
@@ -663,7 +666,7 @@ static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
          * could re-enter here.
          */
         pollflags = ep_call_nested(&poll_readywalk_ncalls, EP_MAX_NESTS,
-                                   ep_poll_readyevents_proc, ep, ep);
+                                   ep_poll_readyevents_proc, ep, ep, current);
 
         return pollflags != -1 ? pollflags : 0;
 }
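The change above replaces the per-CPU key (int cpu, taken with get_cpu()) in struct nested_call_node with an opaque void *ctx, so each ep_call_nested() caller chooses what identifies a nesting instance: ep_poll_safewake() still passes the CPU id cast through long, while ep_eventpoll_poll() passes current, the calling task. The following is a minimal userspace sketch of that context-keyed loop/depth detection, not the kernel code: the names nest_node, nest_tracker, nested_call and the demo callback are invented for illustration, and the real ep_call_nested() additionally serializes list access with ncalls->lock.

/* nested_ctx_demo.c -- hypothetical userspace model, not fs/eventpoll.c */
#include <stdio.h>

#define MAX_NESTS 4

struct nest_node {
        struct nest_node *next;
        void *cookie;           /* object being visited (e.g. a wait queue) */
        void *ctx;              /* calling instance (CPU id or task pointer) */
};

struct nest_tracker {
        struct nest_node *head; /* stand-in for ncalls->tasks_call_list */
};

/*
 * Run proc(priv, cookie, nests) unless doing so would revisit the same
 * cookie in the same ctx (a loop) or exceed max_nests for that ctx.
 */
static int nested_call(struct nest_tracker *t, int max_nests,
                       int (*proc)(void *priv, void *cookie, int nests),
                       void *priv, void *cookie, void *ctx)
{
        struct nest_node node, *cur;
        int nests = 0, ret;

        for (cur = t->head; cur; cur = cur->next)
                if (cur->ctx == ctx &&
                    (cur->cookie == cookie || ++nests > max_nests))
                        return -1;      /* loop or depth limit hit */

        /* Push a stack-allocated node, like tnode in ep_call_nested() */
        node.cookie = cookie;
        node.ctx = ctx;
        node.next = t->head;
        t->head = &node;

        ret = proc(priv, cookie, nests);

        t->head = node.next;            /* pop on the way out */
        return ret;
}

static struct nest_tracker tracker;

/* Callback that immediately re-enters on the same cookie and ctx. */
static int reenter(void *priv, void *cookie, int nests)
{
        (void)nests;
        return nested_call(&tracker, MAX_NESTS, reenter, priv, cookie, priv);
}

int main(void)
{
        int task_a;     /* its address stands in for a task identity */

        /* Prints -1: re-entry on the same cookie/ctx is caught as a loop. */
        printf("%d\n", nested_call(&tracker, MAX_NESTS, reenter,
                                   &task_a, (void *)0x1, &task_a));
        return 0;
}

Keying on an opaque context lets ep_poll_safewake(), which runs between get_cpu()/put_cpu() with preemption disabled, keep using the CPU id, while ep_eventpoll_poll() keys on current instead, presumably because the ->poll() path runs in preemptible process context where the CPU number is not a stable identity for the caller.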
