author    Ingo Molnar <mingo@kernel.org>    2018-10-23 06:30:19 -0400
committer Ingo Molnar <mingo@kernel.org>    2018-10-23 06:30:19 -0400
commit    dda93b45389f025fd3422d22cc31cc1ea6040305 (patch)
tree      44a856744843e24ed1baf6ca4edb1be04809a606 /net/core/netpoll.c
parent    2e62024c265aa69315ed02835623740030435380 (diff)
parent    b61b8bba18fe2b63d38fdaf9b83de25e2d787dfe (diff)
Merge branch 'x86/cache' into perf/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'net/core/netpoll.c')
 net/core/netpoll.c | 22 +++-------------------
 1 file changed, 3 insertions(+), 19 deletions(-)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 3219a2932463..de1d1ba92f2d 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -135,27 +135,9 @@ static void queue_process(struct work_struct *work)
 	}
 }
 
-/*
- * Check whether delayed processing was scheduled for our NIC. If so,
- * we attempt to grab the poll lock and use ->poll() to pump the card.
- * If this fails, either we've recursed in ->poll() or it's already
- * running on another CPU.
- *
- * Note: we don't mask interrupts with this lock because we're using
- * trylock here and interrupts are already disabled in the softirq
- * case. Further, we test the poll_owner to avoid recursion on UP
- * systems where the lock doesn't exist.
- */
 static void poll_one_napi(struct napi_struct *napi)
 {
-	int work = 0;
-
-	/* net_rx_action's ->poll() invocations and our's are
-	 * synchronized by this test which is only made while
-	 * holding the napi->poll_lock.
-	 */
-	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
-		return;
+	int work;
 
 	/* If we set this bit but see that it has already been set,
 	 * that indicates that napi has been disabled and we need
@@ -330,6 +312,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 	/* It is up to the caller to keep npinfo alive. */
 	struct netpoll_info *npinfo;
 
+	rcu_read_lock_bh();
 	lockdep_assert_irqs_disabled();
 
 	npinfo = rcu_dereference_bh(np->dev->npinfo);
@@ -374,6 +357,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 		skb_queue_tail(&npinfo->txq, skb);
 		schedule_delayed_work(&npinfo->tx_work,0);
 	}
+	rcu_read_unlock_bh();
 }
 EXPORT_SYMBOL(netpoll_send_skb_on_dev);
 
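The two netpoll_send_skb_on_dev() hunks wrap the function body in an RCU bottom-half read-side critical section, so that the existing rcu_dereference_bh(np->dev->npinfo) access sits between rcu_read_lock_bh() and rcu_read_unlock_bh(). Below is a minimal userspace sketch of that reader-side pattern, using the liburcu library as a stand-in for the kernel's _bh primitives; struct npinfo_stub, global_npinfo, and the other names here are hypothetical, and the writer side is reduced to a single pointer swap.

/*
 * Sketch of the RCU reader pattern from the hunks above, in userspace C.
 * Assumes liburcu is installed; build with: cc sketch.c -lurcu
 */
#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>			/* rcu_read_lock(), rcu_dereference(), ... */

struct npinfo_stub {			/* hypothetical stand-in for struct netpoll_info */
	int txq_len;
};

static struct npinfo_stub *global_npinfo;	/* RCU-protected pointer */

static void reader(void)
{
	struct npinfo_stub *npinfo;

	rcu_read_lock();			/* kernel analogue: rcu_read_lock_bh() */
	npinfo = rcu_dereference(global_npinfo);	/* analogue: rcu_dereference_bh() */
	if (npinfo)				/* the pointer may be NULL, as in netpoll */
		printf("txq_len=%d\n", npinfo->txq_len);
	rcu_read_unlock();			/* kernel analogue: rcu_read_unlock_bh() */
}

int main(void)
{
	struct npinfo_stub *p = calloc(1, sizeof(*p));

	if (!p)
		return 1;
	rcu_register_thread();			/* liburcu readers must register their thread */
	p->txq_len = 1;
	rcu_assign_pointer(global_npinfo, p);	/* publish */

	reader();

	rcu_assign_pointer(global_npinfo, NULL);	/* unpublish */
	synchronize_rcu();			/* wait out readers before freeing */
	free(p);
	rcu_unregister_thread();
	return 0;
}

In the kernel hunks, the _bh flavor additionally disables bottom-half processing inside the read-side section, which is the context rcu_dereference_bh() expects; the userspace sketch has no softirq notion, so plain rcu_read_lock() stands in for it.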
