Diffstat (limited to 'net/core/netpoll.c')
 net/core/netpoll.c | 41 ++++++++++-------------------------------
 1 file changed, 10 insertions(+), 31 deletions(-)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 57557a6a950c..de1d1ba92f2d 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -135,27 +135,9 @@ static void queue_process(struct work_struct *work)
 	}
 }
 
-/*
- * Check whether delayed processing was scheduled for our NIC. If so,
- * we attempt to grab the poll lock and use ->poll() to pump the card.
- * If this fails, either we've recursed in ->poll() or it's already
- * running on another CPU.
- *
- * Note: we don't mask interrupts with this lock because we're using
- * trylock here and interrupts are already disabled in the softirq
- * case. Further, we test the poll_owner to avoid recursion on UP
- * systems where the lock doesn't exist.
- */
 static void poll_one_napi(struct napi_struct *napi)
 {
-	int work = 0;
-
-	/* net_rx_action's ->poll() invocations and our's are
-	 * synchronized by this test which is only made while
-	 * holding the napi->poll_lock.
-	 */
-	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
-		return;
+	int work;
 
 	/* If we set this bit but see that it has already been set,
 	 * that indicates that napi has been disabled and we need
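
With the NAPI_STATE_SCHED test gone, serialization relies on the atomic bit
guard that the remaining body (only partially shown in this hunk) uses. A
minimal sketch of that pattern, assuming the mainline NAPI_STATE_NPSVC bit
and a simplified body; this is illustrative, not the full function:

#include <linux/netdevice.h>

static void poll_one_napi_sketch(struct napi_struct *napi)
{
	int work;

	/* test_and_set_bit() is atomic: exactly one caller sees the bit
	 * as previously clear and proceeds; everyone else backs off,
	 * which also covers the napi-disabled case described above.
	 */
	if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state))
		return;

	work = napi->poll(napi, 0);	/* budget 0: clean the Tx path only */
	WARN_ONCE(work, "%pS exceeded netpoll budget\n", napi->poll);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
}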
@@ -187,16 +169,16 @@ static void poll_napi(struct net_device *dev)
 	}
 }
 
-static void netpoll_poll_dev(struct net_device *dev)
+void netpoll_poll_dev(struct net_device *dev)
 {
-	const struct net_device_ops *ops;
 	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
+	const struct net_device_ops *ops;
 
 	/* Don't do any rx activity if the dev_lock mutex is held
 	 * the dev_open/close paths use this to block netpoll activity
 	 * while changing device state
 	 */
-	if (down_trylock(&ni->dev_lock))
+	if (!ni || down_trylock(&ni->dev_lock))
 		return;
 
 	if (!netif_running(dev)) {
@@ -205,13 +187,8 @@ static void netpoll_poll_dev(struct net_device *dev)
 	}
 
 	ops = dev->netdev_ops;
-	if (!ops->ndo_poll_controller) {
-		up(&ni->dev_lock);
-		return;
-	}
-
-	/* Process pending work on NIC */
-	ops->ndo_poll_controller(dev);
+	if (ops->ndo_poll_controller)
+		ops->ndo_poll_controller(dev);
 
 	poll_napi(dev);
 
@@ -219,6 +196,7 @@ static void netpoll_poll_dev(struct net_device *dev)
 
 	zap_completion_queue();
 }
+EXPORT_SYMBOL(netpoll_poll_dev);
 
 void netpoll_poll_disable(struct net_device *dev)
 {
@@ -334,6 +312,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 	/* It is up to the caller to keep npinfo alive. */
 	struct netpoll_info *npinfo;
 
+	rcu_read_lock_bh();
 	lockdep_assert_irqs_disabled();
 
 	npinfo = rcu_dereference_bh(np->dev->npinfo);
@@ -378,6 +357,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 		skb_queue_tail(&npinfo->txq, skb);
 		schedule_delayed_work(&npinfo->tx_work,0);
 	}
+	rcu_read_unlock_bh();
 }
 EXPORT_SYMBOL(netpoll_send_skb_on_dev);
 
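
The rcu_read_lock_bh()/rcu_read_unlock_bh() pair added in the two hunks
above makes the RCU read-side critical section around
rcu_dereference_bh(np->dev->npinfo) explicit rather than implied by
disabled interrupts. A minimal standalone sketch of that reader pattern;
struct foo and example_ptr are illustrative, not netpoll symbols:

#include <linux/rcupdate.h>
#include <linux/printk.h>

struct foo { int val; };
static struct foo __rcu *example_ptr;

static void reader(void)
{
	struct foo *p;

	rcu_read_lock_bh();			/* also disables softirqs */
	p = rcu_dereference_bh(example_ptr);	/* only valid under the BH lock */
	if (p)
		pr_info("val=%d\n", p->val);	/* must not sleep in this section */
	rcu_read_unlock_bh();
}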
@@ -613,8 +593,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
 	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
 	INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);
 
-	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
-	    !ndev->netdev_ops->ndo_poll_controller) {
+	if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
 		np_err(np, "%s doesn't support polling, aborting\n",
 		       np->dev_name);
 		err = -ENOTSUPP;
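
With the ndo_poll_controller() requirement dropped, __netpoll_setup()
rejects only devices that explicitly opt out via IFF_DISABLE_NETPOLL. An
illustrative opt-out in a device setup hook; foo_setup is a hypothetical
name:

#include <linux/netdevice.h>

static void foo_setup(struct net_device *dev)
{
	/* Explicitly refuse netpoll: with this flag set,
	 * __netpoll_setup() fails with -ENOTSUPP for the device.
	 */
	dev->priv_flags |= IFF_DISABLE_NETPOLL;
}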