 drivers/net/ibm_emac/ibm_emac_mal.c | 48
 drivers/net/ibm_emac/ibm_emac_mal.h |  2
 include/linux/netdevice.h           | 10
 3 files changed, 28 insertions(+), 32 deletions(-)
diff --git a/drivers/net/ibm_emac/ibm_emac_mal.c b/drivers/net/ibm_emac/ibm_emac_mal.c
index cabd9846a5ee..4e49e8c4f871 100644
--- a/drivers/net/ibm_emac/ibm_emac_mal.c
+++ b/drivers/net/ibm_emac/ibm_emac_mal.c
@@ -207,10 +207,10 @@ static irqreturn_t mal_serr(int irq, void *dev_instance)
 
 static inline void mal_schedule_poll(struct ibm_ocp_mal *mal)
 {
-        if (likely(netif_rx_schedule_prep(&mal->poll_dev))) {
+        if (likely(napi_schedule_prep(&mal->napi))) {
                 MAL_DBG2("%d: schedule_poll" NL, mal->def->index);
                 mal_disable_eob_irq(mal);
-                __netif_rx_schedule(&mal->poll_dev);
+                __napi_schedule(&mal->napi);
         } else
                 MAL_DBG2("%d: already in poll" NL, mal->def->index);
 }
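
The hunk above is the core of the conversion: the scheduling helpers now operate on the embedded napi_struct instead of the dummy struct net_device. For orientation, here is a minimal sketch of the same prep/mask/schedule pattern in a hypothetical RX interrupt handler; my_dev, my_mask_rx_irqs() and my_unmask_rx_irqs() are illustrative names, not part of this patch.

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct my_dev {                                 /* hypothetical driver private data */
        struct napi_struct napi;                /* embedded NAPI context, as in ibm_ocp_mal */
        /* ... rings, registers, stats ... */
};

static void my_mask_rx_irqs(struct my_dev *dev);        /* assumed hardware helpers */
static void my_unmask_rx_irqs(struct my_dev *dev);

static irqreturn_t my_rx_interrupt(int irq, void *dev_id)
{
        struct my_dev *dev = dev_id;

        if (napi_schedule_prep(&dev->napi)) {   /* claim the poll for this context */
                my_mask_rx_irqs(dev);           /* quiet RX interrupts while polling */
                __napi_schedule(&dev->napi);    /* queue the poll routine */
        }
        return IRQ_HANDLED;
}
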
@@ -273,11 +273,11 @@ static irqreturn_t mal_rxde(int irq, void *dev_instance)
         return IRQ_HANDLED;
 }
 
-static int mal_poll(struct net_device *ndev, int *budget)
+static int mal_poll(struct napi_struct *napi, int budget)
 {
-        struct ibm_ocp_mal *mal = ndev->priv;
+        struct ibm_ocp_mal *mal = container_of(napi, struct ibm_ocp_mal, napi);
         struct list_head *l;
-        int rx_work_limit = min(ndev->quota, *budget), received = 0, done;
+        int received = 0;
 
         MAL_DBG2("%d: poll(%d) %d ->" NL, mal->def->index, *budget,
                  rx_work_limit);
@@ -295,38 +295,34 @@ static int mal_poll(struct net_device *ndev, int *budget)
         list_for_each(l, &mal->poll_list) {
                 struct mal_commac *mc =
                     list_entry(l, struct mal_commac, poll_list);
-                int n = mc->ops->poll_rx(mc->dev, rx_work_limit);
+                int n = mc->ops->poll_rx(mc->dev, budget);
                 if (n) {
                         received += n;
-                        rx_work_limit -= n;
-                        if (rx_work_limit <= 0) {
-                                done = 0;
+                        budget -= n;
+                        if (budget <= 0)
                                 goto more_work; // XXX What if this is the last one ?
-                        }
                 }
         }
 
         /* We need to disable IRQs to protect from RXDE IRQ here */
         local_irq_disable();
-        __netif_rx_complete(ndev);
+        __napi_complete(napi);
         mal_enable_eob_irq(mal);
         local_irq_enable();
 
-        done = 1;
-
         /* Check for "rotting" packet(s) */
         list_for_each(l, &mal->poll_list) {
                 struct mal_commac *mc =
                     list_entry(l, struct mal_commac, poll_list);
                 if (unlikely(mc->ops->peek_rx(mc->dev) || mc->rx_stopped)) {
                         MAL_DBG2("%d: rotting packet" NL, mal->def->index);
-                        if (netif_rx_reschedule(ndev, received))
+                        if (napi_reschedule(napi))
                                 mal_disable_eob_irq(mal);
                         else
                                 MAL_DBG2("%d: already in poll list" NL,
                                          mal->def->index);
 
-                        if (rx_work_limit > 0)
+                        if (budget > 0)
                                 goto again;
                         else
                                 goto more_work;
@@ -335,12 +331,8 @@ static int mal_poll(struct net_device *ndev, int *budget)
         }
 
  more_work:
-        ndev->quota -= received;
-        *budget -= received;
-
-        MAL_DBG2("%d: poll() %d <- %d" NL, mal->def->index, *budget,
-                 done ? 0 : 1);
-        return done ? 0 : 1;
+        MAL_DBG2("%d: poll() %d <- %d" NL, mal->def->index, budget, received);
+        return received;
 }
 
 static void mal_reset(struct ibm_ocp_mal *mal)
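
These mal_poll() hunks change the polling contract. The old ->poll() received int *budget, decremented both ndev->quota and *budget itself, and returned 0 when finished or 1 when more work remained; the new NAPI poll gets the budget by value and simply returns how many packets it handled, with "returned less than budget" meaning "done". A minimal sketch of the new convention, assuming a hypothetical my_clean_rx() that processes at most budget packets and returns the count:

static int my_poll(struct napi_struct *napi, int budget)
{
        struct my_dev *dev = container_of(napi, struct my_dev, napi);
        int work_done = my_clean_rx(dev, budget);       /* hypothetical RX processing */

        if (work_done < budget) {
                napi_complete(napi);            /* leave polled mode */
                my_unmask_rx_irqs(dev);         /* re-arm the RX interrupt */
        }
        return work_done;                       /* == budget means "call me again" */
}
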
@@ -425,11 +417,8 @@ static int __init mal_probe(struct ocp_device *ocpdev)
         mal->def = ocpdev->def;
 
         INIT_LIST_HEAD(&mal->poll_list);
-        set_bit(__LINK_STATE_START, &mal->poll_dev.state);
-        mal->poll_dev.weight = CONFIG_IBM_EMAC_POLL_WEIGHT;
-        mal->poll_dev.poll = mal_poll;
-        mal->poll_dev.priv = mal;
-        atomic_set(&mal->poll_dev.refcnt, 1);
+        mal->napi.weight = CONFIG_IBM_EMAC_POLL_WEIGHT;
+        mal->napi.poll = mal_poll;
 
         INIT_LIST_HEAD(&mal->list);
 
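
Where the old code had to fake up an embedded net_device (start bit, weight, poll hook, priv pointer, refcount) just to ride the polling machinery, the napi_struct only needs its weight and poll callback filled in here, because the MAL has no net_device of its own to attach it to. A driver that does own a net_device would normally let the core initialize the context instead; a hypothetical probe fragment, assuming the four-argument netif_napi_add() of this kernel generation:

static int my_probe(struct my_dev *dev, struct net_device *netdev)
{
        /* registers my_poll with a weight of 64 and ties the context to netdev */
        netif_napi_add(netdev, &dev->napi, my_poll, 64);
        return 0;
}
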
@@ -520,11 +509,8 @@ static void __exit mal_remove(struct ocp_device *ocpdev)
 
         MAL_DBG("%d: remove" NL, mal->def->index);
 
-        /* Syncronize with scheduled polling,
-           stolen from net/core/dev.c:dev_close()
-         */
-        clear_bit(__LINK_STATE_START, &mal->poll_dev.state);
-        netif_poll_disable(&mal->poll_dev);
+        /* Synchronize with scheduled polling */
+        napi_disable(&mal->napi);
 
         if (!list_empty(&mal->list)) {
                 /* This is *very* bad */
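
Teardown also gets simpler: napi_disable() replaces the open-coded clear_bit()/netif_poll_disable() pair and does not return until any poll still in flight has finished. The usual pairing with napi_enable() on the bring-up side, continuing the hypothetical example from above:

static void my_down(struct my_dev *dev)
{
        napi_disable(&dev->napi);       /* blocks until a running poll completes */
        my_mask_rx_irqs(dev);
        /* ... free rings ... */
}

static void my_up(struct my_dev *dev)
{
        /* ... allocate rings ... */
        napi_enable(&dev->napi);        /* must happen before any napi_schedule() */
        my_unmask_rx_irqs(dev);
}
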
diff --git a/drivers/net/ibm_emac/ibm_emac_mal.h b/drivers/net/ibm_emac/ibm_emac_mal.h
index 64bc338acc6c..8f54d621994d 100644
--- a/drivers/net/ibm_emac/ibm_emac_mal.h
+++ b/drivers/net/ibm_emac/ibm_emac_mal.h
@@ -195,7 +195,7 @@ struct ibm_ocp_mal {
         dcr_host_t              dcrhost;
 
         struct list_head        poll_list;
-        struct net_device       poll_dev;
+        struct napi_struct      napi;
 
         struct list_head        list;
         u32                     tx_chan_mask;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 91cd3f3db507..4848c7afa4e7 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -349,6 +349,16 @@ static inline void napi_schedule(struct napi_struct *n)
                 __napi_schedule(n);
 }
 
+/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
+static inline int napi_reschedule(struct napi_struct *napi)
+{
+        if (napi_schedule_prep(napi)) {
+                __napi_schedule(napi);
+                return 1;
+        }
+        return 0;
+}
+
 /**
  *      napi_complete - NAPI processing complete
  *      @n: napi context
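
The napi_reschedule() helper added here exists for exactly the race mal_poll() labels the "rotting packet" case: work that slips in between the final hardware check and completion. Continuing the my_poll() sketch from earlier, with my_rx_pending() as an assumed hardware query rather than a real API, the tail of the poll routine could look like this:

        if (work_done < budget) {
                napi_complete(napi);
                my_unmask_rx_irqs(dev);

                /* Did something arrive in the window we just closed? */
                if (my_rx_pending(dev) && napi_reschedule(napi))
                        my_mask_rx_irqs(dev);   /* won the race: back in polled mode */
                /* else: another path already rescheduled this context */
        }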