author		Benjamin Herrenschmidt <benh@kernel.crashing.org>	2007-10-16 01:40:50 -0400
committer	Jeff Garzik <jeff@garzik.org>				2007-10-16 21:10:28 -0400
commit		b3e441c6ed8655a42e7b4da1b6dc7939f259d9c9
tree		e15ced769e67b7e422c3dd5bd9f8eed70560dd82 /drivers/net
parent		1284cd3a2b740d0118458d2ea470a1e5bc19b187
net: Fix new EMAC driver for NAPI changes
This fixes the new EMAC driver for the NAPI updates. The previous patch
by Roland Dreier (already applied) to do that doesn't actually work. This
one applies on top of it and makes the driver work on my test Ebony machine.
This patch depends on "net: Add __napi_synchronize() to sync with napi poll",
posted previously.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
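
For context on the dependency noted above: __napi_synchronize() is meant to wait for any in-flight NAPI poll to finish without tearing the instance down, which is what the mal_poll_disable() hunk below relies on. A minimal sketch of such a helper, modelled on the in-tree napi_synchronize() (the exact __napi_synchronize() from the referenced patch may differ):

#include <linux/netdevice.h>
#include <linux/delay.h>

/* Sketch only: spin until the NAPI instance is no longer scheduled,
 * i.e. until a poll that was already running has completed. */
static inline void example_napi_synchronize(const struct napi_struct *n)
{
        while (test_bit(NAPI_STATE_SCHED, &n->state))
                msleep(1);
}

Unlike napi_disable(), which mal_poll_disable() used before this patch, such a wait leaves the instance enabled, so the poller can be scheduled again as soon as MAL_COMMAC_POLL_DISABLED is cleared.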
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/ibm_newemac/mal.c	25
1 file changed, 17 insertions, 8 deletions
diff --git a/drivers/net/ibm_newemac/mal.c b/drivers/net/ibm_newemac/mal.c
index 39f4cb6b0cf3..a680eb05ba60 100644
--- a/drivers/net/ibm_newemac/mal.c
+++ b/drivers/net/ibm_newemac/mal.c
@@ -45,6 +45,8 @@ int __devinit mal_register_commac(struct mal_instance *mal,
                 return -EBUSY;
         }
 
+        if (list_empty(&mal->list))
+                napi_enable(&mal->napi);
         mal->tx_chan_mask |= commac->tx_chan_mask;
         mal->rx_chan_mask |= commac->rx_chan_mask;
         list_add(&commac->list, &mal->list);
@@ -67,6 +69,8 @@ void __devexit mal_unregister_commac(struct mal_instance *mal,
         mal->tx_chan_mask &= ~commac->tx_chan_mask;
         mal->rx_chan_mask &= ~commac->rx_chan_mask;
         list_del_init(&commac->list);
+        if (list_empty(&mal->list))
+                napi_disable(&mal->napi);
 
         spin_unlock_irqrestore(&mal->lock, flags);
 }
@@ -182,7 +186,7 @@ static inline void mal_enable_eob_irq(struct mal_instance *mal)
         set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE);
 }
 
-/* synchronized by __LINK_STATE_RX_SCHED bit in ndev->state */
+/* synchronized by NAPI state */
 static inline void mal_disable_eob_irq(struct mal_instance *mal)
 {
         // XXX might want to cache MAL_CFG as the DCR read can be slooooow
@@ -317,8 +321,8 @@ void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac)
         while (test_and_set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags))
                 msleep(1);
 
-        /* Synchronize with the MAL NAPI poller. */
-        napi_disable(&mal->napi);
+        /* Synchronize with the MAL NAPI poller */
+        __napi_synchronize(&mal->napi);
 }
 
 void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac)
@@ -326,7 +330,12 @@ void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac)
         smp_wmb();
         clear_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);
 
-        // XXX might want to kick a poll now...
+        /* Feels better to trigger a poll here to catch up with events that
+         * may have happened on this channel while disabled. It will most
+         * probably be delayed until the next interrupt but that's mostly a
+         * non-issue in the context where this is called.
+         */
+        napi_schedule(&mal->napi);
 }
 
 static int mal_poll(struct napi_struct *napi, int budget)
@@ -336,8 +345,7 @@ static int mal_poll(struct napi_struct *napi, int budget)
         int received = 0;
         unsigned long flags;
 
-        MAL_DBG2(mal, "poll(%d) %d ->" NL, *budget,
-                 rx_work_limit);
+        MAL_DBG2(mal, "poll(%d)" NL, budget);
 again:
         /* Process TX skbs */
         list_for_each(l, &mal->poll_list) {
@@ -528,11 +536,12 @@ static int __devinit mal_probe(struct of_device *ofdev,
         }
 
         INIT_LIST_HEAD(&mal->poll_list);
-        mal->napi.weight = CONFIG_IBM_NEW_EMAC_POLL_WEIGHT;
-        mal->napi.poll = mal_poll;
         INIT_LIST_HEAD(&mal->list);
         spin_lock_init(&mal->lock);
 
+        netif_napi_add(NULL, &mal->napi, mal_poll,
+                       CONFIG_IBM_NEW_EMAC_POLL_WEIGHT);
+
         /* Load power-on reset defaults */
         mal_reset(mal);
 
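
For readers less familiar with the 2.6.24-era NAPI interface this patch migrates to, the sketch below illustrates the general pattern the MAL code now follows: register the poller with netif_napi_add(), enable it with napi_enable(), kick it with napi_schedule() when work may be pending, and have the poll handler call napi_complete() once it runs out of work before the budget is spent. This is not code from the patch; everything except the NAPI calls themselves is a placeholder, and later kernels have since changed the netif_napi_add() signature.

#include <linux/interrupt.h>
#include <linux/netdevice.h>

#define MY_POLL_WEIGHT 64       /* placeholder, cf. CONFIG_IBM_NEW_EMAC_POLL_WEIGHT */

static int my_poll(struct napi_struct *napi, int budget)
{
        int done = 0;

        /* ... process up to 'budget' packets here, counting them in 'done' ... */

        if (done < budget) {
                napi_complete(napi);    /* no more work: leave polled mode */
                /* re-enable the device's event interrupts here */
        }
        return done;
}

static void my_setup(struct net_device *dev, struct napi_struct *napi)
{
        netif_napi_add(dev, napi, my_poll, MY_POLL_WEIGHT);
        napi_enable(napi);              /* allow the poller to be scheduled */
}

static irqreturn_t my_interrupt(int irq, void *data)
{
        struct napi_struct *napi = data;

        /* mask the device's event interrupts here, then defer to the poller */
        napi_schedule(napi);
        return IRQ_HANDLED;
}

The MAL case is slightly unusual in that one NAPI instance serves several channels (commacs), which is why this patch ties napi_enable()/napi_disable() to the first registration and last unregistration in mal_register_commac()/mal_unregister_commac() rather than to a single device's open/close.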