author     Santwona Behera <santwona.behera@sun.com>    2008-09-12 19:04:26 -0400
committer  David S. Miller <davem@davemloft.net>        2008-09-12 19:04:26 -0400
commit     cff502a38394fd33693f6233e03fca363dfa956d (patch)
tree       09f1231918943bfc5081d7836acf80f1084546e7 /drivers/net/niu.c
parent     d58b622b5de9747c82fcc3548f3ec87a02c24dce (diff)
niu: panic on reset
The reset_task function in the niu driver does not reset the tx and rx
buffers properly. This leads to a panic on reset. This patch is a
modified implementation of the previously posted fix.
Signed-off-by: Santwona Behera <santwona.behera@sun.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
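
For orientation before the diff: the RX half of the fix relies on the driver keeping its receive pages in a per-ring hash (rp->rxhash), with pages chained through page->mapping and each page recording the address it was posted with in page->index. The new niu_reset_buffers() walks those chains, rewrites the surviving addresses into the RBR descriptor table, and then tops the ring up with freshly allocated pages. Below is a self-contained editorial sketch of that "re-post what survives, then refill" pattern; struct buf, refill_slot and the sizes are hypothetical stand-ins for the kernel's struct page, niu_rbr_add_page() and ring constants, so it shows the shape of the loops, not the driver's exact code.

    #include <stdint.h>

    #define HASH_SIZE 8   /* stand-in for the rxhash bucket count */
    #define RING_SIZE 8   /* stand-in for MAX_RBR_RING_SIZE       */

    /* Hypothetical stand-in for struct page: buffers chained the way the
     * driver chains pages through page->mapping, each remembering the
     * descriptor value it was originally posted with. */
    struct buf {
        struct buf *next;
        uint32_t    descr;
    };

    /* Hypothetical allocator standing in for niu_rbr_add_page(). */
    static int refill_slot(uint32_t *ring, int slot)
    {
        ring[slot] = 0xd0000000u + slot;   /* pretend-new buffer address */
        return 0;
    }

    /* Re-post every buffer still reachable from the hash, then refill the
     * remaining descriptor slots with fresh buffers. */
    static void reset_rx_ring(struct buf *hash[HASH_SIZE],
                              uint32_t ring[RING_SIZE])
    {
        int j, k = 0;

        for (j = 0; j < HASH_SIZE; j++)
            for (struct buf *b = hash[j]; b && k < RING_SIZE; b = b->next)
                ring[k++] = b->descr;      /* surviving buffer */

        for (; k < RING_SIZE; k++)
            if (refill_slot(ring, k))      /* top up the rest  */
                break;
    }

In the patch itself, j indexes the hash buckets while k indexes descriptor slots, which is why the refill loop can start partway through the ring.
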
Diffstat (limited to 'drivers/net/niu.c')
 -rw-r--r--  drivers/net/niu.c |  56
 1 files changed, 56 insertions(+), 0 deletions(-)
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index e4765b713aba..e3be81eba8a4 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -5984,6 +5984,56 @@ static void niu_netif_start(struct niu *np)
 	niu_enable_interrupts(np, 1);
 }
 
+static void niu_reset_buffers(struct niu *np)
+{
+	int i, j, k, err;
+
+	if (np->rx_rings) {
+		for (i = 0; i < np->num_rx_rings; i++) {
+			struct rx_ring_info *rp = &np->rx_rings[i];
+
+			for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) {
+				struct page *page;
+
+				page = rp->rxhash[j];
+				while (page) {
+					struct page *next =
+						(struct page *) page->mapping;
+					u64 base = page->index;
+					base = base >> RBR_DESCR_ADDR_SHIFT;
+					rp->rbr[k++] = cpu_to_le32(base);
+					page = next;
+				}
+			}
+			for (; k < MAX_RBR_RING_SIZE; k++) {
+				err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k);
+				if (unlikely(err))
+					break;
+			}
+
+			rp->rbr_index = rp->rbr_table_size - 1;
+			rp->rcr_index = 0;
+			rp->rbr_pending = 0;
+			rp->rbr_refill_pending = 0;
+		}
+	}
+	if (np->tx_rings) {
+		for (i = 0; i < np->num_tx_rings; i++) {
+			struct tx_ring_info *rp = &np->tx_rings[i];
+
+			for (j = 0; j < MAX_TX_RING_SIZE; j++) {
+				if (rp->tx_buffs[j].skb)
+					(void) release_tx_packet(np, rp, j);
+			}
+
+			rp->pending = MAX_TX_RING_SIZE;
+			rp->prod = 0;
+			rp->cons = 0;
+			rp->wrap_bit = 0;
+		}
+	}
+}
+
 static void niu_reset_task(struct work_struct *work)
 {
 	struct niu *np = container_of(work, struct niu, reset_task);
@@ -6006,6 +6056,12 @@ static void niu_reset_task(struct work_struct *work)
 
 	niu_stop_hw(np);
 
+	spin_unlock_irqrestore(&np->lock, flags);
+
+	niu_reset_buffers(np);
+
+	spin_lock_irqsave(&np->lock, flags);
+
 	err = niu_init_hw(np);
 	if (!err) {
 		np->timer.expires = jiffies + HZ;
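
A note on the second hunk, as an inference from the code rather than something the commit message states: np->lock is released around the call to niu_reset_buffers(), presumably because the helper frees TX skbs through release_tx_packet() and allocates replacement RX pages through niu_rbr_add_page(), work best kept outside the IRQ-disabling spinlock. The following is a self-contained userspace sketch of the same "hold the lock only for the hardware steps" shape, using a pthread mutex and hypothetical stand-in functions in place of the driver's spinlock and hardware helpers.

    #include <pthread.h>

    static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Hypothetical stand-ins for the driver steps. */
    static void stop_hw(void)         { /* quiesce the device            */ }
    static void rebuild_buffers(void) { /* free/reallocate ring buffers  */ }
    static int  init_hw(void)         { return 0; /* reprogram device    */ }

    /* Same shape as the patched niu_reset_task(): hold the lock for the
     * hardware steps, drop it around the buffer rebuild. */
    static void reset_task(void)
    {
        pthread_mutex_lock(&dev_lock);
        stop_hw();
        pthread_mutex_unlock(&dev_lock);

        rebuild_buffers();            /* heavy work: no lock held here */

        pthread_mutex_lock(&dev_lock);
        init_hw();
        pthread_mutex_unlock(&dev_lock);
    }
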