author	Santwona Behera <santwona.behera@sun.com>	2008-09-12 16:04:26 -0700
committer	Greg Kroah-Hartman <gregkh@suse.de>	2008-10-08 19:44:44 -0700
commit	e808212e98c4d8032c7bf88a0f122d58daff6621 (patch)
tree	01a3d8e3c68eca91e06799bec59ffa4fe8889458
parent	4b37352ee6c6551bd8bea079219d352990b648ed (diff)
niu: panic on reset
[ Upstream commit cff502a38394fd33693f6233e03fca363dfa956d ]

The reset_task function in the niu driver does not reset the tx and rx
buffers properly. This leads to panic on reset. This patch is a modified
implementation of the previously posted fix.

Signed-off-by: Santwona Behera <santwona.behera@sun.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
-rw-r--r--	drivers/net/niu.c	56
1 file changed, 56 insertions, 0 deletions
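
For orientation, here is an abridged sketch of niu_reset_task() as it looks with this patch applied. It is condensed from the two hunks below and the surrounding function in drivers/net/niu.c; elided steps are summarized as comments and the snippet is not meant to compile on its own. The point to note is that np->lock is released across the new niu_reset_buffers() call, which repopulates the RX rings (niu_rbr_add_page) and releases any pending TX packets (release_tx_packet) before the hardware is re-initialized.

/*
 * Abridged sketch of niu_reset_task() after this patch, condensed from
 * the hunks below; not a standalone compilable unit.
 */
static void niu_reset_task(struct work_struct *work)
{
	struct niu *np = container_of(work, struct niu, reset_task);
	unsigned long flags;
	int err;

	/* ... early-exit check, timer and netif shutdown elided ... */

	spin_lock_irqsave(&np->lock, flags);
	niu_stop_hw(np);
	spin_unlock_irqrestore(&np->lock, flags);

	/* New in this patch: rebuild the RX/TX rings with the lock dropped. */
	niu_reset_buffers(np);

	spin_lock_irqsave(&np->lock, flags);
	err = niu_init_hw(np);
	/* ... on success, rearm np->timer and restart the netif queues ... */
	spin_unlock_irqrestore(&np->lock, flags);
}

With that shape in mind, the first hunk below adds niu_reset_buffers() itself and the second drops and re-takes np->lock around the call.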
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index d11ba61baa4f..5fd6a650cc2d 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -5230,6 +5230,56 @@ static void niu_netif_start(struct niu *np)
 	niu_enable_interrupts(np, 1);
 }
 
+static void niu_reset_buffers(struct niu *np)
+{
+	int i, j, k, err;
+
+	if (np->rx_rings) {
+		for (i = 0; i < np->num_rx_rings; i++) {
+			struct rx_ring_info *rp = &np->rx_rings[i];
+
+			for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) {
+				struct page *page;
+
+				page = rp->rxhash[j];
+				while (page) {
+					struct page *next =
+						(struct page *) page->mapping;
+					u64 base = page->index;
+					base = base >> RBR_DESCR_ADDR_SHIFT;
+					rp->rbr[k++] = cpu_to_le32(base);
+					page = next;
+				}
+			}
+			for (; k < MAX_RBR_RING_SIZE; k++) {
+				err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k);
+				if (unlikely(err))
+					break;
+			}
+
+			rp->rbr_index = rp->rbr_table_size - 1;
+			rp->rcr_index = 0;
+			rp->rbr_pending = 0;
+			rp->rbr_refill_pending = 0;
+		}
+	}
+	if (np->tx_rings) {
+		for (i = 0; i < np->num_tx_rings; i++) {
+			struct tx_ring_info *rp = &np->tx_rings[i];
+
+			for (j = 0; j < MAX_TX_RING_SIZE; j++) {
+				if (rp->tx_buffs[j].skb)
+					(void) release_tx_packet(np, rp, j);
+			}
+
+			rp->pending = MAX_TX_RING_SIZE;
+			rp->prod = 0;
+			rp->cons = 0;
+			rp->wrap_bit = 0;
+		}
+	}
+}
+
 static void niu_reset_task(struct work_struct *work)
 {
 	struct niu *np = container_of(work, struct niu, reset_task);
@@ -5252,6 +5302,12 @@ static void niu_reset_task(struct work_struct *work)
 
 	niu_stop_hw(np);
 
+	spin_unlock_irqrestore(&np->lock, flags);
+
+	niu_reset_buffers(np);
+
+	spin_lock_irqsave(&np->lock, flags);
+
 	err = niu_init_hw(np);
 	if (!err) {
 		np->timer.expires = jiffies + HZ;