-rw-r--r--	drivers/net/netxen/netxen_nic.h	17
-rw-r--r--	drivers/net/netxen/netxen_nic_main.c	19
2 files changed, 18 insertions, 18 deletions
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 7f20a03623a0..181ac0277744 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -95,23 +95,6 @@
 
 #define ADDR_IN_WINDOW1(off)	\
 	((off > NETXEN_CRB_PCIX_HOST2) && (off < NETXEN_CRB_MAX)) ? 1 : 0
-/*
- * In netxen_nic_down(), we must wait for any pending callback requests into
- * netxen_watchdog_task() to complete; eg otherwise the watchdog_timer could be
- * reenabled right after it is deleted in netxen_nic_down(). FLUSH_SCHEDULED_WORK()
- * does this synchronization.
- *
- * Normally, schedule_work()/flush_scheduled_work() could have worked, but
- * netxen_nic_close() is invoked with kernel rtnl lock held. netif_carrier_off()
- * call in netxen_nic_close() triggers a schedule_work(&linkwatch_work), and a
- * subsequent call to flush_scheduled_work() in netxen_nic_down() would cause
- * linkwatch_event() to be executed which also attempts to acquire the rtnl
- * lock thus causing a deadlock.
- */
-
-#define SCHEDULE_WORK(tp)	queue_work(netxen_workq, tp)
-#define FLUSH_SCHEDULED_WORK()	flush_workqueue(netxen_workq)
-extern struct workqueue_struct *netxen_workq;
 
 /*
  * normalize a 64MB crb address to 32MB PCI window
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index a8fb439a4d03..7144c255ce54 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -86,7 +86,24 @@ static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
 
 MODULE_DEVICE_TABLE(pci, netxen_pci_tbl);
 
-struct workqueue_struct *netxen_workq;
+/*
+ * In netxen_nic_down(), we must wait for any pending callback requests into
+ * netxen_watchdog_task() to complete; eg otherwise the watchdog_timer could be
+ * reenabled right after it is deleted in netxen_nic_down().
+ * FLUSH_SCHEDULED_WORK() does this synchronization.
+ *
+ * Normally, schedule_work()/flush_scheduled_work() could have worked, but
+ * netxen_nic_close() is invoked with kernel rtnl lock held. netif_carrier_off()
+ * call in netxen_nic_close() triggers a schedule_work(&linkwatch_work), and a
+ * subsequent call to flush_scheduled_work() in netxen_nic_down() would cause
+ * linkwatch_event() to be executed which also attempts to acquire the rtnl
+ * lock thus causing a deadlock.
+ */
+
+static struct workqueue_struct *netxen_workq;
+#define SCHEDULE_WORK(tp)	queue_work(netxen_workq, tp)
+#define FLUSH_SCHEDULED_WORK()	flush_workqueue(netxen_workq)
+
 static void netxen_watchdog(unsigned long);
 
 static void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
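The rationale in the moved comment, restated: the private netxen_workq exists so that draining the watchdog work never touches the system workqueue, where netif_carrier_off() has already queued linkwatch work that needs the rtnl lock held by the close path. The sketch below shows that pattern in isolation. It is not the driver's actual code: the demo_adapter structure, the demo_* function names, the 2*HZ interval, and the module init/exit pairing with create_singlethread_workqueue()/destroy_workqueue() are illustrative assumptions layered around the two macros carried by this patch.

/*
 * Minimal sketch of the private-workqueue pattern (demo_* names and fields
 * are assumptions for illustration, not the netxen driver's real code).
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct workqueue_struct *netxen_workq;
#define SCHEDULE_WORK(tp)	queue_work(netxen_workq, tp)
#define FLUSH_SCHEDULED_WORK()	flush_workqueue(netxen_workq)

struct demo_adapter {
	struct timer_list watchdog_timer;	/* fires in softirq context */
	struct work_struct watchdog_task;	/* sleepable follow-up work */
};

static void demo_watchdog_task(struct work_struct *work)
{
	/* Heavy, sleepable work (register polling, link handling) goes here. */
}

static void demo_watchdog(unsigned long data)
{
	struct demo_adapter *adapter = (struct demo_adapter *)data;

	/* Timer context: defer real work to the private queue and re-arm. */
	SCHEDULE_WORK(&adapter->watchdog_task);
	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}

static void demo_nic_up(struct demo_adapter *adapter)
{
	INIT_WORK(&adapter->watchdog_task, demo_watchdog_task);
	setup_timer(&adapter->watchdog_timer, demo_watchdog,
		    (unsigned long)adapter);
	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}

static void demo_nic_down(struct demo_adapter *adapter)
{
	/* Kill the timer first so no further watchdog_task gets queued ... */
	del_timer_sync(&adapter->watchdog_timer);
	/*
	 * ... then drain only the private queue.  flush_scheduled_work()
	 * would instead run everything on the system queue, including
	 * linkwatch_event(), which takes the rtnl lock our caller already
	 * holds: that is the deadlock the comment above describes.
	 */
	FLUSH_SCHEDULED_WORK();
}

static struct demo_adapter demo;

static int __init demo_init(void)
{
	netxen_workq = create_singlethread_workqueue("netxen");
	if (!netxen_workq)
		return -ENOMEM;
	demo_nic_up(&demo);
	return 0;
}

static void __exit demo_exit(void)
{
	demo_nic_down(&demo);
	destroy_workqueue(netxen_workq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Because only netxen_workq is flushed, the teardown waits solely for the watchdog work the driver queued itself; anything scheduled by other subsystems on the system workqueue, linkwatch_event() included, stays out of the rtnl-holding path.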