about | summary | refs | log | tree | commit | diff | stats
path: root/drivers
diff options
context:
space:
mode:
author: David Daney <ddaney@caviumnetworks.com> 2010-02-15 15:13:17 -0500
committer: Ralf Baechle <ralf@linux-mips.org> 2010-02-27 06:53:29 -0500
commitf8c2648666b5a1b5ba9bbb662ae569bafd3cc830 (patch)
tree5c4e9806d2e05c3327a731e0bdf5651929d799b1 /drivers
parent1d08f00d576c62f1c7a96900a14648df33b3939a (diff)
Staging: Octeon: Run phy bus accesses on a workqueue.
When directly accessing a phy, we must acquire the mdio bus lock. To do that we cannot be in interrupt context, so we need to move these operations to a workqueue.

Signed-off-by: David Daney <ddaney@caviumnetworks.com>
To: linux-mips@linux-mips.org
To: netdev@vger.kernel.org
To: gregkh@suse.de
Patchwork: http://patchwork.linux-mips.org/patch/965/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/staging/octeon/ethernet-rgmii.c55
-rw-r--r--drivers/staging/octeon/ethernet.c113
-rw-r--r--drivers/staging/octeon/octeon-ethernet.h4
3 files changed, 109 insertions(+), 63 deletions(-)
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index f90d46ed5640..a0d4d4b98bdc 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -26,6 +26,7 @@
26**********************************************************************/ 26**********************************************************************/
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/netdevice.h> 28#include <linux/netdevice.h>
29#include <linux/phy.h>
29#include <net/dst.h> 30#include <net/dst.h>
30 31
31#include <asm/octeon/octeon.h> 32#include <asm/octeon/octeon.h>
@@ -47,14 +48,20 @@ static int number_rgmii_ports;
47static void cvm_oct_rgmii_poll(struct net_device *dev) 48static void cvm_oct_rgmii_poll(struct net_device *dev)
48{ 49{
49 struct octeon_ethernet *priv = netdev_priv(dev); 50 struct octeon_ethernet *priv = netdev_priv(dev);
50 unsigned long flags; 51 unsigned long flags = 0;
51 cvmx_helper_link_info_t link_info; 52 cvmx_helper_link_info_t link_info;
53 int use_global_register_lock = (priv->phydev == NULL);
52 54
53 /* 55 BUG_ON(in_interrupt());
54 * Take the global register lock since we are going to touch 56 if (use_global_register_lock) {
55 * registers that affect more than one port. 57 /*
56 */ 58 * Take the global register lock since we are going to
57 spin_lock_irqsave(&global_register_lock, flags); 59 * touch registers that affect more than one port.
60 */
61 spin_lock_irqsave(&global_register_lock, flags);
62 } else {
63 mutex_lock(&priv->phydev->bus->mdio_lock);
64 }
58 65
59 link_info = cvmx_helper_link_get(priv->port); 66 link_info = cvmx_helper_link_get(priv->port);
60 if (link_info.u64 == priv->link_info) { 67 if (link_info.u64 == priv->link_info) {
@@ -114,7 +121,11 @@ static void cvm_oct_rgmii_poll(struct net_device *dev)
114 dev->name); 121 dev->name);
115 } 122 }
116 } 123 }
117 spin_unlock_irqrestore(&global_register_lock, flags); 124
125 if (use_global_register_lock)
126 spin_unlock_irqrestore(&global_register_lock, flags);
127 else
128 mutex_unlock(&priv->phydev->bus->mdio_lock);
118 return; 129 return;
119 } 130 }
120 131
@@ -150,7 +161,12 @@ static void cvm_oct_rgmii_poll(struct net_device *dev)
150 link_info = cvmx_helper_link_autoconf(priv->port); 161 link_info = cvmx_helper_link_autoconf(priv->port);
151 priv->link_info = link_info.u64; 162 priv->link_info = link_info.u64;
152 } 163 }
153 spin_unlock_irqrestore(&global_register_lock, flags); 164
165 if (use_global_register_lock)
166 spin_unlock_irqrestore(&global_register_lock, flags);
167 else {
168 mutex_unlock(&priv->phydev->bus->mdio_lock);
169 }
154 170
155 if (priv->phydev == NULL) { 171 if (priv->phydev == NULL) {
156 /* Tell core. */ 172 /* Tell core. */
@@ -212,8 +228,11 @@ static irqreturn_t cvm_oct_rgmii_rml_interrupt(int cpl, void *dev_id)
212 struct net_device *dev = 228 struct net_device *dev =
213 cvm_oct_device[cvmx_helper_get_ipd_port 229 cvm_oct_device[cvmx_helper_get_ipd_port
214 (interface, index)]; 230 (interface, index)];
215 if (dev) 231 struct octeon_ethernet *priv = netdev_priv(dev);
216 cvm_oct_rgmii_poll(dev); 232
233 if (dev && !atomic_read(&cvm_oct_poll_queue_stopping))
234 queue_work(cvm_oct_poll_queue, &priv->port_work);
235
217 gmx_rx_int_reg.u64 = 0; 236 gmx_rx_int_reg.u64 = 0;
218 gmx_rx_int_reg.s.phy_dupx = 1; 237 gmx_rx_int_reg.s.phy_dupx = 1;
219 gmx_rx_int_reg.s.phy_link = 1; 238 gmx_rx_int_reg.s.phy_link = 1;
@@ -251,8 +270,11 @@ static irqreturn_t cvm_oct_rgmii_rml_interrupt(int cpl, void *dev_id)
251 struct net_device *dev = 270 struct net_device *dev =
252 cvm_oct_device[cvmx_helper_get_ipd_port 271 cvm_oct_device[cvmx_helper_get_ipd_port
253 (interface, index)]; 272 (interface, index)];
254 if (dev) 273 struct octeon_ethernet *priv = netdev_priv(dev);
255 cvm_oct_rgmii_poll(dev); 274
275 if (dev && !atomic_read(&cvm_oct_poll_queue_stopping))
276 queue_work(cvm_oct_poll_queue, &priv->port_work);
277
256 gmx_rx_int_reg.u64 = 0; 278 gmx_rx_int_reg.u64 = 0;
257 gmx_rx_int_reg.s.phy_dupx = 1; 279 gmx_rx_int_reg.s.phy_dupx = 1;
258 gmx_rx_int_reg.s.phy_link = 1; 280 gmx_rx_int_reg.s.phy_link = 1;
@@ -301,6 +323,12 @@ int cvm_oct_rgmii_stop(struct net_device *dev)
301 return 0; 323 return 0;
302} 324}
303 325
326static void cvm_oct_rgmii_immediate_poll(struct work_struct *work)
327{
328 struct octeon_ethernet *priv = container_of(work, struct octeon_ethernet, port_work);
329 cvm_oct_rgmii_poll(cvm_oct_device[priv->port]);
330}
331
304int cvm_oct_rgmii_init(struct net_device *dev) 332int cvm_oct_rgmii_init(struct net_device *dev)
305{ 333{
306 struct octeon_ethernet *priv = netdev_priv(dev); 334 struct octeon_ethernet *priv = netdev_priv(dev);
@@ -308,7 +336,7 @@ int cvm_oct_rgmii_init(struct net_device *dev)
308 336
309 cvm_oct_common_init(dev); 337 cvm_oct_common_init(dev);
310 dev->netdev_ops->ndo_stop(dev); 338 dev->netdev_ops->ndo_stop(dev);
311 339 INIT_WORK(&priv->port_work, cvm_oct_rgmii_immediate_poll);
312 /* 340 /*
313 * Due to GMX errata in CN3XXX series chips, it is necessary 341 * Due to GMX errata in CN3XXX series chips, it is necessary
314 * to take the link down immediately when the PHY changes 342 * to take the link down immediately when the PHY changes
@@ -396,4 +424,5 @@ void cvm_oct_rgmii_uninit(struct net_device *dev)
396 number_rgmii_ports--; 424 number_rgmii_ports--;
397 if (number_rgmii_ports == 0) 425 if (number_rgmii_ports == 0)
398 free_irq(OCTEON_IRQ_RML, &number_rgmii_ports); 426 free_irq(OCTEON_IRQ_RML, &number_rgmii_ports);
427 cancel_work_sync(&priv->port_work);
399} 428}
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 5afece0216ca..1771c1035a3c 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -123,9 +123,16 @@ MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");
123static unsigned int cvm_oct_mac_addr_offset; 123static unsigned int cvm_oct_mac_addr_offset;
124 124
125/** 125/**
126 * Periodic timer to check auto negotiation 126 * cvm_oct_poll_queue - Workqueue for polling operations.
127 */ 127 */
128static struct timer_list cvm_oct_poll_timer; 128struct workqueue_struct *cvm_oct_poll_queue;
129
130/**
131 * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
132 *
133 * Set to one right before cvm_oct_poll_queue is destroyed.
134 */
135atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);
129 136
130/** 137/**
131 * Array of every ethernet device owned by this driver indexed by 138 * Array of every ethernet device owned by this driver indexed by
@@ -133,47 +140,39 @@ static struct timer_list cvm_oct_poll_timer;
133 */ 140 */
134struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS]; 141struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];
135 142
136/** 143static void cvm_oct_rx_refill_worker(struct work_struct *work);
137 * Periodic timer tick for slow management operations 144static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);
138 * 145
139 * @arg: Device to check 146static void cvm_oct_rx_refill_worker(struct work_struct *work)
140 */
141static void cvm_do_timer(unsigned long arg)
142{ 147{
143 static int port; 148 /*
144 if (port < CVMX_PIP_NUM_INPUT_PORTS) { 149 * FPA 0 may have been drained, try to refill it if we need
145 if (cvm_oct_device[port]) { 150 * more than num_packet_buffers / 2, otherwise normal receive
146 struct octeon_ethernet *priv = netdev_priv(cvm_oct_device[port]); 151 * processing will refill it. If it were drained, no packets
147 if (priv->poll) 152 * could be received so cvm_oct_napi_poll would never be
148 priv->poll(cvm_oct_device[port]); 153 * invoked to do the refill.
149 cvm_oct_free_tx_skbs(priv); 154 */
150 cvm_oct_device[port]->netdev_ops->ndo_get_stats(cvm_oct_device[port]); 155 cvm_oct_rx_refill_pool(num_packet_buffers / 2);
151 } 156
152 port++; 157 if (!atomic_read(&cvm_oct_poll_queue_stopping))
153 /* 158 queue_delayed_work(cvm_oct_poll_queue,
154 * Poll the next port in a 50th of a second. This 159 &cvm_oct_rx_refill_work, HZ);
155 * spreads the polling of ports out a little bit.
156 */
157 mod_timer(&cvm_oct_poll_timer, jiffies + HZ/50);
158 } else {
159 port = 0;
160 /*
161 * FPA 0 may have been drained, try to refill it if we
162 * need more than num_packet_buffers / 2, otherwise
163 * normal receive processing will refill it. If it
164 * were drained, no packets could be received so
165 * cvm_oct_napi_poll would never be invoked to do the
166 * refill.
167 */
168 cvm_oct_rx_refill_pool(num_packet_buffers / 2);
169 /*
170 * All ports have been polled. Start the next iteration through
171 * the ports in one second.
172 */
173 mod_timer(&cvm_oct_poll_timer, jiffies + HZ);
174 }
175} 160}
176 161
162static void cvm_oct_tx_clean_worker(struct work_struct *work)
163{
164 struct octeon_ethernet *priv = container_of(work,
165 struct octeon_ethernet,
166 tx_clean_work.work);
167
168 if (priv->poll)
169 priv->poll(cvm_oct_device[priv->port]);
170 cvm_oct_free_tx_skbs(priv);
171 cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(cvm_oct_device[priv->port]);
172 if (!atomic_read(&cvm_oct_poll_queue_stopping))
173 queue_delayed_work(cvm_oct_poll_queue, &priv->tx_clean_work, HZ);
174 }
175
177/** 176/**
178 * Configure common hardware for all interfaces 177 * Configure common hardware for all interfaces
179 */ 178 */
@@ -624,6 +623,12 @@ static int __init cvm_oct_init_module(void)
624 else 623 else
625 cvm_oct_mac_addr_offset = 0; 624 cvm_oct_mac_addr_offset = 0;
626 625
626 cvm_oct_poll_queue = create_singlethread_workqueue("octeon-ethernet");
627 if (cvm_oct_poll_queue == NULL) {
628 pr_err("octeon-ethernet: Cannot create workqueue");
629 return -ENOMEM;
630 }
631
627 cvm_oct_proc_initialize(); 632 cvm_oct_proc_initialize();
628 cvm_oct_configure_common_hw(); 633 cvm_oct_configure_common_hw();
629 634
@@ -719,7 +724,9 @@ static int __init cvm_oct_init_module(void)
719 724
720 /* Initialize the device private structure. */ 725 /* Initialize the device private structure. */
721 priv = netdev_priv(dev); 726 priv = netdev_priv(dev);
722 memset(priv, 0, sizeof(struct octeon_ethernet)); 727
728 INIT_DELAYED_WORK(&priv->tx_clean_work,
729 cvm_oct_tx_clean_worker);
723 730
724 priv->imode = imode; 731 priv->imode = imode;
725 priv->port = port; 732 priv->port = port;
@@ -785,17 +792,15 @@ static int __init cvm_oct_init_module(void)
785 fau -= 792 fau -=
786 cvmx_pko_get_num_queues(priv->port) * 793 cvmx_pko_get_num_queues(priv->port) *
787 sizeof(uint32_t); 794 sizeof(uint32_t);
795 queue_delayed_work(cvm_oct_poll_queue,
796 &priv->tx_clean_work, HZ);
788 } 797 }
789 } 798 }
790 } 799 }
791 800
792 cvm_oct_rx_initialize(); 801 cvm_oct_rx_initialize();
793 802
794 /* Enable the poll timer for checking RGMII status */ 803 queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ);
795 init_timer(&cvm_oct_poll_timer);
796 cvm_oct_poll_timer.data = 0;
797 cvm_oct_poll_timer.function = cvm_do_timer;
798 mod_timer(&cvm_oct_poll_timer, jiffies + HZ);
799 804
800 return 0; 805 return 0;
801} 806}
@@ -817,20 +822,28 @@ static void __exit cvm_oct_cleanup_module(void)
817 /* Free the interrupt handler */ 822 /* Free the interrupt handler */
818 free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device); 823 free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device);
819 824
820 del_timer(&cvm_oct_poll_timer); 825 atomic_inc_return(&cvm_oct_poll_queue_stopping);
826 cancel_delayed_work_sync(&cvm_oct_rx_refill_work);
827
821 cvm_oct_rx_shutdown(); 828 cvm_oct_rx_shutdown();
822 cvmx_pko_disable(); 829 cvmx_pko_disable();
823 830
824 /* Free the ethernet devices */ 831 /* Free the ethernet devices */
825 for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) { 832 for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
826 if (cvm_oct_device[port]) { 833 if (cvm_oct_device[port]) {
827 cvm_oct_tx_shutdown(cvm_oct_device[port]); 834 struct net_device *dev = cvm_oct_device[port];
828 unregister_netdev(cvm_oct_device[port]); 835 struct octeon_ethernet *priv = netdev_priv(dev);
829 kfree(cvm_oct_device[port]); 836 cancel_delayed_work_sync(&priv->tx_clean_work);
837
838 cvm_oct_tx_shutdown(dev);
839 unregister_netdev(dev);
840 kfree(dev);
830 cvm_oct_device[port] = NULL; 841 cvm_oct_device[port] = NULL;
831 } 842 }
832 } 843 }
833 844
845 destroy_workqueue(cvm_oct_poll_queue);
846
834 cvmx_pko_shutdown(); 847 cvmx_pko_shutdown();
835 cvm_oct_proc_shutdown(); 848 cvm_oct_proc_shutdown();
836 849
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index 40b695615431..8d0921061dac 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -61,6 +61,8 @@ struct octeon_ethernet {
61 void (*poll) (struct net_device *dev); 61 void (*poll) (struct net_device *dev);
62 struct hrtimer tx_restart_timer; 62 struct hrtimer tx_restart_timer;
63 ktime_t tx_restart_interval; 63 ktime_t tx_restart_interval;
64 struct delayed_work tx_clean_work;
65 struct work_struct port_work; /* may be unused. */
64}; 66};
65 67
66/** 68/**
@@ -97,6 +99,8 @@ extern int pow_send_group;
97extern int pow_receive_group; 99extern int pow_receive_group;
98extern char pow_send_list[]; 100extern char pow_send_list[];
99extern struct net_device *cvm_oct_device[]; 101extern struct net_device *cvm_oct_device[];
102extern struct workqueue_struct *cvm_oct_poll_queue;
103extern atomic_t cvm_oct_poll_queue_stopping;
100 104
101extern int max_rx_cpus; 105extern int max_rx_cpus;
102extern int rx_napi_weight; 106extern int rx_napi_weight;