author    David S. Miller <davem@davemloft.net>  2010-02-28 22:23:06 -0500
committer David S. Miller <davem@davemloft.net>  2010-02-28 22:23:06 -0500
commit    47871889c601d8199c51a4086f77eebd77c29b0b (patch)
tree      40cdcac3bff0ee40cc33dcca61d0577cdf965f77 /drivers/staging
parent    c16cc0b464b8876cfd57ce1c1dbcb6f9a6a0bce3 (diff)
parent    30ff056c42c665b9ea535d8515890857ae382540 (diff)

Merge branch 'master' of /home/davem/src/GIT/linux-2.6/

Conflicts:
	drivers/firmware/iscsi_ibft.c
Diffstat (limited to 'drivers/staging')
-rw-r--r-- drivers/staging/go7007/s2250-board.c      |   2
-rw-r--r-- drivers/staging/octeon/Makefile           |   1
-rw-r--r-- drivers/staging/octeon/ethernet-defines.h |  34
-rw-r--r-- drivers/staging/octeon/ethernet-mdio.c    |   6
-rw-r--r-- drivers/staging/octeon/ethernet-mdio.h    |   1
-rw-r--r-- drivers/staging/octeon/ethernet-mem.c     | 124
-rw-r--r-- drivers/staging/octeon/ethernet-proc.c    | 144
-rw-r--r-- drivers/staging/octeon/ethernet-proc.h    |  29
-rw-r--r-- drivers/staging/octeon/ethernet-rgmii.c   |  56
-rw-r--r-- drivers/staging/octeon/ethernet-rx.c      | 384
-rw-r--r-- drivers/staging/octeon/ethernet-rx.h      |  25
-rw-r--r-- drivers/staging/octeon/ethernet-sgmii.c   |   1
-rw-r--r-- drivers/staging/octeon/ethernet-spi.c     |   1
-rw-r--r-- drivers/staging/octeon/ethernet-tx.c      | 441
-rw-r--r-- drivers/staging/octeon/ethernet-tx.h      |  29
-rw-r--r-- drivers/staging/octeon/ethernet-util.h    |  13
-rw-r--r-- drivers/staging/octeon/ethernet-xaui.c    |   1
-rw-r--r-- drivers/staging/octeon/ethernet.c         | 254
-rw-r--r-- drivers/staging/octeon/octeon-ethernet.h  |  58
-rw-r--r-- drivers/staging/sm7xx/smtc2d.c            |   2
-rw-r--r-- drivers/staging/sm7xx/smtc2d.h            |   2
-rw-r--r-- drivers/staging/sm7xx/smtcfb.c            |   2
-rw-r--r-- drivers/staging/sm7xx/smtcfb.h            |   2
23 files changed, 725 insertions(+), 887 deletions(-)
diff --git a/drivers/staging/go7007/s2250-board.c b/drivers/staging/go7007/s2250-board.c
index 8cf7f2750b3f..c324f6ea002b 100644
--- a/drivers/staging/go7007/s2250-board.c
+++ b/drivers/staging/go7007/s2250-board.c
@@ -159,7 +159,7 @@ static int write_reg(struct i2c_client *client, u8 reg, u8 value)
 	struct go7007 *go = i2c_get_adapdata(client->adapter);
 	struct go7007_usb *usb;
 	int rc;
-	int dev_addr = client->addr;
+	int dev_addr = client->addr << 1;	/* firmware wants 8-bit address */
 	u8 *buf;
 
 	if (go == NULL)
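Editor's note: i2c_client->addr holds the 7-bit I2C address; the fix above
shifts it left because the s2250 firmware expects the 8-bit "wire" form
with the R/W flag in bit 0. A minimal sketch of the conversion, with a
made-up helper name (not part of the driver):

/*
 * Sketch: 7-bit I2C address to its 8-bit wire form. Bit 0 is the R/W
 * flag and stays clear (write). Illustration only.
 */
static inline u8 i2c_addr_to_8bit_write(u8 addr7)
{
	return addr7 << 1;
}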
diff --git a/drivers/staging/octeon/Makefile b/drivers/staging/octeon/Makefile
index c0a583cc2227..87447c102fa0 100644
--- a/drivers/staging/octeon/Makefile
+++ b/drivers/staging/octeon/Makefile
@@ -14,7 +14,6 @@ obj-${CONFIG_OCTEON_ETHERNET} := octeon-ethernet.o
 octeon-ethernet-objs := ethernet.o
 octeon-ethernet-objs += ethernet-mdio.o
 octeon-ethernet-objs += ethernet-mem.o
-octeon-ethernet-objs += ethernet-proc.o
 octeon-ethernet-objs += ethernet-rgmii.o
 octeon-ethernet-objs += ethernet-rx.o
 octeon-ethernet-objs += ethernet-sgmii.o
diff --git a/drivers/staging/octeon/ethernet-defines.h b/drivers/staging/octeon/ethernet-defines.h
index f13131b03c33..6a2cd50a17df 100644
--- a/drivers/staging/octeon/ethernet-defines.h
+++ b/drivers/staging/octeon/ethernet-defines.h
@@ -41,17 +41,10 @@
  *   Tells the driver to populate the packet buffers with kernel skbuffs.
  *   This allows the driver to receive packets without copying them. It also
  *   means that 32bit userspace can't access the packet buffers.
- * USE_32BIT_SHARED
- *   This define tells the driver to allocate memory for buffers from the
- *   32bit sahred region instead of the kernel memory space.
  * USE_HW_TCPUDP_CHECKSUM
  *   Controls if the Octeon TCP/UDP checksum engine is used for packet
  *   output. If this is zero, the kernel will perform the checksum in
  *   software.
- * USE_MULTICORE_RECEIVE
- *   Process receive interrupts on multiple cores. This spreads the network
- *   load across the first 8 processors. If ths is zero, only one core
- *   processes incomming packets.
  * USE_ASYNC_IOBDMA
  *   Use asynchronous IO access to hardware. This uses Octeon's asynchronous
  *   IOBDMAs to issue IO accesses without stalling. Set this to zero
@@ -75,29 +68,15 @@
 #define CONFIG_CAVIUM_RESERVE32 0
 #endif
 
-#if CONFIG_CAVIUM_RESERVE32
-#define USE_32BIT_SHARED 1
-#define USE_SKBUFFS_IN_HW 0
-#define REUSE_SKBUFFS_WITHOUT_FREE 0
-#else
-#define USE_32BIT_SHARED 0
 #define USE_SKBUFFS_IN_HW 1
 #ifdef CONFIG_NETFILTER
 #define REUSE_SKBUFFS_WITHOUT_FREE 0
 #else
 #define REUSE_SKBUFFS_WITHOUT_FREE 1
 #endif
-#endif
-
-/* Max interrupts per second per core */
-#define INTERRUPT_LIMIT 10000
 
-/* Don't limit the number of interrupts */
-/*#define INTERRUPT_LIMIT 0 */
 #define USE_HW_TCPUDP_CHECKSUM 1
 
-#define USE_MULTICORE_RECEIVE 1
-
 /* Enable Random Early Dropping under load */
 #define USE_RED 1
 #define USE_ASYNC_IOBDMA (CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0)
@@ -115,21 +94,12 @@
 /* Use this to not have FPA frees control L2 */
 /*#define DONT_WRITEBACK(x) 0 */
 
-/* Maximum number of packets to process per interrupt. */
-#define MAX_RX_PACKETS 120
 /* Maximum number of SKBs to try to free per xmit packet. */
-#define MAX_SKB_TO_FREE 10
 #define MAX_OUT_QUEUE_DEPTH 1000
 
-#ifndef CONFIG_SMP
-#undef USE_MULTICORE_RECEIVE
-#define USE_MULTICORE_RECEIVE 0
-#endif
-
-#define IP_PROTOCOL_TCP 6
-#define IP_PROTOCOL_UDP 0x11
+#define FAU_TOTAL_TX_TO_CLEAN (CVMX_FAU_REG_END - sizeof(uint32_t))
+#define FAU_NUM_PACKET_BUFFERS_TO_FREE (FAU_TOTAL_TX_TO_CLEAN - sizeof(uint32_t))
 
-#define FAU_NUM_PACKET_BUFFERS_TO_FREE (CVMX_FAU_REG_END - sizeof(uint32_t))
 #define TOTAL_NUMBER_OF_PORTS (CVMX_PIP_NUM_INPUT_PORTS+1)
 
 
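Editor's note: the two FAU counters defined above are carved downward from
the top of the FAU address space, one uint32_t apart. A worked illustration
(the value of CVMX_FAU_REG_END below is hypothetical; the real constant
comes from the Octeon SDK headers):

/*
 * If CVMX_FAU_REG_END were 2048:
 *   FAU_TOTAL_TX_TO_CLEAN          = 2048 - 4 = 2044
 *   FAU_NUM_PACKET_BUFFERS_TO_FREE = 2044 - 4 = 2040
 * so the two 32-bit counters occupy the top eight bytes of FAU space.
 */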
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index 05a5cc0f43ed..7e0be8d00dc3 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -96,11 +96,11 @@ const struct ethtool_ops cvm_oct_ethtool_ops = {
 };
 
 /**
- * IOCTL support for PHY control
- *
+ * cvm_oct_ioctl - IOCTL support for PHY control
  * @dev: Device to change
  * @rq: the request
  * @cmd: the command
+ *
  * Returns Zero on success
  */
 int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -153,7 +153,7 @@ static void cvm_oct_adjust_link(struct net_device *dev)
 
 
 /**
- * Setup the PHY
+ * cvm_oct_phy_setup_device - setup the PHY
  *
  * @dev: Device to setup
  *
diff --git a/drivers/staging/octeon/ethernet-mdio.h b/drivers/staging/octeon/ethernet-mdio.h
index 55d0614a7cd9..a417d4fce12c 100644
--- a/drivers/staging/octeon/ethernet-mdio.h
+++ b/drivers/staging/octeon/ethernet-mdio.h
@@ -32,7 +32,6 @@
 #include <linux/ip.h>
 #include <linux/string.h>
 #include <linux/ethtool.h>
-#include <linux/mii.h>
 #include <linux/seq_file.h>
 #include <linux/proc_fs.h>
 #include <net/dst.h>
diff --git a/drivers/staging/octeon/ethernet-mem.c b/drivers/staging/octeon/ethernet-mem.c
index b595903e2af1..00cc91df6b46 100644
--- a/drivers/staging/octeon/ethernet-mem.c
+++ b/drivers/staging/octeon/ethernet-mem.c
@@ -4,7 +4,7 @@
  * Contact: support@caviumnetworks.com
  * This file is part of the OCTEON SDK
  *
- * Copyright (c) 2003-2007 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
  *
  * This file is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License, Version 2, as
@@ -26,8 +26,6 @@
 **********************************************************************/
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
-#include <linux/mii.h>
-#include <net/dst.h>
 
 #include <asm/octeon/octeon.h>
 
@@ -36,18 +34,19 @@
 #include "cvmx-fpa.h"
 
 /**
- * Fill the supplied hardware pool with skbuffs
- *
+ * cvm_oct_fill_hw_skbuff - fill the supplied hardware pool with skbuffs
  * @pool: Pool to allocate an skbuff for
  * @size: Size of the buffer needed for the pool
  * @elements: Number of buffers to allocate
+ *
+ * Returns the actual number of buffers allocated.
  */
 static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements)
 {
 	int freed = elements;
 	while (freed) {
 
-		struct sk_buff *skb = dev_alloc_skb(size + 128);
+		struct sk_buff *skb = dev_alloc_skb(size + 256);
 		if (unlikely(skb == NULL)) {
 			pr_warning
 			    ("Failed to allocate skb for hardware pool %d\n",
@@ -55,7 +54,7 @@ static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements)
 			break;
 		}
 
-		skb_reserve(skb, 128 - (((unsigned long)skb->data) & 0x7f));
+		skb_reserve(skb, 256 - (((unsigned long)skb->data) & 0x7f));
 		*(struct sk_buff **)(skb->data - sizeof(void *)) = skb;
 		cvmx_fpa_free(skb->data, pool, DONT_WRITEBACK(size / 128));
 		freed--;
@@ -64,8 +63,7 @@ static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements)
 }
 
 /**
- * Free the supplied hardware pool of skbuffs
- *
+ * cvm_oct_free_hw_skbuff- free hardware pool skbuffs
  * @pool: Pool to allocate an skbuff for
  * @size: Size of the buffer needed for the pool
  * @elements: Number of buffers to allocate
@@ -93,96 +91,76 @@ static void cvm_oct_free_hw_skbuff(int pool, int size, int elements)
 }
 
 /**
- * This function fills a hardware pool with memory. Depending
- * on the config defines, this memory might come from the
- * kernel or global 32bit memory allocated with
- * cvmx_bootmem_alloc.
- *
+ * cvm_oct_fill_hw_memory - fill a hardware pool with memory.
  * @pool: Pool to populate
  * @size: Size of each buffer in the pool
  * @elements: Number of buffers to allocate
+ *
+ * Returns the actual number of buffers allocated.
  */
 static int cvm_oct_fill_hw_memory(int pool, int size, int elements)
 {
 	char *memory;
+	char *fpa;
 	int freed = elements;
 
-	if (USE_32BIT_SHARED) {
-		extern uint64_t octeon_reserve32_memory;
-
-		memory =
-		    cvmx_bootmem_alloc_range(elements * size, 128,
-					     octeon_reserve32_memory,
-					     octeon_reserve32_memory +
-					     (CONFIG_CAVIUM_RESERVE32 << 20) -
-					     1);
-		if (memory == NULL)
-			panic("Unable to allocate %u bytes for FPA pool %d\n",
-			      elements * size, pool);
-
-		pr_notice("Memory range %p - %p reserved for "
-			  "hardware\n", memory,
-			  memory + elements * size - 1);
-
-		while (freed) {
-			cvmx_fpa_free(memory, pool, 0);
-			memory += size;
-			freed--;
-		}
-	} else {
-		while (freed) {
-			/* We need to force alignment to 128 bytes here */
-			memory = kmalloc(size + 127, GFP_ATOMIC);
-			if (unlikely(memory == NULL)) {
-				pr_warning("Unable to allocate %u bytes for "
-					   "FPA pool %d\n",
-					   elements * size, pool);
-				break;
-			}
-			memory = (char *)(((unsigned long)memory + 127) & -128);
-			cvmx_fpa_free(memory, pool, 0);
-			freed--;
+	while (freed) {
+		/*
+		 * FPA memory must be 128 byte aligned. Since we are
+		 * aligning we need to save the original pointer so we
+		 * can feed it to kfree when the memory is returned to
+		 * the kernel.
+		 *
+		 * We allocate an extra 256 bytes to allow for
+		 * alignment and space for the original pointer saved
+		 * just before the block.
+		 */
+		memory = kmalloc(size + 256, GFP_ATOMIC);
+		if (unlikely(memory == NULL)) {
+			pr_warning("Unable to allocate %u bytes for FPA pool %d\n",
+				   elements * size, pool);
+			break;
 		}
+		fpa = (char *)(((unsigned long)memory + 256) & ~0x7fUL);
+		*((char **)fpa - 1) = memory;
+		cvmx_fpa_free(fpa, pool, 0);
+		freed--;
 	}
 	return elements - freed;
 }
 
 /**
- * Free memory previously allocated with cvm_oct_fill_hw_memory
- *
+ * cvm_oct_free_hw_memory - Free memory allocated by cvm_oct_fill_hw_memory
  * @pool: FPA pool to free
  * @size: Size of each buffer in the pool
  * @elements: Number of buffers that should be in the pool
  */
 static void cvm_oct_free_hw_memory(int pool, int size, int elements)
 {
-	if (USE_32BIT_SHARED) {
-		pr_warning("Warning: 32 shared memory is not freeable\n");
-	} else {
-		char *memory;
-		do {
-			memory = cvmx_fpa_alloc(pool);
-			if (memory) {
-				elements--;
-				kfree(phys_to_virt(cvmx_ptr_to_phys(memory)));
-			}
-		} while (memory);
+	char *memory;
+	char *fpa;
+	do {
+		fpa = cvmx_fpa_alloc(pool);
+		if (fpa) {
+			elements--;
+			fpa = (char *)phys_to_virt(cvmx_ptr_to_phys(fpa));
+			memory = *((char **)fpa - 1);
+			kfree(memory);
+		}
+	} while (fpa);
 
-		if (elements < 0)
-			pr_warning("Freeing of pool %u had too many "
-				   "buffers (%d)\n",
-				   pool, elements);
-		else if (elements > 0)
-			pr_warning("Warning: Freeing of pool %u is "
-				   "missing %d buffers\n",
-				   pool, elements);
-	}
+	if (elements < 0)
+		pr_warning("Freeing of pool %u had too many buffers (%d)\n",
+			   pool, elements);
+	else if (elements > 0)
+		pr_warning("Warning: Freeing of pool %u is missing %d buffers\n",
+			   pool, elements);
 }
 
 int cvm_oct_mem_fill_fpa(int pool, int size, int elements)
 {
 	int freed;
-	if (USE_SKBUFFS_IN_HW)
+	if (USE_SKBUFFS_IN_HW && pool == CVMX_FPA_PACKET_POOL)
 		freed = cvm_oct_fill_hw_skbuff(pool, size, elements);
 	else
 		freed = cvm_oct_fill_hw_memory(pool, size, elements);
@@ -191,7 +169,7 @@ int cvm_oct_mem_fill_fpa(int pool, int size, int elements)
 
 void cvm_oct_mem_empty_fpa(int pool, int size, int elements)
 {
-	if (USE_SKBUFFS_IN_HW)
+	if (USE_SKBUFFS_IN_HW && pool == CVMX_FPA_PACKET_POOL)
 		cvm_oct_free_hw_skbuff(pool, size, elements);
 	else
 		cvm_oct_free_hw_memory(pool, size, elements);
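Editor's note: a condensed model of the allocation trick the new
cvm_oct_fill_hw_memory() uses. It over-allocates by 256 bytes, rounds the
pointer to a 128-byte boundary (which leaves at least 128 bytes of
headroom, plenty for one saved pointer), and stashes the original
kmalloc() pointer just below the aligned block so the free path can
recover it. Sketch only, with hypothetical helper names:

static void *fpa_buf_alloc(size_t size)
{
	char *memory = kmalloc(size + 256, GFP_ATOMIC);
	char *fpa;

	if (!memory)
		return NULL;
	/* round up into the 256 bytes of slack; result is 128-byte aligned */
	fpa = (char *)(((unsigned long)memory + 256) & ~0x7fUL);
	*((char **)fpa - 1) = memory;	/* saved for the free path */
	return fpa;
}

static void fpa_buf_free(void *fpa)
{
	kfree(*((char **)fpa - 1));	/* recover the original pointer */
}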
diff --git a/drivers/staging/octeon/ethernet-proc.c b/drivers/staging/octeon/ethernet-proc.c
deleted file mode 100644
index 16308d484d3b..000000000000
--- a/drivers/staging/octeon/ethernet-proc.c
+++ /dev/null
@@ -1,144 +0,0 @@
-/**********************************************************************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2007 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
-**********************************************************************/
-#include <linux/kernel.h>
-#include <linux/seq_file.h>
-#include <linux/proc_fs.h>
-#include <net/dst.h>
-
-#include <asm/octeon/octeon.h>
-
-#include "octeon-ethernet.h"
-#include "ethernet-defines.h"
-
-#include "cvmx-helper.h"
-#include "cvmx-pip.h"
-
-/**
- * User is reading /proc/octeon_ethernet_stats
- *
- * @m:
- * @v:
- * Returns
- */
-static int cvm_oct_stats_show(struct seq_file *m, void *v)
-{
-	struct octeon_ethernet *priv;
-	int port;
-
-	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
-
-		if (cvm_oct_device[port]) {
-			priv = netdev_priv(cvm_oct_device[port]);
-
-			seq_printf(m, "\nOcteon Port %d (%s)\n", port,
-				   cvm_oct_device[port]->name);
-			seq_printf(m,
-				   "rx_packets: %12lu\t"
-				   "tx_packets: %12lu\n",
-				   priv->stats.rx_packets,
-				   priv->stats.tx_packets);
-			seq_printf(m,
-				   "rx_bytes: %12lu\t"
-				   "tx_bytes: %12lu\n",
-				   priv->stats.rx_bytes, priv->stats.tx_bytes);
-			seq_printf(m,
-				   "rx_errors: %12lu\t"
-				   "tx_errors: %12lu\n",
-				   priv->stats.rx_errors,
-				   priv->stats.tx_errors);
-			seq_printf(m,
-				   "rx_dropped: %12lu\t"
-				   "tx_dropped: %12lu\n",
-				   priv->stats.rx_dropped,
-				   priv->stats.tx_dropped);
-			seq_printf(m,
-				   "rx_length_errors: %12lu\t"
-				   "tx_aborted_errors: %12lu\n",
-				   priv->stats.rx_length_errors,
-				   priv->stats.tx_aborted_errors);
-			seq_printf(m,
-				   "rx_over_errors: %12lu\t"
-				   "tx_carrier_errors: %12lu\n",
-				   priv->stats.rx_over_errors,
-				   priv->stats.tx_carrier_errors);
-			seq_printf(m,
-				   "rx_crc_errors: %12lu\t"
-				   "tx_fifo_errors: %12lu\n",
-				   priv->stats.rx_crc_errors,
-				   priv->stats.tx_fifo_errors);
-			seq_printf(m,
-				   "rx_frame_errors: %12lu\t"
-				   "tx_heartbeat_errors: %12lu\n",
-				   priv->stats.rx_frame_errors,
-				   priv->stats.tx_heartbeat_errors);
-			seq_printf(m,
-				   "rx_fifo_errors: %12lu\t"
-				   "tx_window_errors: %12lu\n",
-				   priv->stats.rx_fifo_errors,
-				   priv->stats.tx_window_errors);
-			seq_printf(m,
-				   "rx_missed_errors: %12lu\t"
-				   "multicast: %12lu\n",
-				   priv->stats.rx_missed_errors,
-				   priv->stats.multicast);
-		}
-	}
-
-	return 0;
-}
-
-/**
- * /proc/octeon_ethernet_stats was openned. Use the single_open iterator
- *
- * @inode:
- * @file:
- * Returns
- */
-static int cvm_oct_stats_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, cvm_oct_stats_show, NULL);
-}
-
-static const struct file_operations cvm_oct_stats_operations = {
-	.open = cvm_oct_stats_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
-void cvm_oct_proc_initialize(void)
-{
-	struct proc_dir_entry *entry =
-	    create_proc_entry("octeon_ethernet_stats", 0, NULL);
-	if (entry)
-		entry->proc_fops = &cvm_oct_stats_operations;
-}
-
-void cvm_oct_proc_shutdown(void)
-{
-	remove_proc_entry("octeon_ethernet_stats", NULL);
-}
diff --git a/drivers/staging/octeon/ethernet-proc.h b/drivers/staging/octeon/ethernet-proc.h
deleted file mode 100644
index 82c7d9f78bc4..000000000000
--- a/drivers/staging/octeon/ethernet-proc.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*********************************************************************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2007 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
-*********************************************************************/
-
-void cvm_oct_proc_initialize(void);
-void cvm_oct_proc_shutdown(void);
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index 3820f1ec11d1..a0d4d4b98bdc 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -26,7 +26,7 @@
 **********************************************************************/
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
-#include <linux/mii.h>
+#include <linux/phy.h>
 #include <net/dst.h>
 
 #include <asm/octeon/octeon.h>
@@ -48,14 +48,20 @@ static int number_rgmii_ports;
 static void cvm_oct_rgmii_poll(struct net_device *dev)
 {
 	struct octeon_ethernet *priv = netdev_priv(dev);
-	unsigned long flags;
+	unsigned long flags = 0;
 	cvmx_helper_link_info_t link_info;
+	int use_global_register_lock = (priv->phydev == NULL);
 
-	/*
-	 * Take the global register lock since we are going to touch
-	 * registers that affect more than one port.
-	 */
-	spin_lock_irqsave(&global_register_lock, flags);
+	BUG_ON(in_interrupt());
+	if (use_global_register_lock) {
+		/*
+		 * Take the global register lock since we are going to
+		 * touch registers that affect more than one port.
+		 */
+		spin_lock_irqsave(&global_register_lock, flags);
+	} else {
+		mutex_lock(&priv->phydev->bus->mdio_lock);
+	}
 
 	link_info = cvmx_helper_link_get(priv->port);
 	if (link_info.u64 == priv->link_info) {
@@ -115,7 +121,11 @@ static void cvm_oct_rgmii_poll(struct net_device *dev)
 					   dev->name);
 			}
 		}
-		spin_unlock_irqrestore(&global_register_lock, flags);
+
+		if (use_global_register_lock)
+			spin_unlock_irqrestore(&global_register_lock, flags);
+		else
+			mutex_unlock(&priv->phydev->bus->mdio_lock);
 		return;
 	}
 
@@ -151,7 +161,12 @@ static void cvm_oct_rgmii_poll(struct net_device *dev)
 		link_info = cvmx_helper_link_autoconf(priv->port);
 		priv->link_info = link_info.u64;
 	}
-	spin_unlock_irqrestore(&global_register_lock, flags);
+
+	if (use_global_register_lock)
+		spin_unlock_irqrestore(&global_register_lock, flags);
+	else {
+		mutex_unlock(&priv->phydev->bus->mdio_lock);
+	}
 
 	if (priv->phydev == NULL) {
 		/* Tell core. */
@@ -213,8 +228,11 @@ static irqreturn_t cvm_oct_rgmii_rml_interrupt(int cpl, void *dev_id)
 			struct net_device *dev =
 			    cvm_oct_device[cvmx_helper_get_ipd_port
 					   (interface, index)];
-			if (dev)
-				cvm_oct_rgmii_poll(dev);
+			struct octeon_ethernet *priv = netdev_priv(dev);
+
+			if (dev && !atomic_read(&cvm_oct_poll_queue_stopping))
+				queue_work(cvm_oct_poll_queue, &priv->port_work);
+
 			gmx_rx_int_reg.u64 = 0;
 			gmx_rx_int_reg.s.phy_dupx = 1;
 			gmx_rx_int_reg.s.phy_link = 1;
@@ -252,8 +270,11 @@ static irqreturn_t cvm_oct_rgmii_rml_interrupt(int cpl, void *dev_id)
 			struct net_device *dev =
 			    cvm_oct_device[cvmx_helper_get_ipd_port
 					   (interface, index)];
-			if (dev)
-				cvm_oct_rgmii_poll(dev);
+			struct octeon_ethernet *priv = netdev_priv(dev);
+
+			if (dev && !atomic_read(&cvm_oct_poll_queue_stopping))
+				queue_work(cvm_oct_poll_queue, &priv->port_work);
+
 			gmx_rx_int_reg.u64 = 0;
 			gmx_rx_int_reg.s.phy_dupx = 1;
 			gmx_rx_int_reg.s.phy_link = 1;
@@ -302,6 +323,12 @@ int cvm_oct_rgmii_stop(struct net_device *dev)
 	return 0;
 }
 
+static void cvm_oct_rgmii_immediate_poll(struct work_struct *work)
+{
+	struct octeon_ethernet *priv = container_of(work, struct octeon_ethernet, port_work);
+	cvm_oct_rgmii_poll(cvm_oct_device[priv->port]);
+}
+
 int cvm_oct_rgmii_init(struct net_device *dev)
 {
 	struct octeon_ethernet *priv = netdev_priv(dev);
@@ -309,7 +336,7 @@ int cvm_oct_rgmii_init(struct net_device *dev)
 
 	cvm_oct_common_init(dev);
 	dev->netdev_ops->ndo_stop(dev);
-
+	INIT_WORK(&priv->port_work, cvm_oct_rgmii_immediate_poll);
 	/*
 	 * Due to GMX errata in CN3XXX series chips, it is necessary
 	 * to take the link down immediately when the PHY changes
@@ -397,4 +424,5 @@ void cvm_oct_rgmii_uninit(struct net_device *dev)
 	number_rgmii_ports--;
 	if (number_rgmii_ports == 0)
 		free_irq(OCTEON_IRQ_RML, &number_rgmii_ports);
+	cancel_work_sync(&priv->port_work);
 }
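Editor's note: the poll function may now sleep (it can take the MDIO bus
mutex), so the RML interrupt above defers to a workqueue instead of
calling cvm_oct_rgmii_poll() directly. A generic sketch of that pattern
with hypothetical names (the driver itself uses priv->port_work queued on
cvm_oct_poll_queue):

#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct my_port {
	struct work_struct poll_work;
};

static void my_poll_fn(struct work_struct *work)
{
	struct my_port *p = container_of(work, struct my_port, poll_work);

	/* process context: taking a mutex (e.g. mdio_lock) is safe here */
	(void)p;	/* real code would poll the PHY/link state here */
}

static irqreturn_t my_link_irq(int irq, void *dev_id)
{
	struct my_port *p = dev_id;

	schedule_work(&p->poll_work);	/* can't sleep in hard IRQ context */
	return IRQ_HANDLED;
}

/* setup:    INIT_WORK(&p->poll_work, my_poll_fn);
 * teardown: cancel_work_sync(&p->poll_work); -- as the uninit path above does
 */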
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 1b237b7e689d..cb38f9eb2cc0 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -4,7 +4,7 @@
  * Contact: support@caviumnetworks.com
  * This file is part of the OCTEON SDK
  *
- * Copyright (c) 2003-2007 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
  *
  * This file is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License, Version 2, as
@@ -27,16 +27,14 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/cache.h>
+#include <linux/cpumask.h>
 #include <linux/netdevice.h>
 #include <linux/init.h>
 #include <linux/etherdevice.h>
 #include <linux/ip.h>
 #include <linux/string.h>
 #include <linux/prefetch.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
-#include <linux/seq_file.h>
-#include <linux/proc_fs.h>
+#include <linux/smp.h>
 #include <net/dst.h>
 #ifdef CONFIG_XFRM
 #include <linux/xfrm.h>
@@ -48,8 +46,9 @@
 #include <asm/octeon/octeon.h>
 
 #include "ethernet-defines.h"
-#include "octeon-ethernet.h"
 #include "ethernet-mem.h"
+#include "ethernet-rx.h"
+#include "octeon-ethernet.h"
 #include "ethernet-util.h"
 
 #include "cvmx-helper.h"
@@ -61,62 +60,88 @@
 
 #include "cvmx-gmxx-defs.h"
 
-struct cvm_tasklet_wrapper {
-	struct tasklet_struct t;
-};
+struct cvm_napi_wrapper {
+	struct napi_struct napi;
+} ____cacheline_aligned_in_smp;
 
-/*
- * Aligning the tasklet_struct on cachline boundries seems to decrease
- * throughput even though in theory it would reduce contantion on the
- * cache lines containing the locks.
- */
+static struct cvm_napi_wrapper cvm_oct_napi[NR_CPUS] __cacheline_aligned_in_smp;
 
-static struct cvm_tasklet_wrapper cvm_oct_tasklet[NR_CPUS];
+struct cvm_oct_core_state {
+	int baseline_cores;
+	/*
+	 * The number of additional cores that could be processing
+	 * input packtes.
+	 */
+	atomic_t available_cores;
+	cpumask_t cpu_state;
+} ____cacheline_aligned_in_smp;
 
-/**
- * Interrupt handler. The interrupt occurs whenever the POW
- * transitions from 0->1 packets in our group.
- *
- * @cpl:
- * @dev_id:
- * @regs:
- * Returns
- */
-irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id)
+static struct cvm_oct_core_state core_state __cacheline_aligned_in_smp;
+
+static void cvm_oct_enable_napi(void *_)
 {
-	/* Acknowledge the interrupt */
-	if (INTERRUPT_LIMIT)
-		cvmx_write_csr(CVMX_POW_WQ_INT, 1 << pow_receive_group);
-	else
-		cvmx_write_csr(CVMX_POW_WQ_INT, 0x10001 << pow_receive_group);
-	preempt_disable();
-	tasklet_schedule(&cvm_oct_tasklet[smp_processor_id()].t);
-	preempt_enable();
-	return IRQ_HANDLED;
+	int cpu = smp_processor_id();
+	napi_schedule(&cvm_oct_napi[cpu].napi);
+}
+
+static void cvm_oct_enable_one_cpu(void)
+{
+	int v;
+	int cpu;
+
+	/* Check to see if more CPUs are available for receive processing... */
+	v = atomic_sub_if_positive(1, &core_state.available_cores);
+	if (v < 0)
+		return;
+
+	/* ... if a CPU is available, Turn on NAPI polling for that CPU. */
+	for_each_online_cpu(cpu) {
+		if (!cpu_test_and_set(cpu, core_state.cpu_state)) {
+			v = smp_call_function_single(cpu, cvm_oct_enable_napi,
+						     NULL, 0);
+			if (v)
+				panic("Can't enable NAPI.");
+			break;
+		}
+	}
+}
+
+static void cvm_oct_no_more_work(void)
+{
+	int cpu = smp_processor_id();
+
+	/*
+	 * CPU zero is special. It always has the irq enabled when
+	 * waiting for incoming packets.
+	 */
+	if (cpu == 0) {
+		enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group);
+		return;
+	}
+
+	cpu_clear(cpu, core_state.cpu_state);
+	atomic_add(1, &core_state.available_cores);
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
 /**
- * This is called when the kernel needs to manually poll the
- * device. For Octeon, this is simply calling the interrupt
- * handler. We actually poll all the devices, not just the
- * one supplied.
+ * cvm_oct_do_interrupt - interrupt handler.
+ *
+ * The interrupt occurs whenever the POW has packets in our group.
  *
- * @dev: Device to poll. Unused
  */
-void cvm_oct_poll_controller(struct net_device *dev)
+static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id)
 {
-	preempt_disable();
-	tasklet_schedule(&cvm_oct_tasklet[smp_processor_id()].t);
-	preempt_enable();
+	/* Disable the IRQ and start napi_poll. */
+	disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
+	cvm_oct_enable_napi(NULL);
+
+	return IRQ_HANDLED;
 }
-#endif
 
 /**
- * This is called on receive errors, and determines if the packet
- * can be dropped early-on in cvm_oct_tasklet_rx().
- *
+ * cvm_oct_check_rcv_error - process receive errors
  * @work: Work queue entry pointing to the packet.
+ *
  * Returns Non-zero if the packet can be dropped, zero otherwise.
  */
 static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
@@ -199,19 +224,20 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
 }
 
 /**
- * Tasklet function that is scheduled on a core when an interrupt occurs.
+ * cvm_oct_napi_poll - the NAPI poll function.
+ * @napi: The NAPI instance, or null if called from cvm_oct_poll_controller
+ * @budget: Maximum number of packets to receive.
  *
- * @unused:
+ * Returns the number of packets processed.
  */
-void cvm_oct_tasklet_rx(unsigned long unused)
+static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
 {
 	const int coreid = cvmx_get_core_num();
 	uint64_t old_group_mask;
 	uint64_t old_scratch;
 	int rx_count = 0;
-	int number_to_free;
-	int num_freed;
-	int packet_not_copied;
+	int did_work_request = 0;
+	int packet_not_copied;
 
 	/* Prefetch cvm_oct_device since we know we need it soon */
 	prefetch(cvm_oct_device);
@@ -227,59 +253,63 @@ void cvm_oct_tasklet_rx(unsigned long unused)
 	cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
 		       (old_group_mask & ~0xFFFFull) | 1 << pow_receive_group);
 
-	if (USE_ASYNC_IOBDMA)
+	if (USE_ASYNC_IOBDMA) {
 		cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
+		did_work_request = 1;
+	}
 
-	while (1) {
+	while (rx_count < budget) {
 		struct sk_buff *skb = NULL;
+		struct sk_buff **pskb = NULL;
 		int skb_in_hw;
 		cvmx_wqe_t *work;
 
-		if (USE_ASYNC_IOBDMA) {
+		if (USE_ASYNC_IOBDMA && did_work_request)
 			work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
-		} else {
-			if ((INTERRUPT_LIMIT == 0)
-			    || likely(rx_count < MAX_RX_PACKETS))
-				work =
-				    cvmx_pow_work_request_sync
-				    (CVMX_POW_NO_WAIT);
-			else
-				work = NULL;
-		}
+		else
+			work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);
+
 		prefetch(work);
-		if (work == NULL)
+		did_work_request = 0;
+		if (work == NULL) {
+			union cvmx_pow_wq_int wq_int;
+			wq_int.u64 = 0;
+			wq_int.s.iq_dis = 1 << pow_receive_group;
+			wq_int.s.wq_int = 1 << pow_receive_group;
+			cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
 			break;
+		}
+		pskb = (struct sk_buff **)(cvm_oct_get_buffer_ptr(work->packet_ptr) - sizeof(void *));
+		prefetch(pskb);
 
-		/*
-		 * Limit each core to processing MAX_RX_PACKETS
-		 * packets without a break. This way the RX can't
-		 * starve the TX task.
-		 */
-		if (USE_ASYNC_IOBDMA) {
-
-			if ((INTERRUPT_LIMIT == 0)
-			    || likely(rx_count < MAX_RX_PACKETS))
-				cvmx_pow_work_request_async_nocheck
-				    (CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
-			else {
-				cvmx_scratch_write64(CVMX_SCR_SCRATCH,
-						     0x8000000000000000ull);
-				cvmx_pow_tag_sw_null_nocheck();
-			}
+		if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) {
+			cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
+			did_work_request = 1;
+		}
+
+		if (rx_count == 0) {
+			/*
+			 * First time through, see if there is enough
+			 * work waiting to merit waking another
+			 * CPU.
+			 */
+			union cvmx_pow_wq_int_cntx counts;
+			int backlog;
+			int cores_in_use = core_state.baseline_cores - atomic_read(&core_state.available_cores);
+			counts.u64 = cvmx_read_csr(CVMX_POW_WQ_INT_CNTX(pow_receive_group));
+			backlog = counts.s.iq_cnt + counts.s.ds_cnt;
+			if (backlog > budget * cores_in_use && napi != NULL)
+				cvm_oct_enable_one_cpu();
 		}
 
 		skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1;
 		if (likely(skb_in_hw)) {
-			skb =
-			    *(struct sk_buff
-			      **)(cvm_oct_get_buffer_ptr(work->packet_ptr) -
-				  sizeof(void *));
+			skb = *pskb;
 			prefetch(&skb->head);
 			prefetch(&skb->len);
 		}
 		prefetch(cvm_oct_device[work->ipprt]);
 
-		rx_count++;
 		/* Immediately throw away all packets with receive errors */
 		if (unlikely(work->word2.snoip.rcv_error)) {
 			if (cvm_oct_check_rcv_error(work))
@@ -292,39 +322,27 @@ void cvm_oct_tasklet_rx(unsigned long unused)
 		 * buffer.
 		 */
 		if (likely(skb_in_hw)) {
-			/*
-			 * This calculation was changed in case the
-			 * skb header is using a different address
-			 * aliasing type than the buffer. It doesn't
-			 * make any differnece now, but the new one is
-			 * more correct.
-			 */
-			skb->data =
-			    skb->head + work->packet_ptr.s.addr -
-			    cvmx_ptr_to_phys(skb->head);
+			skb->data = skb->head + work->packet_ptr.s.addr - cvmx_ptr_to_phys(skb->head);
 			prefetch(skb->data);
 			skb->len = work->len;
 			skb_set_tail_pointer(skb, skb->len);
 			packet_not_copied = 1;
 		} else {
-
 			/*
 			 * We have to copy the packet. First allocate
 			 * an skbuff for it.
 			 */
 			skb = dev_alloc_skb(work->len);
 			if (!skb) {
-				DEBUGPRINT("Port %d failed to allocate "
-					   "skbuff, packet dropped\n",
-					   work->ipprt);
+				DEBUGPRINT("Port %d failed to allocate skbuff, packet dropped\n",
+					   work->ipprt);
 				cvm_oct_free_work(work);
 				continue;
 			}
 
 			/*
 			 * Check if we've received a packet that was
-			 * entirely stored in the work entry. This is
-			 * untested.
+			 * entirely stored in the work entry.
 			 */
 			if (unlikely(work->word2.s.bufs == 0)) {
 				uint8_t *ptr = work->packet_data;
@@ -343,15 +361,13 @@ void cvm_oct_tasklet_rx(unsigned long unused)
 				/* No packet buffers to free */
 			} else {
 				int segments = work->word2.s.bufs;
-				union cvmx_buf_ptr segment_ptr =
-				    work->packet_ptr;
+				union cvmx_buf_ptr segment_ptr = work->packet_ptr;
 				int len = work->len;
 
 				while (segments--) {
 					union cvmx_buf_ptr next_ptr =
-					    *(union cvmx_buf_ptr *)
-					    cvmx_phys_to_ptr(segment_ptr.s.
-							     addr - 8);
+					    *(union cvmx_buf_ptr *)cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
+
 					/*
 					 * Octeon Errata PKI-100: The segment size is
 					 * wrong. Until it is fixed, calculate the
@@ -361,22 +377,18 @@ void cvm_oct_tasklet_rx(unsigned long unused)
 					 * one: int segment_size =
 					 * segment_ptr.s.size;
 					 */
-					int segment_size =
-					    CVMX_FPA_PACKET_POOL_SIZE -
-					    (segment_ptr.s.addr -
-					     (((segment_ptr.s.addr >> 7) -
-					       segment_ptr.s.back) << 7));
-					/* Don't copy more than what is left
-					   in the packet */
+					int segment_size = CVMX_FPA_PACKET_POOL_SIZE -
+					    (segment_ptr.s.addr - (((segment_ptr.s.addr >> 7) - segment_ptr.s.back) << 7));
+					/*
+					 * Don't copy more than what
+					 * is left in the packet.
+					 */
 					if (segment_size > len)
 						segment_size = len;
 					/* Copy the data into the packet */
 					memcpy(skb_put(skb, segment_size),
-					       cvmx_phys_to_ptr(segment_ptr.s.
-								addr),
+					       cvmx_phys_to_ptr(segment_ptr.s.addr),
 					       segment_size);
-					/* Reduce the amount of bytes left
-					   to copy */
 					len -= segment_size;
 					segment_ptr = next_ptr;
 				}
@@ -389,16 +401,15 @@ void cvm_oct_tasklet_rx(unsigned long unused)
 		struct net_device *dev = cvm_oct_device[work->ipprt];
 		struct octeon_ethernet *priv = netdev_priv(dev);
 
-		/* Only accept packets for devices
-		   that are currently up */
+		/*
+		 * Only accept packets for devices that are
+		 * currently up.
+		 */
 		if (likely(dev->flags & IFF_UP)) {
 			skb->protocol = eth_type_trans(skb, dev);
 			skb->dev = dev;
 
-			if (unlikely
-			    (work->word2.s.not_IP
-			     || work->word2.s.IP_exc
-			     || work->word2.s.L4_error))
+			if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc || work->word2.s.L4_error))
 				skb->ip_summed = CHECKSUM_NONE;
 			else
 				skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -414,15 +425,13 @@ void cvm_oct_tasklet_rx(unsigned long unused)
 #endif
 			}
 			netif_receive_skb(skb);
+			rx_count++;
 		} else {
-			/*
-			 * Drop any packet received for a
-			 * device that isn't up.
-			 */
+			/* Drop any packet received for a device that isn't up */
 			/*
 			DEBUGPRINT("%s: Device not up, packet dropped\n",
 				   dev->name);
 			*/
 #ifdef CONFIG_64BIT
 			atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
 #else
@@ -435,9 +444,8 @@ void cvm_oct_tasklet_rx(unsigned long unused)
 			 * Drop any packet received for a device that
 			 * doesn't exist.
 			 */
-			DEBUGPRINT("Port %d not controlled by Linux, packet "
-				   "dropped\n",
-				   work->ipprt);
+			DEBUGPRINT("Port %d not controlled by Linux, packet dropped\n",
+				   work->ipprt);
 			dev_kfree_skb_irq(skb);
 		}
 		/*
@@ -459,47 +467,93 @@ void cvm_oct_tasklet_rx(unsigned long unused)
 			cvm_oct_free_work(work);
 		}
 	}
-
 	/* Restore the original POW group mask */
 	cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
 	if (USE_ASYNC_IOBDMA) {
 		/* Restore the scratch area */
 		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
 	}
+	cvm_oct_rx_refill_pool(0);
 
-	if (USE_SKBUFFS_IN_HW) {
-		/* Refill the packet buffer pool */
-		number_to_free =
-		    cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
-
-		if (number_to_free > 0) {
-			cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
-					      -number_to_free);
-			num_freed =
-			    cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL,
-						 CVMX_FPA_PACKET_POOL_SIZE,
-						 number_to_free);
-			if (num_freed != number_to_free) {
-				cvmx_fau_atomic_add32
-				    (FAU_NUM_PACKET_BUFFERS_TO_FREE,
-				     number_to_free - num_freed);
-			}
-		}
+	if (rx_count < budget && napi != NULL) {
+		/* No more work */
+		napi_complete(napi);
+		cvm_oct_no_more_work();
 	}
+	return rx_count;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * cvm_oct_poll_controller - poll for receive packets
+ * device.
+ *
+ * @dev: Device to poll. Unused
+ */
+void cvm_oct_poll_controller(struct net_device *dev)
+{
+	cvm_oct_napi_poll(NULL, 16);
+}
+#endif
+
 void cvm_oct_rx_initialize(void)
 {
 	int i;
-	/* Initialize all of the tasklets */
-	for (i = 0; i < NR_CPUS; i++)
-		tasklet_init(&cvm_oct_tasklet[i].t, cvm_oct_tasklet_rx, 0);
+	struct net_device *dev_for_napi = NULL;
+	union cvmx_pow_wq_int_thrx int_thr;
+	union cvmx_pow_wq_int_pc int_pc;
+
+	for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) {
+		if (cvm_oct_device[i]) {
+			dev_for_napi = cvm_oct_device[i];
+			break;
+		}
+	}
+
+	if (NULL == dev_for_napi)
+		panic("No net_devices were allocated.");
+
+	if (max_rx_cpus > 1 && max_rx_cpus < num_online_cpus())
+		atomic_set(&core_state.available_cores, max_rx_cpus);
+	else
+		atomic_set(&core_state.available_cores, num_online_cpus());
+	core_state.baseline_cores = atomic_read(&core_state.available_cores);
+
+	core_state.cpu_state = CPU_MASK_NONE;
+	for_each_possible_cpu(i) {
+		netif_napi_add(dev_for_napi, &cvm_oct_napi[i].napi,
+			       cvm_oct_napi_poll, rx_napi_weight);
+		napi_enable(&cvm_oct_napi[i].napi);
+	}
+	/* Register an IRQ hander for to receive POW interrupts */
+	i = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
+			cvm_oct_do_interrupt, 0, "Ethernet", cvm_oct_device);
+
+	if (i)
+		panic("Could not acquire Ethernet IRQ %d\n",
+		      OCTEON_IRQ_WORKQ0 + pow_receive_group);
+
+	disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
+
+	int_thr.u64 = 0;
+	int_thr.s.tc_en = 1;
+	int_thr.s.tc_thr = 1;
+	/* Enable POW interrupt when our port has at least one packet */
+	cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), int_thr.u64);
+
+	int_pc.u64 = 0;
+	int_pc.s.pc_thr = 5;
+	cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
+
+
+	/* Scheduld NAPI now. This will indirectly enable interrupts. */
+	cvm_oct_enable_one_cpu();
 }
 
 void cvm_oct_rx_shutdown(void)
 {
 	int i;
-	/* Shutdown all of the tasklets */
-	for (i = 0; i < NR_CPUS; i++)
-		tasklet_kill(&cvm_oct_tasklet[i].t);
+	/* Shutdown all of the NAPIs */
+	for_each_possible_cpu(i)
+		netif_napi_del(&cvm_oct_napi[i].napi);
 }
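Editor's note: the conversion above follows the standard IRQ/NAPI
handshake: the hard interrupt only masks itself and schedules NAPI, and
the poll routine consumes up to budget packets, completing NAPI and
re-arming the interrupt once the backlog drains. A one-context sketch
with placeholder names (my_irq, my_napi and my_rx_one are not the
driver's):

#include <linux/interrupt.h>
#include <linux/netdevice.h>

static struct napi_struct my_napi;	/* registered via netif_napi_add() */
static int my_irq;			/* obtained from request_irq() */
static int my_rx_one(void);		/* pop one packet; returns 0 when empty */

static irqreturn_t my_interrupt(int irq, void *dev_id)
{
	disable_irq_nosync(irq);	/* stay masked while polling */
	napi_schedule(&my_napi);
	return IRQ_HANDLED;
}

static int my_poll(struct napi_struct *napi, int budget)
{
	int rx_count = 0;

	while (rx_count < budget && my_rx_one())
		rx_count++;

	if (rx_count < budget) {
		napi_complete(napi);	/* backlog drained */
		enable_irq(my_irq);	/* re-arm the hardware interrupt */
	}
	return rx_count;
}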
diff --git a/drivers/staging/octeon/ethernet-rx.h b/drivers/staging/octeon/ethernet-rx.h
index a9b72b87a7a6..a0743b85d54e 100644
--- a/drivers/staging/octeon/ethernet-rx.h
+++ b/drivers/staging/octeon/ethernet-rx.h
@@ -24,10 +24,29 @@
  * This file may also be available under a different license from Cavium.
  * Contact Cavium Networks for more information
 *********************************************************************/
+#include "cvmx-fau.h"
 
-irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id);
 void cvm_oct_poll_controller(struct net_device *dev);
-void cvm_oct_tasklet_rx(unsigned long unused);
-
 void cvm_oct_rx_initialize(void);
 void cvm_oct_rx_shutdown(void);
+
+static inline void cvm_oct_rx_refill_pool(int fill_threshold)
+{
+	int number_to_free;
+	int num_freed;
+	/* Refill the packet buffer pool */
+	number_to_free =
+	    cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
+
+	if (number_to_free > fill_threshold) {
+		cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
+				      -number_to_free);
+		num_freed = cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL,
+						 CVMX_FPA_PACKET_POOL_SIZE,
+						 number_to_free);
+		if (num_freed != number_to_free) {
+			cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
+					      number_to_free - num_freed);
+		}
+	}
+}
diff --git a/drivers/staging/octeon/ethernet-sgmii.c b/drivers/staging/octeon/ethernet-sgmii.c
index 6061d01eca2d..2d8589eb461e 100644
--- a/drivers/staging/octeon/ethernet-sgmii.c
+++ b/drivers/staging/octeon/ethernet-sgmii.c
@@ -26,7 +26,6 @@
 **********************************************************************/
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
-#include <linux/mii.h>
 #include <net/dst.h>
 
 #include <asm/octeon/octeon.h>
diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c
index 00dc0f4bad19..b58b8971f939 100644
--- a/drivers/staging/octeon/ethernet-spi.c
+++ b/drivers/staging/octeon/ethernet-spi.c
@@ -26,7 +26,6 @@
 **********************************************************************/
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
-#include <linux/mii.h>
 #include <net/dst.h>
 
 #include <asm/octeon/octeon.h>
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 535294105f65..afc2b734d554 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -4,7 +4,7 @@
  * Contact: support@caviumnetworks.com
  * This file is part of the OCTEON SDK
  *
- * Copyright (c) 2003-2007 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
  *
  * This file is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License, Version 2, as
@@ -31,10 +31,6 @@
 #include <linux/etherdevice.h>
 #include <linux/ip.h>
 #include <linux/string.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
-#include <linux/seq_file.h>
-#include <linux/proc_fs.h>
 #include <net/dst.h>
 #ifdef CONFIG_XFRM
 #include <linux/xfrm.h>
@@ -52,11 +48,14 @@
 
 #include "cvmx-wqe.h"
 #include "cvmx-fau.h"
+#include "cvmx-pip.h"
 #include "cvmx-pko.h"
 #include "cvmx-helper.h"
 
 #include "cvmx-gmxx-defs.h"
 
+#define CVM_OCT_SKB_CB(skb)	((u64 *)((skb)->cb))
+
 /*
  * You can define GET_SKBUFF_QOS() to override how the skbuff output
  * function determines which output queue is used. The default
@@ -68,12 +67,81 @@
68#define GET_SKBUFF_QOS(skb) 0 67#define GET_SKBUFF_QOS(skb) 0
69#endif 68#endif
70 69
70static void cvm_oct_tx_do_cleanup(unsigned long arg);
71static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0);
72
73/* Maximum number of SKBs to try to free per xmit packet. */
74#define MAX_SKB_TO_FREE (MAX_OUT_QUEUE_DEPTH * 2)
75
76static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau)
77{
78 int32_t undo;
79 undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
80 if (undo > 0)
81 cvmx_fau_atomic_add32(fau, -undo);
82 skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE : -skb_to_free;
83 return skb_to_free;
84}
85
86static void cvm_oct_kick_tx_poll_watchdog(void)
87{
88 union cvmx_ciu_timx ciu_timx;
89 ciu_timx.u64 = 0;
90 ciu_timx.s.one_shot = 1;
91 ciu_timx.s.len = cvm_oct_tx_poll_interval;
92 cvmx_write_csr(CVMX_CIU_TIMX(1), ciu_timx.u64);
93}
94
95void cvm_oct_free_tx_skbs(struct net_device *dev)
96{
97 int32_t skb_to_free;
98 int qos, queues_per_port;
99 int total_freed = 0;
100 int total_remaining = 0;
101 unsigned long flags;
102 struct octeon_ethernet *priv = netdev_priv(dev);
103
104 queues_per_port = cvmx_pko_get_num_queues(priv->port);
105 /* Drain any pending packets in the free list */
106 for (qos = 0; qos < queues_per_port; qos++) {
107 if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
108 continue;
109 skb_to_free = cvmx_fau_fetch_and_add32(priv->fau+qos*4, MAX_SKB_TO_FREE);
110 skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau+qos*4);
111
112
113 total_freed += skb_to_free;
114 if (skb_to_free > 0) {
115 struct sk_buff *to_free_list = NULL;
116 spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
117 while (skb_to_free > 0) {
118 struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);
119 t->next = to_free_list;
120 to_free_list = t;
121 skb_to_free--;
122 }
123 spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
124 /* Do the actual freeing outside of the lock. */
125 while (to_free_list) {
126 struct sk_buff *t = to_free_list;
127 to_free_list = to_free_list->next;
128 dev_kfree_skb_any(t);
129 }
130 }
131 total_remaining += skb_queue_len(&priv->tx_free_list[qos]);
132 }
133 if (total_freed >= 0 && netif_queue_stopped(dev))
134 netif_wake_queue(dev);
135 if (total_remaining)
136 cvm_oct_kick_tx_poll_watchdog();
137}
138
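/*
 * Design note on the pattern above: dev_kfree_skb_any() can be
 * expensive and may take further locks, so completed skbs are first
 * moved to a private singly linked list while holding the queue
 * spinlock and only freed after the lock is dropped, keeping the
 * critical section short.
 */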
71/** 139/**
72 * Packet transmit 140 * cvm_oct_xmit - transmit a packet
73 *
74 * @skb: Packet to send 141 * @skb: Packet to send
75 * @dev: Device info structure 142 * @dev: Device info structure
76 * Returns Always returns zero 143 *
144 * Returns Always returns NETDEV_TX_OK
77 */ 145 */
78int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) 146int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
79{ 147{
@@ -81,13 +149,15 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
81 union cvmx_buf_ptr hw_buffer; 149 union cvmx_buf_ptr hw_buffer;
82 uint64_t old_scratch; 150 uint64_t old_scratch;
83 uint64_t old_scratch2; 151 uint64_t old_scratch2;
84 int dropped;
85 int qos; 152 int qos;
86 int queue_it_up; 153 int i;
154 enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type;
87 struct octeon_ethernet *priv = netdev_priv(dev); 155 struct octeon_ethernet *priv = netdev_priv(dev);
156 struct sk_buff *to_free_list;
88 int32_t skb_to_free; 157 int32_t skb_to_free;
89 int32_t undo;
90 int32_t buffers_to_free; 158 int32_t buffers_to_free;
159 u32 total_to_clean;
160 unsigned long flags;
91#if REUSE_SKBUFFS_WITHOUT_FREE 161#if REUSE_SKBUFFS_WITHOUT_FREE
92 unsigned char *fpa_head; 162 unsigned char *fpa_head;
93#endif 163#endif
@@ -98,9 +168,6 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
98 */ 168 */
99 prefetch(priv); 169 prefetch(priv);
100 170
101 /* Start off assuming no drop */
102 dropped = 0;
103
104 /* 171 /*
105 * The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to 172 * The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to
106 * completely remove "qos" in the event neither interface 173 * completely remove "qos" in the event neither interface
@@ -135,6 +202,28 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
135 } 202 }
136 203
137 /* 204 /*
205 * We have space for 6 segment pointers, If there will be more
206 * than that, we must linearize.
207 */
208 if (unlikely(skb_shinfo(skb)->nr_frags > 5)) {
209 if (unlikely(__skb_linearize(skb))) {
210 queue_type = QUEUE_DROP;
211 if (USE_ASYNC_IOBDMA) {
212 /* Get the number of skbuffs in use by the hardware */
213 CVMX_SYNCIOBDMA;
214 skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
215 } else {
216 /* Get the number of skbuffs in use by the hardware */
217 skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
218 MAX_SKB_TO_FREE);
219 }
220 skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau + qos * 4);
221 spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
222 goto skip_xmit;
223 }
224 }
225
226 /*
138 * The CN3XXX series of parts has an errata (GMX-401) which 227 * The CN3XXX series of parts has an errata (GMX-401) which
139 * causes the GMX block to hang if a collision occurs towards 228 * causes the GMX block to hang if a collision occurs towards
140 * the end of a <68 byte packet. As a workaround for this, we 229 * the end of a <68 byte packet. As a workaround for this, we
@@ -162,13 +251,6 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
162 } 251 }
163 } 252 }
164 253
165 /* Build the PKO buffer pointer */
166 hw_buffer.u64 = 0;
167 hw_buffer.s.addr = cvmx_ptr_to_phys(skb->data);
168 hw_buffer.s.pool = 0;
169 hw_buffer.s.size =
170 (unsigned long)skb_end_pointer(skb) - (unsigned long)skb->head;
171
172 /* Build the PKO command */ 254 /* Build the PKO command */
173 pko_command.u64 = 0; 255 pko_command.u64 = 0;
174 pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */ 256 pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */
@@ -178,7 +260,31 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
178 pko_command.s.subone0 = 1; 260 pko_command.s.subone0 = 1;
179 261
180 pko_command.s.dontfree = 1; 262 pko_command.s.dontfree = 1;
181 pko_command.s.reg0 = priv->fau + qos * 4; 263
264 /* Build the PKO buffer pointer */
265 hw_buffer.u64 = 0;
266 if (skb_shinfo(skb)->nr_frags == 0) {
267 hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
268 hw_buffer.s.pool = 0;
269 hw_buffer.s.size = skb->len;
270 } else {
271 hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
272 hw_buffer.s.pool = 0;
273 hw_buffer.s.size = skb_headlen(skb);
274 CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
275 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
276 struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i;
277 hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)(page_address(fs->page) + fs->page_offset));
278 hw_buffer.s.size = fs->size;
279 CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
280 }
281 hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)CVM_OCT_SKB_CB(skb));
282 hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1;
283 pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1;
284 pko_command.s.gather = 1;
285 goto dont_put_skbuff_in_hw;
286 }
287
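/*
 * Why the nr_frags > 5 linearize check earlier: the gather list above
 * is built in skb->cb, which is 48 bytes, i.e. exactly six u64 buffer
 * pointers (the linear head plus at most five fragments).  A sketch of
 * a compile-time check that would make the assumption explicit:
 *
 *	BUILD_BUG_ON(6 * sizeof(u64) > sizeof(((struct sk_buff *)0)->cb));
 */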
182 /* 288 /*
183 * See if we can put this skb in the FPA pool. Any strange 289 * See if we can put this skb in the FPA pool. Any strange
184 * behavior from the Linux networking stack will most likely 290 * behavior from the Linux networking stack will most likely
@@ -190,7 +296,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
190 * shown a 25% increase in performance under some loads. 296 * shown a 25% increase in performance under some loads.
191 */ 297 */
192#if REUSE_SKBUFFS_WITHOUT_FREE 298#if REUSE_SKBUFFS_WITHOUT_FREE
193 fpa_head = skb->head + 128 - ((unsigned long)skb->head & 0x7f); 299 fpa_head = skb->head + 256 - ((unsigned long)skb->head & 0x7f);
194 if (unlikely(skb->data < fpa_head)) { 300 if (unlikely(skb->data < fpa_head)) {
195 /* 301 /*
196 * printk("TX buffer beginning can't meet FPA 302 * printk("TX buffer beginning can't meet FPA
@@ -248,10 +354,9 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
248 * We can use this buffer in the FPA. We don't need the FAU 354 * We can use this buffer in the FPA. We don't need the FAU
249 * update anymore 355 * update anymore
250 */ 356 */
251 pko_command.s.reg0 = 0;
252 pko_command.s.dontfree = 0; 357 pko_command.s.dontfree = 0;
253 358
254 hw_buffer.s.back = (skb->data - fpa_head) >> 7; 359 hw_buffer.s.back = ((unsigned long)skb->data >> 7) - ((unsigned long)fpa_head >> 7);
255 *(struct sk_buff **)(fpa_head - sizeof(void *)) = skb; 360 *(struct sk_buff **)(fpa_head - sizeof(void *)) = skb;
256 361
257 /* 362 /*
@@ -272,16 +377,16 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
272 skb->tc_verd = 0; 377 skb->tc_verd = 0;
273#endif /* CONFIG_NET_CLS_ACT */ 378#endif /* CONFIG_NET_CLS_ACT */
274#endif /* CONFIG_NET_SCHED */ 379#endif /* CONFIG_NET_SCHED */
380#endif /* REUSE_SKBUFFS_WITHOUT_FREE */
275 381
276dont_put_skbuff_in_hw: 382dont_put_skbuff_in_hw:
277#endif /* REUSE_SKBUFFS_WITHOUT_FREE */
278 383
279 /* Check if we can use the hardware checksumming */ 384 /* Check if we can use the hardware checksumming */
280 if (USE_HW_TCPUDP_CHECKSUM && (skb->protocol == htons(ETH_P_IP)) && 385 if (USE_HW_TCPUDP_CHECKSUM && (skb->protocol == htons(ETH_P_IP)) &&
281 (ip_hdr(skb)->version == 4) && (ip_hdr(skb)->ihl == 5) && 386 (ip_hdr(skb)->version == 4) && (ip_hdr(skb)->ihl == 5) &&
282 ((ip_hdr(skb)->frag_off == 0) || (ip_hdr(skb)->frag_off == 1 << 14)) 387 ((ip_hdr(skb)->frag_off == 0) || (ip_hdr(skb)->frag_off == 1 << 14))
283 && ((ip_hdr(skb)->protocol == IP_PROTOCOL_TCP) 388 && ((ip_hdr(skb)->protocol == IPPROTO_TCP)
284 || (ip_hdr(skb)->protocol == IP_PROTOCOL_UDP))) { 389 || (ip_hdr(skb)->protocol == IPPROTO_UDP))) {
285 /* Use hardware checksum calc */ 390 /* Use hardware checksum calc */
286 pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1; 391 pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1;
287 } 392 }
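/*
 * Note on ipoffp1 above: the field encodes the IP header offset plus
 * one, with zero meaning "no checksum offload" (as the removed
 * cvm_oct_transmit_qos() below also assumed), so sizeof(struct ethhdr)
 * + 1 == 15 points the PKO checksum engine just past the 14-byte
 * Ethernet header.
 */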
@@ -299,89 +404,116 @@ dont_put_skbuff_in_hw:
299 cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0); 404 cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
300 } 405 }
301 406
302 /* 407 skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau+qos*4);
303 * We try to claim MAX_SKB_TO_FREE buffers. If there were not
304 * that many available, we have to un-claim (undo) any that
305 * were in excess. If skb_to_free is positive we will free
306 * that many buffers.
307 */
308 undo = skb_to_free > 0 ?
309 MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
310 if (undo > 0)
311 cvmx_fau_atomic_add32(priv->fau+qos*4, -undo);
312 skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ?
313 MAX_SKB_TO_FREE : -skb_to_free;
314 408
315 /* 409 /*
316 * If we're sending faster than the receive can free them then 410 * If we're sending faster than the receive can free them then
317 * don't do the HW free. 411 * don't do the HW free.
318 */ 412 */
319 if ((buffers_to_free < -100) && !pko_command.s.dontfree) { 413 if ((buffers_to_free < -100) && !pko_command.s.dontfree)
320 pko_command.s.dontfree = 1; 414 pko_command.s.dontfree = 1;
321 pko_command.s.reg0 = priv->fau + qos * 4; 415
416 if (pko_command.s.dontfree) {
417 queue_type = QUEUE_CORE;
418 pko_command.s.reg0 = priv->fau+qos*4;
419 } else {
420 queue_type = QUEUE_HW;
322 } 421 }
422 if (USE_ASYNC_IOBDMA)
423 cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH, FAU_TOTAL_TX_TO_CLEAN, 1);
323 424
324 cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos, 425 spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
325 CVMX_PKO_LOCK_CMD_QUEUE);
326 426
327 /* Drop this packet if we have too many already queued to the HW */ 427 /* Drop this packet if we have too many already queued to the HW */
328 if (unlikely 428 if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >= MAX_OUT_QUEUE_DEPTH)) {
329 (skb_queue_len(&priv->tx_free_list[qos]) >= MAX_OUT_QUEUE_DEPTH)) { 429 if (dev->tx_queue_len != 0) {
330 /* 430 /* Drop the lock when notifying the core. */
331 DEBUGPRINT("%s: Tx dropped. Too many queued\n", dev->name); 431 spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
332 */ 432 netif_stop_queue(dev);
333 dropped = 1; 433 spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
434 } else {
435 /* If not using normal queueing. */
436 queue_type = QUEUE_DROP;
437 goto skip_xmit;
438 }
334 } 439 }
440
441 cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
442 CVMX_PKO_LOCK_NONE);
443
335 /* Send the packet to the output queue */ 444 /* Send the packet to the output queue */
336 else if (unlikely 445 if (unlikely(cvmx_pko_send_packet_finish(priv->port,
337 (cvmx_pko_send_packet_finish 446 priv->queue + qos,
338 (priv->port, priv->queue + qos, pko_command, hw_buffer, 447 pko_command, hw_buffer,
339 CVMX_PKO_LOCK_CMD_QUEUE))) { 448 CVMX_PKO_LOCK_NONE))) {
340 DEBUGPRINT("%s: Failed to send the packet\n", dev->name); 449 DEBUGPRINT("%s: Failed to send the packet\n", dev->name);
341 dropped = 1; 450 queue_type = QUEUE_DROP;
451 }
452skip_xmit:
453 to_free_list = NULL;
454
455 switch (queue_type) {
456 case QUEUE_DROP:
457 skb->next = to_free_list;
458 to_free_list = skb;
459 priv->stats.tx_dropped++;
460 break;
461 case QUEUE_HW:
462 cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
463 break;
464 case QUEUE_CORE:
465 __skb_queue_tail(&priv->tx_free_list[qos], skb);
466 break;
467 default:
468 BUG();
469 }
470
471 while (skb_to_free > 0) {
472 struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);
473 t->next = to_free_list;
474 to_free_list = t;
475 skb_to_free--;
476 }
477
478 spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
479
480 /* Do the actual freeing outside of the lock. */
481 while (to_free_list) {
482 struct sk_buff *t = to_free_list;
483 to_free_list = to_free_list->next;
484 dev_kfree_skb_any(t);
342 } 485 }
343 486
344 if (USE_ASYNC_IOBDMA) { 487 if (USE_ASYNC_IOBDMA) {
488 CVMX_SYNCIOBDMA;
489 total_to_clean = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
345 /* Restore the scratch area */ 490 /* Restore the scratch area */
346 cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch); 491 cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
347 cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2); 492 cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
348 }
349
350 queue_it_up = 0;
351 if (unlikely(dropped)) {
352 dev_kfree_skb_any(skb);
353 priv->stats.tx_dropped++;
354 } else { 493 } else {
355 if (USE_SKBUFFS_IN_HW) { 494 total_to_clean = cvmx_fau_fetch_and_add32(FAU_TOTAL_TX_TO_CLEAN, 1);
356 /* Put this packet on the queue to be freed later */
357 if (pko_command.s.dontfree)
358 queue_it_up = 1;
359 else
360 cvmx_fau_atomic_add32
361 (FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
362 } else {
363 /* Put this packet on the queue to be freed later */
364 queue_it_up = 1;
365 }
366 } 495 }
367 496
368 if (queue_it_up) { 497 if (total_to_clean & 0x3ff) {
369 spin_lock(&priv->tx_free_list[qos].lock); 498 /*
370 __skb_queue_tail(&priv->tx_free_list[qos], skb); 499 * Schedule the cleanup tasklet every 1024 packets for
371 cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 0); 500 * the pathological case of high traffic on one port
372 spin_unlock(&priv->tx_free_list[qos].lock); 501 * delaying clean up of packets on a different port
373 } else { 502 * that is blocked waiting for the cleanup.
374 cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 1); 503 */
504 tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
375 } 505 }
376 506
377 return 0; 507 cvm_oct_kick_tx_poll_watchdog();
508
509 return NETDEV_TX_OK;
378} 510}
379 511
380/** 512/**
381 * Packet transmit to the POW 513 * cvm_oct_xmit_pow - transmit a packet to the POW
382 *
383 * @skb: Packet to send 514 * @skb: Packet to send
384 * @dev: Device info structure 515 * @dev: Device info structure
 516 *
385 * Returns Always returns zero 517 * Returns Always returns zero
386 */ 518 */
387int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev) 519int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
@@ -459,8 +591,8 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
459 work->word2.s.dec_ipcomp = 0; /* FIXME */ 591 work->word2.s.dec_ipcomp = 0; /* FIXME */
460#endif 592#endif
461 work->word2.s.tcp_or_udp = 593 work->word2.s.tcp_or_udp =
462 (ip_hdr(skb)->protocol == IP_PROTOCOL_TCP) 594 (ip_hdr(skb)->protocol == IPPROTO_TCP)
463 || (ip_hdr(skb)->protocol == IP_PROTOCOL_UDP); 595 || (ip_hdr(skb)->protocol == IPPROTO_UDP);
464#if 0 596#if 0
465 /* FIXME */ 597 /* FIXME */
466 work->word2.s.dec_ipsec = 0; 598 work->word2.s.dec_ipsec = 0;
@@ -529,116 +661,63 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
529} 661}
530 662
531/** 663/**
 532 * Transmit a work queue entry out of the ethernet port. Both 664 * cvm_oct_tx_shutdown_dev - free all skbs that are currently queued for TX.
533 * the work queue entry and the packet data can optionally be 665 * @dev: Device being shutdown
534 * freed. The work will be freed on error as well.
535 *
536 * @dev: Device to transmit out.
537 * @work_queue_entry:
538 * Work queue entry to send
539 * @do_free: True if the work queue entry and packet data should be
540 * freed. If false, neither will be freed.
541 * @qos: Index into the queues for this port to transmit on. This
542 * is used to implement QoS if their are multiple queues per
543 * port. This parameter must be between 0 and the number of
544 * queues per port minus 1. Values outside of this range will
545 * be change to zero.
546 * 666 *
547 * Returns Zero on success, negative on failure.
548 */ 667 */
549int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry, 668void cvm_oct_tx_shutdown_dev(struct net_device *dev)
550 int do_free, int qos)
551{ 669{
552 unsigned long flags;
553 union cvmx_buf_ptr hw_buffer;
554 cvmx_pko_command_word0_t pko_command;
555 int dropped;
556 struct octeon_ethernet *priv = netdev_priv(dev); 670 struct octeon_ethernet *priv = netdev_priv(dev);
557 cvmx_wqe_t *work = work_queue_entry; 671 unsigned long flags;
672 int qos;
558 673
559 if (!(dev->flags & IFF_UP)) { 674 for (qos = 0; qos < 16; qos++) {
560 DEBUGPRINT("%s: Device not up\n", dev->name); 675 spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
561 if (do_free) 676 while (skb_queue_len(&priv->tx_free_list[qos]))
562 cvm_oct_free_work(work); 677 dev_kfree_skb_any(__skb_dequeue
563 return -1; 678 (&priv->tx_free_list[qos]));
679 spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
564 } 680 }
681}
565 682
566 /* The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to completely 683static void cvm_oct_tx_do_cleanup(unsigned long arg)
567 remove "qos" in the event neither interface supports 684{
568 multiple queues per port */ 685 int port;
569 if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
570 (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
571 if (qos <= 0)
572 qos = 0;
573 else if (qos >= cvmx_pko_get_num_queues(priv->port))
574 qos = 0;
575 } else
576 qos = 0;
577
578 /* Start off assuming no drop */
579 dropped = 0;
580
581 local_irq_save(flags);
582 cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
583 CVMX_PKO_LOCK_CMD_QUEUE);
584
585 /* Build the PKO buffer pointer */
586 hw_buffer.u64 = 0;
587 hw_buffer.s.addr = work->packet_ptr.s.addr;
588 hw_buffer.s.pool = CVMX_FPA_PACKET_POOL;
589 hw_buffer.s.size = CVMX_FPA_PACKET_POOL_SIZE;
590 hw_buffer.s.back = work->packet_ptr.s.back;
591 686
592 /* Build the PKO command */ 687 for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
593 pko_command.u64 = 0; 688 if (cvm_oct_device[port]) {
594 pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */ 689 struct net_device *dev = cvm_oct_device[port];
595 pko_command.s.dontfree = !do_free; 690 cvm_oct_free_tx_skbs(dev);
596 pko_command.s.segs = work->word2.s.bufs; 691 }
597 pko_command.s.total_bytes = work->len; 692 }
693}
598 694
599 /* Check if we can use the hardware checksumming */ 695static irqreturn_t cvm_oct_tx_cleanup_watchdog(int cpl, void *dev_id)
600 if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc)) 696{
601 pko_command.s.ipoffp1 = 0; 697 /* Disable the interrupt. */
602 else 698 cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
603 pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1; 699 /* Do the work in the tasklet. */
700 tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
701 return IRQ_HANDLED;
702}
604 703
605 /* Send the packet to the output queue */ 704void cvm_oct_tx_initialize(void)
606 if (unlikely 705{
607 (cvmx_pko_send_packet_finish 706 int i;
608 (priv->port, priv->queue + qos, pko_command, hw_buffer,
609 CVMX_PKO_LOCK_CMD_QUEUE))) {
610 DEBUGPRINT("%s: Failed to send the packet\n", dev->name);
611 dropped = -1;
612 }
613 local_irq_restore(flags);
614 707
615 if (unlikely(dropped)) { 708 /* Disable the interrupt. */
616 if (do_free) 709 cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
617 cvm_oct_free_work(work); 710 /* Register an IRQ hander for to receive CIU_TIMX(1) interrupts */
618 priv->stats.tx_dropped++; 711 i = request_irq(OCTEON_IRQ_TIMER1,
619 } else if (do_free) 712 cvm_oct_tx_cleanup_watchdog, 0,
620 cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1)); 713 "Ethernet", cvm_oct_device);
621 714
622 return dropped; 715 if (i)
716 panic("Could not acquire Ethernet IRQ %d\n", OCTEON_IRQ_TIMER1);
623} 717}
624EXPORT_SYMBOL(cvm_oct_transmit_qos);
625 718
626/** 719void cvm_oct_tx_shutdown(void)
627 * This function frees all skb that are currently queued for TX.
628 *
629 * @dev: Device being shutdown
630 */
631void cvm_oct_tx_shutdown(struct net_device *dev)
632{ 720{
633 struct octeon_ethernet *priv = netdev_priv(dev); 721 /* Free the interrupt handler */
634 unsigned long flags; 722 free_irq(OCTEON_IRQ_TIMER1, cvm_oct_device);
635 int qos;
636
637 for (qos = 0; qos < 16; qos++) {
638 spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
639 while (skb_queue_len(&priv->tx_free_list[qos]))
640 dev_kfree_skb_any(__skb_dequeue
641 (&priv->tx_free_list[qos]));
642 spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
643 }
644} 723}
diff --git a/drivers/staging/octeon/ethernet-tx.h b/drivers/staging/octeon/ethernet-tx.h
index c0bebf750bc0..547680c6c371 100644
--- a/drivers/staging/octeon/ethernet-tx.h
+++ b/drivers/staging/octeon/ethernet-tx.h
@@ -29,29 +29,6 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev);
29int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev); 29int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev);
30int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry, 30int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry,
31 int do_free, int qos); 31 int do_free, int qos);
32void cvm_oct_tx_shutdown(struct net_device *dev); 32void cvm_oct_tx_initialize(void);
33 33void cvm_oct_tx_shutdown(void);
34/** 34void cvm_oct_tx_shutdown_dev(struct net_device *dev);
35 * Free dead transmit skbs.
36 *
37 * @priv: The driver data
38 * @skb_to_free: The number of SKBs to free (free none if negative).
39 * @qos: The queue to free from.
40 * @take_lock: If true, acquire the skb list lock.
41 */
42static inline void cvm_oct_free_tx_skbs(struct octeon_ethernet *priv,
43 int skb_to_free,
44 int qos, int take_lock)
45{
46 /* Free skbuffs not in use by the hardware. */
47 if (skb_to_free > 0) {
48 if (take_lock)
49 spin_lock(&priv->tx_free_list[qos].lock);
50 while (skb_to_free > 0) {
51 dev_kfree_skb(__skb_dequeue(&priv->tx_free_list[qos]));
52 skb_to_free--;
53 }
54 if (take_lock)
55 spin_unlock(&priv->tx_free_list[qos].lock);
56 }
57}
diff --git a/drivers/staging/octeon/ethernet-util.h b/drivers/staging/octeon/ethernet-util.h
index 37b665918000..23467563fe57 100644
--- a/drivers/staging/octeon/ethernet-util.h
+++ b/drivers/staging/octeon/ethernet-util.h
@@ -30,10 +30,9 @@
30 } while (0) 30 } while (0)
31 31
32/** 32/**
33 * Given a packet data address, return a pointer to the 33 * cvm_oct_get_buffer_ptr - convert packet data address to pointer
34 * beginning of the packet buffer.
35 *
36 * @packet_ptr: Packet data hardware address 34 * @packet_ptr: Packet data hardware address
35 *
37 * Returns Packet buffer pointer 36 * Returns Packet buffer pointer
38 */ 37 */
39static inline void *cvm_oct_get_buffer_ptr(union cvmx_buf_ptr packet_ptr) 38static inline void *cvm_oct_get_buffer_ptr(union cvmx_buf_ptr packet_ptr)
@@ -43,9 +42,7 @@ static inline void *cvm_oct_get_buffer_ptr(union cvmx_buf_ptr packet_ptr)
43} 42}
44 43
45/** 44/**
 46 * Given an IPD/PKO port number, return the logical interface it is 45 * INTERFACE - convert IPD port to logical interface
47 * on.
48 *
49 * @ipd_port: Port to check 46 * @ipd_port: Port to check
50 * 47 *
51 * Returns Logical interface 48 * Returns Logical interface
@@ -65,9 +62,7 @@ static inline int INTERFACE(int ipd_port)
65} 62}
66 63
67/** 64/**
68 * Given an IPD/PKO port number, return the port's index on a 65 * INDEX - convert IPD/PKO port number to the port's interface index
69 * logical interface.
70 *
71 * @ipd_port: Port to check 66 * @ipd_port: Port to check
72 * 67 *
73 * Returns Index into interface port list 68 * Returns Index into interface port list
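A quick illustration of the two helpers above (a hypothetical example,
assuming the usual Octeon numbering in which IPD ports 0-15 sit on
interface 0 and ports 16-31 on interface 1):

	int iface = INTERFACE(18);	/* 18 >> 4 == 1, i.e. interface 1 */
	int index = INDEX(18);		/* 18 & 15 == 2, third port on it */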
diff --git a/drivers/staging/octeon/ethernet-xaui.c b/drivers/staging/octeon/ethernet-xaui.c
index ee3dc41b2c53..3fca1cc31ed8 100644
--- a/drivers/staging/octeon/ethernet-xaui.c
+++ b/drivers/staging/octeon/ethernet-xaui.c
@@ -26,7 +26,6 @@
26**********************************************************************/ 26**********************************************************************/
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/netdevice.h> 28#include <linux/netdevice.h>
29#include <linux/mii.h>
30#include <net/dst.h> 29#include <net/dst.h>
31 30
32#include <asm/octeon/octeon.h> 31#include <asm/octeon/octeon.h>
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 220de133a6a5..4a2161f70c7f 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -29,7 +29,6 @@
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/netdevice.h> 30#include <linux/netdevice.h>
31#include <linux/etherdevice.h> 31#include <linux/etherdevice.h>
32#include <linux/delay.h>
33#include <linux/phy.h> 32#include <linux/phy.h>
34 33
35#include <net/dst.h> 34#include <net/dst.h>
@@ -43,8 +42,6 @@
43#include "ethernet-tx.h" 42#include "ethernet-tx.h"
44#include "ethernet-mdio.h" 43#include "ethernet-mdio.h"
45#include "ethernet-util.h" 44#include "ethernet-util.h"
46#include "ethernet-proc.h"
47
48 45
49#include "cvmx-pip.h" 46#include "cvmx-pip.h"
50#include "cvmx-pko.h" 47#include "cvmx-pko.h"
@@ -104,13 +101,15 @@ MODULE_PARM_DESC(pow_send_list, "\n"
104 "\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n" 101 "\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
105 "\tusing the pow_send_group."); 102 "\tusing the pow_send_group.");
106 103
107static int disable_core_queueing = 1; 104int max_rx_cpus = -1;
108module_param(disable_core_queueing, int, 0444); 105module_param(max_rx_cpus, int, 0444);
109MODULE_PARM_DESC(disable_core_queueing, "\n" 106MODULE_PARM_DESC(max_rx_cpus, "\n"
110 "\tWhen set the networking core's tx_queue_len is set to zero. This\n" 107 "\t\tThe maximum number of CPUs to use for packet reception.\n"
111 "\tallows packets to be sent without lock contention in the packet\n" 108 "\t\tUse -1 to use all available CPUs.");
112 "\tscheduler resulting in some cases in improved throughput.\n");
113 109
110int rx_napi_weight = 32;
111module_param(rx_napi_weight, int, 0444);
112MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");
114 113
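/*
 * Usage note: both parameters above are 0444, i.e. read-only at
 * runtime, so they must be given at module load time, e.g. this
 * hypothetical invocation:
 *
 *	modprobe octeon-ethernet max_rx_cpus=2 rx_napi_weight=64
 */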
115/* 114/*
116 * The offset from mac_addr_base that should be used for the next port 115 * The offset from mac_addr_base that should be used for the next port
@@ -122,9 +121,16 @@ MODULE_PARM_DESC(disable_core_queueing, "\n"
122static unsigned int cvm_oct_mac_addr_offset; 121static unsigned int cvm_oct_mac_addr_offset;
123 122
124/** 123/**
125 * Periodic timer to check auto negotiation 124 * cvm_oct_poll_queue - Workqueue for polling operations.
125 */
126struct workqueue_struct *cvm_oct_poll_queue;
127
128/**
129 * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
130 *
131 * Set to one right before cvm_oct_poll_queue is destroyed.
126 */ 132 */
127static struct timer_list cvm_oct_poll_timer; 133atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);
128 134
129/** 135/**
130 * Array of every ethernet device owned by this driver indexed by 136 * Array of every ethernet device owned by this driver indexed by
@@ -132,65 +138,44 @@ static struct timer_list cvm_oct_poll_timer;
132 */ 138 */
133struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS]; 139struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];
134 140
135/** 141u64 cvm_oct_tx_poll_interval;
136 * Periodic timer tick for slow management operations 142
137 * 143static void cvm_oct_rx_refill_worker(struct work_struct *work);
138 * @arg: Device to check 144static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);
139 */ 145
140static void cvm_do_timer(unsigned long arg) 146static void cvm_oct_rx_refill_worker(struct work_struct *work)
141{ 147{
142 int32_t skb_to_free, undo; 148 /*
 143 int queues_per_port; 149 * FPA 0 may have been drained; try to refill it if we need
 144 int qos; 150 * more than num_packet_buffers / 2, otherwise normal receive
 145 struct octeon_ethernet *priv; 151 * processing will refill it. If it were drained, no packets
 146 static int port; 152 * could be received, so cvm_oct_napi_poll would never be
153 * invoked to do the refill.
154 */
155 cvm_oct_rx_refill_pool(num_packet_buffers / 2);
147 156
148 if (port >= CVMX_PIP_NUM_INPUT_PORTS) { 157 if (!atomic_read(&cvm_oct_poll_queue_stopping))
149 /* 158 queue_delayed_work(cvm_oct_poll_queue,
150 * All ports have been polled. Start the next 159 &cvm_oct_rx_refill_work, HZ);
151 * iteration through the ports in one second. 160}
152 */ 161
153 port = 0; 162static void cvm_oct_periodic_worker(struct work_struct *work)
154 mod_timer(&cvm_oct_poll_timer, jiffies + HZ); 163{
155 return; 164 struct octeon_ethernet *priv = container_of(work,
156 } 165 struct octeon_ethernet,
157 if (!cvm_oct_device[port]) 166 port_periodic_work.work);
158 goto out;
159 167
160 priv = netdev_priv(cvm_oct_device[port]);
161 if (priv->poll) 168 if (priv->poll)
162 priv->poll(cvm_oct_device[port]); 169 priv->poll(cvm_oct_device[priv->port]);
163
164 queues_per_port = cvmx_pko_get_num_queues(port);
165 /* Drain any pending packets in the free list */
166 for (qos = 0; qos < queues_per_port; qos++) {
167 if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
168 continue;
169 skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
170 MAX_SKB_TO_FREE);
171 undo = skb_to_free > 0 ?
172 MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
173 if (undo > 0)
174 cvmx_fau_atomic_add32(priv->fau+qos*4, -undo);
175 skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ?
176 MAX_SKB_TO_FREE : -skb_to_free;
177 cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 1);
178 }
179 cvm_oct_device[port]->netdev_ops->ndo_get_stats(cvm_oct_device[port]);
180 170
181out: 171 cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(cvm_oct_device[priv->port]);
182 port++; 172
183 /* Poll the next port in a 50th of a second. 173 if (!atomic_read(&cvm_oct_poll_queue_stopping))
184 This spreads the polling of ports out a little bit */ 174 queue_delayed_work(cvm_oct_poll_queue, &priv->port_periodic_work, HZ);
 185 mod_timer(&cvm_oct_poll_timer, jiffies + HZ / 50); 175}
186}
187 176
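/*
 * Shutdown sketch for the self-rescheduling workers above, mirroring
 * the module exit path later in this patch: the stopping flag must be
 * raised before the cancel, so a worker that is already running
 * cannot re-queue itself between the cancel and the destroy.
 *
 *	atomic_inc(&cvm_oct_poll_queue_stopping);
 *	cancel_delayed_work_sync(&cvm_oct_rx_refill_work);
 *	destroy_workqueue(cvm_oct_poll_queue);
 */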
188/**
189 * Configure common hardware for all interfaces
190 */
191static __init void cvm_oct_configure_common_hw(void) 177static __init void cvm_oct_configure_common_hw(void)
192{ 178{
193 int r;
194 /* Setup the FPA */ 179 /* Setup the FPA */
195 cvmx_fpa_enable(); 180 cvmx_fpa_enable();
196 cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE, 181 cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
@@ -205,28 +190,13 @@ static __init void cvm_oct_configure_common_hw(void)
205 cvmx_helper_setup_red(num_packet_buffers / 4, 190 cvmx_helper_setup_red(num_packet_buffers / 4,
206 num_packet_buffers / 8); 191 num_packet_buffers / 8);
207 192
208 /* Enable the MII interface */
209 if (!octeon_is_simulation())
210 cvmx_write_csr(CVMX_SMIX_EN(0), 1);
211
212 /* Register an IRQ hander for to receive POW interrupts */
213 r = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
214 cvm_oct_do_interrupt, IRQF_SHARED, "Ethernet",
215 cvm_oct_device);
216
217#if defined(CONFIG_SMP) && 0
218 if (USE_MULTICORE_RECEIVE) {
219 irq_set_affinity(OCTEON_IRQ_WORKQ0 + pow_receive_group,
220 cpu_online_mask);
221 }
222#endif
223} 193}
224 194
225/** 195/**
 226 * Free a work queue entry received in a intercept callback. 196 * cvm_oct_free_work - Free a work queue entry
197 *
198 * @work_queue_entry: Work queue entry to free
227 * 199 *
228 * @work_queue_entry:
229 * Work queue entry to free
230 * Returns Zero on success, Negative on failure. 200 * Returns Zero on success, Negative on failure.
231 */ 201 */
232int cvm_oct_free_work(void *work_queue_entry) 202int cvm_oct_free_work(void *work_queue_entry)
@@ -253,9 +223,9 @@ int cvm_oct_free_work(void *work_queue_entry)
253EXPORT_SYMBOL(cvm_oct_free_work); 223EXPORT_SYMBOL(cvm_oct_free_work);
254 224
255/** 225/**
256 * Get the low level ethernet statistics 226 * cvm_oct_common_get_stats - get the low level ethernet statistics
257 *
258 * @dev: Device to get the statistics from 227 * @dev: Device to get the statistics from
228 *
259 * Returns Pointer to the statistics 229 * Returns Pointer to the statistics
260 */ 230 */
261static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev) 231static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
@@ -299,8 +269,7 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
299} 269}
300 270
301/** 271/**
302 * Change the link MTU. Unimplemented 272 * cvm_oct_common_change_mtu - change the link MTU
303 *
304 * @dev: Device to change 273 * @dev: Device to change
305 * @new_mtu: The new MTU 274 * @new_mtu: The new MTU
306 * 275 *
@@ -364,8 +333,7 @@ static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
364} 333}
365 334
366/** 335/**
367 * Set the multicast list. Currently unimplemented. 336 * cvm_oct_common_set_multicast_list - set the multicast list
368 *
369 * @dev: Device to work on 337 * @dev: Device to work on
370 */ 338 */
371static void cvm_oct_common_set_multicast_list(struct net_device *dev) 339static void cvm_oct_common_set_multicast_list(struct net_device *dev)
@@ -420,10 +388,10 @@ static void cvm_oct_common_set_multicast_list(struct net_device *dev)
420} 388}
421 389
422/** 390/**
423 * Set the hardware MAC address for a device 391 * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
424 * 392 * @dev: The device in question.
425 * @dev: Device to change the MAC address for 393 * @addr: Address structure to change it too.
426 * @addr: Address structure to change it too. MAC address is addr + 2. 394
427 * Returns Zero on success 395 * Returns Zero on success
428 */ 396 */
429static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr) 397static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
@@ -470,9 +438,9 @@ static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
470} 438}
471 439
472/** 440/**
473 * Per network device initialization 441 * cvm_oct_common_init - per network device initialization
474 *
475 * @dev: Device to initialize 442 * @dev: Device to initialize
443 *
476 * Returns Zero on success 444 * Returns Zero on success
477 */ 445 */
478int cvm_oct_common_init(struct net_device *dev) 446int cvm_oct_common_init(struct net_device *dev)
@@ -510,8 +478,11 @@ int cvm_oct_common_init(struct net_device *dev)
510 && (always_use_pow || strstr(pow_send_list, dev->name))) 478 && (always_use_pow || strstr(pow_send_list, dev->name)))
511 priv->queue = -1; 479 priv->queue = -1;
512 480
513 if (priv->queue != -1 && USE_HW_TCPUDP_CHECKSUM) 481 if (priv->queue != -1) {
514 dev->features |= NETIF_F_IP_CSUM; 482 dev->features |= NETIF_F_SG;
483 if (USE_HW_TCPUDP_CHECKSUM)
484 dev->features |= NETIF_F_IP_CSUM;
485 }
515 486
516 /* We do our own locking, Linux doesn't need to */ 487 /* We do our own locking, Linux doesn't need to */
517 dev->features |= NETIF_F_LLTX; 488 dev->features |= NETIF_F_LLTX;
@@ -625,12 +596,6 @@ static const struct net_device_ops cvm_oct_pow_netdev_ops = {
625 596
626extern void octeon_mdiobus_force_mod_depencency(void); 597extern void octeon_mdiobus_force_mod_depencency(void);
627 598
628/**
629 * Module/ driver initialization. Creates the linux network
630 * devices.
631 *
632 * Returns Zero on success
633 */
634static int __init cvm_oct_init_module(void) 599static int __init cvm_oct_init_module(void)
635{ 600{
636 int num_interfaces; 601 int num_interfaces;
@@ -648,8 +613,12 @@ static int __init cvm_oct_init_module(void)
648 else 613 else
649 cvm_oct_mac_addr_offset = 0; 614 cvm_oct_mac_addr_offset = 0;
650 615
651 cvm_oct_proc_initialize(); 616 cvm_oct_poll_queue = create_singlethread_workqueue("octeon-ethernet");
652 cvm_oct_rx_initialize(); 617 if (cvm_oct_poll_queue == NULL) {
 618 pr_err("octeon-ethernet: Cannot create workqueue\n");
619 return -ENOMEM;
620 }
621
653 cvm_oct_configure_common_hw(); 622 cvm_oct_configure_common_hw();
654 623
655 cvmx_helper_initialize_packet_io_global(); 624 cvmx_helper_initialize_packet_io_global();
@@ -682,6 +651,9 @@ static int __init cvm_oct_init_module(void)
682 */ 651 */
683 cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0); 652 cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
684 653
654 /* Initialize the FAU used for counting tx SKBs that need to be freed */
655 cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);
656
685 if ((pow_send_group != -1)) { 657 if ((pow_send_group != -1)) {
686 struct net_device *dev; 658 struct net_device *dev;
687 pr_info("\tConfiguring device for POW only access\n"); 659 pr_info("\tConfiguring device for POW only access\n");
@@ -689,7 +661,6 @@ static int __init cvm_oct_init_module(void)
689 if (dev) { 661 if (dev) {
690 /* Initialize the device private structure. */ 662 /* Initialize the device private structure. */
691 struct octeon_ethernet *priv = netdev_priv(dev); 663 struct octeon_ethernet *priv = netdev_priv(dev);
692 memset(priv, 0, sizeof(struct octeon_ethernet));
693 664
694 dev->netdev_ops = &cvm_oct_pow_netdev_ops; 665 dev->netdev_ops = &cvm_oct_pow_netdev_ops;
695 priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED; 666 priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
@@ -700,19 +671,16 @@ static int __init cvm_oct_init_module(void)
700 skb_queue_head_init(&priv->tx_free_list[qos]); 671 skb_queue_head_init(&priv->tx_free_list[qos]);
701 672
702 if (register_netdev(dev) < 0) { 673 if (register_netdev(dev) < 0) {
703 pr_err("Failed to register ethernet " 674 pr_err("Failed to register ethernet device for POW\n");
704 "device for POW\n");
705 kfree(dev); 675 kfree(dev);
706 } else { 676 } else {
707 cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev; 677 cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
708 pr_info("%s: POW send group %d, receive " 678 pr_info("%s: POW send group %d, receive group %d\n",
709 "group %d\n", 679 dev->name, pow_send_group,
710 dev->name, pow_send_group, 680 pow_receive_group);
711 pow_receive_group);
712 } 681 }
713 } else { 682 } else {
714 pr_err("Failed to allocate ethernet device " 683 pr_err("Failed to allocate ethernet device for POW\n");
715 "for POW\n");
716 } 684 }
717 } 685 }
718 686
@@ -730,17 +698,15 @@ static int __init cvm_oct_init_module(void)
730 struct net_device *dev = 698 struct net_device *dev =
731 alloc_etherdev(sizeof(struct octeon_ethernet)); 699 alloc_etherdev(sizeof(struct octeon_ethernet));
732 if (!dev) { 700 if (!dev) {
733 pr_err("Failed to allocate ethernet device " 701 pr_err("Failed to allocate ethernet device for port %d\n", port);
734 "for port %d\n", port);
735 continue; 702 continue;
736 } 703 }
737 if (disable_core_queueing)
738 dev->tx_queue_len = 0;
739 704
740 /* Initialize the device private structure. */ 705 /* Initialize the device private structure. */
741 priv = netdev_priv(dev); 706 priv = netdev_priv(dev);
742 memset(priv, 0, sizeof(struct octeon_ethernet));
743 707
708 INIT_DELAYED_WORK(&priv->port_periodic_work,
709 cvm_oct_periodic_worker);
744 priv->imode = imode; 710 priv->imode = imode;
745 priv->port = port; 711 priv->port = port;
746 priv->queue = cvmx_pko_get_base_queue(priv->port); 712 priv->queue = cvmx_pko_get_base_queue(priv->port);
@@ -803,44 +769,25 @@ static int __init cvm_oct_init_module(void)
803 fau -= 769 fau -=
804 cvmx_pko_get_num_queues(priv->port) * 770 cvmx_pko_get_num_queues(priv->port) *
805 sizeof(uint32_t); 771 sizeof(uint32_t);
772 queue_delayed_work(cvm_oct_poll_queue,
773 &priv->port_periodic_work, HZ);
806 } 774 }
807 } 775 }
808 } 776 }
809 777
810 if (INTERRUPT_LIMIT) { 778 cvm_oct_tx_initialize();
811 /* 779 cvm_oct_rx_initialize();
812 * Set the POW timer rate to give an interrupt at most
813 * INTERRUPT_LIMIT times per second.
814 */
815 cvmx_write_csr(CVMX_POW_WQ_INT_PC,
816 octeon_bootinfo->eclock_hz / (INTERRUPT_LIMIT *
817 16 * 256) << 8);
818 780
 819 /* 782 * 150 us: about 10 1500-byte packets at 1GE.
820 * Enable POW timer interrupt. It will count when 782 * 150 uS: about 10 1500-byte packtes at 1GE.
821 * there are packets available. 783 */
822 */ 784 cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);
823 cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group),
824 0x1ful << 24);
825 } else {
826 /* Enable POW interrupt when our port has at least one packet */
827 cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0x1001);
828 }
829 785
830 /* Enable the poll timer for checking RGMII status */ 786 queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ);
831 init_timer(&cvm_oct_poll_timer);
832 cvm_oct_poll_timer.data = 0;
833 cvm_oct_poll_timer.function = cvm_do_timer;
834 mod_timer(&cvm_oct_poll_timer, jiffies + HZ);
835 787
836 return 0; 788 return 0;
837} 789}
838 790
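/*
 * Worked example for the poll interval computed above (assuming a
 * 600 MHz core clock): octeon_get_clock_rate() / 1000000 == 600 ticks
 * per microsecond, so cvm_oct_tx_poll_interval == 150 * 600 == 90000,
 * and CIU_TIMX(1) fires roughly 150 us after the watchdog is kicked.
 */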
839/**
840 * Module / driver shutdown
841 *
842 * Returns Zero on success
843 */
844static void __exit cvm_oct_cleanup_module(void) 791static void __exit cvm_oct_cleanup_module(void)
845{ 792{
846 int port; 793 int port;
@@ -853,22 +800,31 @@ static void __exit cvm_oct_cleanup_module(void)
853 /* Free the interrupt handler */ 800 /* Free the interrupt handler */
854 free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device); 801 free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device);
855 802
856 del_timer(&cvm_oct_poll_timer); 803 atomic_inc_return(&cvm_oct_poll_queue_stopping);
804 cancel_delayed_work_sync(&cvm_oct_rx_refill_work);
805
857 cvm_oct_rx_shutdown(); 806 cvm_oct_rx_shutdown();
807 cvm_oct_tx_shutdown();
808
858 cvmx_pko_disable(); 809 cvmx_pko_disable();
859 810
860 /* Free the ethernet devices */ 811 /* Free the ethernet devices */
861 for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) { 812 for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
862 if (cvm_oct_device[port]) { 813 if (cvm_oct_device[port]) {
863 cvm_oct_tx_shutdown(cvm_oct_device[port]); 814 struct net_device *dev = cvm_oct_device[port];
864 unregister_netdev(cvm_oct_device[port]); 815 struct octeon_ethernet *priv = netdev_priv(dev);
865 kfree(cvm_oct_device[port]); 816 cancel_delayed_work_sync(&priv->port_periodic_work);
817
818 cvm_oct_tx_shutdown_dev(dev);
819 unregister_netdev(dev);
820 kfree(dev);
866 cvm_oct_device[port] = NULL; 821 cvm_oct_device[port] = NULL;
867 } 822 }
868 } 823 }
869 824
825 destroy_workqueue(cvm_oct_poll_queue);
826
870 cvmx_pko_shutdown(); 827 cvmx_pko_shutdown();
871 cvm_oct_proc_shutdown();
872 828
873 cvmx_ipd_free_ptr(); 829 cvmx_ipd_free_ptr();
874 830
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index 402a15b9bb0e..d58192563552 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -4,7 +4,7 @@
4 * Contact: support@caviumnetworks.com 4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK 5 * This file is part of the OCTEON SDK
6 * 6 *
7 * Copyright (c) 2003-2007 Cavium Networks 7 * Copyright (c) 2003-2010 Cavium Networks
8 * 8 *
9 * This file is free software; you can redistribute it and/or modify 9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as 10 * it under the terms of the GNU General Public License, Version 2, as
@@ -57,58 +57,12 @@ struct octeon_ethernet {
57 uint64_t link_info; 57 uint64_t link_info;
58 /* Called periodically to check link status */ 58 /* Called periodically to check link status */
59 void (*poll) (struct net_device *dev); 59 void (*poll) (struct net_device *dev);
60 struct delayed_work port_periodic_work;
61 struct work_struct port_work; /* may be unused. */
60}; 62};
61 63
62/**
63 * Free a work queue entry received in a intercept callback.
64 *
65 * @work_queue_entry:
66 * Work queue entry to free
67 * Returns Zero on success, Negative on failure.
68 */
69int cvm_oct_free_work(void *work_queue_entry); 64int cvm_oct_free_work(void *work_queue_entry);
70 65
71/**
72 * Transmit a work queue entry out of the ethernet port. Both
73 * the work queue entry and the packet data can optionally be
74 * freed. The work will be freed on error as well.
75 *
76 * @dev: Device to transmit out.
77 * @work_queue_entry:
78 * Work queue entry to send
79 * @do_free: True if the work queue entry and packet data should be
80 * freed. If false, neither will be freed.
81 * @qos: Index into the queues for this port to transmit on. This
82 * is used to implement QoS if their are multiple queues per
83 * port. This parameter must be between 0 and the number of
84 * queues per port minus 1. Values outside of this range will
85 * be change to zero.
86 *
87 * Returns Zero on success, negative on failure.
88 */
89int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry,
90 int do_free, int qos);
91
92/**
93 * Transmit a work queue entry out of the ethernet port. Both
94 * the work queue entry and the packet data can optionally be
95 * freed. The work will be freed on error as well. This simply
96 * wraps cvmx_oct_transmit_qos() for backwards compatability.
97 *
98 * @dev: Device to transmit out.
99 * @work_queue_entry:
100 * Work queue entry to send
101 * @do_free: True if the work queue entry and packet data should be
102 * freed. If false, neither will be freed.
103 *
104 * Returns Zero on success, negative on failure.
105 */
106static inline int cvm_oct_transmit(struct net_device *dev,
107 void *work_queue_entry, int do_free)
108{
109 return cvm_oct_transmit_qos(dev, work_queue_entry, do_free, 0);
110}
111
112extern int cvm_oct_rgmii_init(struct net_device *dev); 66extern int cvm_oct_rgmii_init(struct net_device *dev);
113extern void cvm_oct_rgmii_uninit(struct net_device *dev); 67extern void cvm_oct_rgmii_uninit(struct net_device *dev);
114extern int cvm_oct_rgmii_open(struct net_device *dev); 68extern int cvm_oct_rgmii_open(struct net_device *dev);
@@ -134,5 +88,11 @@ extern int pow_send_group;
134extern int pow_receive_group; 88extern int pow_receive_group;
135extern char pow_send_list[]; 89extern char pow_send_list[];
136extern struct net_device *cvm_oct_device[]; 90extern struct net_device *cvm_oct_device[];
91extern struct workqueue_struct *cvm_oct_poll_queue;
92extern atomic_t cvm_oct_poll_queue_stopping;
93extern u64 cvm_oct_tx_poll_interval;
94
95extern int max_rx_cpus;
96extern int rx_napi_weight;
137 97
138#endif 98#endif
diff --git a/drivers/staging/sm7xx/smtc2d.c b/drivers/staging/sm7xx/smtc2d.c
index 133b86c6a678..2fff0a0052d1 100644
--- a/drivers/staging/sm7xx/smtc2d.c
+++ b/drivers/staging/sm7xx/smtc2d.c
@@ -5,7 +5,7 @@
5 * Author: Boyod boyod.yang@siliconmotion.com.cn 5 * Author: Boyod boyod.yang@siliconmotion.com.cn
6 * 6 *
7 * Copyright (C) 2009 Lemote, Inc. 7 * Copyright (C) 2009 Lemote, Inc.
8 * Author: Wu Zhangjin, wuzj@lemote.com 8 * Author: Wu Zhangjin, wuzhangjin@gmail.com
9 * 9 *
10 * This file is subject to the terms and conditions of the GNU General Public 10 * This file is subject to the terms and conditions of the GNU General Public
11 * License. See the file COPYING in the main directory of this archive for 11 * License. See the file COPYING in the main directory of this archive for
diff --git a/drivers/staging/sm7xx/smtc2d.h b/drivers/staging/sm7xx/smtc2d.h
index 38d0c335322b..02b4fa29136c 100644
--- a/drivers/staging/sm7xx/smtc2d.h
+++ b/drivers/staging/sm7xx/smtc2d.h
@@ -5,7 +5,7 @@
5 * Author: Ge Wang, gewang@siliconmotion.com 5 * Author: Ge Wang, gewang@siliconmotion.com
6 * 6 *
7 * Copyright (C) 2009 Lemote, Inc. 7 * Copyright (C) 2009 Lemote, Inc.
8 * Author: Wu Zhangjin, wuzj@lemote.com 8 * Author: Wu Zhangjin, wuzhangjin@gmail.com
9 * 9 *
10 * This file is subject to the terms and conditions of the GNU General Public 10 * This file is subject to the terms and conditions of the GNU General Public
11 * License. See the file COPYING in the main directory of this archive for 11 * License. See the file COPYING in the main directory of this archive for
diff --git a/drivers/staging/sm7xx/smtcfb.c b/drivers/staging/sm7xx/smtcfb.c
index 161dbc9c1397..a4f6f49aef48 100644
--- a/drivers/staging/sm7xx/smtcfb.c
+++ b/drivers/staging/sm7xx/smtcfb.c
@@ -6,7 +6,7 @@
6 * Boyod boyod.yang@siliconmotion.com.cn 6 * Boyod boyod.yang@siliconmotion.com.cn
7 * 7 *
8 * Copyright (C) 2009 Lemote, Inc. 8 * Copyright (C) 2009 Lemote, Inc.
9 * Author: Wu Zhangjin, wuzj@lemote.com 9 * Author: Wu Zhangjin, wuzhangjin@gmail.com
10 * 10 *
11 * This file is subject to the terms and conditions of the GNU General Public 11 * This file is subject to the terms and conditions of the GNU General Public
12 * License. See the file COPYING in the main directory of this archive for 12 * License. See the file COPYING in the main directory of this archive for
diff --git a/drivers/staging/sm7xx/smtcfb.h b/drivers/staging/sm7xx/smtcfb.h
index 7f2c34138215..7ee565c2c952 100644
--- a/drivers/staging/sm7xx/smtcfb.h
+++ b/drivers/staging/sm7xx/smtcfb.h
@@ -6,7 +6,7 @@
6 * Boyod boyod.yang@siliconmotion.com.cn 6 * Boyod boyod.yang@siliconmotion.com.cn
7 * 7 *
8 * Copyright (C) 2009 Lemote, Inc. 8 * Copyright (C) 2009 Lemote, Inc.
9 * Author: Wu Zhangjin, wuzj@lemote.com 9 * Author: Wu Zhangjin, wuzhangjin@gmail.com
10 * 10 *
11 * This file is subject to the terms and conditions of the GNU General Public 11 * This file is subject to the terms and conditions of the GNU General Public
12 * License. See the file COPYING in the main directory of this archive for 12 * License. See the file COPYING in the main directory of this archive for