author	David Daney <ddaney@caviumnetworks.com>	2009-05-05 20:35:21 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2009-06-17 06:06:30 -0400
commit	80ff0fd3ab6451407a20c19b80c1643c4a6d6434 (patch)
tree	2d7021ac34269f0bce3ac18f7347bf6946640f36 /drivers/staging/octeon/ethernet-tx.c
parent	38295fb2a09264671c82d490ce77c17d492378e0 (diff)
Staging: Add octeon-ethernet driver files.
The octeon-ethernet driver supports the sgmii, rgmii, spi, and xaui
ports present on the Cavium OCTEON family of SOCs. These SOCs are
multi-core mips64 processors with existing support over in arch/mips.
The driver files can be categorized into three basic groups:
1) Register definitions; these are named cvmx-*-defs.h
2) Main driver code; these have names that don't start with cvmx-.
3) Interface-specific functions and other utility code, with names
starting with cvmx-
Signed-off-by: David Daney <ddaney@caviumnetworks.com>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'drivers/staging/octeon/ethernet-tx.c')
-rw-r--r--	drivers/staging/octeon/ethernet-tx.c	634
1 file changed, 634 insertions, 0 deletions

diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
new file mode 100644
index 00000000000..77b7122c8fd
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -0,0 +1,634 @@
/*********************************************************************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2007 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 *********************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/string.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
#include <net/xfrm.h>
#endif /* CONFIG_XFRM */

#include <asm/atomic.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-util.h"

#include "cvmx-wqe.h"
#include "cvmx-fau.h"
#include "cvmx-pko.h"
#include "cvmx-helper.h"

#include "cvmx-gmxx-defs.h"

/*
 * You can define GET_SKBUFF_QOS() to override how the skbuff output
 * function determines which output queue is used. The default
 * implementation always uses the base queue for the port. If, for
 * example, you wanted to use the skb->priority field, define
 * GET_SKBUFF_QOS as: #define GET_SKBUFF_QOS(skb) ((skb)->priority)
 */
#ifndef GET_SKBUFF_QOS
#define GET_SKBUFF_QOS(skb) 0
#endif

/**
 * Packet transmit
 *
 * @skb: Packet to send
 * @dev: Device info structure
 * Returns Always returns zero
 */
int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
{
	cvmx_pko_command_word0_t pko_command;
	union cvmx_buf_ptr hw_buffer;
	uint64_t old_scratch;
	uint64_t old_scratch2;
	int dropped;
	int qos;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int32_t in_use;
	int32_t buffers_to_free;
#if REUSE_SKBUFFS_WITHOUT_FREE
	unsigned char *fpa_head;
#endif

	/*
	 * Prefetch the private data structure. It is larger than one
	 * cache line.
	 */
	prefetch(priv);

	/* Start off assuming no drop */
	dropped = 0;

	/*
	 * The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to
	 * completely remove "qos" in the event neither interface
	 * supports multiple queues per port.
	 */
	if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
	    (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
		qos = GET_SKBUFF_QOS(skb);
		if (qos <= 0)
			qos = 0;
		else if (qos >= cvmx_pko_get_num_queues(priv->port))
			qos = 0;
	} else
		qos = 0;

	if (USE_ASYNC_IOBDMA) {
		/* Save scratch in case userspace is using it */
		CVMX_SYNCIOBDMA;
		old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
		old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);

		/*
		 * Assume we're going to be able to send this
		 * packet. Fetch and increment the number of pending
		 * packets for output.
		 */
		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8,
					       FAU_NUM_PACKET_BUFFERS_TO_FREE,
					       0);
		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
					       priv->fau + qos * 4, 1);
	}

	/*
	 * The CN3XXX series of parts has an errata (GMX-401) which
	 * causes the GMX block to hang if a collision occurs towards
	 * the end of a <68 byte packet. As a workaround for this, we
	 * pad packets to be 68 bytes whenever we are in half duplex
	 * mode. We don't handle the case of having a small packet but
	 * no room to add the padding. The kernel should always give
	 * us at least a cache line.
	 */
	if ((skb->len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
		union cvmx_gmxx_prtx_cfg gmx_prt_cfg;
		int interface = INTERFACE(priv->port);
		int index = INDEX(priv->port);

		if (interface < 2) {
			/* We only need to pad packet in half duplex mode */
			gmx_prt_cfg.u64 =
			    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
			if (gmx_prt_cfg.s.duplex == 0) {
				int add_bytes = 64 - skb->len;
				if ((skb_tail_pointer(skb) + add_bytes) <=
				    skb_end_pointer(skb))
					memset(__skb_put(skb, add_bytes), 0,
					       add_bytes);
			}
		}
	}

	/* Build the PKO buffer pointer */
	hw_buffer.u64 = 0;
	hw_buffer.s.addr = cvmx_ptr_to_phys(skb->data);
	hw_buffer.s.pool = 0;
	hw_buffer.s.size =
	    (unsigned long)skb_end_pointer(skb) - (unsigned long)skb->head;

	/* Build the PKO command */
	pko_command.u64 = 0;
	pko_command.s.n2 = 1;	/* Don't pollute L2 with the outgoing packet */
	pko_command.s.segs = 1;
	pko_command.s.total_bytes = skb->len;
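	/*
	 * size0, subone0 and reg0 tell the PKO to atomically subtract
	 * one from the 32-bit FAU counter at priv->fau + qos * 4 once
	 * the packet has been transmitted; that counter is what the
	 * code further down reads back as "in_use".
	 */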
	pko_command.s.size0 = CVMX_FAU_OP_SIZE_32;
	pko_command.s.subone0 = 1;

	pko_command.s.dontfree = 1;
	pko_command.s.reg0 = priv->fau + qos * 4;
	/*
	 * See if we can put this skb in the FPA pool. Any strange
	 * behavior from the Linux networking stack will most likely
	 * be caused by a bug in the following code. If some field is
	 * in use by the network stack and gets carried over when a
	 * buffer is reused, bad things may happen. If in doubt and
	 * you don't need the absolute best performance, disable the
	 * define REUSE_SKBUFFS_WITHOUT_FREE. The reuse of buffers has
	 * shown a 25% increase in performance under some loads.
	 */
#if REUSE_SKBUFFS_WITHOUT_FREE
	fpa_head = skb->head + 128 - ((unsigned long)skb->head & 0x7f);
	if (unlikely(skb->data < fpa_head)) {
		/*
		 * printk("TX buffer beginning can't meet FPA
		 * alignment constraints\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely
	    ((skb_end_pointer(skb) - fpa_head) < CVMX_FPA_PACKET_POOL_SIZE)) {
		/*
		   printk("TX buffer isn't large enough for the FPA\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_shared(skb))) {
		/*
		   printk("TX buffer sharing data with someone else\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_cloned(skb))) {
		/*
		   printk("TX buffer has been cloned\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_header_cloned(skb))) {
		/*
		   printk("TX buffer header has been cloned\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb->destructor)) {
		/*
		   printk("TX buffer has a destructor\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_shinfo(skb)->nr_frags)) {
		/*
		   printk("TX buffer has fragments\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely
	    (skb->truesize !=
	     sizeof(*skb) + skb_end_pointer(skb) - skb->head)) {
		/*
		   printk("TX buffer truesize has been changed\n");
		 */
		goto dont_put_skbuff_in_hw;
	}

	/*
	 * We can use this buffer in the FPA. We don't need the FAU
	 * update anymore
	 */
	pko_command.s.reg0 = 0;
	pko_command.s.dontfree = 0;

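	/*
	 * hw_buffer.s.back records, in 128-byte cache lines, how far
	 * skb->data sits past the start of the FPA buffer, and the skb
	 * pointer is stashed in the eight bytes just below fpa_head so
	 * the rest of the driver can recover this skb when the buffer
	 * comes back from the hardware.
	 */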
	hw_buffer.s.back = (skb->data - fpa_head) >> 7;
	*(struct sk_buff **)(fpa_head - sizeof(void *)) = skb;

	/*
	 * The skbuff will be reused without ever being freed. We must
	 * cleanup a bunch of Linux stuff.
	 */
	dst_release(skb->dst);
	skb->dst = NULL;
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
	skb->sp = NULL;
#endif
	nf_reset(skb);

#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif /* CONFIG_NET_CLS_ACT */
#endif /* CONFIG_NET_SCHED */

dont_put_skbuff_in_hw:
#endif /* REUSE_SKBUFFS_WITHOUT_FREE */

	/* Check if we can use the hardware checksumming */
	if (USE_HW_TCPUDP_CHECKSUM && (skb->protocol == htons(ETH_P_IP)) &&
	    (ip_hdr(skb)->version == 4) && (ip_hdr(skb)->ihl == 5) &&
	    ((ip_hdr(skb)->frag_off == 0) || (ip_hdr(skb)->frag_off == 1 << 14))
	    && ((ip_hdr(skb)->protocol == IP_PROTOCOL_TCP)
		|| (ip_hdr(skb)->protocol == IP_PROTOCOL_UDP))) {
		/* Use hardware checksum calc */
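		/*
		 * ipoffp1 is the offset of the IP header plus one; the
		 * default of zero (left in place for the non-IP case)
		 * keeps the hardware checksum engine disabled.
		 */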
		pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1;
	}

	if (USE_ASYNC_IOBDMA) {
		/* Get the number of skbuffs in use by the hardware */
		CVMX_SYNCIOBDMA;
		in_use = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
		buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
	} else {
		/* Get the number of skbuffs in use by the hardware */
		in_use = cvmx_fau_fetch_and_add32(priv->fau + qos * 4, 1);
		buffers_to_free =
		    cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
	}

	/*
	 * If we're sending faster than the receive can free them then
	 * don't do the HW free.
	 */
	if ((buffers_to_free < -100) && !pko_command.s.dontfree) {
		pko_command.s.dontfree = 1;
		pko_command.s.reg0 = priv->fau + qos * 4;
	}

	cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
				     CVMX_PKO_LOCK_CMD_QUEUE);

	/* Drop this packet if we have too many already queued to the HW */
	if (unlikely
	    (skb_queue_len(&priv->tx_free_list[qos]) >= MAX_OUT_QUEUE_DEPTH)) {
		/*
		   DEBUGPRINT("%s: Tx dropped. Too many queued\n", dev->name);
		 */
		dropped = 1;
	}
	/* Send the packet to the output queue */
	else if (unlikely
		 (cvmx_pko_send_packet_finish
		  (priv->port, priv->queue + qos, pko_command, hw_buffer,
		   CVMX_PKO_LOCK_CMD_QUEUE))) {
		DEBUGPRINT("%s: Failed to send the packet\n", dev->name);
		dropped = 1;
	}

	if (USE_ASYNC_IOBDMA) {
		/* Restore the scratch area */
		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
		cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
	}

	if (unlikely(dropped)) {
		dev_kfree_skb_any(skb);
		cvmx_fau_atomic_add32(priv->fau + qos * 4, -1);
		priv->stats.tx_dropped++;
	} else {
		if (USE_SKBUFFS_IN_HW) {
			/* Put this packet on the queue to be freed later */
			if (pko_command.s.dontfree)
				skb_queue_tail(&priv->tx_free_list[qos], skb);
			else {
				cvmx_fau_atomic_add32
				    (FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
				cvmx_fau_atomic_add32(priv->fau + qos * 4, -1);
			}
		} else {
			/* Put this packet on the queue to be freed later */
			skb_queue_tail(&priv->tx_free_list[qos], skb);
		}
	}

	/* Free skbuffs not in use by the hardware, possibly two at a time */
	if (skb_queue_len(&priv->tx_free_list[qos]) > in_use) {
		spin_lock(&priv->tx_free_list[qos].lock);
		/*
		 * Check again now that we have the lock. It might
		 * have changed.
		 */
		if (skb_queue_len(&priv->tx_free_list[qos]) > in_use)
			dev_kfree_skb(__skb_dequeue(&priv->tx_free_list[qos]));
		if (skb_queue_len(&priv->tx_free_list[qos]) > in_use)
			dev_kfree_skb(__skb_dequeue(&priv->tx_free_list[qos]));
		spin_unlock(&priv->tx_free_list[qos].lock);
	}

	return 0;
}

/**
 * Packet transmit to the POW
 *
 * @skb: Packet to send
 * @dev: Device info structure
 * Returns Always returns zero
 */
int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	void *packet_buffer;
	void *copy_location;

	/* Get a work queue entry */
	cvmx_wqe_t *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
	if (unlikely(work == NULL)) {
		DEBUGPRINT("%s: Failed to allocate a work queue entry\n",
			   dev->name);
		priv->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return 0;
	}

	/* Get a packet buffer */
	packet_buffer = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL);
	if (unlikely(packet_buffer == NULL)) {
		DEBUGPRINT("%s: Failed to allocate a packet buffer\n",
			   dev->name);
		cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));
		priv->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return 0;
	}

	/*
	 * Calculate where we need to copy the data to. We need to
	 * leave 8 bytes for a next pointer (unused). We also need to
	 * include any configure skip. Then we need to align the IP
	 * packet src and dest into the same 64bit word. The below
	 * calculation may add a little extra, but that doesn't
	 * hurt.
	 */
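	/*
	 * The "+ 6" below, combined with the 14-byte Ethernet header,
	 * puts the IP source and destination addresses (bytes 26-33 of
	 * the frame) together in a single aligned 64-bit word.
	 */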
	copy_location = packet_buffer + sizeof(uint64_t);
	copy_location += ((CVMX_HELPER_FIRST_MBUFF_SKIP + 7) & 0xfff8) + 6;

	/*
	 * We have to copy the packet since whoever processes this
	 * packet will free it to a hardware pool. We can't use the
	 * trick of counting outstanding packets like in
	 * cvm_oct_xmit.
	 */
	memcpy(copy_location, skb->data, skb->len);

	/*
	 * Fill in some of the work queue fields. We may need to add
	 * more if the software at the other end needs them.
	 */
	work->hw_chksum = skb->csum;
	work->len = skb->len;
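	/*
	 * qos, grp, tag and tag_type control how the POW schedules this
	 * work: which group of cores may pick it up and how it is
	 * ordered relative to other work carrying the same tag.
	 */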
	work->ipprt = priv->port;
	work->qos = priv->port & 0x7;
	work->grp = pow_send_group;
	work->tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
	work->tag = pow_send_group;	/* FIXME */
	/* Default everything to zero; the explicit zero assignments below are commented out */
	work->word2.u64 = 0;
	work->word2.s.bufs = 1;
	work->packet_ptr.u64 = 0;
	work->packet_ptr.s.addr = cvmx_ptr_to_phys(copy_location);
	work->packet_ptr.s.pool = CVMX_FPA_PACKET_POOL;
	work->packet_ptr.s.size = CVMX_FPA_PACKET_POOL_SIZE;
	work->packet_ptr.s.back = (copy_location - packet_buffer) >> 7;

	if (skb->protocol == htons(ETH_P_IP)) {
		work->word2.s.ip_offset = 14;
#if 0
		work->word2.s.vlan_valid = 0;	/* FIXME */
		work->word2.s.vlan_cfi = 0;	/* FIXME */
		work->word2.s.vlan_id = 0;	/* FIXME */
		work->word2.s.dec_ipcomp = 0;	/* FIXME */
#endif
		work->word2.s.tcp_or_udp =
		    (ip_hdr(skb)->protocol == IP_PROTOCOL_TCP)
		    || (ip_hdr(skb)->protocol == IP_PROTOCOL_UDP);
#if 0
		/* FIXME */
		work->word2.s.dec_ipsec = 0;
		/* We only support IPv4 right now */
		work->word2.s.is_v6 = 0;
		/* Hardware would set to zero */
		work->word2.s.software = 0;
		/* No error, packet is internal */
		work->word2.s.L4_error = 0;
#endif
		work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0)
					  || (ip_hdr(skb)->frag_off ==
					      1 << 14));
#if 0
		/* Assume Linux is sending a good packet */
		work->word2.s.IP_exc = 0;
#endif
		work->word2.s.is_bcast = (skb->pkt_type == PACKET_BROADCAST);
		work->word2.s.is_mcast = (skb->pkt_type == PACKET_MULTICAST);
#if 0
		/* This is an IP packet */
		work->word2.s.not_IP = 0;
		/* No error, packet is internal */
		work->word2.s.rcv_error = 0;
		/* No error, packet is internal */
		work->word2.s.err_code = 0;
#endif

		/*
		 * When copying the data, include 4 bytes of the
		 * ethernet header to align the same way hardware
		 * does.
		 */
		memcpy(work->packet_data, skb->data + 10,
		       sizeof(work->packet_data));
	} else {
#if 0
		work->word2.snoip.vlan_valid = 0;	/* FIXME */
		work->word2.snoip.vlan_cfi = 0;	/* FIXME */
		work->word2.snoip.vlan_id = 0;	/* FIXME */
		work->word2.snoip.software = 0;	/* Hardware would set to zero */
#endif
		work->word2.snoip.is_rarp = skb->protocol == htons(ETH_P_RARP);
		work->word2.snoip.is_arp = skb->protocol == htons(ETH_P_ARP);
		work->word2.snoip.is_bcast =
		    (skb->pkt_type == PACKET_BROADCAST);
		work->word2.snoip.is_mcast =
		    (skb->pkt_type == PACKET_MULTICAST);
		work->word2.snoip.not_IP = 1;	/* IP was done up above */
#if 0
		/* No error, packet is internal */
		work->word2.snoip.rcv_error = 0;
		/* No error, packet is internal */
		work->word2.snoip.err_code = 0;
#endif
		memcpy(work->packet_data, skb->data, sizeof(work->packet_data));
	}

	/* Submit the packet to the POW */
	cvmx_pow_work_submit(work, work->tag, work->tag_type, work->qos,
			     work->grp);
	priv->stats.tx_packets++;
	priv->stats.tx_bytes += skb->len;
	dev_kfree_skb(skb);
	return 0;
}

/**
 * Transmit a work queue entry out of the ethernet port. Both
 * the work queue entry and the packet data can optionally be
 * freed. The work will be freed on error as well.
 *
 * @dev: Device to transmit out.
 * @work_queue_entry:
 *       Work queue entry to send
 * @do_free: True if the work queue entry and packet data should be
 *       freed. If false, neither will be freed.
 * @qos: Index into the queues for this port to transmit on. This
 *       is used to implement QoS if there are multiple queues per
 *       port. This parameter must be between 0 and the number of
 *       queues per port minus 1. Values outside of this range will
 *       be changed to zero.
 *
 * Returns Zero on success, negative on failure.
 */
int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry,
			 int do_free, int qos)
{
	unsigned long flags;
	union cvmx_buf_ptr hw_buffer;
	cvmx_pko_command_word0_t pko_command;
	int dropped;
	struct octeon_ethernet *priv = netdev_priv(dev);
	cvmx_wqe_t *work = work_queue_entry;

	if (!(dev->flags & IFF_UP)) {
		DEBUGPRINT("%s: Device not up\n", dev->name);
		if (do_free)
			cvm_oct_free_work(work);
		return -1;
	}

	/* The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to completely
	   remove "qos" in the event neither interface supports
	   multiple queues per port */
	if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
	    (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
		if (qos <= 0)
			qos = 0;
		else if (qos >= cvmx_pko_get_num_queues(priv->port))
			qos = 0;
	} else
		qos = 0;

	/* Start off assuming no drop */
	dropped = 0;

	local_irq_save(flags);
	cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
				     CVMX_PKO_LOCK_CMD_QUEUE);

	/* Build the PKO buffer pointer */
	hw_buffer.u64 = 0;
	hw_buffer.s.addr = work->packet_ptr.s.addr;
	hw_buffer.s.pool = CVMX_FPA_PACKET_POOL;
	hw_buffer.s.size = CVMX_FPA_PACKET_POOL_SIZE;
	hw_buffer.s.back = work->packet_ptr.s.back;

	/* Build the PKO command */
	pko_command.u64 = 0;
	pko_command.s.n2 = 1;	/* Don't pollute L2 with the outgoing packet */
	pko_command.s.dontfree = !do_free;
	pko_command.s.segs = work->word2.s.bufs;
	pko_command.s.total_bytes = work->len;

	/* Check if we can use the hardware checksumming */
	if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc))
		pko_command.s.ipoffp1 = 0;
	else
		pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1;

	/* Send the packet to the output queue */
	if (unlikely
	    (cvmx_pko_send_packet_finish
	     (priv->port, priv->queue + qos, pko_command, hw_buffer,
	      CVMX_PKO_LOCK_CMD_QUEUE))) {
		DEBUGPRINT("%s: Failed to send the packet\n", dev->name);
		dropped = -1;
	}
	local_irq_restore(flags);

	if (unlikely(dropped)) {
		if (do_free)
			cvm_oct_free_work(work);
		priv->stats.tx_dropped++;
	} else if (do_free)
		cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));

	return dropped;
}
EXPORT_SYMBOL(cvm_oct_transmit_qos);
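
/*
 * For example, a caller that builds its own work queue entries could
 * send one on queue 0 and let the hardware free both the entry and
 * the packet data (hypothetical usage, not from this driver):
 *
 *	if (cvm_oct_transmit_qos(dev, work, 1, 0) < 0)
 *		pr_err("%s: transmit failed\n", dev->name);
 */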

/**
 * This function frees all skbs that are currently queued for TX.
 *
 * @dev: Device being shutdown
 */
void cvm_oct_tx_shutdown(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	unsigned long flags;
	int qos;

	for (qos = 0; qos < 16; qos++) {
		spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
		while (skb_queue_len(&priv->tx_free_list[qos]))
			dev_kfree_skb_any(__skb_dequeue
					  (&priv->tx_free_list[qos]));
		spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
	}
}