From 986ce4510b37ba684aaebd3eb0e6ee0c03906ed8 Mon Sep 17 00:00:00 2001 From: Pete Eberlein Date: Wed, 13 Jan 2010 19:15:48 -0300 Subject: V4L/DVB: s2250: Fix write_reg i2c address The kernel i2c model uses right-aligned 7-bit i2c addresses, but the 2250 firmware uses an 8-bit address in the usb vendor request. A previous patch by Jean Delvare shifted the i2c addresses 1 bit to the right, and this patch fixes the write_reg function to shift it back before sending the vendor request. To unsubscribe from this list: send the line "unsubscribe linux-media" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Signed-off-by: Pete Eberlein Signed-off-by: Mauro Carvalho Chehab --- drivers/staging/go7007/s2250-board.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/staging') diff --git a/drivers/staging/go7007/s2250-board.c b/drivers/staging/go7007/s2250-board.c index 8cf7f2750b3f..c324f6ea002b 100644 --- a/drivers/staging/go7007/s2250-board.c +++ b/drivers/staging/go7007/s2250-board.c @@ -159,7 +159,7 @@ static int write_reg(struct i2c_client *client, u8 reg, u8 value) struct go7007 *go = i2c_get_adapdata(client->adapter); struct go7007_usb *usb; int rc; - int dev_addr = client->addr; + int dev_addr = client->addr << 1; /* firmware wants 8-bit address */ u8 *buf; if (go == NULL) -- cgit v1.2.2 From 6568a234363978e1aebb5b7c9840ed87eed20362 Mon Sep 17 00:00:00 2001 From: David Daney Date: Thu, 7 Jan 2010 11:05:01 -0800 Subject: Staging: Octeon Ethernet: Remove unused code. Remove unused code, reindent, and join some spilt strings. Signed-off-by: David Daney To: linux-mips@linux-mips.org To: gregkh@suse.de Patchwork: http://patchwork.linux-mips.org/patch/842/ Signed-off-by: Ralf Baechle --- drivers/staging/octeon/ethernet-defines.h | 10 ---- drivers/staging/octeon/ethernet-mem.c | 81 ++++++++----------------- drivers/staging/octeon/ethernet-rx.c | 77 ++++++++---------------- drivers/staging/octeon/ethernet-tx.c | 99 ------------------------------- drivers/staging/octeon/ethernet.c | 31 ++-------- drivers/staging/octeon/octeon-ethernet.h | 41 ------------- 6 files changed, 55 insertions(+), 284 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/octeon/ethernet-defines.h b/drivers/staging/octeon/ethernet-defines.h index f13131b03c33..6b8065f594bf 100644 --- a/drivers/staging/octeon/ethernet-defines.h +++ b/drivers/staging/octeon/ethernet-defines.h @@ -41,9 +41,6 @@ * Tells the driver to populate the packet buffers with kernel skbuffs. * This allows the driver to receive packets without copying them. It also * means that 32bit userspace can't access the packet buffers. - * USE_32BIT_SHARED - * This define tells the driver to allocate memory for buffers from the - * 32bit sahred region instead of the kernel memory space. * USE_HW_TCPUDP_CHECKSUM * Controls if the Octeon TCP/UDP checksum engine is used for packet * output. 
If this is zero, the kernel will perform the checksum in @@ -75,19 +72,12 @@ #define CONFIG_CAVIUM_RESERVE32 0 #endif -#if CONFIG_CAVIUM_RESERVE32 -#define USE_32BIT_SHARED 1 -#define USE_SKBUFFS_IN_HW 0 -#define REUSE_SKBUFFS_WITHOUT_FREE 0 -#else -#define USE_32BIT_SHARED 0 #define USE_SKBUFFS_IN_HW 1 #ifdef CONFIG_NETFILTER #define REUSE_SKBUFFS_WITHOUT_FREE 0 #else #define REUSE_SKBUFFS_WITHOUT_FREE 1 #endif -#endif /* Max interrupts per second per core */ #define INTERRUPT_LIMIT 10000 diff --git a/drivers/staging/octeon/ethernet-mem.c b/drivers/staging/octeon/ethernet-mem.c index b595903e2af1..7090521471b2 100644 --- a/drivers/staging/octeon/ethernet-mem.c +++ b/drivers/staging/octeon/ethernet-mem.c @@ -26,8 +26,6 @@ **********************************************************************/ #include #include -#include -#include #include @@ -107,42 +105,17 @@ static int cvm_oct_fill_hw_memory(int pool, int size, int elements) char *memory; int freed = elements; - if (USE_32BIT_SHARED) { - extern uint64_t octeon_reserve32_memory; - - memory = - cvmx_bootmem_alloc_range(elements * size, 128, - octeon_reserve32_memory, - octeon_reserve32_memory + - (CONFIG_CAVIUM_RESERVE32 << 20) - - 1); - if (memory == NULL) - panic("Unable to allocate %u bytes for FPA pool %d\n", - elements * size, pool); - - pr_notice("Memory range %p - %p reserved for " - "hardware\n", memory, - memory + elements * size - 1); - - while (freed) { - cvmx_fpa_free(memory, pool, 0); - memory += size; - freed--; - } - } else { - while (freed) { - /* We need to force alignment to 128 bytes here */ - memory = kmalloc(size + 127, GFP_ATOMIC); - if (unlikely(memory == NULL)) { - pr_warning("Unable to allocate %u bytes for " - "FPA pool %d\n", - elements * size, pool); - break; - } - memory = (char *)(((unsigned long)memory + 127) & -128); - cvmx_fpa_free(memory, pool, 0); - freed--; + while (freed) { + /* We need to force alignment to 128 bytes here */ + memory = kmalloc(size + 127, GFP_ATOMIC); + if (unlikely(memory == NULL)) { + pr_warning("Unable to allocate %u bytes for FPA pool %d\n", + elements * size, pool); + break; } + memory = (char *)(((unsigned long)memory + 127) & -128); + cvmx_fpa_free(memory, pool, 0); + freed--; } return elements - freed; } @@ -156,27 +129,21 @@ static int cvm_oct_fill_hw_memory(int pool, int size, int elements) */ static void cvm_oct_free_hw_memory(int pool, int size, int elements) { - if (USE_32BIT_SHARED) { - pr_warning("Warning: 32 shared memory is not freeable\n"); - } else { - char *memory; - do { - memory = cvmx_fpa_alloc(pool); - if (memory) { - elements--; - kfree(phys_to_virt(cvmx_ptr_to_phys(memory))); - } - } while (memory); + char *memory; + do { + memory = cvmx_fpa_alloc(pool); + if (memory) { + elements--; + kfree(phys_to_virt(cvmx_ptr_to_phys(memory))); + } + } while (memory); - if (elements < 0) - pr_warning("Freeing of pool %u had too many " - "buffers (%d)\n", - pool, elements); - else if (elements > 0) - pr_warning("Warning: Freeing of pool %u is " - "missing %d buffers\n", - pool, elements); - } + if (elements < 0) + pr_warning("Freeing of pool %u had too many buffers (%d)\n", + pool, elements); + else if (elements > 0) + pr_warning("Warning: Freeing of pool %u is missing %d buffers\n", + pool, elements); } int cvm_oct_mem_fill_fpa(int pool, int size, int elements) diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c index 1b237b7e689d..f63459a96dad 100644 --- a/drivers/staging/octeon/ethernet-rx.c +++ b/drivers/staging/octeon/ethernet-rx.c 
@@ -33,10 +33,6 @@ #include #include #include -#include -#include -#include -#include #include #ifdef CONFIG_XFRM #include @@ -292,39 +288,27 @@ void cvm_oct_tasklet_rx(unsigned long unused) * buffer. */ if (likely(skb_in_hw)) { - /* - * This calculation was changed in case the - * skb header is using a different address - * aliasing type than the buffer. It doesn't - * make any differnece now, but the new one is - * more correct. - */ - skb->data = - skb->head + work->packet_ptr.s.addr - - cvmx_ptr_to_phys(skb->head); + skb->data = skb->head + work->packet_ptr.s.addr - cvmx_ptr_to_phys(skb->head); prefetch(skb->data); skb->len = work->len; skb_set_tail_pointer(skb, skb->len); packet_not_copied = 1; } else { - /* * We have to copy the packet. First allocate * an skbuff for it. */ skb = dev_alloc_skb(work->len); if (!skb) { - DEBUGPRINT("Port %d failed to allocate " - "skbuff, packet dropped\n", - work->ipprt); + DEBUGPRINT("Port %d failed to allocate skbuff, packet dropped\n", + work->ipprt); cvm_oct_free_work(work); continue; } /* * Check if we've received a packet that was - * entirely stored in the work entry. This is - * untested. + * entirely stored in the work entry. */ if (unlikely(work->word2.s.bufs == 0)) { uint8_t *ptr = work->packet_data; @@ -343,15 +327,13 @@ void cvm_oct_tasklet_rx(unsigned long unused) /* No packet buffers to free */ } else { int segments = work->word2.s.bufs; - union cvmx_buf_ptr segment_ptr = - work->packet_ptr; + union cvmx_buf_ptr segment_ptr = work->packet_ptr; int len = work->len; while (segments--) { union cvmx_buf_ptr next_ptr = - *(union cvmx_buf_ptr *) - cvmx_phys_to_ptr(segment_ptr.s. - addr - 8); + *(union cvmx_buf_ptr *)cvmx_phys_to_ptr(segment_ptr.s.addr - 8); + /* * Octeon Errata PKI-100: The segment size is * wrong. Until it is fixed, calculate the @@ -361,22 +343,18 @@ void cvm_oct_tasklet_rx(unsigned long unused) * one: int segment_size = * segment_ptr.s.size; */ - int segment_size = - CVMX_FPA_PACKET_POOL_SIZE - - (segment_ptr.s.addr - - (((segment_ptr.s.addr >> 7) - - segment_ptr.s.back) << 7)); - /* Don't copy more than what is left - in the packet */ + int segment_size = CVMX_FPA_PACKET_POOL_SIZE - + (segment_ptr.s.addr - (((segment_ptr.s.addr >> 7) - segment_ptr.s.back) << 7)); + /* + * Don't copy more than what + * is left in the packet. + */ if (segment_size > len) segment_size = len; /* Copy the data into the packet */ memcpy(skb_put(skb, segment_size), - cvmx_phys_to_ptr(segment_ptr.s. - addr), + cvmx_phys_to_ptr(segment_ptr.s.addr), segment_size); - /* Reduce the amount of bytes left - to copy */ len -= segment_size; segment_ptr = next_ptr; } @@ -389,16 +367,15 @@ void cvm_oct_tasklet_rx(unsigned long unused) struct net_device *dev = cvm_oct_device[work->ipprt]; struct octeon_ethernet *priv = netdev_priv(dev); - /* Only accept packets for devices - that are currently up */ + /* + * Only accept packets for devices that are + * currently up. + */ if (likely(dev->flags & IFF_UP)) { skb->protocol = eth_type_trans(skb, dev); skb->dev = dev; - if (unlikely - (work->word2.s.not_IP - || work->word2.s.IP_exc - || work->word2.s.L4_error)) + if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc || work->word2.s.L4_error)) skb->ip_summed = CHECKSUM_NONE; else skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -415,14 +392,11 @@ void cvm_oct_tasklet_rx(unsigned long unused) } netif_receive_skb(skb); } else { + /* Drop any packet received for a device that isn't up */ /* - * Drop any packet received for a - * device that isn't up. 
- */ - /* - DEBUGPRINT("%s: Device not up, packet dropped\n", - dev->name); - */ + DEBUGPRINT("%s: Device not up, packet dropped\n", + dev->name); + */ #ifdef CONFIG_64BIT atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped); #else @@ -435,9 +409,8 @@ void cvm_oct_tasklet_rx(unsigned long unused) * Drop any packet received for a device that * doesn't exist. */ - DEBUGPRINT("Port %d not controlled by Linux, packet " - "dropped\n", - work->ipprt); + DEBUGPRINT("Port %d not controlled by Linux, packet dropped\n", + work->ipprt); dev_kfree_skb_irq(skb); } /* diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c index 535294105f65..a3594bb0a45d 100644 --- a/drivers/staging/octeon/ethernet-tx.c +++ b/drivers/staging/octeon/ethernet-tx.c @@ -31,10 +31,6 @@ #include #include #include -#include -#include -#include -#include #include #ifdef CONFIG_XFRM #include @@ -528,101 +524,6 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev) return 0; } -/** - * Transmit a work queue entry out of the ethernet port. Both - * the work queue entry and the packet data can optionally be - * freed. The work will be freed on error as well. - * - * @dev: Device to transmit out. - * @work_queue_entry: - * Work queue entry to send - * @do_free: True if the work queue entry and packet data should be - * freed. If false, neither will be freed. - * @qos: Index into the queues for this port to transmit on. This - * is used to implement QoS if their are multiple queues per - * port. This parameter must be between 0 and the number of - * queues per port minus 1. Values outside of this range will - * be change to zero. - * - * Returns Zero on success, negative on failure. - */ -int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry, - int do_free, int qos) -{ - unsigned long flags; - union cvmx_buf_ptr hw_buffer; - cvmx_pko_command_word0_t pko_command; - int dropped; - struct octeon_ethernet *priv = netdev_priv(dev); - cvmx_wqe_t *work = work_queue_entry; - - if (!(dev->flags & IFF_UP)) { - DEBUGPRINT("%s: Device not up\n", dev->name); - if (do_free) - cvm_oct_free_work(work); - return -1; - } - - /* The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to completely - remove "qos" in the event neither interface supports - multiple queues per port */ - if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) || - (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) { - if (qos <= 0) - qos = 0; - else if (qos >= cvmx_pko_get_num_queues(priv->port)) - qos = 0; - } else - qos = 0; - - /* Start off assuming no drop */ - dropped = 0; - - local_irq_save(flags); - cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos, - CVMX_PKO_LOCK_CMD_QUEUE); - - /* Build the PKO buffer pointer */ - hw_buffer.u64 = 0; - hw_buffer.s.addr = work->packet_ptr.s.addr; - hw_buffer.s.pool = CVMX_FPA_PACKET_POOL; - hw_buffer.s.size = CVMX_FPA_PACKET_POOL_SIZE; - hw_buffer.s.back = work->packet_ptr.s.back; - - /* Build the PKO command */ - pko_command.u64 = 0; - pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */ - pko_command.s.dontfree = !do_free; - pko_command.s.segs = work->word2.s.bufs; - pko_command.s.total_bytes = work->len; - - /* Check if we can use the hardware checksumming */ - if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc)) - pko_command.s.ipoffp1 = 0; - else - pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1; - - /* Send the packet to the output queue */ - if (unlikely - (cvmx_pko_send_packet_finish - (priv->port, priv->queue + qos, pko_command, hw_buffer, - 
CVMX_PKO_LOCK_CMD_QUEUE))) { - DEBUGPRINT("%s: Failed to send the packet\n", dev->name); - dropped = -1; - } - local_irq_restore(flags); - - if (unlikely(dropped)) { - if (do_free) - cvm_oct_free_work(work); - priv->stats.tx_dropped++; - } else if (do_free) - cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1)); - - return dropped; -} -EXPORT_SYMBOL(cvm_oct_transmit_qos); - /** * This function frees all skb that are currently queued for TX. * diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c index 4cfd4b136b32..4e054262a005 100644 --- a/drivers/staging/octeon/ethernet.c +++ b/drivers/staging/octeon/ethernet.c @@ -104,14 +104,6 @@ MODULE_PARM_DESC(pow_send_list, "\n" "\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n" "\tusing the pow_send_group."); -static int disable_core_queueing = 1; -module_param(disable_core_queueing, int, 0444); -MODULE_PARM_DESC(disable_core_queueing, "\n" - "\tWhen set the networking core's tx_queue_len is set to zero. This\n" - "\tallows packets to be sent without lock contention in the packet\n" - "\tscheduler resulting in some cases in improved throughput.\n"); - - /* * The offset from mac_addr_base that should be used for the next port * that is configured. By convention, if any mgmt ports exist on the @@ -205,10 +197,6 @@ static __init void cvm_oct_configure_common_hw(void) cvmx_helper_setup_red(num_packet_buffers / 4, num_packet_buffers / 8); - /* Enable the MII interface */ - if (!octeon_is_simulation()) - cvmx_write_csr(CVMX_SMIX_EN(0), 1); - /* Register an IRQ hander for to receive POW interrupts */ r = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_do_interrupt, IRQF_SHARED, "Ethernet", @@ -689,7 +677,6 @@ static int __init cvm_oct_init_module(void) if (dev) { /* Initialize the device private structure. */ struct octeon_ethernet *priv = netdev_priv(dev); - memset(priv, 0, sizeof(struct octeon_ethernet)); dev->netdev_ops = &cvm_oct_pow_netdev_ops; priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED; @@ -700,19 +687,16 @@ static int __init cvm_oct_init_module(void) skb_queue_head_init(&priv->tx_free_list[qos]); if (register_netdev(dev) < 0) { - pr_err("Failed to register ethernet " - "device for POW\n"); + pr_err("Failed to register ethernet device for POW\n"); kfree(dev); } else { cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev; - pr_info("%s: POW send group %d, receive " - "group %d\n", - dev->name, pow_send_group, - pow_receive_group); + pr_info("%s: POW send group %d, receive group %d\n", + dev->name, pow_send_group, + pow_receive_group); } } else { - pr_err("Failed to allocate ethernet device " - "for POW\n"); + pr_err("Failed to allocate ethernet device for POW\n"); } } @@ -730,12 +714,9 @@ static int __init cvm_oct_init_module(void) struct net_device *dev = alloc_etherdev(sizeof(struct octeon_ethernet)); if (!dev) { - pr_err("Failed to allocate ethernet device " - "for port %d\n", port); + pr_err("Failed to allocate ethernet device for port %d\n", port); continue; } - if (disable_core_queueing) - dev->tx_queue_len = 0; /* Initialize the device private structure. */ priv = netdev_priv(dev); diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h index 402a15b9bb0e..208da27bc02d 100644 --- a/drivers/staging/octeon/octeon-ethernet.h +++ b/drivers/staging/octeon/octeon-ethernet.h @@ -68,47 +68,6 @@ struct octeon_ethernet { */ int cvm_oct_free_work(void *work_queue_entry); -/** - * Transmit a work queue entry out of the ethernet port. 
Both - * the work queue entry and the packet data can optionally be - * freed. The work will be freed on error as well. - * - * @dev: Device to transmit out. - * @work_queue_entry: - * Work queue entry to send - * @do_free: True if the work queue entry and packet data should be - * freed. If false, neither will be freed. - * @qos: Index into the queues for this port to transmit on. This - * is used to implement QoS if their are multiple queues per - * port. This parameter must be between 0 and the number of - * queues per port minus 1. Values outside of this range will - * be change to zero. - * - * Returns Zero on success, negative on failure. - */ -int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry, - int do_free, int qos); - -/** - * Transmit a work queue entry out of the ethernet port. Both - * the work queue entry and the packet data can optionally be - * freed. The work will be freed on error as well. This simply - * wraps cvmx_oct_transmit_qos() for backwards compatability. - * - * @dev: Device to transmit out. - * @work_queue_entry: - * Work queue entry to send - * @do_free: True if the work queue entry and packet data should be - * freed. If false, neither will be freed. - * - * Returns Zero on success, negative on failure. - */ -static inline int cvm_oct_transmit(struct net_device *dev, - void *work_queue_entry, int do_free) -{ - return cvm_oct_transmit_qos(dev, work_queue_entry, do_free, 0); -} - extern int cvm_oct_rgmii_init(struct net_device *dev); extern void cvm_oct_rgmii_uninit(struct net_device *dev); extern int cvm_oct_rgmii_open(struct net_device *dev); -- cgit v1.2.2 From 166bdaa9aad9903bf4330ef68feb37f220c9eac8 Mon Sep 17 00:00:00 2001 From: David Daney Date: Wed, 27 Jan 2010 13:22:53 -0800 Subject: Staging: Octeon Ethernet: Fix memory allocation. After aligning the blocks returned by kmalloc, we need to save the original pointer so they can be correctly freed. There are no guarantees about the alignment of SKB data, so we need to handle worst case alignment. Since right shifts over subtraction have no distributive property, we need to fix the back pointer calculation. 
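To make the two fixes concrete, here is a minimal user-space sketch under stated assumptions: malloc/free stand in for kmalloc/kfree, and every name is illustrative rather than the driver's API. It shows the save-the-original-pointer alignment trick used for the FPA pool buffers, and why the "back" count must be computed as a difference of shifted addresses instead of a shifted difference.

/*
 * Minimal user-space sketch of the two ideas in this patch.  malloc/free
 * stand in for kmalloc/kfree and all names are illustrative, not the
 * driver's API.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Return a 128-byte aligned block.  The original allocation pointer is
 * stored in the bytes just before the aligned block so it can be
 * recovered at free time -- the same trick the patch applies to the
 * FPA pool buffers.
 */
static void *alloc_aligned_128(size_t size)
{
        char *memory = malloc(size + 256);  /* room for alignment + saved pointer */
        char *aligned;

        if (!memory)
                return NULL;
        aligned = (char *)(((uintptr_t)memory + 256) & ~(uintptr_t)0x7f);
        *((char **)aligned - 1) = memory;   /* remember what to free */
        return aligned;
}

static void free_aligned_128(void *aligned)
{
        free(*((char **)aligned - 1));      /* recover the original pointer */
}

int main(void)
{
        /*
         * "back" counts 128-byte cache-line boundaries between the buffer
         * start and the data pointer.  Right shift does not distribute over
         * subtraction, so the two formulas disagree whenever the pointers
         * are not themselves 128-byte aligned.
         */
        uintptr_t fpa_head = 0x1050;        /* buffer start, not 128-byte aligned */
        uintptr_t data     = 0x1080;        /* data pointer, one cache line further */

        unsigned old_back = (unsigned)((data - fpa_head) >> 7);         /* 0 - wrong */
        unsigned new_back = (unsigned)((data >> 7) - (fpa_head >> 7));  /* 1 - correct */
        printf("old back = %u, new back = %u\n", old_back, new_back);

        void *buf = alloc_aligned_128(2048);
        if (buf) {
                printf("128-byte aligned: %s\n",
                       ((uintptr_t)buf & 0x7f) ? "no" : "yes");
                free_aligned_128(buf);
        }
        return 0;
}

With the sample addresses above, the old formula reports zero 128-byte lines of "back" even though the data pointer sits one cache line past the buffer start, which is exactly what the hw_buffer.s.back change in ethernet-tx.c corrects.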
Signed-off-by: David Daney To: linux-mips@linux-mips.org Patchwork: http://patchwork.linux-mips.org/patch/884/ Signed-off-by: Ralf Baechle --- drivers/staging/octeon/ethernet-mem.c | 45 ++++++++++++++++++++++------------- drivers/staging/octeon/ethernet-tx.c | 6 ++--- 2 files changed, 31 insertions(+), 20 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/octeon/ethernet-mem.c b/drivers/staging/octeon/ethernet-mem.c index 7090521471b2..53ed2f7ffdfd 100644 --- a/drivers/staging/octeon/ethernet-mem.c +++ b/drivers/staging/octeon/ethernet-mem.c @@ -4,7 +4,7 @@ * Contact: support@caviumnetworks.com * This file is part of the OCTEON SDK * - * Copyright (c) 2003-2007 Cavium Networks + * Copyright (c) 2003-2010 Cavium Networks * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as @@ -45,7 +45,7 @@ static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements) int freed = elements; while (freed) { - struct sk_buff *skb = dev_alloc_skb(size + 128); + struct sk_buff *skb = dev_alloc_skb(size + 256); if (unlikely(skb == NULL)) { pr_warning ("Failed to allocate skb for hardware pool %d\n", @@ -53,7 +53,7 @@ static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements) break; } - skb_reserve(skb, 128 - (((unsigned long)skb->data) & 0x7f)); + skb_reserve(skb, 256 - (((unsigned long)skb->data) & 0x7f)); *(struct sk_buff **)(skb->data - sizeof(void *)) = skb; cvmx_fpa_free(skb->data, pool, DONT_WRITEBACK(size / 128)); freed--; @@ -91,10 +91,7 @@ static void cvm_oct_free_hw_skbuff(int pool, int size, int elements) } /** - * This function fills a hardware pool with memory. Depending - * on the config defines, this memory might come from the - * kernel or global 32bit memory allocated with - * cvmx_bootmem_alloc. + * This function fills a hardware pool with memory. * * @pool: Pool to populate * @size: Size of each buffer in the pool @@ -103,18 +100,29 @@ static void cvm_oct_free_hw_skbuff(int pool, int size, int elements) static int cvm_oct_fill_hw_memory(int pool, int size, int elements) { char *memory; + char *fpa; int freed = elements; while (freed) { - /* We need to force alignment to 128 bytes here */ - memory = kmalloc(size + 127, GFP_ATOMIC); + /* + * FPA memory must be 128 byte aligned. Since we are + * aligning we need to save the original pointer so we + * can feed it to kfree when the memory is returned to + * the kernel. + * + * We allocate an extra 256 bytes to allow for + * alignment and space for the original pointer saved + * just before the block. 
+ */ + memory = kmalloc(size + 256, GFP_ATOMIC); if (unlikely(memory == NULL)) { pr_warning("Unable to allocate %u bytes for FPA pool %d\n", elements * size, pool); break; } - memory = (char *)(((unsigned long)memory + 127) & -128); - cvmx_fpa_free(memory, pool, 0); + fpa = (char *)(((unsigned long)memory + 256) & ~0x7fUL); + *((char **)fpa - 1) = memory; + cvmx_fpa_free(fpa, pool, 0); freed--; } return elements - freed; @@ -130,13 +138,16 @@ static int cvm_oct_fill_hw_memory(int pool, int size, int elements) static void cvm_oct_free_hw_memory(int pool, int size, int elements) { char *memory; + char *fpa; do { - memory = cvmx_fpa_alloc(pool); - if (memory) { + fpa = cvmx_fpa_alloc(pool); + if (fpa) { elements--; - kfree(phys_to_virt(cvmx_ptr_to_phys(memory))); + fpa = (char *)phys_to_virt(cvmx_ptr_to_phys(fpa)); + memory = *((char **)fpa - 1); + kfree(memory); } - } while (memory); + } while (fpa); if (elements < 0) pr_warning("Freeing of pool %u had too many buffers (%d)\n", @@ -149,7 +160,7 @@ static void cvm_oct_free_hw_memory(int pool, int size, int elements) int cvm_oct_mem_fill_fpa(int pool, int size, int elements) { int freed; - if (USE_SKBUFFS_IN_HW) + if (USE_SKBUFFS_IN_HW && pool == CVMX_FPA_PACKET_POOL) freed = cvm_oct_fill_hw_skbuff(pool, size, elements); else freed = cvm_oct_fill_hw_memory(pool, size, elements); @@ -158,7 +169,7 @@ int cvm_oct_mem_fill_fpa(int pool, int size, int elements) void cvm_oct_mem_empty_fpa(int pool, int size, int elements) { - if (USE_SKBUFFS_IN_HW) + if (USE_SKBUFFS_IN_HW && pool == CVMX_FPA_PACKET_POOL) cvm_oct_free_hw_skbuff(pool, size, elements); else cvm_oct_free_hw_memory(pool, size, elements); diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c index a3594bb0a45d..e5695d964d9a 100644 --- a/drivers/staging/octeon/ethernet-tx.c +++ b/drivers/staging/octeon/ethernet-tx.c @@ -4,7 +4,7 @@ * Contact: support@caviumnetworks.com * This file is part of the OCTEON SDK * - * Copyright (c) 2003-2007 Cavium Networks + * Copyright (c) 2003-2010 Cavium Networks * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as @@ -186,7 +186,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) * shown a 25% increase in performance under some loads. */ #if REUSE_SKBUFFS_WITHOUT_FREE - fpa_head = skb->head + 128 - ((unsigned long)skb->head & 0x7f); + fpa_head = skb->head + 256 - ((unsigned long)skb->head & 0x7f); if (unlikely(skb->data < fpa_head)) { /* * printk("TX buffer beginning can't meet FPA @@ -247,7 +247,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) pko_command.s.reg0 = 0; pko_command.s.dontfree = 0; - hw_buffer.s.back = (skb->data - fpa_head) >> 7; + hw_buffer.s.back = ((unsigned long)skb->data >> 7) - ((unsigned long)fpa_head >> 7); *(struct sk_buff **)(fpa_head - sizeof(void *)) = skb; /* -- cgit v1.2.2 From 6888fc87768eaa218b6244f2e78c55416706981a Mon Sep 17 00:00:00 2001 From: David Daney Date: Thu, 7 Jan 2010 11:05:03 -0800 Subject: Staging: Octeon Ethernet: Rewrite transmit code. Stop the queue if too many packets are queued. Restart it from a high resolution timer. 
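A stripped-down sketch of that stop/restart mechanism is shown here before the full diff; the struct and function names are placeholders for illustration only (the patch itself hangs the hrtimer off struct octeon_ethernet). When the transmit path finds too many packets already queued, it stops the netdev queue and arms a short relative hrtimer whose callback simply wakes the queue again.

/*
 * Illustrative sketch of the stop/restart pattern introduced here.
 * Field and function names are placeholders, not the driver's API.
 */
#include <linux/kernel.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/netdevice.h>

struct my_priv {
        struct net_device *dev;
        struct hrtimer tx_restart_timer;
        ktime_t tx_restart_interval;
};

/* Timer callback: transmit resources should have drained by now. */
static enum hrtimer_restart my_restart_tx(struct hrtimer *timer)
{
        struct my_priv *priv = container_of(timer, struct my_priv,
                                            tx_restart_timer);

        if (netif_queue_stopped(priv->dev))
                netif_wake_queue(priv->dev);

        return HRTIMER_NORESTART;       /* one-shot timer */
}

static void my_tx_setup(struct my_priv *priv)
{
        hrtimer_init(&priv->tx_restart_timer, CLOCK_MONOTONIC,
                     HRTIMER_MODE_REL);
        priv->tx_restart_timer.function = my_restart_tx;
        priv->tx_restart_interval = ktime_set(0, 50000);        /* 50 us */
}

/* Called from the xmit path when too many packets are already queued. */
static void my_tx_backpressure(struct my_priv *priv)
{
        netif_stop_queue(priv->dev);
        hrtimer_start(&priv->tx_restart_timer, priv->tx_restart_interval,
                      HRTIMER_MODE_REL);
}

The intervals the patch later picks in ethernet.c (5000 ns for 10GE, 50000 ns for 1GE interfaces) are sized so roughly one hundred 64-byte packets can drain before the queue is woken, per the comment added in the diff below.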
Rearrange and simplify locking and SKB freeing code Signed-off-by: David Daney To: linux-mips@linux-mips.org To: gregkh@suse.de Patchwork: http://patchwork.linux-mips.org/patch/843/ Signed-off-by: Ralf Baechle --- drivers/staging/octeon/Kconfig | 1 + drivers/staging/octeon/ethernet-tx.c | 172 ++++++++++++++++++++----------- drivers/staging/octeon/ethernet-tx.h | 27 +---- drivers/staging/octeon/ethernet.c | 69 ++++++------- drivers/staging/octeon/octeon-ethernet.h | 4 + 5 files changed, 150 insertions(+), 123 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/octeon/Kconfig b/drivers/staging/octeon/Kconfig index 638ad6b35891..579b8f129e6e 100644 --- a/drivers/staging/octeon/Kconfig +++ b/drivers/staging/octeon/Kconfig @@ -3,6 +3,7 @@ config OCTEON_ETHERNET depends on CPU_CAVIUM_OCTEON select PHYLIB select MDIO_OCTEON + select HIGH_RES_TIMERS help This driver supports the builtin ethernet ports on Cavium Networks' products in the Octeon family. This driver supports the diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c index e5695d964d9a..05b58f8b58fd 100644 --- a/drivers/staging/octeon/ethernet-tx.c +++ b/drivers/staging/octeon/ethernet-tx.c @@ -64,6 +64,49 @@ #define GET_SKBUFF_QOS(skb) 0 #endif + +static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau) +{ + int32_t undo; + undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE; + if (undo > 0) + cvmx_fau_atomic_add32(fau, -undo); + skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE : -skb_to_free; + return skb_to_free; +} + +void cvm_oct_free_tx_skbs(struct octeon_ethernet *priv) +{ + int32_t skb_to_free; + int qos, queues_per_port; + queues_per_port = cvmx_pko_get_num_queues(priv->port); + /* Drain any pending packets in the free list */ + for (qos = 0; qos < queues_per_port; qos++) { + if (skb_queue_len(&priv->tx_free_list[qos]) == 0) + continue; + skb_to_free = cvmx_fau_fetch_and_add32(priv->fau+qos*4, MAX_SKB_TO_FREE); + skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau+qos*4); + + while (skb_to_free > 0) { + dev_kfree_skb_any(skb_dequeue(&priv->tx_free_list[qos])); + skb_to_free--; + } + } +} + +enum hrtimer_restart cvm_oct_restart_tx(struct hrtimer *timer) +{ + struct octeon_ethernet *priv = container_of(timer, struct octeon_ethernet, tx_restart_timer); + struct net_device *dev = cvm_oct_device[priv->port]; + + cvm_oct_free_tx_skbs(priv); + + if (netif_queue_stopped(dev)) + netif_wake_queue(dev); + + return HRTIMER_NORESTART; +} + /** * Packet transmit * @@ -77,13 +120,13 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) union cvmx_buf_ptr hw_buffer; uint64_t old_scratch; uint64_t old_scratch2; - int dropped; int qos; - int queue_it_up; + enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type; struct octeon_ethernet *priv = netdev_priv(dev); + struct sk_buff *to_free_list; int32_t skb_to_free; - int32_t undo; int32_t buffers_to_free; + unsigned long flags; #if REUSE_SKBUFFS_WITHOUT_FREE unsigned char *fpa_head; #endif @@ -94,9 +137,6 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) */ prefetch(priv); - /* Start off assuming no drop */ - dropped = 0; - /* * The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to * completely remove "qos" in the event neither interface @@ -268,9 +308,9 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) skb->tc_verd = 0; #endif /* CONFIG_NET_CLS_ACT */ #endif /* CONFIG_NET_SCHED */ +#endif /* REUSE_SKBUFFS_WITHOUT_FREE */ 
dont_put_skbuff_in_hw: -#endif /* REUSE_SKBUFFS_WITHOUT_FREE */ /* Check if we can use the hardware checksumming */ if (USE_HW_TCPUDP_CHECKSUM && (skb->protocol == htons(ETH_P_IP)) && @@ -295,18 +335,7 @@ dont_put_skbuff_in_hw: cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0); } - /* - * We try to claim MAX_SKB_TO_FREE buffers. If there were not - * that many available, we have to un-claim (undo) any that - * were in excess. If skb_to_free is positive we will free - * that many buffers. - */ - undo = skb_to_free > 0 ? - MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE; - if (undo > 0) - cvmx_fau_atomic_add32(priv->fau+qos*4, -undo); - skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? - MAX_SKB_TO_FREE : -skb_to_free; + skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau+qos*4); /* * If we're sending faster than the receive can free them then @@ -317,60 +346,83 @@ dont_put_skbuff_in_hw: pko_command.s.reg0 = priv->fau + qos * 4; } - cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos, - CVMX_PKO_LOCK_CMD_QUEUE); + if (pko_command.s.dontfree) + queue_type = QUEUE_CORE; + else + queue_type = QUEUE_HW; + + spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags); /* Drop this packet if we have too many already queued to the HW */ - if (unlikely - (skb_queue_len(&priv->tx_free_list[qos]) >= MAX_OUT_QUEUE_DEPTH)) { - /* - DEBUGPRINT("%s: Tx dropped. Too many queued\n", dev->name); - */ - dropped = 1; + if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >= MAX_OUT_QUEUE_DEPTH)) { + if (dev->tx_queue_len != 0) { + /* Drop the lock when notifying the core. */ + spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags); + netif_stop_queue(dev); + hrtimer_start(&priv->tx_restart_timer, + priv->tx_restart_interval, HRTIMER_MODE_REL); + spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags); + + } else { + /* If not using normal queueing. 
*/ + queue_type = QUEUE_DROP; + goto skip_xmit; + } } + + cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos, + CVMX_PKO_LOCK_NONE); + /* Send the packet to the output queue */ - else if (unlikely - (cvmx_pko_send_packet_finish - (priv->port, priv->queue + qos, pko_command, hw_buffer, - CVMX_PKO_LOCK_CMD_QUEUE))) { + if (unlikely(cvmx_pko_send_packet_finish(priv->port, + priv->queue + qos, + pko_command, hw_buffer, + CVMX_PKO_LOCK_NONE))) { DEBUGPRINT("%s: Failed to send the packet\n", dev->name); - dropped = 1; + queue_type = QUEUE_DROP; } +skip_xmit: + to_free_list = NULL; - if (USE_ASYNC_IOBDMA) { - /* Restore the scratch area */ - cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch); - cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2); + switch (queue_type) { + case QUEUE_DROP: + skb->next = to_free_list; + to_free_list = skb; + priv->stats.tx_dropped++; + break; + case QUEUE_HW: + cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, -1); + break; + case QUEUE_CORE: + __skb_queue_tail(&priv->tx_free_list[qos], skb); + break; + default: + BUG(); } - queue_it_up = 0; - if (unlikely(dropped)) { - dev_kfree_skb_any(skb); - priv->stats.tx_dropped++; - } else { - if (USE_SKBUFFS_IN_HW) { - /* Put this packet on the queue to be freed later */ - if (pko_command.s.dontfree) - queue_it_up = 1; - else - cvmx_fau_atomic_add32 - (FAU_NUM_PACKET_BUFFERS_TO_FREE, -1); - } else { - /* Put this packet on the queue to be freed later */ - queue_it_up = 1; - } + while (skb_to_free > 0) { + struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]); + t->next = to_free_list; + to_free_list = t; + skb_to_free--; } - if (queue_it_up) { - spin_lock(&priv->tx_free_list[qos].lock); - __skb_queue_tail(&priv->tx_free_list[qos], skb); - cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 0); - spin_unlock(&priv->tx_free_list[qos].lock); - } else { - cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 1); + spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags); + + /* Do the actual freeing outside of the lock. */ + while (to_free_list) { + struct sk_buff *t = to_free_list; + to_free_list = to_free_list->next; + dev_kfree_skb_any(t); } - return 0; + if (USE_ASYNC_IOBDMA) { + /* Restore the scratch area */ + cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch); + cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2); + } + + return NETDEV_TX_OK; } /** diff --git a/drivers/staging/octeon/ethernet-tx.h b/drivers/staging/octeon/ethernet-tx.h index c0bebf750bc0..b628d8c8421d 100644 --- a/drivers/staging/octeon/ethernet-tx.h +++ b/drivers/staging/octeon/ethernet-tx.h @@ -30,28 +30,5 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev); int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry, int do_free, int qos); void cvm_oct_tx_shutdown(struct net_device *dev); - -/** - * Free dead transmit skbs. - * - * @priv: The driver data - * @skb_to_free: The number of SKBs to free (free none if negative). - * @qos: The queue to free from. - * @take_lock: If true, acquire the skb list lock. - */ -static inline void cvm_oct_free_tx_skbs(struct octeon_ethernet *priv, - int skb_to_free, - int qos, int take_lock) -{ - /* Free skbuffs not in use by the hardware. 
*/ - if (skb_to_free > 0) { - if (take_lock) - spin_lock(&priv->tx_free_list[qos].lock); - while (skb_to_free > 0) { - dev_kfree_skb(__skb_dequeue(&priv->tx_free_list[qos])); - skb_to_free--; - } - if (take_lock) - spin_unlock(&priv->tx_free_list[qos].lock); - } -} +void cvm_oct_free_tx_skbs(struct octeon_ethernet *priv); +enum hrtimer_restart cvm_oct_restart_tx(struct hrtimer *timer); diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c index 4e054262a005..973178a80c93 100644 --- a/drivers/staging/octeon/ethernet.c +++ b/drivers/staging/octeon/ethernet.c @@ -131,50 +131,29 @@ struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS]; */ static void cvm_do_timer(unsigned long arg) { - int32_t skb_to_free, undo; - int queues_per_port; - int qos; - struct octeon_ethernet *priv; static int port; - - if (port >= CVMX_PIP_NUM_INPUT_PORTS) { + if (port < CVMX_PIP_NUM_INPUT_PORTS) { + if (cvm_oct_device[port]) { + struct octeon_ethernet *priv = netdev_priv(cvm_oct_device[port]); + if (priv->poll) + priv->poll(cvm_oct_device[port]); + cvm_oct_free_tx_skbs(priv); + cvm_oct_device[port]->netdev_ops->ndo_get_stats(cvm_oct_device[port]); + } + port++; /* - * All ports have been polled. Start the next - * iteration through the ports in one second. + * Poll the next port in a 50th of a second. This + * spreads the polling of ports out a little bit. */ + mod_timer(&cvm_oct_poll_timer, jiffies + HZ/50); + } else { port = 0; + /* + * All ports have been polled. Start the next iteration through + * the ports in one second. + */ mod_timer(&cvm_oct_poll_timer, jiffies + HZ); - return; } - if (!cvm_oct_device[port]) - goto out; - - priv = netdev_priv(cvm_oct_device[port]); - if (priv->poll) - priv->poll(cvm_oct_device[port]); - - queues_per_port = cvmx_pko_get_num_queues(port); - /* Drain any pending packets in the free list */ - for (qos = 0; qos < queues_per_port; qos++) { - if (skb_queue_len(&priv->tx_free_list[qos]) == 0) - continue; - skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4, - MAX_SKB_TO_FREE); - undo = skb_to_free > 0 ? - MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE; - if (undo > 0) - cvmx_fau_atomic_add32(priv->fau+qos*4, -undo); - skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? - MAX_SKB_TO_FREE : -skb_to_free; - cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 1); - } - cvm_oct_device[port]->netdev_ops->ndo_get_stats(cvm_oct_device[port]); - -out: - port++; - /* Poll the next port in a 50th of a second. - This spreads the polling of ports out a little bit */ - mod_timer(&cvm_oct_poll_timer, jiffies + HZ / 50); } /** @@ -678,6 +657,18 @@ static int __init cvm_oct_init_module(void) /* Initialize the device private structure. */ struct octeon_ethernet *priv = netdev_priv(dev); + hrtimer_init(&priv->tx_restart_timer, + CLOCK_MONOTONIC, + HRTIMER_MODE_REL); + priv->tx_restart_timer.function = cvm_oct_restart_tx; + + /* + * Default for 10GE 5000nS enough time to + * transmit about 100 64byte packtes. 1GE + * interfaces will get 50000nS below. 
+ */ + priv->tx_restart_interval = ktime_set(0, 5000); + dev->netdev_ops = &cvm_oct_pow_netdev_ops; priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED; priv->port = CVMX_PIP_NUM_INPUT_PORTS; @@ -757,6 +748,7 @@ static int __init cvm_oct_init_module(void) case CVMX_HELPER_INTERFACE_MODE_SGMII: dev->netdev_ops = &cvm_oct_sgmii_netdev_ops; + priv->tx_restart_interval = ktime_set(0, 50000); strcpy(dev->name, "eth%d"); break; @@ -768,6 +760,7 @@ static int __init cvm_oct_init_module(void) case CVMX_HELPER_INTERFACE_MODE_RGMII: case CVMX_HELPER_INTERFACE_MODE_GMII: dev->netdev_ops = &cvm_oct_rgmii_netdev_ops; + priv->tx_restart_interval = ktime_set(0, 50000); strcpy(dev->name, "eth%d"); break; } diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h index 208da27bc02d..203c6a920af5 100644 --- a/drivers/staging/octeon/octeon-ethernet.h +++ b/drivers/staging/octeon/octeon-ethernet.h @@ -31,6 +31,8 @@ #ifndef OCTEON_ETHERNET_H #define OCTEON_ETHERNET_H +#include + /** * This is the definition of the Ethernet driver's private * driver state stored in netdev_priv(dev). @@ -57,6 +59,8 @@ struct octeon_ethernet { uint64_t link_info; /* Called periodically to check link status */ void (*poll) (struct net_device *dev); + struct hrtimer tx_restart_timer; + ktime_t tx_restart_interval; }; /** -- cgit v1.2.2 From 3368c784bcf77124aaf39372e627016c36bd4472 Mon Sep 17 00:00:00 2001 From: David Daney Date: Thu, 7 Jan 2010 11:05:04 -0800 Subject: Staging: Octeon Ethernet: Convert to NAPI. Convert the driver to be a reasonably well behaved NAPI citizen. There is one NAPI instance per CPU shared between all input ports. As receive backlog increases, NAPI is scheduled on additional CPUs. Receive buffer refill code factored out so it can also be called from the periodic timer. This is needed to recover from temporary buffer starvation conditions. Signed-off-by: David Daney To: linux-mips@linux-mips.org To: gregkh@suse.de Patchwork: http://patchwork.linux-mips.org/patch/839/ Signed-off-by: Ralf Baechle --- drivers/staging/octeon/ethernet-defines.h | 18 -- drivers/staging/octeon/ethernet-rx.c | 300 +++++++++++++++++++----------- drivers/staging/octeon/ethernet-rx.h | 25 ++- drivers/staging/octeon/ethernet.c | 52 ++---- drivers/staging/octeon/octeon-ethernet.h | 3 + 5 files changed, 235 insertions(+), 163 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/octeon/ethernet-defines.h b/drivers/staging/octeon/ethernet-defines.h index 6b8065f594bf..9c4910e45d28 100644 --- a/drivers/staging/octeon/ethernet-defines.h +++ b/drivers/staging/octeon/ethernet-defines.h @@ -45,10 +45,6 @@ * Controls if the Octeon TCP/UDP checksum engine is used for packet * output. If this is zero, the kernel will perform the checksum in * software. - * USE_MULTICORE_RECEIVE - * Process receive interrupts on multiple cores. This spreads the network - * load across the first 8 processors. If ths is zero, only one core - * processes incomming packets. * USE_ASYNC_IOBDMA * Use asynchronous IO access to hardware. This uses Octeon's asynchronous * IOBDMAs to issue IO accesses without stalling. 
Set this to zero @@ -79,15 +75,8 @@ #define REUSE_SKBUFFS_WITHOUT_FREE 1 #endif -/* Max interrupts per second per core */ -#define INTERRUPT_LIMIT 10000 - -/* Don't limit the number of interrupts */ -/*#define INTERRUPT_LIMIT 0 */ #define USE_HW_TCPUDP_CHECKSUM 1 -#define USE_MULTICORE_RECEIVE 1 - /* Enable Random Early Dropping under load */ #define USE_RED 1 #define USE_ASYNC_IOBDMA (CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0) @@ -105,17 +94,10 @@ /* Use this to not have FPA frees control L2 */ /*#define DONT_WRITEBACK(x) 0 */ -/* Maximum number of packets to process per interrupt. */ -#define MAX_RX_PACKETS 120 /* Maximum number of SKBs to try to free per xmit packet. */ #define MAX_SKB_TO_FREE 10 #define MAX_OUT_QUEUE_DEPTH 1000 -#ifndef CONFIG_SMP -#undef USE_MULTICORE_RECEIVE -#define USE_MULTICORE_RECEIVE 0 -#endif - #define IP_PROTOCOL_TCP 6 #define IP_PROTOCOL_UDP 0x11 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c index f63459a96dad..b2e6ab6a3349 100644 --- a/drivers/staging/octeon/ethernet-rx.c +++ b/drivers/staging/octeon/ethernet-rx.c @@ -4,7 +4,7 @@ * Contact: support@caviumnetworks.com * This file is part of the OCTEON SDK * - * Copyright (c) 2003-2007 Cavium Networks + * Copyright (c) 2003-2010 Cavium Networks * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as @@ -27,12 +27,14 @@ #include #include #include +#include #include #include #include #include #include #include +#include #include #ifdef CONFIG_XFRM #include @@ -44,8 +46,9 @@ #include #include "ethernet-defines.h" -#include "octeon-ethernet.h" #include "ethernet-mem.h" +#include "ethernet-rx.h" +#include "octeon-ethernet.h" #include "ethernet-util.h" #include "cvmx-helper.h" @@ -57,56 +60,82 @@ #include "cvmx-gmxx-defs.h" -struct cvm_tasklet_wrapper { - struct tasklet_struct t; -}; +struct cvm_napi_wrapper { + struct napi_struct napi; +} ____cacheline_aligned_in_smp; -/* - * Aligning the tasklet_struct on cachline boundries seems to decrease - * throughput even though in theory it would reduce contantion on the - * cache lines containing the locks. - */ +static struct cvm_napi_wrapper cvm_oct_napi[NR_CPUS] __cacheline_aligned_in_smp; -static struct cvm_tasklet_wrapper cvm_oct_tasklet[NR_CPUS]; +struct cvm_oct_core_state { + int baseline_cores; + /* + * The number of additional cores that could be processing + * input packtes. + */ + atomic_t available_cores; + cpumask_t cpu_state; +} ____cacheline_aligned_in_smp; -/** - * Interrupt handler. The interrupt occurs whenever the POW - * transitions from 0->1 packets in our group. - * - * @cpl: - * @dev_id: - * @regs: - * Returns - */ -irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id) +static struct cvm_oct_core_state core_state __cacheline_aligned_in_smp; + +static void cvm_oct_enable_napi(void *_) { - /* Acknowledge the interrupt */ - if (INTERRUPT_LIMIT) - cvmx_write_csr(CVMX_POW_WQ_INT, 1 << pow_receive_group); - else - cvmx_write_csr(CVMX_POW_WQ_INT, 0x10001 << pow_receive_group); - preempt_disable(); - tasklet_schedule(&cvm_oct_tasklet[smp_processor_id()].t); - preempt_enable(); - return IRQ_HANDLED; + int cpu = smp_processor_id(); + napi_schedule(&cvm_oct_napi[cpu].napi); +} + +static void cvm_oct_enable_one_cpu(void) +{ + int v; + int cpu; + + /* Check to see if more CPUs are available for receive processing... */ + v = atomic_sub_if_positive(1, &core_state.available_cores); + if (v < 0) + return; + + /* ... 
if a CPU is available, Turn on NAPI polling for that CPU. */ + for_each_online_cpu(cpu) { + if (!cpu_test_and_set(cpu, core_state.cpu_state)) { + v = smp_call_function_single(cpu, cvm_oct_enable_napi, + NULL, 0); + if (v) + panic("Can't enable NAPI."); + break; + } + } +} + +static void cvm_oct_no_more_work(void) +{ + int cpu = smp_processor_id(); + + /* + * CPU zero is special. It always has the irq enabled when + * waiting for incoming packets. + */ + if (cpu == 0) { + enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group); + return; + } + + cpu_clear(cpu, core_state.cpu_state); + atomic_add(1, &core_state.available_cores); } -#ifdef CONFIG_NET_POLL_CONTROLLER /** - * This is called when the kernel needs to manually poll the - * device. For Octeon, this is simply calling the interrupt - * handler. We actually poll all the devices, not just the - * one supplied. + * Interrupt handler. The interrupt occurs whenever the POW + * has packets in our group. * - * @dev: Device to poll. Unused */ -void cvm_oct_poll_controller(struct net_device *dev) +static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id) { - preempt_disable(); - tasklet_schedule(&cvm_oct_tasklet[smp_processor_id()].t); - preempt_enable(); + /* Disable the IRQ and start napi_poll. */ + disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group); + cvm_oct_enable_napi(NULL); + + return IRQ_HANDLED; } -#endif /** * This is called on receive errors, and determines if the packet @@ -195,19 +224,19 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work) } /** - * Tasklet function that is scheduled on a core when an interrupt occurs. + * The NAPI poll function. * - * @unused: + * @napi: The NAPI instance, or null if called from cvm_oct_poll_controller + * @budget: Maximum number of packets to receive. 
*/ -void cvm_oct_tasklet_rx(unsigned long unused) +static int cvm_oct_napi_poll(struct napi_struct *napi, int budget) { - const int coreid = cvmx_get_core_num(); - uint64_t old_group_mask; - uint64_t old_scratch; - int rx_count = 0; - int number_to_free; - int num_freed; - int packet_not_copied; + const int coreid = cvmx_get_core_num(); + uint64_t old_group_mask; + uint64_t old_scratch; + int rx_count = 0; + int did_work_request = 0; + int packet_not_copied; /* Prefetch cvm_oct_device since we know we need it soon */ prefetch(cvm_oct_device); @@ -223,59 +252,63 @@ void cvm_oct_tasklet_rx(unsigned long unused) cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), (old_group_mask & ~0xFFFFull) | 1 << pow_receive_group); - if (USE_ASYNC_IOBDMA) + if (USE_ASYNC_IOBDMA) { cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT); + did_work_request = 1; + } - while (1) { + while (rx_count < budget) { struct sk_buff *skb = NULL; + struct sk_buff **pskb = NULL; int skb_in_hw; cvmx_wqe_t *work; - if (USE_ASYNC_IOBDMA) { + if (USE_ASYNC_IOBDMA && did_work_request) work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH); - } else { - if ((INTERRUPT_LIMIT == 0) - || likely(rx_count < MAX_RX_PACKETS)) - work = - cvmx_pow_work_request_sync - (CVMX_POW_NO_WAIT); - else - work = NULL; - } + else + work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT); + prefetch(work); - if (work == NULL) + did_work_request = 0; + if (work == NULL) { + union cvmx_pow_wq_int wq_int; + wq_int.u64 = 0; + wq_int.s.iq_dis = 1 << pow_receive_group; + wq_int.s.wq_int = 1 << pow_receive_group; + cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64); break; + } + pskb = (struct sk_buff **)(cvm_oct_get_buffer_ptr(work->packet_ptr) - sizeof(void *)); + prefetch(pskb); - /* - * Limit each core to processing MAX_RX_PACKETS - * packets without a break. This way the RX can't - * starve the TX task. - */ - if (USE_ASYNC_IOBDMA) { - - if ((INTERRUPT_LIMIT == 0) - || likely(rx_count < MAX_RX_PACKETS)) - cvmx_pow_work_request_async_nocheck - (CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT); - else { - cvmx_scratch_write64(CVMX_SCR_SCRATCH, - 0x8000000000000000ull); - cvmx_pow_tag_sw_null_nocheck(); - } + if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) { + cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT); + did_work_request = 1; + } + + if (rx_count == 0) { + /* + * First time through, see if there is enough + * work waiting to merit waking another + * CPU. 
+ */ + union cvmx_pow_wq_int_cntx counts; + int backlog; + int cores_in_use = core_state.baseline_cores - atomic_read(&core_state.available_cores); + counts.u64 = cvmx_read_csr(CVMX_POW_WQ_INT_CNTX(pow_receive_group)); + backlog = counts.s.iq_cnt + counts.s.ds_cnt; + if (backlog > budget * cores_in_use && napi != NULL) + cvm_oct_enable_one_cpu(); } skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1; if (likely(skb_in_hw)) { - skb = - *(struct sk_buff - **)(cvm_oct_get_buffer_ptr(work->packet_ptr) - - sizeof(void *)); + skb = *pskb; prefetch(&skb->head); prefetch(&skb->len); } prefetch(cvm_oct_device[work->ipprt]); - rx_count++; /* Immediately throw away all packets with receive errors */ if (unlikely(work->word2.snoip.rcv_error)) { if (cvm_oct_check_rcv_error(work)) @@ -391,6 +424,7 @@ void cvm_oct_tasklet_rx(unsigned long unused) #endif } netif_receive_skb(skb); + rx_count++; } else { /* Drop any packet received for a device that isn't up */ /* @@ -432,47 +466,93 @@ void cvm_oct_tasklet_rx(unsigned long unused) cvm_oct_free_work(work); } } - /* Restore the original POW group mask */ cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask); if (USE_ASYNC_IOBDMA) { /* Restore the scratch area */ cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch); } + cvm_oct_rx_refill_pool(0); - if (USE_SKBUFFS_IN_HW) { - /* Refill the packet buffer pool */ - number_to_free = - cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0); - - if (number_to_free > 0) { - cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, - -number_to_free); - num_freed = - cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, - CVMX_FPA_PACKET_POOL_SIZE, - number_to_free); - if (num_freed != number_to_free) { - cvmx_fau_atomic_add32 - (FAU_NUM_PACKET_BUFFERS_TO_FREE, - number_to_free - num_freed); - } - } + if (rx_count < budget && napi != NULL) { + /* No more work */ + napi_complete(napi); + cvm_oct_no_more_work(); } + return rx_count; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/** + * This is called when the kernel needs to manually poll the + * device. + * + * @dev: Device to poll. 
Unused + */ +void cvm_oct_poll_controller(struct net_device *dev) +{ + cvm_oct_napi_poll(NULL, 16); } +#endif void cvm_oct_rx_initialize(void) { int i; - /* Initialize all of the tasklets */ - for (i = 0; i < NR_CPUS; i++) - tasklet_init(&cvm_oct_tasklet[i].t, cvm_oct_tasklet_rx, 0); + struct net_device *dev_for_napi = NULL; + union cvmx_pow_wq_int_thrx int_thr; + union cvmx_pow_wq_int_pc int_pc; + + for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) { + if (cvm_oct_device[i]) { + dev_for_napi = cvm_oct_device[i]; + break; + } + } + + if (NULL == dev_for_napi) + panic("No net_devices were allocated."); + + if (max_rx_cpus > 1 && max_rx_cpus < num_online_cpus()) + atomic_set(&core_state.available_cores, max_rx_cpus); + else + atomic_set(&core_state.available_cores, num_online_cpus()); + core_state.baseline_cores = atomic_read(&core_state.available_cores); + + core_state.cpu_state = CPU_MASK_NONE; + for_each_possible_cpu(i) { + netif_napi_add(dev_for_napi, &cvm_oct_napi[i].napi, + cvm_oct_napi_poll, rx_napi_weight); + napi_enable(&cvm_oct_napi[i].napi); + } + /* Register an IRQ hander for to receive POW interrupts */ + i = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, + cvm_oct_do_interrupt, 0, "Ethernet", cvm_oct_device); + + if (i) + panic("Could not acquire Ethernet IRQ %d\n", + OCTEON_IRQ_WORKQ0 + pow_receive_group); + + disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group); + + int_thr.u64 = 0; + int_thr.s.tc_en = 1; + int_thr.s.tc_thr = 1; + /* Enable POW interrupt when our port has at least one packet */ + cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), int_thr.u64); + + int_pc.u64 = 0; + int_pc.s.pc_thr = 5; + cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64); + + + /* Scheduld NAPI now. This will indirectly enable interrupts. */ + cvm_oct_enable_one_cpu(); } void cvm_oct_rx_shutdown(void) { int i; - /* Shutdown all of the tasklets */ - for (i = 0; i < NR_CPUS; i++) - tasklet_kill(&cvm_oct_tasklet[i].t); + /* Shutdown all of the NAPIs */ + for_each_possible_cpu(i) + netif_napi_del(&cvm_oct_napi[i].napi); } diff --git a/drivers/staging/octeon/ethernet-rx.h b/drivers/staging/octeon/ethernet-rx.h index a9b72b87a7a6..a0743b85d54e 100644 --- a/drivers/staging/octeon/ethernet-rx.h +++ b/drivers/staging/octeon/ethernet-rx.h @@ -24,10 +24,29 @@ * This file may also be available under a different license from Cavium. 
* Contact Cavium Networks for more information *********************************************************************/ +#include "cvmx-fau.h" -irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id); void cvm_oct_poll_controller(struct net_device *dev); -void cvm_oct_tasklet_rx(unsigned long unused); - void cvm_oct_rx_initialize(void); void cvm_oct_rx_shutdown(void); + +static inline void cvm_oct_rx_refill_pool(int fill_threshold) +{ + int number_to_free; + int num_freed; + /* Refill the packet buffer pool */ + number_to_free = + cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0); + + if (number_to_free > fill_threshold) { + cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, + -number_to_free); + num_freed = cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, + CVMX_FPA_PACKET_POOL_SIZE, + number_to_free); + if (num_freed != number_to_free) { + cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, + number_to_free - num_freed); + } + } +} diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c index 973178a80c93..9f5b7419e777 100644 --- a/drivers/staging/octeon/ethernet.c +++ b/drivers/staging/octeon/ethernet.c @@ -104,6 +104,16 @@ MODULE_PARM_DESC(pow_send_list, "\n" "\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n" "\tusing the pow_send_group."); +int max_rx_cpus = -1; +module_param(max_rx_cpus, int, 0444); +MODULE_PARM_DESC(max_rx_cpus, "\n" + "\t\tThe maximum number of CPUs to use for packet reception.\n" + "\t\tUse -1 to use all available CPUs."); + +int rx_napi_weight = 32; +module_param(rx_napi_weight, int, 0444); +MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter."); + /* * The offset from mac_addr_base that should be used for the next port * that is configured. By convention, if any mgmt ports exist on the @@ -148,6 +158,15 @@ static void cvm_do_timer(unsigned long arg) mod_timer(&cvm_oct_poll_timer, jiffies + HZ/50); } else { port = 0; + /* + * FPA 0 may have been drained, try to refill it if we + * need more than num_packet_buffers / 2, otherwise + * normal receive processing will refill it. If it + * were drained, no packets could be received so + * cvm_oct_napi_poll would never be invoked to do the + * refill. + */ + cvm_oct_rx_refill_pool(num_packet_buffers / 2); /* * All ports have been polled. Start the next iteration through * the ports in one second. @@ -161,7 +180,6 @@ static void cvm_do_timer(unsigned long arg) */ static __init void cvm_oct_configure_common_hw(void) { - int r; /* Setup the FPA */ cvmx_fpa_enable(); cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE, @@ -176,17 +194,6 @@ static __init void cvm_oct_configure_common_hw(void) cvmx_helper_setup_red(num_packet_buffers / 4, num_packet_buffers / 8); - /* Register an IRQ hander for to receive POW interrupts */ - r = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, - cvm_oct_do_interrupt, IRQF_SHARED, "Ethernet", - cvm_oct_device); - -#if defined(CONFIG_SMP) && 0 - if (USE_MULTICORE_RECEIVE) { - irq_set_affinity(OCTEON_IRQ_WORKQ0 + pow_receive_group, - cpu_online_mask); - } -#endif } /** @@ -616,7 +623,6 @@ static int __init cvm_oct_init_module(void) cvm_oct_mac_addr_offset = 0; cvm_oct_proc_initialize(); - cvm_oct_rx_initialize(); cvm_oct_configure_common_hw(); cvmx_helper_initialize_packet_io_global(); @@ -781,25 +787,7 @@ static int __init cvm_oct_init_module(void) } } - if (INTERRUPT_LIMIT) { - /* - * Set the POW timer rate to give an interrupt at most - * INTERRUPT_LIMIT times per second. 
- */ - cvmx_write_csr(CVMX_POW_WQ_INT_PC, - octeon_bootinfo->eclock_hz / (INTERRUPT_LIMIT * - 16 * 256) << 8); - - /* - * Enable POW timer interrupt. It will count when - * there are packets available. - */ - cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), - 0x1ful << 24); - } else { - /* Enable POW interrupt when our port has at least one packet */ - cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0x1001); - } + cvm_oct_rx_initialize(); /* Enable the poll timer for checking RGMII status */ init_timer(&cvm_oct_poll_timer); diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h index 203c6a920af5..40b695615431 100644 --- a/drivers/staging/octeon/octeon-ethernet.h +++ b/drivers/staging/octeon/octeon-ethernet.h @@ -98,4 +98,7 @@ extern int pow_receive_group; extern char pow_send_list[]; extern struct net_device *cvm_oct_device[]; +extern int max_rx_cpus; +extern int rx_napi_weight; + #endif -- cgit v1.2.2 From 924cc2680fbe181066ec138d369691d28d913ea2 Mon Sep 17 00:00:00 2001 From: David Daney Date: Thu, 7 Jan 2010 11:05:05 -0800 Subject: Staging: Octeon Ethernet: Enable scatter-gather. Octeon ethernet hardware can handle NETIF_F_SG, so we enable it. A gather list of up to six fragments will fit in the SKB's CB structure, so no extra memory is required. If a SKB has more than six fragments, we must linearize it. Signed-off-by: David Daney To: linux-mips@linux-mips.org To: gregkh@suse.de Patchwork: http://patchwork.linux-mips.org/patch/838/ Signed-off-by: Ralf Baechle --- drivers/staging/octeon/ethernet-tx.c | 57 +++++++++++++++++++++++++++++++----- drivers/staging/octeon/ethernet.c | 7 +++-- 2 files changed, 55 insertions(+), 9 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c index 05b58f8b58fd..bc67e416e421 100644 --- a/drivers/staging/octeon/ethernet-tx.c +++ b/drivers/staging/octeon/ethernet-tx.c @@ -53,6 +53,8 @@ #include "cvmx-gmxx-defs.h" +#define CVM_OCT_SKB_CB(skb) ((u64 *)((skb)->cb)) + /* * You can define GET_SKBUFF_QOS() to override how the skbuff output * function determines which output queue is used. The default @@ -121,6 +123,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) uint64_t old_scratch; uint64_t old_scratch2; int qos; + int i; enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type; struct octeon_ethernet *priv = netdev_priv(dev); struct sk_buff *to_free_list; @@ -170,6 +173,28 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) MAX_SKB_TO_FREE); } + /* + * We have space for 6 segment pointers, If there will be more + * than that, we must linearize. 
+ */ + if (unlikely(skb_shinfo(skb)->nr_frags > 5)) { + if (unlikely(__skb_linearize(skb))) { + queue_type = QUEUE_DROP; + if (USE_ASYNC_IOBDMA) { + /* Get the number of skbuffs in use by the hardware */ + CVMX_SYNCIOBDMA; + skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH); + } else { + /* Get the number of skbuffs in use by the hardware */ + skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4, + MAX_SKB_TO_FREE); + } + skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau + qos * 4); + spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags); + goto skip_xmit; + } + } + /* * The CN3XXX series of parts has an errata (GMX-401) which * causes the GMX block to hang if a collision occurs towards @@ -198,13 +223,6 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) } } - /* Build the PKO buffer pointer */ - hw_buffer.u64 = 0; - hw_buffer.s.addr = cvmx_ptr_to_phys(skb->data); - hw_buffer.s.pool = 0; - hw_buffer.s.size = - (unsigned long)skb_end_pointer(skb) - (unsigned long)skb->head; - /* Build the PKO command */ pko_command.u64 = 0; pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */ @@ -215,6 +233,31 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) pko_command.s.dontfree = 1; pko_command.s.reg0 = priv->fau + qos * 4; + + /* Build the PKO buffer pointer */ + hw_buffer.u64 = 0; + if (skb_shinfo(skb)->nr_frags == 0) { + hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data); + hw_buffer.s.pool = 0; + hw_buffer.s.size = skb->len; + } else { + hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data); + hw_buffer.s.pool = 0; + hw_buffer.s.size = skb_headlen(skb); + CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64; + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i; + hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)(page_address(fs->page) + fs->page_offset)); + hw_buffer.s.size = fs->size; + CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64; + } + hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)CVM_OCT_SKB_CB(skb)); + hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1; + pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1; + pko_command.s.gather = 1; + goto dont_put_skbuff_in_hw; + } + /* * See if we can put this skb in the FPA pool. 
Any strange * behavior from the Linux networking stack will most likely diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c index 9f5b7419e777..9d632020b9ee 100644 --- a/drivers/staging/octeon/ethernet.c +++ b/drivers/staging/octeon/ethernet.c @@ -484,8 +484,11 @@ int cvm_oct_common_init(struct net_device *dev) && (always_use_pow || strstr(pow_send_list, dev->name))) priv->queue = -1; - if (priv->queue != -1 && USE_HW_TCPUDP_CHECKSUM) - dev->features |= NETIF_F_IP_CSUM; + if (priv->queue != -1) { + dev->features |= NETIF_F_SG; + if (USE_HW_TCPUDP_CHECKSUM) + dev->features |= NETIF_F_IP_CSUM; + } /* We do our own locking, Linux doesn't need to */ dev->features |= NETIF_F_LLTX; -- cgit v1.2.2 From 081f6749ae33f72b4fafea4c02976e163ef6ef37 Mon Sep 17 00:00:00 2001 From: David Daney Date: Thu, 7 Jan 2010 11:05:06 -0800 Subject: Staging: Octeon Ethernet: Use constants from in.h Signed-off-by: David Daney To: linux-mips@linux-mips.org To: gregkh@suse.de Patchwork: http://patchwork.linux-mips.org/patch/837/ Signed-off-by: Ralf Baechle --- drivers/staging/octeon/ethernet-defines.h | 3 --- drivers/staging/octeon/ethernet-tx.c | 8 ++++---- 2 files changed, 4 insertions(+), 7 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/octeon/ethernet-defines.h b/drivers/staging/octeon/ethernet-defines.h index 9c4910e45d28..00a8561726ba 100644 --- a/drivers/staging/octeon/ethernet-defines.h +++ b/drivers/staging/octeon/ethernet-defines.h @@ -98,9 +98,6 @@ #define MAX_SKB_TO_FREE 10 #define MAX_OUT_QUEUE_DEPTH 1000 -#define IP_PROTOCOL_TCP 6 -#define IP_PROTOCOL_UDP 0x11 - #define FAU_NUM_PACKET_BUFFERS_TO_FREE (CVMX_FAU_REG_END - sizeof(uint32_t)) #define TOTAL_NUMBER_OF_PORTS (CVMX_PIP_NUM_INPUT_PORTS+1) diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c index bc67e416e421..62258bd31456 100644 --- a/drivers/staging/octeon/ethernet-tx.c +++ b/drivers/staging/octeon/ethernet-tx.c @@ -359,8 +359,8 @@ dont_put_skbuff_in_hw: if (USE_HW_TCPUDP_CHECKSUM && (skb->protocol == htons(ETH_P_IP)) && (ip_hdr(skb)->version == 4) && (ip_hdr(skb)->ihl == 5) && ((ip_hdr(skb)->frag_off == 0) || (ip_hdr(skb)->frag_off == 1 << 14)) - && ((ip_hdr(skb)->protocol == IP_PROTOCOL_TCP) - || (ip_hdr(skb)->protocol == IP_PROTOCOL_UDP))) { + && ((ip_hdr(skb)->protocol == IPPROTO_TCP) + || (ip_hdr(skb)->protocol == IPPROTO_UDP))) { /* Use hardware checksum calc */ pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1; } @@ -550,8 +550,8 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev) work->word2.s.dec_ipcomp = 0; /* FIXME */ #endif work->word2.s.tcp_or_udp = - (ip_hdr(skb)->protocol == IP_PROTOCOL_TCP) - || (ip_hdr(skb)->protocol == IP_PROTOCOL_UDP); + (ip_hdr(skb)->protocol == IPPROTO_TCP) + || (ip_hdr(skb)->protocol == IPPROTO_UDP); #if 0 /* FIXME */ work->word2.s.dec_ipsec = 0; -- cgit v1.2.2 From f7a904dffe30a02636053d8022498ced7e44d31c Mon Sep 17 00:00:00 2001 From: Wu Zhangjin Date: Mon, 4 Jan 2010 17:16:51 +0800 Subject: MIPS: Loongson: Change the Email address of Wu Zhangjin Currently wuzj@lemote.com is not usable; change it to wuzhangjin@gmail.com. 
Signed-off-by: Wu Zhangjin Cc: linux-mips@linux-mips.org Cc: yanh@lemote.com Cc: huhb@lemote.com Cc: zhangfx@lemote.com Patchwork: http://patchwork.linux-mips.org/patch/829/ Signed-off-by: Ralf Baechle --- drivers/staging/sm7xx/smtc2d.c | 2 +- drivers/staging/sm7xx/smtc2d.h | 2 +- drivers/staging/sm7xx/smtcfb.c | 2 +- drivers/staging/sm7xx/smtcfb.h | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/sm7xx/smtc2d.c b/drivers/staging/sm7xx/smtc2d.c index 133b86c6a678..2fff0a0052d1 100644 --- a/drivers/staging/sm7xx/smtc2d.c +++ b/drivers/staging/sm7xx/smtc2d.c @@ -5,7 +5,7 @@ * Author: Boyod boyod.yang@siliconmotion.com.cn * * Copyright (C) 2009 Lemote, Inc. - * Author: Wu Zhangjin, wuzj@lemote.com + * Author: Wu Zhangjin, wuzhangjin@gmail.com * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for diff --git a/drivers/staging/sm7xx/smtc2d.h b/drivers/staging/sm7xx/smtc2d.h index 38d0c335322b..02b4fa29136c 100644 --- a/drivers/staging/sm7xx/smtc2d.h +++ b/drivers/staging/sm7xx/smtc2d.h @@ -5,7 +5,7 @@ * Author: Ge Wang, gewang@siliconmotion.com * * Copyright (C) 2009 Lemote, Inc. - * Author: Wu Zhangjin, wuzj@lemote.com + * Author: Wu Zhangjin, wuzhangjin@gmail.com * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for diff --git a/drivers/staging/sm7xx/smtcfb.c b/drivers/staging/sm7xx/smtcfb.c index 161dbc9c1397..a4f6f49aef48 100644 --- a/drivers/staging/sm7xx/smtcfb.c +++ b/drivers/staging/sm7xx/smtcfb.c @@ -6,7 +6,7 @@ * Boyod boyod.yang@siliconmotion.com.cn * * Copyright (C) 2009 Lemote, Inc. - * Author: Wu Zhangjin, wuzj@lemote.com + * Author: Wu Zhangjin, wuzhangjin@gmail.com * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for diff --git a/drivers/staging/sm7xx/smtcfb.h b/drivers/staging/sm7xx/smtcfb.h index 7f2c34138215..7ee565c2c952 100644 --- a/drivers/staging/sm7xx/smtcfb.h +++ b/drivers/staging/sm7xx/smtcfb.h @@ -6,7 +6,7 @@ * Boyod boyod.yang@siliconmotion.com.cn * * Copyright (C) 2009 Lemote, Inc. - * Author: Wu Zhangjin, wuzj@lemote.com + * Author: Wu Zhangjin, wuzhangjin@gmail.com * * This file is subject to the terms and conditions of the GNU General Public * License. 
See the file COPYING in the main directory of this archive for -- cgit v1.2.2 From 1d08f00d576c62f1c7a96900a14648df33b3939a Mon Sep 17 00:00:00 2001 From: David Daney Date: Mon, 15 Feb 2010 12:13:16 -0800 Subject: Staging: octeon: remove unneeded includes Signed-off-by: David Daney To: linux-mips@linux-mips.org To: netdev@vger.kernel.org To: gregkh@suse.de Patchwork: http://patchwork.linux-mips.org/patch/964/ Signed-off-by: Ralf Baechle --- drivers/staging/octeon/ethernet-mdio.h | 1 - drivers/staging/octeon/ethernet-rgmii.c | 1 - drivers/staging/octeon/ethernet-sgmii.c | 1 - drivers/staging/octeon/ethernet-spi.c | 1 - drivers/staging/octeon/ethernet-xaui.c | 1 - drivers/staging/octeon/ethernet.c | 1 - 6 files changed, 6 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/octeon/ethernet-mdio.h b/drivers/staging/octeon/ethernet-mdio.h index 55d0614a7cd9..a417d4fce12c 100644 --- a/drivers/staging/octeon/ethernet-mdio.h +++ b/drivers/staging/octeon/ethernet-mdio.h @@ -32,7 +32,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c index 3820f1ec11d1..f90d46ed5640 100644 --- a/drivers/staging/octeon/ethernet-rgmii.c +++ b/drivers/staging/octeon/ethernet-rgmii.c @@ -26,7 +26,6 @@ **********************************************************************/ #include #include -#include #include #include diff --git a/drivers/staging/octeon/ethernet-sgmii.c b/drivers/staging/octeon/ethernet-sgmii.c index 6061d01eca2d..2d8589eb461e 100644 --- a/drivers/staging/octeon/ethernet-sgmii.c +++ b/drivers/staging/octeon/ethernet-sgmii.c @@ -26,7 +26,6 @@ **********************************************************************/ #include #include -#include #include #include diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c index 00dc0f4bad19..b58b8971f939 100644 --- a/drivers/staging/octeon/ethernet-spi.c +++ b/drivers/staging/octeon/ethernet-spi.c @@ -26,7 +26,6 @@ **********************************************************************/ #include #include -#include #include #include diff --git a/drivers/staging/octeon/ethernet-xaui.c b/drivers/staging/octeon/ethernet-xaui.c index ee3dc41b2c53..3fca1cc31ed8 100644 --- a/drivers/staging/octeon/ethernet-xaui.c +++ b/drivers/staging/octeon/ethernet-xaui.c @@ -26,7 +26,6 @@ **********************************************************************/ #include #include -#include #include #include diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c index 9d632020b9ee..5afece0216ca 100644 --- a/drivers/staging/octeon/ethernet.c +++ b/drivers/staging/octeon/ethernet.c @@ -29,7 +29,6 @@ #include #include #include -#include #include #include -- cgit v1.2.2 From f8c2648666b5a1b5ba9bbb662ae569bafd3cc830 Mon Sep 17 00:00:00 2001 From: David Daney Date: Mon, 15 Feb 2010 12:13:17 -0800 Subject: Staging: Octeon: Run phy bus accesses on a workqueue. When directly accessing a phy, we must acquire the mdio bus lock. To do that we cannot be in interrupt context, so we need to move these operations to a workqueue. 
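[Editor's note] For readers unfamiliar with the pattern this commit applies, the general shape of "defer PHY access from hard-IRQ context to a workqueue" is sketched below. All names here (my_wq, my_priv, my_port_poll, my_isr) are illustrative placeholders, not the driver's real symbols; the actual change is in the diff that follows.

/*
 * Minimal sketch of the interrupt-to-workqueue pattern, assuming
 * hypothetical names.  Only the control flow is meant to match the
 * patch; see the real diff for the Octeon-specific details.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

struct my_priv {
	struct work_struct port_work;
};

/*
 * Runs in process context, so sleeping locks such as the MDIO bus
 * mutex may safely be taken here.
 */
static void my_port_poll(struct work_struct *work)
{
	struct my_priv *priv = container_of(work, struct my_priv, port_work);

	/* ... poll the PHY, e.g. via phy_read()/phy_write() ... */
	(void)priv;
}

/* Hard-IRQ context: cannot sleep, so defer the PHY access. */
static irqreturn_t my_isr(int irq, void *dev_id)
{
	struct my_priv *priv = dev_id;

	queue_work(my_wq, &priv->port_work);
	return IRQ_HANDLED;
}

static int my_init(struct my_priv *priv)
{
	my_wq = create_singlethread_workqueue("my_wq");
	if (!my_wq)
		return -ENOMEM;
	INIT_WORK(&priv->port_work, my_port_poll);
	return 0;
}

In the real patch the poll routine additionally decides at runtime whether to take the MDIO bus mutex (when priv->phydev is set) or fall back to the driver's global register spinlock, as the diff below shows.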
Signed-off-by: David Daney To: linux-mips@linux-mips.org To: netdev@vger.kernel.org To: gregkh@suse.de Patchwork: http://patchwork.linux-mips.org/patch/965/ Signed-off-by: Ralf Baechle --- drivers/staging/octeon/ethernet-rgmii.c | 55 +++++++++++---- drivers/staging/octeon/ethernet.c | 113 +++++++++++++++++-------------- drivers/staging/octeon/octeon-ethernet.h | 4 ++ 3 files changed, 109 insertions(+), 63 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c index f90d46ed5640..a0d4d4b98bdc 100644 --- a/drivers/staging/octeon/ethernet-rgmii.c +++ b/drivers/staging/octeon/ethernet-rgmii.c @@ -26,6 +26,7 @@ **********************************************************************/ #include #include +#include #include #include @@ -47,14 +48,20 @@ static int number_rgmii_ports; static void cvm_oct_rgmii_poll(struct net_device *dev) { struct octeon_ethernet *priv = netdev_priv(dev); - unsigned long flags; + unsigned long flags = 0; cvmx_helper_link_info_t link_info; + int use_global_register_lock = (priv->phydev == NULL); - /* - * Take the global register lock since we are going to touch - * registers that affect more than one port. - */ - spin_lock_irqsave(&global_register_lock, flags); + BUG_ON(in_interrupt()); + if (use_global_register_lock) { + /* + * Take the global register lock since we are going to + * touch registers that affect more than one port. + */ + spin_lock_irqsave(&global_register_lock, flags); + } else { + mutex_lock(&priv->phydev->bus->mdio_lock); + } link_info = cvmx_helper_link_get(priv->port); if (link_info.u64 == priv->link_info) { @@ -114,7 +121,11 @@ static void cvm_oct_rgmii_poll(struct net_device *dev) dev->name); } } - spin_unlock_irqrestore(&global_register_lock, flags); + + if (use_global_register_lock) + spin_unlock_irqrestore(&global_register_lock, flags); + else + mutex_unlock(&priv->phydev->bus->mdio_lock); return; } @@ -150,7 +161,12 @@ static void cvm_oct_rgmii_poll(struct net_device *dev) link_info = cvmx_helper_link_autoconf(priv->port); priv->link_info = link_info.u64; } - spin_unlock_irqrestore(&global_register_lock, flags); + + if (use_global_register_lock) + spin_unlock_irqrestore(&global_register_lock, flags); + else { + mutex_unlock(&priv->phydev->bus->mdio_lock); + } if (priv->phydev == NULL) { /* Tell core. 
*/ @@ -212,8 +228,11 @@ static irqreturn_t cvm_oct_rgmii_rml_interrupt(int cpl, void *dev_id) struct net_device *dev = cvm_oct_device[cvmx_helper_get_ipd_port (interface, index)]; - if (dev) - cvm_oct_rgmii_poll(dev); + struct octeon_ethernet *priv = netdev_priv(dev); + + if (dev && !atomic_read(&cvm_oct_poll_queue_stopping)) + queue_work(cvm_oct_poll_queue, &priv->port_work); + gmx_rx_int_reg.u64 = 0; gmx_rx_int_reg.s.phy_dupx = 1; gmx_rx_int_reg.s.phy_link = 1; @@ -251,8 +270,11 @@ static irqreturn_t cvm_oct_rgmii_rml_interrupt(int cpl, void *dev_id) struct net_device *dev = cvm_oct_device[cvmx_helper_get_ipd_port (interface, index)]; - if (dev) - cvm_oct_rgmii_poll(dev); + struct octeon_ethernet *priv = netdev_priv(dev); + + if (dev && !atomic_read(&cvm_oct_poll_queue_stopping)) + queue_work(cvm_oct_poll_queue, &priv->port_work); + gmx_rx_int_reg.u64 = 0; gmx_rx_int_reg.s.phy_dupx = 1; gmx_rx_int_reg.s.phy_link = 1; @@ -301,6 +323,12 @@ int cvm_oct_rgmii_stop(struct net_device *dev) return 0; } +static void cvm_oct_rgmii_immediate_poll(struct work_struct *work) +{ + struct octeon_ethernet *priv = container_of(work, struct octeon_ethernet, port_work); + cvm_oct_rgmii_poll(cvm_oct_device[priv->port]); +} + int cvm_oct_rgmii_init(struct net_device *dev) { struct octeon_ethernet *priv = netdev_priv(dev); @@ -308,7 +336,7 @@ int cvm_oct_rgmii_init(struct net_device *dev) cvm_oct_common_init(dev); dev->netdev_ops->ndo_stop(dev); - + INIT_WORK(&priv->port_work, cvm_oct_rgmii_immediate_poll); /* * Due to GMX errata in CN3XXX series chips, it is necessary * to take the link down immediately when the PHY changes @@ -396,4 +424,5 @@ void cvm_oct_rgmii_uninit(struct net_device *dev) number_rgmii_ports--; if (number_rgmii_ports == 0) free_irq(OCTEON_IRQ_RML, &number_rgmii_ports); + cancel_work_sync(&priv->port_work); } diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c index 5afece0216ca..1771c1035a3c 100644 --- a/drivers/staging/octeon/ethernet.c +++ b/drivers/staging/octeon/ethernet.c @@ -123,9 +123,16 @@ MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter."); static unsigned int cvm_oct_mac_addr_offset; /** - * Periodic timer to check auto negotiation + * cvm_oct_poll_queue - Workqueue for polling operations. */ -static struct timer_list cvm_oct_poll_timer; +struct workqueue_struct *cvm_oct_poll_queue; + +/** + * cvm_oct_poll_queue_stopping - flag to indicate polling should stop. + * + * Set to one right before cvm_oct_poll_queue is destroyed. + */ +atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0); /** * Array of every ethernet device owned by this driver indexed by @@ -133,47 +140,39 @@ static struct timer_list cvm_oct_poll_timer; */ struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS]; -/** - * Periodic timer tick for slow management operations - * - * @arg: Device to check - */ -static void cvm_do_timer(unsigned long arg) +static void cvm_oct_rx_refill_worker(struct work_struct *work); +static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker); + +static void cvm_oct_rx_refill_worker(struct work_struct *work) { - static int port; - if (port < CVMX_PIP_NUM_INPUT_PORTS) { - if (cvm_oct_device[port]) { - struct octeon_ethernet *priv = netdev_priv(cvm_oct_device[port]); - if (priv->poll) - priv->poll(cvm_oct_device[port]); - cvm_oct_free_tx_skbs(priv); - cvm_oct_device[port]->netdev_ops->ndo_get_stats(cvm_oct_device[port]); - } - port++; - /* - * Poll the next port in a 50th of a second. 
This - * spreads the polling of ports out a little bit. - */ - mod_timer(&cvm_oct_poll_timer, jiffies + HZ/50); - } else { - port = 0; - /* - * FPA 0 may have been drained, try to refill it if we - * need more than num_packet_buffers / 2, otherwise - * normal receive processing will refill it. If it - * were drained, no packets could be received so - * cvm_oct_napi_poll would never be invoked to do the - * refill. - */ - cvm_oct_rx_refill_pool(num_packet_buffers / 2); - /* - * All ports have been polled. Start the next iteration through - * the ports in one second. - */ - mod_timer(&cvm_oct_poll_timer, jiffies + HZ); - } + /* + * FPA 0 may have been drained, try to refill it if we need + * more than num_packet_buffers / 2, otherwise normal receive + * processing will refill it. If it were drained, no packets + * could be received so cvm_oct_napi_poll would never be + * invoked to do the refill. + */ + cvm_oct_rx_refill_pool(num_packet_buffers / 2); + + if (!atomic_read(&cvm_oct_poll_queue_stopping)) + queue_delayed_work(cvm_oct_poll_queue, + &cvm_oct_rx_refill_work, HZ); } +static void cvm_oct_tx_clean_worker(struct work_struct *work) +{ + struct octeon_ethernet *priv = container_of(work, + struct octeon_ethernet, + tx_clean_work.work); + + if (priv->poll) + priv->poll(cvm_oct_device[priv->port]); + cvm_oct_free_tx_skbs(priv); + cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(cvm_oct_device[priv->port]); + if (!atomic_read(&cvm_oct_poll_queue_stopping)) + queue_delayed_work(cvm_oct_poll_queue, &priv->tx_clean_work, HZ); + } + /** * Configure common hardware for all interfaces */ @@ -624,6 +623,12 @@ static int __init cvm_oct_init_module(void) else cvm_oct_mac_addr_offset = 0; + cvm_oct_poll_queue = create_singlethread_workqueue("octeon-ethernet"); + if (cvm_oct_poll_queue == NULL) { + pr_err("octeon-ethernet: Cannot create workqueue"); + return -ENOMEM; + } + cvm_oct_proc_initialize(); cvm_oct_configure_common_hw(); @@ -719,7 +724,9 @@ static int __init cvm_oct_init_module(void) /* Initialize the device private structure. 
*/ priv = netdev_priv(dev); - memset(priv, 0, sizeof(struct octeon_ethernet)); + + INIT_DELAYED_WORK(&priv->tx_clean_work, + cvm_oct_tx_clean_worker); priv->imode = imode; priv->port = port; @@ -785,17 +792,15 @@ static int __init cvm_oct_init_module(void) fau -= cvmx_pko_get_num_queues(priv->port) * sizeof(uint32_t); + queue_delayed_work(cvm_oct_poll_queue, + &priv->tx_clean_work, HZ); } } } cvm_oct_rx_initialize(); - /* Enable the poll timer for checking RGMII status */ - init_timer(&cvm_oct_poll_timer); - cvm_oct_poll_timer.data = 0; - cvm_oct_poll_timer.function = cvm_do_timer; - mod_timer(&cvm_oct_poll_timer, jiffies + HZ); + queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ); return 0; } @@ -817,20 +822,28 @@ static void __exit cvm_oct_cleanup_module(void) /* Free the interrupt handler */ free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device); - del_timer(&cvm_oct_poll_timer); + atomic_inc_return(&cvm_oct_poll_queue_stopping); + cancel_delayed_work_sync(&cvm_oct_rx_refill_work); + cvm_oct_rx_shutdown(); cvmx_pko_disable(); /* Free the ethernet devices */ for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) { if (cvm_oct_device[port]) { - cvm_oct_tx_shutdown(cvm_oct_device[port]); - unregister_netdev(cvm_oct_device[port]); - kfree(cvm_oct_device[port]); + struct net_device *dev = cvm_oct_device[port]; + struct octeon_ethernet *priv = netdev_priv(dev); + cancel_delayed_work_sync(&priv->tx_clean_work); + + cvm_oct_tx_shutdown(dev); + unregister_netdev(dev); + kfree(dev); cvm_oct_device[port] = NULL; } } + destroy_workqueue(cvm_oct_poll_queue); + cvmx_pko_shutdown(); cvm_oct_proc_shutdown(); diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h index 40b695615431..8d0921061dac 100644 --- a/drivers/staging/octeon/octeon-ethernet.h +++ b/drivers/staging/octeon/octeon-ethernet.h @@ -61,6 +61,8 @@ struct octeon_ethernet { void (*poll) (struct net_device *dev); struct hrtimer tx_restart_timer; ktime_t tx_restart_interval; + struct delayed_work tx_clean_work; + struct work_struct port_work; /* may be unused. */ }; /** @@ -97,6 +99,8 @@ extern int pow_send_group; extern int pow_receive_group; extern char pow_send_list[]; extern struct net_device *cvm_oct_device[]; +extern struct workqueue_struct *cvm_oct_poll_queue; +extern atomic_t cvm_oct_poll_queue_stopping; extern int max_rx_cpus; extern int rx_napi_weight; -- cgit v1.2.2 From 4898c560103fb8075c10a8e9d70e0ca26873075e Mon Sep 17 00:00:00 2001 From: David Daney Date: Mon, 15 Feb 2010 15:06:47 -0800 Subject: Staging: Octeon: Free transmit SKBs in a timely manner If we wait for the once-per-second cleanup to free transmit SKBs, sockets with small transmit buffer sizes might spend most of their time blocked waiting for the cleanup. Normally we do a cleanup for each transmitted packet. We add a watchdog type timer so that we also schedule a timeout for 150uS after a packet is transmitted. The watchdog is reset for each transmitted packet, so for high packet rates, it never expires. At these high rates, the cleanups are done for each packet so the extra watchdog initiated cleanups are neither needed nor triggered. Signed-off-by: David Daney To: linux-mips@linux-mips.org To: netdev@vger.kernel.org To: gregkh@suse.de Cc: Eric Dumazet Patchwork: http://patchwork.linux-mips.org/patch/968/ Signed-off-by: Ralf Baechle This version has spelling and comment changes based on feedback from Eric Dumazet. 
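[Editor's note] As a rough illustration of the watchdog idea described above — written with an ordinary kernel timer and made-up names, since a jiffy-resolution software timer cannot express a 150 microsecond timeout, which is exactly why the patch programs the Octeon CIU one-shot hardware timer instead — the re-arm-per-packet pattern looks like this:

/*
 * Sketch only; names and the use of a software timer are assumptions
 * for illustration.  The patch itself uses CVMX_CIU_TIMX(1).
 */
#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list tx_clean_watchdog;

static void tx_clean_expired(unsigned long data)
{
	/* Transmission has gone quiet; free whatever TX skbs remain. */
}

static void kick_tx_clean_watchdog(void)
{
	/*
	 * Called after every transmitted packet.  At high packet rates the
	 * deadline keeps moving forward and the timer never fires, because
	 * each transmit already does its own cleanup; when traffic stops,
	 * it expires once and frees the stragglers.
	 */
	mod_timer(&tx_clean_watchdog, jiffies + usecs_to_jiffies(150));
}

static void tx_clean_watchdog_init(void)
{
	setup_timer(&tx_clean_watchdog, tx_clean_expired, 0);
}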
--- drivers/staging/octeon/Kconfig | 1 - drivers/staging/octeon/ethernet-defines.h | 5 +- drivers/staging/octeon/ethernet-tx.c | 137 ++++++++++++++++++++++++------ drivers/staging/octeon/ethernet-tx.h | 6 +- drivers/staging/octeon/ethernet.c | 47 +++++----- drivers/staging/octeon/octeon-ethernet.h | 9 +- 6 files changed, 142 insertions(+), 63 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/octeon/Kconfig b/drivers/staging/octeon/Kconfig index 579b8f129e6e..638ad6b35891 100644 --- a/drivers/staging/octeon/Kconfig +++ b/drivers/staging/octeon/Kconfig @@ -3,7 +3,6 @@ config OCTEON_ETHERNET depends on CPU_CAVIUM_OCTEON select PHYLIB select MDIO_OCTEON - select HIGH_RES_TIMERS help This driver supports the builtin ethernet ports on Cavium Networks' products in the Octeon family. This driver supports the diff --git a/drivers/staging/octeon/ethernet-defines.h b/drivers/staging/octeon/ethernet-defines.h index 00a8561726ba..6a2cd50a17df 100644 --- a/drivers/staging/octeon/ethernet-defines.h +++ b/drivers/staging/octeon/ethernet-defines.h @@ -95,10 +95,11 @@ /*#define DONT_WRITEBACK(x) 0 */ /* Maximum number of SKBs to try to free per xmit packet. */ -#define MAX_SKB_TO_FREE 10 #define MAX_OUT_QUEUE_DEPTH 1000 -#define FAU_NUM_PACKET_BUFFERS_TO_FREE (CVMX_FAU_REG_END - sizeof(uint32_t)) +#define FAU_TOTAL_TX_TO_CLEAN (CVMX_FAU_REG_END - sizeof(uint32_t)) +#define FAU_NUM_PACKET_BUFFERS_TO_FREE (FAU_TOTAL_TX_TO_CLEAN - sizeof(uint32_t)) + #define TOTAL_NUMBER_OF_PORTS (CVMX_PIP_NUM_INPUT_PORTS+1) diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c index 62258bd31456..5175247ce0a8 100644 --- a/drivers/staging/octeon/ethernet-tx.c +++ b/drivers/staging/octeon/ethernet-tx.c @@ -48,6 +48,7 @@ #include "cvmx-wqe.h" #include "cvmx-fau.h" +#include "cvmx-pip.h" #include "cvmx-pko.h" #include "cvmx-helper.h" @@ -66,6 +67,11 @@ #define GET_SKBUFF_QOS(skb) 0 #endif +static void cvm_oct_tx_do_cleanup(unsigned long arg); +static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0); + +/* Maximum number of SKBs to try to free per xmit packet. 
*/ +#define MAX_SKB_TO_FREE (MAX_OUT_QUEUE_DEPTH * 2) static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau) { @@ -77,10 +83,24 @@ static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau) return skb_to_free; } -void cvm_oct_free_tx_skbs(struct octeon_ethernet *priv) +static void cvm_oct_kick_tx_poll_watchdog(void) +{ + union cvmx_ciu_timx ciu_timx; + ciu_timx.u64 = 0; + ciu_timx.s.one_shot = 1; + ciu_timx.s.len = cvm_oct_tx_poll_interval; + cvmx_write_csr(CVMX_CIU_TIMX(1), ciu_timx.u64); +} + +void cvm_oct_free_tx_skbs(struct net_device *dev) { int32_t skb_to_free; int qos, queues_per_port; + int total_freed = 0; + int total_remaining = 0; + unsigned long flags; + struct octeon_ethernet *priv = netdev_priv(dev); + queues_per_port = cvmx_pko_get_num_queues(priv->port); /* Drain any pending packets in the free list */ for (qos = 0; qos < queues_per_port; qos++) { @@ -89,24 +109,31 @@ void cvm_oct_free_tx_skbs(struct octeon_ethernet *priv) skb_to_free = cvmx_fau_fetch_and_add32(priv->fau+qos*4, MAX_SKB_TO_FREE); skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau+qos*4); - while (skb_to_free > 0) { - dev_kfree_skb_any(skb_dequeue(&priv->tx_free_list[qos])); - skb_to_free--; + + total_freed += skb_to_free; + if (skb_to_free > 0) { + struct sk_buff *to_free_list = NULL; + spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags); + while (skb_to_free > 0) { + struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]); + t->next = to_free_list; + to_free_list = t; + skb_to_free--; + } + spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags); + /* Do the actual freeing outside of the lock. */ + while (to_free_list) { + struct sk_buff *t = to_free_list; + to_free_list = to_free_list->next; + dev_kfree_skb_any(t); + } } + total_remaining += skb_queue_len(&priv->tx_free_list[qos]); } -} - -enum hrtimer_restart cvm_oct_restart_tx(struct hrtimer *timer) -{ - struct octeon_ethernet *priv = container_of(timer, struct octeon_ethernet, tx_restart_timer); - struct net_device *dev = cvm_oct_device[priv->port]; - - cvm_oct_free_tx_skbs(priv); - - if (netif_queue_stopped(dev)) + if (total_freed >= 0 && netif_queue_stopped(dev)) netif_wake_queue(dev); - - return HRTIMER_NORESTART; + if (total_remaining) + cvm_oct_kick_tx_poll_watchdog(); } /** @@ -129,6 +156,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) struct sk_buff *to_free_list; int32_t skb_to_free; int32_t buffers_to_free; + u32 total_to_clean; unsigned long flags; #if REUSE_SKBUFFS_WITHOUT_FREE unsigned char *fpa_head; @@ -232,7 +260,6 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) pko_command.s.subone0 = 1; pko_command.s.dontfree = 1; - pko_command.s.reg0 = priv->fau + qos * 4; /* Build the PKO buffer pointer */ hw_buffer.u64 = 0; @@ -327,7 +354,6 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) * We can use this buffer in the FPA. We don't need the FAU * update anymore */ - pko_command.s.reg0 = 0; pko_command.s.dontfree = 0; hw_buffer.s.back = ((unsigned long)skb->data >> 7) - ((unsigned long)fpa_head >> 7); @@ -384,15 +410,17 @@ dont_put_skbuff_in_hw: * If we're sending faster than the receive can free them then * don't do the HW free. 
*/ - if ((buffers_to_free < -100) && !pko_command.s.dontfree) { + if ((buffers_to_free < -100) && !pko_command.s.dontfree) pko_command.s.dontfree = 1; - pko_command.s.reg0 = priv->fau + qos * 4; - } - if (pko_command.s.dontfree) + if (pko_command.s.dontfree) { queue_type = QUEUE_CORE; - else + pko_command.s.reg0 = priv->fau+qos*4; + } else { queue_type = QUEUE_HW; + } + if (USE_ASYNC_IOBDMA) + cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH, FAU_TOTAL_TX_TO_CLEAN, 1); spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags); @@ -402,10 +430,7 @@ dont_put_skbuff_in_hw: /* Drop the lock when notifying the core. */ spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags); netif_stop_queue(dev); - hrtimer_start(&priv->tx_restart_timer, - priv->tx_restart_interval, HRTIMER_MODE_REL); spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags); - } else { /* If not using normal queueing. */ queue_type = QUEUE_DROP; @@ -460,11 +485,27 @@ skip_xmit: } if (USE_ASYNC_IOBDMA) { + CVMX_SYNCIOBDMA; + total_to_clean = cvmx_scratch_read64(CVMX_SCR_SCRATCH); /* Restore the scratch area */ cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch); cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2); + } else { + total_to_clean = cvmx_fau_fetch_and_add32(FAU_TOTAL_TX_TO_CLEAN, 1); } + if (total_to_clean & 0x3ff) { + /* + * Schedule the cleanup tasklet every 1024 packets for + * the pathological case of high traffic on one port + * delaying clean up of packets on a different port + * that is blocked waiting for the cleanup. + */ + tasklet_schedule(&cvm_oct_tx_cleanup_tasklet); + } + + cvm_oct_kick_tx_poll_watchdog(); + return NETDEV_TX_OK; } @@ -624,7 +665,7 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev) * * @dev: Device being shutdown */ -void cvm_oct_tx_shutdown(struct net_device *dev) +void cvm_oct_tx_shutdown_dev(struct net_device *dev) { struct octeon_ethernet *priv = netdev_priv(dev); unsigned long flags; @@ -638,3 +679,45 @@ void cvm_oct_tx_shutdown(struct net_device *dev) spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags); } } + +static void cvm_oct_tx_do_cleanup(unsigned long arg) +{ + int port; + + for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) { + if (cvm_oct_device[port]) { + struct net_device *dev = cvm_oct_device[port]; + cvm_oct_free_tx_skbs(dev); + } + } +} + +static irqreturn_t cvm_oct_tx_cleanup_watchdog(int cpl, void *dev_id) +{ + /* Disable the interrupt. */ + cvmx_write_csr(CVMX_CIU_TIMX(1), 0); + /* Do the work in the tasklet. */ + tasklet_schedule(&cvm_oct_tx_cleanup_tasklet); + return IRQ_HANDLED; +} + +void cvm_oct_tx_initialize(void) +{ + int i; + + /* Disable the interrupt. 
*/ + cvmx_write_csr(CVMX_CIU_TIMX(1), 0); + /* Register an IRQ hander for to receive CIU_TIMX(1) interrupts */ + i = request_irq(OCTEON_IRQ_TIMER1, + cvm_oct_tx_cleanup_watchdog, 0, + "Ethernet", cvm_oct_device); + + if (i) + panic("Could not acquire Ethernet IRQ %d\n", OCTEON_IRQ_TIMER1); +} + +void cvm_oct_tx_shutdown(void) +{ + /* Free the interrupt handler */ + free_irq(OCTEON_IRQ_TIMER1, cvm_oct_device); +} diff --git a/drivers/staging/octeon/ethernet-tx.h b/drivers/staging/octeon/ethernet-tx.h index b628d8c8421d..547680c6c371 100644 --- a/drivers/staging/octeon/ethernet-tx.h +++ b/drivers/staging/octeon/ethernet-tx.h @@ -29,6 +29,6 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev); int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev); int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry, int do_free, int qos); -void cvm_oct_tx_shutdown(struct net_device *dev); -void cvm_oct_free_tx_skbs(struct octeon_ethernet *priv); -enum hrtimer_restart cvm_oct_restart_tx(struct hrtimer *timer); +void cvm_oct_tx_initialize(void); +void cvm_oct_tx_shutdown(void); +void cvm_oct_tx_shutdown_dev(struct net_device *dev); diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c index 1771c1035a3c..5ee60ab0b236 100644 --- a/drivers/staging/octeon/ethernet.c +++ b/drivers/staging/octeon/ethernet.c @@ -140,6 +140,8 @@ atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0); */ struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS]; +u64 cvm_oct_tx_poll_interval; + static void cvm_oct_rx_refill_worker(struct work_struct *work); static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker); @@ -159,18 +161,19 @@ static void cvm_oct_rx_refill_worker(struct work_struct *work) &cvm_oct_rx_refill_work, HZ); } -static void cvm_oct_tx_clean_worker(struct work_struct *work) +static void cvm_oct_periodic_worker(struct work_struct *work) { struct octeon_ethernet *priv = container_of(work, struct octeon_ethernet, - tx_clean_work.work); + port_periodic_work.work); if (priv->poll) priv->poll(cvm_oct_device[priv->port]); - cvm_oct_free_tx_skbs(priv); + cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(cvm_oct_device[priv->port]); + if (!atomic_read(&cvm_oct_poll_queue_stopping)) - queue_delayed_work(cvm_oct_poll_queue, &priv->tx_clean_work, HZ); + queue_delayed_work(cvm_oct_poll_queue, &priv->port_periodic_work, HZ); } /** @@ -662,6 +665,9 @@ static int __init cvm_oct_init_module(void) */ cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0); + /* Initialize the FAU used for counting tx SKBs that need to be freed */ + cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0); + if ((pow_send_group != -1)) { struct net_device *dev; pr_info("\tConfiguring device for POW only access\n"); @@ -670,18 +676,6 @@ static int __init cvm_oct_init_module(void) /* Initialize the device private structure. */ struct octeon_ethernet *priv = netdev_priv(dev); - hrtimer_init(&priv->tx_restart_timer, - CLOCK_MONOTONIC, - HRTIMER_MODE_REL); - priv->tx_restart_timer.function = cvm_oct_restart_tx; - - /* - * Default for 10GE 5000nS enough time to - * transmit about 100 64byte packtes. 1GE - * interfaces will get 50000nS below. - */ - priv->tx_restart_interval = ktime_set(0, 5000); - dev->netdev_ops = &cvm_oct_pow_netdev_ops; priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED; priv->port = CVMX_PIP_NUM_INPUT_PORTS; @@ -725,9 +719,8 @@ static int __init cvm_oct_init_module(void) /* Initialize the device private structure. 
*/ priv = netdev_priv(dev); - INIT_DELAYED_WORK(&priv->tx_clean_work, - cvm_oct_tx_clean_worker); - + INIT_DELAYED_WORK(&priv->port_periodic_work, + cvm_oct_periodic_worker); priv->imode = imode; priv->port = port; priv->queue = cvmx_pko_get_base_queue(priv->port); @@ -763,7 +756,6 @@ static int __init cvm_oct_init_module(void) case CVMX_HELPER_INTERFACE_MODE_SGMII: dev->netdev_ops = &cvm_oct_sgmii_netdev_ops; - priv->tx_restart_interval = ktime_set(0, 50000); strcpy(dev->name, "eth%d"); break; @@ -775,7 +767,6 @@ static int __init cvm_oct_init_module(void) case CVMX_HELPER_INTERFACE_MODE_RGMII: case CVMX_HELPER_INTERFACE_MODE_GMII: dev->netdev_ops = &cvm_oct_rgmii_netdev_ops; - priv->tx_restart_interval = ktime_set(0, 50000); strcpy(dev->name, "eth%d"); break; } @@ -793,13 +784,19 @@ static int __init cvm_oct_init_module(void) cvmx_pko_get_num_queues(priv->port) * sizeof(uint32_t); queue_delayed_work(cvm_oct_poll_queue, - &priv->tx_clean_work, HZ); + &priv->port_periodic_work, HZ); } } } + cvm_oct_tx_initialize(); cvm_oct_rx_initialize(); + /* + * 150 uS: about 10 1500-byte packtes at 1GE. + */ + cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000); + queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ); return 0; @@ -826,6 +823,8 @@ static void __exit cvm_oct_cleanup_module(void) cancel_delayed_work_sync(&cvm_oct_rx_refill_work); cvm_oct_rx_shutdown(); + cvm_oct_tx_shutdown(); + cvmx_pko_disable(); /* Free the ethernet devices */ @@ -833,9 +832,9 @@ static void __exit cvm_oct_cleanup_module(void) if (cvm_oct_device[port]) { struct net_device *dev = cvm_oct_device[port]; struct octeon_ethernet *priv = netdev_priv(dev); - cancel_delayed_work_sync(&priv->tx_clean_work); + cancel_delayed_work_sync(&priv->port_periodic_work); - cvm_oct_tx_shutdown(dev); + cvm_oct_tx_shutdown_dev(dev); unregister_netdev(dev); kfree(dev); cvm_oct_device[port] = NULL; diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h index 8d0921061dac..db2a3cc048e7 100644 --- a/drivers/staging/octeon/octeon-ethernet.h +++ b/drivers/staging/octeon/octeon-ethernet.h @@ -4,7 +4,7 @@ * Contact: support@caviumnetworks.com * This file is part of the OCTEON SDK * - * Copyright (c) 2003-2007 Cavium Networks + * Copyright (c) 2003-2010 Cavium Networks * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as @@ -31,8 +31,6 @@ #ifndef OCTEON_ETHERNET_H #define OCTEON_ETHERNET_H -#include - /** * This is the definition of the Ethernet driver's private * driver state stored in netdev_priv(dev). @@ -59,9 +57,7 @@ struct octeon_ethernet { uint64_t link_info; /* Called periodically to check link status */ void (*poll) (struct net_device *dev); - struct hrtimer tx_restart_timer; - ktime_t tx_restart_interval; - struct delayed_work tx_clean_work; + struct delayed_work port_periodic_work; struct work_struct port_work; /* may be unused. */ }; @@ -101,6 +97,7 @@ extern char pow_send_list[]; extern struct net_device *cvm_oct_device[]; extern struct workqueue_struct *cvm_oct_poll_queue; extern atomic_t cvm_oct_poll_queue_stopping; +extern u64 cvm_oct_tx_poll_interval; extern int max_rx_cpus; extern int rx_napi_weight; -- cgit v1.2.2 From ec977c5b473e29dbfdac8f2c7477eccc2142e3bc Mon Sep 17 00:00:00 2001 From: David Daney Date: Tue, 16 Feb 2010 17:25:32 -0800 Subject: Staging: Octeon: Reformat a bunch of comments. Many of the comments didn't follow kerneldoc guidlines. 
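[Editor's note] For reference, the kerneldoc layout the comments are being moved toward is shown below; the function name and parameters are hypothetical, and the real conversions appear in the diff that follows.

/**
 * my_example_fn - one-line summary of what the function does
 * @dev: Device being operated on
 * @value: Value to program, in hardware units
 *
 * An optional longer description follows the parameter list, separated
 * by a blank comment line.
 *
 * Returns Zero on success, negative errno on failure.
 */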
Signed-off-by: David Daney To: linux-mips@linux-mips.org To: netdev@vger.kernel.org To: gregkh@suse.de Patchwork: http://patchwork.linux-mips.org/patch/971/ Signed-off-by: Ralf Baechle --- drivers/staging/octeon/ethernet-mdio.c | 6 ++--- drivers/staging/octeon/ethernet-mem.c | 16 ++++++------ drivers/staging/octeon/ethernet-rx.c | 17 +++++++------ drivers/staging/octeon/ethernet-tx.c | 14 +++++------ drivers/staging/octeon/ethernet-util.h | 13 +++------- drivers/staging/octeon/ethernet.c | 42 ++++++++++---------------------- drivers/staging/octeon/octeon-ethernet.h | 7 ------ 7 files changed, 44 insertions(+), 71 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c index 05a5cc0f43ed..7e0be8d00dc3 100644 --- a/drivers/staging/octeon/ethernet-mdio.c +++ b/drivers/staging/octeon/ethernet-mdio.c @@ -96,11 +96,11 @@ const struct ethtool_ops cvm_oct_ethtool_ops = { }; /** - * IOCTL support for PHY control - * + * cvm_oct_ioctl - IOCTL support for PHY control * @dev: Device to change * @rq: the request * @cmd: the command + * * Returns Zero on success */ int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) @@ -153,7 +153,7 @@ static void cvm_oct_adjust_link(struct net_device *dev) /** - * Setup the PHY + * cvm_oct_phy_setup_device - setup the PHY * * @dev: Device to setup * diff --git a/drivers/staging/octeon/ethernet-mem.c b/drivers/staging/octeon/ethernet-mem.c index 53ed2f7ffdfd..00cc91df6b46 100644 --- a/drivers/staging/octeon/ethernet-mem.c +++ b/drivers/staging/octeon/ethernet-mem.c @@ -34,11 +34,12 @@ #include "cvmx-fpa.h" /** - * Fill the supplied hardware pool with skbuffs - * + * cvm_oct_fill_hw_skbuff - fill the supplied hardware pool with skbuffs * @pool: Pool to allocate an skbuff for * @size: Size of the buffer needed for the pool * @elements: Number of buffers to allocate + * + * Returns the actual number of buffers allocated. */ static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements) { @@ -62,8 +63,7 @@ static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements) } /** - * Free the supplied hardware pool of skbuffs - * + * cvm_oct_free_hw_skbuff- free hardware pool skbuffs * @pool: Pool to allocate an skbuff for * @size: Size of the buffer needed for the pool * @elements: Number of buffers to allocate @@ -91,11 +91,12 @@ static void cvm_oct_free_hw_skbuff(int pool, int size, int elements) } /** - * This function fills a hardware pool with memory. - * + * cvm_oct_fill_hw_memory - fill a hardware pool with memory. * @pool: Pool to populate * @size: Size of each buffer in the pool * @elements: Number of buffers to allocate + * + * Returns the actual number of buffers allocated. */ static int cvm_oct_fill_hw_memory(int pool, int size, int elements) { @@ -129,8 +130,7 @@ static int cvm_oct_fill_hw_memory(int pool, int size, int elements) } /** - * Free memory previously allocated with cvm_oct_fill_hw_memory - * + * cvm_oct_free_hw_memory - Free memory allocated by cvm_oct_fill_hw_memory * @pool: FPA pool to free * @size: Size of each buffer in the pool * @elements: Number of buffers that should be in the pool diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c index b2e6ab6a3349..cb38f9eb2cc0 100644 --- a/drivers/staging/octeon/ethernet-rx.c +++ b/drivers/staging/octeon/ethernet-rx.c @@ -124,8 +124,9 @@ static void cvm_oct_no_more_work(void) } /** - * Interrupt handler. 
The interrupt occurs whenever the POW - * has packets in our group. + * cvm_oct_do_interrupt - interrupt handler. + * + * The interrupt occurs whenever the POW has packets in our group. * */ static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id) @@ -138,10 +139,9 @@ static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id) } /** - * This is called on receive errors, and determines if the packet - * can be dropped early-on in cvm_oct_tasklet_rx(). - * + * cvm_oct_check_rcv_error - process receive errors * @work: Work queue entry pointing to the packet. + * * Returns Non-zero if the packet can be dropped, zero otherwise. */ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work) @@ -224,10 +224,11 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work) } /** - * The NAPI poll function. - * + * cvm_oct_napi_poll - the NAPI poll function. * @napi: The NAPI instance, or null if called from cvm_oct_poll_controller * @budget: Maximum number of packets to receive. + * + * Returns the number of packets processed. */ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget) { @@ -484,7 +485,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget) #ifdef CONFIG_NET_POLL_CONTROLLER /** - * This is called when the kernel needs to manually poll the + * cvm_oct_poll_controller - poll for receive packets * device. * * @dev: Device to poll. Unused diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c index 5175247ce0a8..afc2b734d554 100644 --- a/drivers/staging/octeon/ethernet-tx.c +++ b/drivers/staging/octeon/ethernet-tx.c @@ -137,11 +137,11 @@ void cvm_oct_free_tx_skbs(struct net_device *dev) } /** - * Packet transmit - * + * cvm_oct_xmit - transmit a packet * @skb: Packet to send * @dev: Device info structure - * Returns Always returns zero + * + * Returns Always returns NETDEV_TX_OK */ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) { @@ -510,10 +510,10 @@ skip_xmit: } /** - * Packet transmit to the POW - * + * cvm_oct_xmit_pow - transmit a packet to the POW * @skb: Packet to send * @dev: Device info structure + * Returns Always returns zero */ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev) @@ -661,9 +661,9 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev) } /** - * This function frees all skb that are currently queued for TX. - * + * cvm_oct_tx_shutdown_dev - free all skb that are currently queued for TX. * @dev: Device being shutdown + * */ void cvm_oct_tx_shutdown_dev(struct net_device *dev) { diff --git a/drivers/staging/octeon/ethernet-util.h b/drivers/staging/octeon/ethernet-util.h index 37b665918000..23467563fe57 100644 --- a/drivers/staging/octeon/ethernet-util.h +++ b/drivers/staging/octeon/ethernet-util.h @@ -30,10 +30,9 @@ } while (0) /** - * Given a packet data address, return a pointer to the - * beginning of the packet buffer. - * + * cvm_oct_get_buffer_ptr - convert packet data address to pointer * @packet_ptr: Packet data hardware address + * * Returns Packet buffer pointer */ static inline void *cvm_oct_get_buffer_ptr(union cvmx_buf_ptr packet_ptr) @@ -43,9 +42,7 @@ static inline void *cvm_oct_get_buffer_ptr(union cvmx_buf_ptr packet_ptr) } /** - * Given an IPD/PKO port number, return the logical interface it is - * on. 
- * + * INTERFACE - convert IPD port to locgical interface * @ipd_port: Port to check * * Returns Logical interface @@ -65,9 +62,7 @@ static inline int INTERFACE(int ipd_port) } /** - * Given an IPD/PKO port number, return the port's index on a - * logical interface. - * + * INDEX - convert IPD/PKO port number to the port's interface index * @ipd_port: Port to check * * Returns Index into interface port list diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c index 5ee60ab0b236..45cb4c7d422d 100644 --- a/drivers/staging/octeon/ethernet.c +++ b/drivers/staging/octeon/ethernet.c @@ -176,9 +176,6 @@ static void cvm_oct_periodic_worker(struct work_struct *work) queue_delayed_work(cvm_oct_poll_queue, &priv->port_periodic_work, HZ); } -/** - * Configure common hardware for all interfaces - */ static __init void cvm_oct_configure_common_hw(void) { /* Setup the FPA */ @@ -198,10 +195,10 @@ static __init void cvm_oct_configure_common_hw(void) } /** - * Free a work queue entry received in a intercept callback. + * cvm_oct_free_work- Free a work queue entry + * + * @work_queue_entry: Work queue entry to free * - * @work_queue_entry: - * Work queue entry to free * Returns Zero on success, Negative on failure. */ int cvm_oct_free_work(void *work_queue_entry) @@ -228,9 +225,9 @@ int cvm_oct_free_work(void *work_queue_entry) EXPORT_SYMBOL(cvm_oct_free_work); /** - * Get the low level ethernet statistics - * + * cvm_oct_common_get_stats - get the low level ethernet statistics * @dev: Device to get the statistics from + * * Returns Pointer to the statistics */ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev) @@ -274,8 +271,7 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev) } /** - * Change the link MTU. Unimplemented - * + * cvm_oct_common_change_mtu - change the link MTU * @dev: Device to change * @new_mtu: The new MTU * @@ -339,8 +335,7 @@ static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu) } /** - * Set the multicast list. Currently unimplemented. - * + * cvm_oct_common_set_multicast_list - set the multicast list * @dev: Device to work on */ static void cvm_oct_common_set_multicast_list(struct net_device *dev) @@ -395,10 +390,10 @@ static void cvm_oct_common_set_multicast_list(struct net_device *dev) } /** - * Set the hardware MAC address for a device - * - * @dev: Device to change the MAC address for - * @addr: Address structure to change it too. MAC address is addr + 2. + * cvm_oct_common_set_mac_address - set the hardware MAC address for a device + * @dev: The device in question. + * @addr: Address structure to change it too. + * Returns Zero on success */ static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr) @@ -445,9 +440,9 @@ static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr) } /** - * Per network device initialization - * + * cvm_oct_common_init - per network device initialization * @dev: Device to initialize + * * Returns Zero on success */ int cvm_oct_common_init(struct net_device *dev) @@ -603,12 +598,6 @@ static const struct net_device_ops cvm_oct_pow_netdev_ops = { extern void octeon_mdiobus_force_mod_depencency(void); -/** - * Module/ driver initialization. Creates the linux network - * devices. 
- * - * Returns Zero on success - */ static int __init cvm_oct_init_module(void) { int num_interfaces; @@ -802,11 +791,6 @@ static int __init cvm_oct_init_module(void) return 0; } -/** - * Module / driver shutdown - * - * Returns Zero on success - */ static void __exit cvm_oct_cleanup_module(void) { int port; diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h index db2a3cc048e7..d58192563552 100644 --- a/drivers/staging/octeon/octeon-ethernet.h +++ b/drivers/staging/octeon/octeon-ethernet.h @@ -61,13 +61,6 @@ struct octeon_ethernet { struct work_struct port_work; /* may be unused. */ }; -/** - * Free a work queue entry received in a intercept callback. - * - * @work_queue_entry: - * Work queue entry to free - * Returns Zero on success, Negative on failure. - */ int cvm_oct_free_work(void *work_queue_entry); extern int cvm_oct_rgmii_init(struct net_device *dev); -- cgit v1.2.2 From 559e25a5e3efe60a22b7f96ea4ad2eb09d996e97 Mon Sep 17 00:00:00 2001 From: David Daney Date: Tue, 16 Feb 2010 17:25:33 -0800 Subject: Staging: Octeon: Remove /proc/octeon_ethernet_stats This file shouldn't be in /proc, so we remove it. Signed-off-by: David Daney To: linux-mips@linux-mips.org To: netdev@vger.kernel.org To: gregkh@suse.de Patchwork: http://patchwork.linux-mips.org/patch/970/ Signed-off-by: Ralf Baechle --- drivers/staging/octeon/Makefile | 1 - drivers/staging/octeon/ethernet-proc.c | 144 --------------------------------- drivers/staging/octeon/ethernet-proc.h | 29 ------- drivers/staging/octeon/ethernet.c | 4 - 4 files changed, 178 deletions(-) delete mode 100644 drivers/staging/octeon/ethernet-proc.c delete mode 100644 drivers/staging/octeon/ethernet-proc.h (limited to 'drivers/staging') diff --git a/drivers/staging/octeon/Makefile b/drivers/staging/octeon/Makefile index c0a583cc2227..87447c102fa0 100644 --- a/drivers/staging/octeon/Makefile +++ b/drivers/staging/octeon/Makefile @@ -14,7 +14,6 @@ obj-${CONFIG_OCTEON_ETHERNET} := octeon-ethernet.o octeon-ethernet-objs := ethernet.o octeon-ethernet-objs += ethernet-mdio.o octeon-ethernet-objs += ethernet-mem.o -octeon-ethernet-objs += ethernet-proc.o octeon-ethernet-objs += ethernet-rgmii.o octeon-ethernet-objs += ethernet-rx.o octeon-ethernet-objs += ethernet-sgmii.o diff --git a/drivers/staging/octeon/ethernet-proc.c b/drivers/staging/octeon/ethernet-proc.c deleted file mode 100644 index 16308d484d3b..000000000000 --- a/drivers/staging/octeon/ethernet-proc.c +++ /dev/null @@ -1,144 +0,0 @@ -/********************************************************************** - * Author: Cavium Networks - * - * Contact: support@caviumnetworks.com - * This file is part of the OCTEON SDK - * - * Copyright (c) 2003-2007 Cavium Networks - * - * This file is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, Version 2, as - * published by the Free Software Foundation. - * - * This file is distributed in the hope that it will be useful, but - * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or - * NONINFRINGEMENT. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License - * along with this file; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * or visit http://www.gnu.org/licenses/. 
- * - * This file may also be available under a different license from Cavium. - * Contact Cavium Networks for more information -**********************************************************************/ -#include -#include -#include -#include - -#include - -#include "octeon-ethernet.h" -#include "ethernet-defines.h" - -#include "cvmx-helper.h" -#include "cvmx-pip.h" - -/** - * User is reading /proc/octeon_ethernet_stats - * - * @m: - * @v: - * Returns - */ -static int cvm_oct_stats_show(struct seq_file *m, void *v) -{ - struct octeon_ethernet *priv; - int port; - - for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) { - - if (cvm_oct_device[port]) { - priv = netdev_priv(cvm_oct_device[port]); - - seq_printf(m, "\nOcteon Port %d (%s)\n", port, - cvm_oct_device[port]->name); - seq_printf(m, - "rx_packets: %12lu\t" - "tx_packets: %12lu\n", - priv->stats.rx_packets, - priv->stats.tx_packets); - seq_printf(m, - "rx_bytes: %12lu\t" - "tx_bytes: %12lu\n", - priv->stats.rx_bytes, priv->stats.tx_bytes); - seq_printf(m, - "rx_errors: %12lu\t" - "tx_errors: %12lu\n", - priv->stats.rx_errors, - priv->stats.tx_errors); - seq_printf(m, - "rx_dropped: %12lu\t" - "tx_dropped: %12lu\n", - priv->stats.rx_dropped, - priv->stats.tx_dropped); - seq_printf(m, - "rx_length_errors: %12lu\t" - "tx_aborted_errors: %12lu\n", - priv->stats.rx_length_errors, - priv->stats.tx_aborted_errors); - seq_printf(m, - "rx_over_errors: %12lu\t" - "tx_carrier_errors: %12lu\n", - priv->stats.rx_over_errors, - priv->stats.tx_carrier_errors); - seq_printf(m, - "rx_crc_errors: %12lu\t" - "tx_fifo_errors: %12lu\n", - priv->stats.rx_crc_errors, - priv->stats.tx_fifo_errors); - seq_printf(m, - "rx_frame_errors: %12lu\t" - "tx_heartbeat_errors: %12lu\n", - priv->stats.rx_frame_errors, - priv->stats.tx_heartbeat_errors); - seq_printf(m, - "rx_fifo_errors: %12lu\t" - "tx_window_errors: %12lu\n", - priv->stats.rx_fifo_errors, - priv->stats.tx_window_errors); - seq_printf(m, - "rx_missed_errors: %12lu\t" - "multicast: %12lu\n", - priv->stats.rx_missed_errors, - priv->stats.multicast); - } - } - - return 0; -} - -/** - * /proc/octeon_ethernet_stats was openned. Use the single_open iterator - * - * @inode: - * @file: - * Returns - */ -static int cvm_oct_stats_open(struct inode *inode, struct file *file) -{ - return single_open(file, cvm_oct_stats_show, NULL); -} - -static const struct file_operations cvm_oct_stats_operations = { - .open = cvm_oct_stats_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -void cvm_oct_proc_initialize(void) -{ - struct proc_dir_entry *entry = - create_proc_entry("octeon_ethernet_stats", 0, NULL); - if (entry) - entry->proc_fops = &cvm_oct_stats_operations; -} - -void cvm_oct_proc_shutdown(void) -{ - remove_proc_entry("octeon_ethernet_stats", NULL); -} diff --git a/drivers/staging/octeon/ethernet-proc.h b/drivers/staging/octeon/ethernet-proc.h deleted file mode 100644 index 82c7d9f78bc4..000000000000 --- a/drivers/staging/octeon/ethernet-proc.h +++ /dev/null @@ -1,29 +0,0 @@ -/********************************************************************* - * Author: Cavium Networks - * - * Contact: support@caviumnetworks.com - * This file is part of the OCTEON SDK - * - * Copyright (c) 2003-2007 Cavium Networks - * - * This file is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, Version 2, as - * published by the Free Software Foundation. 
- * - * This file is distributed in the hope that it will be useful, but - * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or - * NONINFRINGEMENT. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License - * along with this file; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * or visit http://www.gnu.org/licenses/. - * - * This file may also be available under a different license from Cavium. - * Contact Cavium Networks for more information -*********************************************************************/ - -void cvm_oct_proc_initialize(void); -void cvm_oct_proc_shutdown(void); diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c index 45cb4c7d422d..02b63678811a 100644 --- a/drivers/staging/octeon/ethernet.c +++ b/drivers/staging/octeon/ethernet.c @@ -42,8 +42,6 @@ #include "ethernet-tx.h" #include "ethernet-mdio.h" #include "ethernet-util.h" -#include "ethernet-proc.h" - #include "cvmx-pip.h" #include "cvmx-pko.h" @@ -621,7 +619,6 @@ static int __init cvm_oct_init_module(void) return -ENOMEM; } - cvm_oct_proc_initialize(); cvm_oct_configure_common_hw(); cvmx_helper_initialize_packet_io_global(); @@ -828,7 +825,6 @@ static void __exit cvm_oct_cleanup_module(void) destroy_workqueue(cvm_oct_poll_queue); cvmx_pko_shutdown(); - cvm_oct_proc_shutdown(); cvmx_ipd_free_ptr(); -- cgit v1.2.2