author	Thomas Petazzoni <thomas.petazzoni@free-electrons.com>	2012-11-19 05:41:25 -0500
committer	Thomas Petazzoni <thomas.petazzoni@free-electrons.com>	2012-11-20 17:11:48 -0500
commit	6a20c1758da0220139633238214f0438c90227da
tree	08e8c69ae2b932d1515ea77c374cd51234fcba52 /drivers/net
parent	b07812f15e396001b0d0949902d8d633596b093f
net: mvneta: adjust multiline comments to net/ style
As reported by checkpatch, the multiline comments for net/ and
drivers/net/ have a slightly different format than the one used in the
rest of the kernel, so we adjust our multiline comments accordingly.

Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
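For readers unfamiliar with the two conventions, here is a minimal
illustrative sketch of the difference (the comment text is hypothetical,
not taken from the driver); the first form is what checkpatch's
networking block comment check reports under net/:

    /*
     * Kernel-wide style: the opening line of a multiline comment is
     * left empty and the text starts on the second line.
     */

    /* net/ style: the text starts on the first line, immediately
     * after the opener, and only the closing marker gets a line of
     * its own.
     */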
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/ethernet/marvell/mvneta.c	84
1 file changed, 42 insertions, 42 deletions
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index a7826f0a968e..d9dadee6ab79 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -178,8 +178,7 @@
 /* Napi polling weight */
 #define MVNETA_RX_POLL_WEIGHT		64
 
-/*
- * The two bytes Marvell header. Either contains a special value used
+/* The two bytes Marvell header. Either contains a special value used
  * by Marvell switches when a specific hardware mode is enabled (not
  * supported by this driver) or is filled automatically by zeroes on
  * the RX side. Those two bytes being at the front of the Ethernet
@@ -259,8 +258,7 @@ struct mvneta_port {
 	unsigned int speed;
 };
 
-/*
- * The mvneta_tx_desc and mvneta_rx_desc structures describe the
+/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
  * layout of the transmit and reception DMA descriptors, and their
  * layout is therefore defined by the hardware design
  */
@@ -318,7 +316,8 @@ struct mvneta_tx_queue {
 	int size;
 
 	/* Number of currently used TX DMA descriptor in the
-	 * descriptor ring */
+	 * descriptor ring
+	 */
 	int count;
 
 	/* Array of transmitted skb */
@@ -454,8 +453,7 @@ struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
 
 /* Rx descriptors helper methods */
 
-/*
- * Checks whether the given RX descriptor is both the first and the
+/* Checks whether the given RX descriptor is both the first and the
  * last descriptor for the RX packet. Each RX packet is currently
  * received through a single RX descriptor, so not having each RX
  * descriptor with its first and last bits set is an error
@@ -472,7 +470,8 @@ static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
 			    int ndescs)
 {
 	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
-	 * be added at once */
+	 * be added at once
+	 */
 	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
 		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
 			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
@@ -494,8 +493,7 @@ static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
 	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
 }
 
-/*
- * Update num of rx desc called upon return from rx path or
+/* Update num of rx desc called upon return from rx path or
  * from mvneta_rxq_drop_pkts().
  */
 static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
@@ -580,7 +578,8 @@ static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
 	u32 val;
 
 	/* Only 255 descriptors can be added at once ; Assume caller
-	   process TX desriptors in quanta less than 256 */
+	 * process TX desriptors in quanta less than 256
+	 */
 	val = pend_desc;
 	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
 }
@@ -596,7 +595,8 @@ mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
 }
 
 /* Release the last allocated TX descriptor. Useful to handle DMA
- * mapping failures in the TX path. */
+ * mapping failures in the TX path.
+ */
 static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
 {
 	if (txq->next_desc_to_proc == 0)
@@ -714,7 +714,8 @@ static void mvneta_port_down(struct mvneta_port *pp)
 	} while (val & 0xff);
 
 	/* Stop Tx port activity. Check port Tx activity. Issue stop
-	   command for active channels only */
+	 * command for active channels only
+	 */
 	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;
 
 	if (val != 0)
@@ -865,7 +866,8 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
 	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
 
 	/* Set CPU queue access map - all CPUs have access to all RX
-	   queues and to all TX queues */
+	 * queues and to all TX queues
+	 */
 	for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
 		mvreg_write(pp, MVNETA_CPU_MAP(cpu),
 			    (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
@@ -1010,9 +1012,8 @@ static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
 	mvneta_set_ucast_addr(pp, addr[5], queue);
 }
 
-/*
- * Set the number of packets that will be received before
- * RX interrupt will be generated by HW.
+/* Set the number of packets that will be received before RX interrupt
+ * will be generated by HW.
  */
 static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
 				    struct mvneta_rx_queue *rxq, u32 value)
@@ -1022,9 +1023,8 @@ static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
 	rxq->pkts_coal = value;
 }
 
-/*
- * Set the time delay in usec before
- * RX interrupt will be generated by HW.
+/* Set the time delay in usec before RX interrupt will be generated by
+ * HW.
  */
 static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
 				    struct mvneta_rx_queue *rxq, u32 value)
@@ -1102,8 +1102,7 @@ static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
 	return sent_desc;
 }
 
-/*
- * Get number of sent descriptors and decrement counter.
+/* Get number of sent descriptors and decrement counter.
  * The number of sent descriptors is returned.
  */
 static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
@@ -1128,8 +1127,9 @@ static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
 	u32 command;
 
 	/* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
-	   G_L4_chk, L4_type; required only for checksum
-	   calculation */
+	 * G_L4_chk, L4_type; required only for checksum
+	 * calculation
+	 */
 	command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
 	command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
 
@@ -1305,8 +1305,7 @@ static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
 	return MVNETA_TX_L4_CSUM_NOT;
 }
 
-/*
- * Returns rx queue pointer (find last set bit) according to causeRxTx
+/* Returns rx queue pointer (find last set bit) according to causeRxTx
  * value
  */
 static struct mvneta_rx_queue *mvneta_rx_policy(struct mvneta_port *pp,
@@ -1454,7 +1453,8 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
 
 error:
 	/* Release all descriptors that were used to map fragments of
-	 * this packet, as well as the corresponding DMA mappings */
+	 * this packet, as well as the corresponding DMA mappings
+	 */
 	for (i = i - 1; i >= 0; i--) {
 		tx_desc = txq->descs + i;
 		dma_unmap_single(pp->dev->dev.parent,
@@ -1546,7 +1546,8 @@ out:
 	mvneta_txq_done(pp, txq);
 
 	/* If after calling mvneta_txq_done, count equals
-	   frags, we need to set the timer */
+	 * frags, we need to set the timer
+	 */
 	if (txq->count == frags && frags > 0)
 		mvneta_add_tx_done_timer(pp);
 
@@ -1598,8 +1599,7 @@ static u32 mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done,
 	return tx_done;
 }
 
-/*
- * Compute crc8 of the specified address, using a unique algorithm ,
+/* Compute crc8 of the specified address, using a unique algorithm ,
  * according to hw spec, different than generic crc8 algorithm
  */
 static int mvneta_addr_crc(unsigned char *addr)
@@ -1828,8 +1828,7 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
 	cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) &
 		MVNETA_RX_INTR_MASK(rxq_number);
 
-	/*
-	 * For the case where the last mvneta_poll did not process all
+	/* For the case where the last mvneta_poll did not process all
 	 * RX packets
 	 */
 	cause_rx_tx |= pp->cause_rx_tx;
@@ -1847,10 +1846,12 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
 			rx_done += count;
 			budget -= count;
 			if (budget > 0) {
-				/* set off the rx bit of the corresponding bit
-				   in the cause rx tx register, so that next
-				   iteration will find the next rx queue where
-				   packets are received on */
+				/* set off the rx bit of the
+				 * corresponding bit in the cause rx
+				 * tx register, so that next iteration
+				 * will find the next rx queue where
+				 * packets are received on
+				 */
 				cause_rx_tx &= ~((1 << rxq->id) << 8);
 			}
 		}
@@ -1925,7 +1926,8 @@ static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 	}
 
 	/* Add this number of RX descriptors as non occupied (ready to
-	   get packets) */
+	 * get packets)
+	 */
 	mvneta_rxq_non_occup_desc_add(pp, rxq, i);
 
 	return i;
@@ -2201,7 +2203,7 @@ static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
 		return -EINVAL;
 	}
 
-	/* 9676 == 9700 - 20 and rounding to 8 */
+	/* 9676 == 9700 - 20 and rounding to 8 */
 	if (mtu > 9676) {
 		netdev_info(dev, "Illegal MTU value %d, round to 9676\n", mtu);
 		mtu = 9676;
@@ -2231,8 +2233,7 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
 	if (!netif_running(dev))
 		return 0;
 
-	/*
-	 * The interface is running, so we have to force a
+	/* The interface is running, so we have to force a
 	 * reallocation of the RXQs
 	 */
 	mvneta_stop_dev(pp);
@@ -2677,8 +2678,7 @@ static int __devinit mvneta_probe(struct platform_device *pdev)
 	int phy_mode;
 	int err;
 
-	/*
-	 * Our multiqueue support is not complete, so for now, only
+	/* Our multiqueue support is not complete, so for now, only
 	 * allow the usage of the first RX queue
 	 */
 	if (rxq_def != 0) {