author     david decotigny <david.decotigny@google.com>  2011-11-05 10:38:24 -0400
committer  David S. Miller <davem@davemloft.net>         2011-11-07 13:31:25 -0500
commit     e45a618753d5a8bc9086382f73bbc2d6a3399250 (patch)
tree       5d703fb6c245a761d9d6b838ecc7d7c594c97f7b
parent     0bdfea8ba856826f5901fda608013f323c87f661 (diff)
forcedeth: fix a few sparse warnings (variable shadowing)
This fixes the following sparse warnings:

drivers/net/ethernet/nvidia/forcedeth.c:2113:7: warning: symbol 'size' shadows an earlier one
drivers/net/ethernet/nvidia/forcedeth.c:2102:6: originally declared here
drivers/net/ethernet/nvidia/forcedeth.c:2155:7: warning: symbol 'size' shadows an earlier one
drivers/net/ethernet/nvidia/forcedeth.c:2102:6: originally declared here
drivers/net/ethernet/nvidia/forcedeth.c:2227:7: warning: symbol 'size' shadows an earlier one
drivers/net/ethernet/nvidia/forcedeth.c:2215:6: originally declared here
drivers/net/ethernet/nvidia/forcedeth.c:2271:7: warning: symbol 'size' shadows an earlier one
drivers/net/ethernet/nvidia/forcedeth.c:2215:6: originally declared here
drivers/net/ethernet/nvidia/forcedeth.c:2986:20: warning: symbol 'addr' shadows an earlier one
drivers/net/ethernet/nvidia/forcedeth.c:2963:6: originally declared here

Signed-off-by: David Decotigny <david.decotigny@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/ethernet/nvidia/forcedeth.c  34
1 file changed, 17 insertions(+), 17 deletions(-)
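For reference, the warning class being fixed: an inner declaration reuses a name from an enclosing scope, so every read inside the block silently binds to the inner variable. Below is a minimal standalone illustration of the same pattern forcedeth had (an outer whole-buffer size and an inner per-fragment size, both named 'size'); the function and names are invented for this sketch, not taken from the driver, and both sparse and gcc -Wshadow would flag it:

    #include <stdio.h>

    /* Invented miniature of the shadowing pattern: the code is correct,
     * but which 'size' a given line reads is easy to misjudge, which is
     * what the warning guards against. */
    static unsigned int count_chunks(unsigned int head_size,
                                     const unsigned int *frag_sizes, int n)
    {
            unsigned int size = head_size;          /* outer 'size' */
            unsigned int chunks = size / 16 + 1;
            int i;

            for (i = 0; i < n; i++) {
                    unsigned int size = frag_sizes[i];  /* sparse: symbol 'size'
                                                         * shadows an earlier one */
                    chunks += size / 16 + 1;        /* inner 'size' wins here */
            }
            return chunks;
    }

    int main(void)
    {
            unsigned int frags[3] = { 3, 40, 7 };

            printf("%u\n", count_chunks(100, frags, 3));
            return 0;
    }

Renaming the inner variable, as this patch does (size -> frag_size, addr -> hw_addr), silences the warning with no change in behavior, since each inner variable is only used inside its own block.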
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 0c10ff700cbc..1dca57013cb2 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -2103,10 +2103,10 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* add fragments to entries count */
 	for (i = 0; i < fragments; i++) {
-		u32 size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+		u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
 
-		entries += (size >> NV_TX2_TSO_MAX_SHIFT) +
-			   ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
+		entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) +
+			   ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
 	}
 
 	spin_lock_irqsave(&np->lock, flags);
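Aside: the unchanged arithmetic in this hunk (and its twin in nv_start_xmit_optimized below) is a ceiling division, counting how many NV_TX2_TSO_MAX_SIZE-sized descriptors a fragment occupies; it works because the size is a power of two. A standalone sketch, with the shift value assumed here for illustration rather than taken from the driver:

    #include <assert.h>

    /* Values assumed for illustration; the real definitions live in
     * forcedeth.c. The idiom only requires the invariant
     * NV_TX2_TSO_MAX_SIZE == 1 << NV_TX2_TSO_MAX_SHIFT. */
    #define NV_TX2_TSO_MAX_SHIFT  14
    #define NV_TX2_TSO_MAX_SIZE   (1 << NV_TX2_TSO_MAX_SHIFT)

    /* Same shape as the loop body above: entries = ceil(frag_size / MAX),
     * computed as whole chunks (the shift) plus one more if a remainder
     * is left (the mask test). */
    static unsigned int entries_for(unsigned int frag_size)
    {
            return (frag_size >> NV_TX2_TSO_MAX_SHIFT) +
                   ((frag_size & (NV_TX2_TSO_MAX_SIZE - 1)) ? 1 : 0);
    }

    int main(void)
    {
            assert(entries_for(1) == 1);
            assert(entries_for(NV_TX2_TSO_MAX_SIZE) == 1);
            assert(entries_for(NV_TX2_TSO_MAX_SIZE + 1) == 2);
            assert(entries_for(3 * NV_TX2_TSO_MAX_SIZE) == 3);
            return 0;
    }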
@@ -2145,13 +2145,13 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* setup the fragments */
 	for (i = 0; i < fragments; i++) {
 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-		u32 size = skb_frag_size(frag);
+		u32 frag_size = skb_frag_size(frag);
 		offset = 0;
 
 		do {
 			prev_tx = put_tx;
 			prev_tx_ctx = np->put_tx_ctx;
-			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
+			bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
 			np->put_tx_ctx->dma = skb_frag_dma_map(
 				&np->pci_dev->dev,
 				frag, offset,
@@ -2163,12 +2163,12 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
 
 			offset += bcnt;
-			size -= bcnt;
+			frag_size -= bcnt;
 			if (unlikely(put_tx++ == np->last_tx.orig))
 				put_tx = np->first_tx.orig;
 			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
 				np->put_tx_ctx = np->first_tx_ctx;
-		} while (size);
+		} while (frag_size);
 	}
 
 	/* set last fragment flag */
@@ -2217,10 +2217,10 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 
 	/* add fragments to entries count */
 	for (i = 0; i < fragments; i++) {
-		u32 size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+		u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
 
-		entries += (size >> NV_TX2_TSO_MAX_SHIFT) +
-			   ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
+		entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) +
+			   ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
 	}
 
 	spin_lock_irqsave(&np->lock, flags);
@@ -2261,13 +2261,13 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 	/* setup the fragments */
 	for (i = 0; i < fragments; i++) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-		u32 size = skb_frag_size(frag);
+		u32 frag_size = skb_frag_size(frag);
 		offset = 0;
 
 		do {
 			prev_tx = put_tx;
 			prev_tx_ctx = np->put_tx_ctx;
-			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
+			bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
 			np->put_tx_ctx->dma = skb_frag_dma_map(
 				&np->pci_dev->dev,
 				frag, offset,
@@ -2280,12 +2280,12 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
 
 			offset += bcnt;
-			size -= bcnt;
+			frag_size -= bcnt;
 			if (unlikely(put_tx++ == np->last_tx.ex))
 				put_tx = np->first_tx.ex;
 			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
 				np->put_tx_ctx = np->first_tx_ctx;
-		} while (size);
+		} while (frag_size);
 	}
 
 	/* set last fragment flag */
@@ -2933,11 +2933,11 @@ static void nv_set_multicast(struct net_device *dev)
 			struct netdev_hw_addr *ha;
 
 			netdev_for_each_mc_addr(ha, dev) {
-				unsigned char *addr = ha->addr;
+				unsigned char *hw_addr = ha->addr;
 				u32 a, b;
 
-				a = le32_to_cpu(*(__le32 *) addr);
-				b = le16_to_cpu(*(__le16 *) (&addr[4]));
+				a = le32_to_cpu(*(__le32 *) hw_addr);
+				b = le16_to_cpu(*(__le16 *) (&hw_addr[4]));
 				alwaysOn[0] &= a;
 				alwaysOff[0] &= ~a;
 				alwaysOn[1] &= b;
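Aside: in this last hunk, 'a' and 'b' are the low 4 and high 2 bytes of the 6-byte multicast MAC, read little-endian, which the driver then folds into the alwaysOn/alwaysOff masks visible above. A hedged userspace sketch of the same split; memcpy stands in for the kernel's __le32/__le16 pointer casts, and the direct use of the copied words assumes a little-endian host where the kernel would apply le32_to_cpu/le16_to_cpu:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Split a 6-byte MAC into a 32-bit word (bytes 0..3) and a 16-bit
     * word (bytes 4..5), mirroring the reads in nv_set_multicast. */
    static void mac_to_words(const unsigned char *hw_addr,
                             uint32_t *a, uint16_t *b)
    {
            memcpy(a, hw_addr, 4);          /* bytes 0..3 */
            memcpy(b, hw_addr + 4, 2);      /* bytes 4..5 */
    }

    int main(void)
    {
            /* An IPv4 multicast MAC, chosen only as sample input */
            const unsigned char mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
            uint32_t a;
            uint16_t b;

            mac_to_words(mac, &a, &b);
            printf("a=0x%08x b=0x%04x\n", a, b);  /* 0x005e0001, 0xfb00 on LE */
            return 0;
    }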