author	Stanislaw Gruszka <sgruszka@redhat.com>	2010-10-20 18:25:39 -0400
committer	David S. Miller <davem@davemloft.net>	2010-10-21 04:32:46 -0400
commit	48addcc9edd551d09063148886bd6f3467d74c15 (patch)
tree	1d35941cce516178e20bfb51740cf93b4b1ce83b /drivers/net/r8169.c
parent	231aee63c1e270353fc0dc7fd4d5605a96562ec0 (diff)
r8169: use pointer to struct device as local variable
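The change is mechanical: functions that issue several DMA calls now cache the device pointer once in a local variable and pass it to the DMA API instead of spelling out &tp->pci_dev->dev at every call site. A minimal before/after sketch of the pattern, taken from the start_xmit mapping path in the hunks below (surrounding code elided):

	struct device *d = &tp->pci_dev->dev;

	/* was: dma_map_single(&tp->pci_dev->dev, skb->data, len, DMA_TO_DEVICE) */
	mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(d, mapping)))
		goto err_dma_0;

The local pointer keeps the dma_map_single()/dma_free_coherent() calls on a single line and lets rtl8169_unmap_tx_skb() take a struct device * directly instead of a struct pci_dev *.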
Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/r8169.c')
-rw-r--r--	drivers/net/r8169.c	51
1 file changed, 25 insertions(+), 26 deletions(-)
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 981b195811bb..c36f64264300 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1200,6 +1200,7 @@ static void rtl8169_update_counters(struct net_device *dev)
 	dma_addr_t paddr;
 	u32 cmd;
 	int wait = 1000;
+	struct device *d = &tp->pci_dev->dev;
 
 	/*
 	 * Some chips are unable to dump tally counters when the receiver
@@ -1208,8 +1209,7 @@ static void rtl8169_update_counters(struct net_device *dev)
 	if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
 		return;
 
-	counters = dma_alloc_coherent(&tp->pci_dev->dev, sizeof(*counters),
-				      &paddr, GFP_KERNEL);
+	counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL);
 	if (!counters)
 		return;
 
@@ -1230,8 +1230,7 @@ static void rtl8169_update_counters(struct net_device *dev)
 	RTL_W32(CounterAddrLow, 0);
 	RTL_W32(CounterAddrHigh, 0);
 
-	dma_free_coherent(&tp->pci_dev->dev, sizeof(*counters), counters,
-			  paddr);
+	dma_free_coherent(d, sizeof(*counters), counters, paddr);
 }
 
 static void rtl8169_get_ethtool_stats(struct net_device *dev,
@@ -3945,10 +3944,9 @@ static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
 static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
 				     void **data_buff, struct RxDesc *desc)
 {
-	struct pci_dev *pdev = tp->pci_dev;
-
-	dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), rx_buf_sz,
+	dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz,
 			 DMA_FROM_DEVICE);
+
 	kfree(*data_buff);
 	*data_buff = NULL;
 	rtl8169_make_unusable_by_asic(desc);
@@ -3979,6 +3977,7 @@ static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
 {
 	void *data;
 	dma_addr_t mapping;
+	struct device *d = &tp->pci_dev->dev;
 	struct net_device *dev = tp->dev;
 	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
 
@@ -3993,9 +3992,9 @@ static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
 		return NULL;
 	}
 
-	mapping = dma_map_single(&tp->pci_dev->dev, rtl8169_align(data), rx_buf_sz,
+	mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz,
 				 DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(&tp->pci_dev->dev, mapping)))
+	if (unlikely(dma_mapping_error(d, mapping)))
 		goto err_out;
 
 	rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
@@ -4066,13 +4065,13 @@ static int rtl8169_init_ring(struct net_device *dev)
 	return rtl8169_rx_fill(tp);
 }
 
-static void rtl8169_unmap_tx_skb(struct pci_dev *pdev, struct ring_info *tx_skb,
+static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
 				 struct TxDesc *desc)
 {
 	unsigned int len = tx_skb->len;
 
-	dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), len,
-			 DMA_TO_DEVICE);
+	dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);
+
 	desc->opts1 = 0x00;
 	desc->opts2 = 0x00;
 	desc->addr = 0x00;
@@ -4092,7 +4091,7 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
 		if (len) {
 			struct sk_buff *skb = tx_skb->skb;
 
-			rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb,
+			rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
 					     tp->TxDescArray + entry);
 			if (skb) {
 				dev_kfree_skb(skb);
@@ -4209,6 +4208,7 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
 	struct skb_shared_info *info = skb_shinfo(skb);
 	unsigned int cur_frag, entry;
 	struct TxDesc * uninitialized_var(txd);
+	struct device *d = &tp->pci_dev->dev;
 
 	entry = tp->cur_tx;
 	for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
@@ -4222,9 +4222,8 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
 		txd = tp->TxDescArray + entry;
 		len = frag->size;
 		addr = ((void *) page_address(frag->page)) + frag->page_offset;
-		mapping = dma_map_single(&tp->pci_dev->dev, addr, len,
-					 DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(&tp->pci_dev->dev, mapping)))
+		mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(d, mapping)))
 			goto err_out;
 
 		/* anti gcc 2.95.3 bugware (sic) */
@@ -4275,6 +4274,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 	unsigned int entry = tp->cur_tx % NUM_TX_DESC;
 	struct TxDesc *txd = tp->TxDescArray + entry;
 	void __iomem *ioaddr = tp->mmio_addr;
+	struct device *d = &tp->pci_dev->dev;
 	dma_addr_t mapping;
 	u32 status, len;
 	u32 opts1;
@@ -4289,9 +4289,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 		goto err_stop_0;
 
 	len = skb_headlen(skb);
-	mapping = dma_map_single(&tp->pci_dev->dev, skb->data, len,
-				 DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(&tp->pci_dev->dev, mapping)))
+	mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(d, mapping)))
 		goto err_dma_0;
 
 	tp->tx_skb[entry].len = len;
@@ -4332,7 +4331,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 	return NETDEV_TX_OK;
 
 err_dma_1:
-	rtl8169_unmap_tx_skb(tp->pci_dev, tp->tx_skb + entry, txd);
+	rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
 err_dma_0:
 	dev_kfree_skb(skb);
 	dev->stats.tx_dropped++;
@@ -4414,8 +4413,8 @@ static void rtl8169_tx_interrupt(struct net_device *dev,
 			dev->stats.tx_bytes += len;
 			dev->stats.tx_packets++;
 
-			rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb, tp->TxDescArray + entry);
-
+			rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
+					     tp->TxDescArray + entry);
 			if (status & LastFrag) {
 				dev_kfree_skb(tx_skb->skb);
 				tx_skb->skb = NULL;
@@ -4466,16 +4465,16 @@ static struct sk_buff *rtl8169_try_rx_copy(void *data,
 					    dma_addr_t addr)
 {
 	struct sk_buff *skb;
+	struct device *d = &tp->pci_dev->dev;
 
 	data = rtl8169_align(data);
-	dma_sync_single_for_cpu(&tp->pci_dev->dev, addr, pkt_size,
-				DMA_FROM_DEVICE);
+	dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
 	prefetch(data);
 	skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
 	if (skb)
 		memcpy(skb->data, data, pkt_size);
-	dma_sync_single_for_device(&tp->pci_dev->dev, addr, pkt_size,
-				   DMA_FROM_DEVICE);
+	dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
+
 	return skb;
 }
 