Diffstat (limited to 'drivers/net/chelsio/sge.c')
-rw-r--r--   drivers/net/chelsio/sge.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 8c658cf6f62f..df3a1410696e 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -53,6 +53,7 @@
 #include <linux/ip.h>
 #include <linux/in.h>
 #include <linux/if_arp.h>
+#include <linux/slab.h>
 
 #include "cpl5_cmd.h"
 #include "sge.h"
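The new include line reflects the tree-wide cleanup that stopped percpu.h (and headers that pull it in) from dragging slab.h along implicitly; after that change, any file calling the slab allocators has to include <linux/slab.h> itself. A minimal sketch of the failure mode it prevents, using a hypothetical helper rather than code from this driver:

    #include <linux/slab.h>	/* kzalloc(), kfree() -- no longer implicit */

    static int *alloc_counter(void)
    {
    	/* Without the explicit slab.h include above, this would now
    	 * fail to build with an implicit-declaration error for
    	 * kzalloc().
    	 */
    	return kzalloc(sizeof(int), GFP_KERNEL);
    }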
@@ -248,7 +249,7 @@ static void restart_sched(unsigned long);
  *
  * Interrupts are handled by a single CPU and it is likely that on a MP system
  * the application is migrated to another CPU. In that scenario, we try to
- * seperate the RX(in irq context) and TX state in order to decrease memory
+ * separate the RX(in irq context) and TX state in order to decrease memory
  * contention.
  */
 struct sge {
@@ -267,7 +268,7 @@ struct sge {
 	struct sk_buff *espibug_skb[MAX_NPORTS];
 	u32 sge_control;	/* shadow value of sge control reg */
 	struct sge_intr_counts stats;
-	struct sge_port_stats *port_stats[MAX_NPORTS];
+	struct sge_port_stats __percpu *port_stats[MAX_NPORTS];
 	struct sched *tx_sched;
 	struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
 };
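__percpu is a sparse annotation: it marks port_stats as a per-CPU allocation rather than an ordinary kernel pointer, so static analysis can flag direct dereferences that skip the per-CPU accessors. A hedged sketch of the matching allocation pattern — the driver's real setup lives elsewhere in sge.c, and the struct fields here are illustrative stand-ins:

    #include <linux/percpu.h>
    #include <linux/types.h>

    struct port_stats_demo {		/* stand-in for sge_port_stats */
    	u64 rx_packets;
    	u64 tx_packets;
    };

    static struct port_stats_demo __percpu *demo_stats;

    static int demo_alloc(void)
    {
    	/* One copy per possible CPU; each CPU touches only its own,
    	 * so the fast-path increments need no lock.
    	 */
    	demo_stats = alloc_percpu(struct port_stats_demo);
    	return demo_stats ? 0 : -ENOMEM;
    }

    static void demo_free(void)
    {
    	free_percpu(demo_stats);	/* releases every CPU's copy */
    }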
@@ -953,7 +954,7 @@ int t1_sge_intr_error_handler(struct sge *sge)
 		sge->stats.respQ_empty++;
 	if (cause & F_RESPQ_OVERFLOW) {
 		sge->stats.respQ_overflow++;
-		CH_ALERT("%s: SGE response queue overflow\n",
+		pr_alert("%s: SGE response queue overflow\n",
 			 adapter->name);
 	}
 	if (cause & F_FL_EXHAUSTED) {
@@ -962,12 +963,12 @@ int t1_sge_intr_error_handler(struct sge *sge)
 	}
 	if (cause & F_PACKET_TOO_BIG) {
 		sge->stats.pkt_too_big++;
-		CH_ALERT("%s: SGE max packet size exceeded\n",
+		pr_alert("%s: SGE max packet size exceeded\n",
 			 adapter->name);
 	}
 	if (cause & F_PACKET_MISMATCH) {
 		sge->stats.pkt_mismatch++;
-		CH_ALERT("%s: SGE packet mismatch\n", adapter->name);
+		pr_alert("%s: SGE packet mismatch\n", adapter->name);
 	}
 	if (cause & SGE_INT_FATAL)
 		t1_fatal_err(adapter);
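CH_ALERT() and CH_ERR() were Chelsio-private printk wrappers; pr_alert() and pr_err() are the generic kernel helpers that expand to printk(KERN_ALERT ...) and printk(KERN_ERR ...), and they honour a file-local pr_fmt() prefix when one is defined before the includes. A sketch, assuming a module that wants its name prefixed on every message:

    /* Must precede the includes so the pr_*() macros see it. */
    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

    #include <linux/kernel.h>

    static void report_overflow(const char *name)
    {
    	/* Logs at KERN_ALERT severity as, e.g.,
    	 * "sge: eth0: SGE response queue overflow".
    	 */
    	pr_alert("%s: SGE response queue overflow\n", name);
    }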
@@ -1101,7 +1102,7 @@ static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
 
 	pci_dma_sync_single_for_cpu(adapter->pdev, pci_unmap_addr(ce, dma_addr),
 			    pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
-	CH_ERR("%s: unexpected offload packet, cmd %u\n",
+	pr_err("%s: unexpected offload packet, cmd %u\n",
 	       adapter->name, *skb->data);
 	recycle_fl_buf(fl, fl->cidx);
 }
@@ -1378,7 +1379,7 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
 	}
 	__skb_pull(skb, sizeof(*p));
 
-	st = per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id());
+	st = this_cpu_ptr(sge->port_stats[p->iff]);
 
 	skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev);
 	if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
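this_cpu_ptr(ptr) is equivalent to per_cpu_ptr(ptr, smp_processor_id()) but resolves the current CPU's offset in a single step instead of a separate CPU-id lookup. Like the open-coded form, it is only valid while the caller cannot migrate between CPUs, which holds in sge_rx() because receive processing runs in interrupt/softirq context. A minimal before/after sketch with an illustrative struct, not the driver's own:

    #include <linux/percpu.h>
    #include <linux/types.h>

    struct rx_stats_demo {		/* illustrative only */
    	u64 rx_packets;
    };

    static void count_rx(struct rx_stats_demo __percpu *stats)
    {
    	struct rx_stats_demo *st;

    	/* Old form: explicit CPU id lookup plus offset arithmetic:
    	 *	st = per_cpu_ptr(stats, smp_processor_id());
    	 * New form: one step, same resulting pointer.
    	 */
    	st = this_cpu_ptr(stats);

    	st->rx_packets++;	/* caller must be non-preemptible */
    }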
@@ -1687,7 +1688,7 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
 		netif_stop_queue(dev);
 		set_bit(dev->if_port, &sge->stopped_tx_queues);
 		sge->stats.cmdQ_full[2]++;
-		CH_ERR("%s: Tx ring full while queue awake!\n",
+		pr_err("%s: Tx ring full while queue awake!\n",
 		       adapter->name);
 	}
 	spin_unlock(&q->lock);
@@ -1780,8 +1781,7 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct adapter *adapter = dev->ml_priv;
 	struct sge *sge = adapter->sge;
-	struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port],
-						smp_processor_id());
+	struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]);
 	struct cpl_tx_pkt *cpl;
 	struct sk_buff *orig_skb = skb;
 	int ret;
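The same substitution is safe in the transmit path: the core network stack invokes ndo_start_xmit() with bottom halves disabled (the normal dev_queue_xmit() path runs under rcu_read_lock_bh() and the tx queue lock), so preemption is off and the CPU cannot change between the this_cpu_ptr() lookup and the subsequent stats updates.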