Diffstat (limited to 'drivers')
-rw-r--r--   drivers/net/bonding/bonding.h |   4
-rw-r--r--   drivers/net/sky2.c            | 548
-rw-r--r--   drivers/net/sky2.h            |  62
-rw-r--r--   drivers/net/spider_net.c      |   6
-rw-r--r--   drivers/net/wan/pc300.h       |   1
5 files changed, 361 insertions, 260 deletions
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index db16fee40a5f..dc434fb6da85 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -22,8 +22,8 @@
 #include "bond_3ad.h"
 #include "bond_alb.h"
 
-#define DRV_VERSION	"3.1.0-test"
-#define DRV_RELDATE	"September 9, 2006"
+#define DRV_VERSION	"3.1.1"
+#define DRV_RELDATE	"September 26, 2006"
 #define DRV_NAME	"bonding"
 #define DRV_DESCRIPTION	"Ethernet Channel Bonding Driver"
 
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 7eeefa2d6c89..396e7df3c61b 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -50,19 +50,18 @@
 #include "sky2.h"
 
 #define DRV_NAME		"sky2"
-#define DRV_VERSION		"1.7"
+#define DRV_VERSION		"1.9"
 #define PFX			DRV_NAME " "
 
 /*
  * The Yukon II chipset takes 64 bit command blocks (called list elements)
  * that are organized into three (receive, transmit, status) different rings
- * similar to Tigon3. A transmit can require several elements;
- * a receive requires one (or two if using 64 bit dma).
+ * similar to Tigon3.
  */
 
-#define RX_LE_SIZE		512
+#define RX_LE_SIZE		1024
 #define RX_LE_BYTES		(RX_LE_SIZE*sizeof(struct sky2_rx_le))
-#define RX_MAX_PENDING		(RX_LE_SIZE/2 - 2)
+#define RX_MAX_PENDING		(RX_LE_SIZE/6 - 2)
 #define RX_DEF_PENDING		RX_MAX_PENDING
 #define RX_SKB_ALIGN		8
 #define RX_BUF_WRITE		16
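The new RX_MAX_PENDING divisor follows from worst-case list-element usage: with a jumbo MTU the receive buffer is split into a linear part plus page fragments, and every DMA segment may also need an OP_ADDR64 element when its upper address bits change. A standalone sketch of that arithmetic, assuming 4 KiB pages and a ~200-byte skb_shared_info (both stand-ins, not values taken from this patch):

#include <stdio.h>

#define PAGE_SHIFT	12
#define ETH_HLEN	14
#define VLAN_HLEN	4
#define NET_SKB_PAD	16
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned mtu = 9000;
	/* frame data + headers rounded up, as sky2_rx_start() computes it */
	unsigned size = ALIGN(mtu + ETH_HLEN + VLAN_HLEN, 8) + 8;
	/* stand-in for SKB_DATA_ALIGN(size) + sizeof(struct skb_shared_info) */
	unsigned space = ALIGN(size, 32) + NET_SKB_PAD + 200;
	unsigned nfrags = space >> PAGE_SHIFT;
	/* linear part + nfrags pages, each segment possibly preceded
	 * by an OP_ADDR64 list element */
	unsigned worst = 2 * (1 + nfrags);

	printf("size=%u nfrags=%u worst-case LEs per entry=%u\n",
	       size, nfrags, worst);
	return 0;
}

For a 9000-byte MTU this prints 6, matching the "6 list elements per ring entry" comment added to sky2_rx_start() further down.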
@@ -74,7 +73,6 @@
 
 #define STATUS_RING_SIZE	2048	/* 2 ports * (TX + 2*RX) */
 #define STATUS_LE_BYTES		(STATUS_RING_SIZE*sizeof(struct sky2_status_le))
-#define ETH_JUMBO_MTU		9000
 #define TX_WATCHDOG		(5 * HZ)
 #define NAPI_WEIGHT		64
 #define PHY_RETRIES		1000
@@ -90,7 +88,7 @@ static int debug = -1;	/* defaults above */
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 
-static int copybreak __read_mostly = 256;
+static int copybreak __read_mostly = 128;
 module_param(copybreak, int, 0);
 MODULE_PARM_DESC(copybreak, "Receive copy threshold");
 
@@ -769,9 +767,16 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
 	struct sky2_tx_le *le = sky2->tx_le + sky2->tx_prod;
 
 	sky2->tx_prod = RING_NEXT(sky2->tx_prod, TX_RING_SIZE);
+	le->ctrl = 0;
 	return le;
 }
 
+static inline struct tx_ring_info *tx_le_re(struct sky2_port *sky2,
+					    struct sky2_tx_le *le)
+{
+	return sky2->tx_ring + (le - sky2->tx_le);
+}
+
 /* Update chip's next pointer */
 static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
 {
@@ -786,6 +791,7 @@ static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
 {
 	struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put;
 	sky2->rx_put = RING_NEXT(sky2->rx_put, RX_LE_SIZE);
+	le->ctrl = 0;
 	return le;
 }
 
@@ -795,17 +801,16 @@ static inline u32 high32(dma_addr_t a)
 	return sizeof(a) > sizeof(u32) ? (a >> 16) >> 16 : 0;
 }
 
-/* Build description to hardware about buffer */
-static void sky2_rx_add(struct sky2_port *sky2, dma_addr_t map)
+/* Build description to hardware for one receive segment */
+static void sky2_rx_add(struct sky2_port *sky2, u8 op,
+			dma_addr_t map, unsigned len)
 {
 	struct sky2_rx_le *le;
 	u32 hi = high32(map);
-	u16 len = sky2->rx_bufsize;
 
 	if (sky2->rx_addr64 != hi) {
 		le = sky2_next_rx(sky2);
 		le->addr = cpu_to_le32(hi);
-		le->ctrl = 0;
 		le->opcode = OP_ADDR64 | HW_OWNER;
 		sky2->rx_addr64 = high32(map + len);
 	}
@@ -813,11 +818,53 @@ static void sky2_rx_add(struct sky2_port *sky2, u8 op,
 	le = sky2_next_rx(sky2);
 	le->addr = cpu_to_le32((u32) map);
 	le->length = cpu_to_le16(len);
-	le->ctrl = 0;
-	le->opcode = OP_PACKET | HW_OWNER;
+	le->opcode = op | HW_OWNER;
+}
+
+/* Build description to hardware for one possibly fragmented skb */
+static void sky2_rx_submit(struct sky2_port *sky2,
+			   const struct rx_ring_info *re)
+{
+	int i;
+
+	sky2_rx_add(sky2, OP_PACKET, re->data_addr, sky2->rx_data_size);
+
+	for (i = 0; i < skb_shinfo(re->skb)->nr_frags; i++)
+		sky2_rx_add(sky2, OP_BUFFER, re->frag_addr[i], PAGE_SIZE);
 }
 
 
+static void sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
+			    unsigned size)
+{
+	struct sk_buff *skb = re->skb;
+	int i;
+
+	re->data_addr = pci_map_single(pdev, skb->data, size, PCI_DMA_FROMDEVICE);
+	pci_unmap_len_set(re, data_size, size);
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+		re->frag_addr[i] = pci_map_page(pdev,
+						skb_shinfo(skb)->frags[i].page,
+						skb_shinfo(skb)->frags[i].page_offset,
+						skb_shinfo(skb)->frags[i].size,
+						PCI_DMA_FROMDEVICE);
+}
+
+static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re)
+{
+	struct sk_buff *skb = re->skb;
+	int i;
+
+	pci_unmap_single(pdev, re->data_addr, pci_unmap_len(re, data_size),
+			 PCI_DMA_FROMDEVICE);
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+		pci_unmap_page(pdev, re->frag_addr[i],
+			       skb_shinfo(skb)->frags[i].size,
+			       PCI_DMA_FROMDEVICE);
+}
+
 /* Tell chip where to start receive checksum.
  * Actually has two checksums, but set both same to avoid possible byte
  * order problems.
@@ -877,12 +924,10 @@ static void sky2_rx_clean(struct sky2_port *sky2)
 
 	memset(sky2->rx_le, 0, RX_LE_BYTES);
 	for (i = 0; i < sky2->rx_pending; i++) {
-		struct ring_info *re = sky2->rx_ring + i;
+		struct rx_ring_info *re = sky2->rx_ring + i;
 
 		if (re->skb) {
-			pci_unmap_single(sky2->hw->pdev,
-					 re->mapaddr, sky2->rx_bufsize,
-					 PCI_DMA_FROMDEVICE);
+			sky2_rx_unmap_skb(sky2->hw->pdev, re);
 			kfree_skb(re->skb);
 			re->skb = NULL;
 		}
@@ -936,13 +981,13 @@ static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
 	struct sky2_hw *hw = sky2->hw;
 	u16 port = sky2->port;
 
-	spin_lock_bh(&sky2->tx_lock);
+	netif_tx_lock_bh(dev);
 
 	sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_ON);
 	sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_ON);
 	sky2->vlgrp = grp;
 
-	spin_unlock_bh(&sky2->tx_lock);
+	netif_tx_unlock_bh(dev);
 }
 
 static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
@@ -951,50 +996,69 @@ static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 	struct sky2_hw *hw = sky2->hw;
 	u16 port = sky2->port;
 
-	spin_lock_bh(&sky2->tx_lock);
+	netif_tx_lock_bh(dev);
 
 	sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF);
 	sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF);
 	if (sky2->vlgrp)
 		sky2->vlgrp->vlan_devices[vid] = NULL;
 
-	spin_unlock_bh(&sky2->tx_lock);
+	netif_tx_unlock_bh(dev);
 }
 #endif
 
 /*
+ * Allocate an skb for receiving. If the MTU is large enough
+ * make the skb non-linear with a fragment list of pages.
+ *
  * It appears the hardware has a bug in the FIFO logic that
  * cause it to hang if the FIFO gets overrun and the receive buffer
  * is not 64 byte aligned. The buffer returned from netdev_alloc_skb is
  * aligned except if slab debugging is enabled.
  */
-static inline struct sk_buff *sky2_alloc_skb(struct net_device *dev,
-					     unsigned int length,
-					     gfp_t gfp_mask)
+static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2)
 {
 	struct sk_buff *skb;
+	unsigned long p;
+	int i;
 
-	skb = __netdev_alloc_skb(dev, length + RX_SKB_ALIGN, gfp_mask);
-	if (likely(skb)) {
-		unsigned long p	= (unsigned long) skb->data;
-		skb_reserve(skb, ALIGN(p, RX_SKB_ALIGN) - p);
+	skb = netdev_alloc_skb(sky2->netdev, sky2->rx_data_size + RX_SKB_ALIGN);
+	if (!skb)
+		goto nomem;
+
+	p = (unsigned long) skb->data;
+	skb_reserve(skb, ALIGN(p, RX_SKB_ALIGN) - p);
+
+	for (i = 0; i < sky2->rx_nfrags; i++) {
+		struct page *page = alloc_page(GFP_ATOMIC);
+
+		if (!page)
+			goto free_partial;
+		skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
 	}
 
 	return skb;
+free_partial:
+	kfree_skb(skb);
+nomem:
+	return NULL;
 }
 
 /*
  * Allocate and setup receiver buffer pool.
- * In case of 64 bit dma, there are 2X as many list elements
- * available as ring entries
- * and need to reserve one list element so we don't wrap around.
+ * Normal case this ends up creating one list element for skb
+ * in the receive ring. Worst case if using large MTU and each
+ * allocation falls on a different 64 bit region, that results
+ * in 6 list elements per ring entry.
+ * One element is used for checksum enable/disable, and one
+ * extra to avoid wrap.
 */
 static int sky2_rx_start(struct sky2_port *sky2)
 {
 	struct sky2_hw *hw = sky2->hw;
+	struct rx_ring_info *re;
 	unsigned rxq = rxqaddr[sky2->port];
-	int i;
-	unsigned thresh;
+	unsigned i, size, space, thresh;
 
 	sky2->rx_put = sky2->rx_next = 0;
 	sky2_qset(hw, rxq);
@@ -1007,27 +1071,56 @@ static int sky2_rx_start(struct sky2_port *sky2)
 	sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);
 
 	rx_set_checksum(sky2);
+
+	/* Space needed for frame data + headers rounded up */
+	size = ALIGN(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8)
+		+ 8;
+
+	/* Stopping point for hardware truncation */
+	thresh = (size - 8) / sizeof(u32);
+
+	/* Account for overhead of skb - to avoid order > 0 allocation */
+	space = SKB_DATA_ALIGN(size) + NET_SKB_PAD
+		+ sizeof(struct skb_shared_info);
+
+	sky2->rx_nfrags = space >> PAGE_SHIFT;
+	BUG_ON(sky2->rx_nfrags > ARRAY_SIZE(re->frag_addr));
+
+	if (sky2->rx_nfrags != 0) {
+		/* Compute residue after pages */
+		space = sky2->rx_nfrags << PAGE_SHIFT;
+
+		if (space < size)
+			size -= space;
+		else
+			size = 0;
+
+		/* Optimize to handle small packets and headers */
+		if (size < copybreak)
+			size = copybreak;
+		if (size < ETH_HLEN)
+			size = ETH_HLEN;
+	}
+	sky2->rx_data_size = size;
+
+	/* Fill Rx ring */
 	for (i = 0; i < sky2->rx_pending; i++) {
-		struct ring_info *re = sky2->rx_ring + i;
+		re = sky2->rx_ring + i;
 
-		re->skb = sky2_alloc_skb(sky2->netdev, sky2->rx_bufsize,
-					 GFP_KERNEL);
+		re->skb = sky2_rx_alloc(sky2);
 		if (!re->skb)
 			goto nomem;
 
-		re->mapaddr = pci_map_single(hw->pdev, re->skb->data,
-					     sky2->rx_bufsize, PCI_DMA_FROMDEVICE);
-		sky2_rx_add(sky2, re->mapaddr);
+		sky2_rx_map_skb(hw->pdev, re, sky2->rx_data_size);
+		sky2_rx_submit(sky2, re);
 	}
 
-
 	/*
 	 * The receiver hangs if it receives frames larger than the
 	 * packet buffer. As a workaround, truncate oversize frames, but
 	 * the register is limited to 9 bits, so if you do frames > 2052
 	 * you better get the MTU right!
 	 */
-	thresh = (sky2->rx_bufsize - 8) / sizeof(u32);
 	if (thresh > 0x1ff)
 		sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_OFF);
 	else {
@@ -1035,7 +1128,6 @@ static int sky2_rx_start(struct sky2_port *sky2)
 		sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_ON);
 	}
 
-
 	/* Tell chip about available buffers */
 	sky2_write16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX), sky2->rx_put);
 	return 0;
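The 2052-byte figure in the truncation comment above follows directly from the 9-bit register: it stores (size - 8)/sizeof(u32), so the largest representable frame is 0x1ff * 4 + 8. A trivial standalone check:

#include <stdio.h>

int main(void)
{
	unsigned max_thresh = 0x1ff;	/* 9-bit hardware field */

	/* threshold counts 4-byte (u32) units past the 8-byte offset */
	printf("largest truncatable frame = %u bytes\n",
	       max_thresh * 4 + 8);	/* prints 2052 */
	return 0;
}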
@@ -1094,7 +1186,7 @@ static int sky2_up(struct net_device *dev)
 		goto err_out;
 	memset(sky2->rx_le, 0, RX_LE_BYTES);
 
-	sky2->rx_ring = kcalloc(sky2->rx_pending, sizeof(struct ring_info),
+	sky2->rx_ring = kcalloc(sky2->rx_pending, sizeof(struct rx_ring_info),
 				GFP_KERNEL);
 	if (!sky2->rx_ring)
 		goto err_out;
@@ -1124,7 +1216,8 @@ static int sky2_up(struct net_device *dev)
 	sky2_qset(hw, txqaddr[port]);
 
 	/* Set almost empty threshold */
-	if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev == 1)
+	if (hw->chip_id == CHIP_ID_YUKON_EC_U
+	    && hw->chip_rev == CHIP_REV_YU_EC_U_A0)
 		sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), 0x1a0);
 
 	sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
@@ -1195,8 +1288,6 @@ static unsigned tx_le_req(const struct sk_buff *skb)
 * A single packet can generate multiple list elements, and
 * the number of ring elements will probably be less than the number
 * of list elements used.
- *
- * No BH disabling for tx_lock here (like tg3)
 */
static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
{
@@ -1210,27 +1301,8 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 	u16 mss;
 	u8 ctrl;
 
-	/* No BH disabling for tx_lock here. We are running in BH disabled
-	 * context and TX reclaim runs via poll inside of a software
-	 * interrupt, and no related locks in IRQ processing.
-	 */
-	if (!spin_trylock(&sky2->tx_lock))
-		return NETDEV_TX_LOCKED;
-
-	if (unlikely(tx_avail(sky2) < tx_le_req(skb))) {
-		/* There is a known but harmless race with lockless tx
-		 * and netif_stop_queue.
-		 */
-		if (!netif_queue_stopped(dev)) {
-			netif_stop_queue(dev);
-			if (net_ratelimit())
-				printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
-				       dev->name);
-		}
-		spin_unlock(&sky2->tx_lock);
-
-		return NETDEV_TX_BUSY;
-	}
+	if (unlikely(tx_avail(sky2) < tx_le_req(skb)))
+		return NETDEV_TX_BUSY;
 
 	if (unlikely(netif_msg_tx_queued(sky2)))
 		printk(KERN_DEBUG "%s: tx queued, slot %u, len %d\n",
@@ -1240,13 +1312,10 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 	mapping = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
 	addr64 = high32(mapping);
 
-	re = sky2->tx_ring + sky2->tx_prod;
-
 	/* Send high bits if changed or crosses boundary */
 	if (addr64 != sky2->tx_addr64 || high32(mapping + len) != sky2->tx_addr64) {
 		le = get_tx_le(sky2);
 		le->addr = cpu_to_le32(addr64);
-		le->ctrl = 0;
 		le->opcode = OP_ADDR64 | HW_OWNER;
 		sky2->tx_addr64 = high32(mapping + len);
 	}
@@ -1262,7 +1331,6 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 			le = get_tx_le(sky2);
 			le->addr = cpu_to_le32(mss);
 			le->opcode = OP_LRGLEN | HW_OWNER;
-			le->ctrl = 0;
 			sky2->tx_last_mss = mss;
 		}
 	}
@@ -1275,7 +1343,6 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 			le = get_tx_le(sky2);
 			le->addr = 0;
 			le->opcode = OP_VLAN|HW_OWNER;
-			le->ctrl = 0;
 		} else
 			le->opcode |= OP_VLAN;
 		le->length = cpu_to_be16(vlan_tx_tag_get(skb));
@@ -1312,13 +1379,13 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 	le->ctrl = ctrl;
 	le->opcode = mss ? (OP_LARGESEND | HW_OWNER) : (OP_PACKET | HW_OWNER);
 
-	/* Record the transmit mapping info */
+	re = tx_le_re(sky2, le);
 	re->skb = skb;
 	pci_unmap_addr_set(re, mapaddr, mapping);
+	pci_unmap_len_set(re, maplen, len);
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-		struct tx_ring_info *fre;
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 		mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset,
 				       frag->size, PCI_DMA_TODEVICE);
@@ -1337,12 +1404,12 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 		le->ctrl = ctrl;
 		le->opcode = OP_BUFFER | HW_OWNER;
 
-		fre = sky2->tx_ring
-			+ RING_NEXT((re - sky2->tx_ring) + i, TX_RING_SIZE);
-		pci_unmap_addr_set(fre, mapaddr, mapping);
+		re = tx_le_re(sky2, le);
+		re->skb = skb;
+		pci_unmap_addr_set(re, mapaddr, mapping);
+		pci_unmap_len_set(re, maplen, frag->size);
 	}
 
-	re->idx = sky2->tx_prod;
 	le->ctrl |= EOP;
 
 	if (tx_avail(sky2) <= MAX_SKB_TX_LE)
@@ -1350,8 +1417,6 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 
 	sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);
 
-	spin_unlock(&sky2->tx_lock);
-
 	dev->trans_start = jiffies;
 	return NETDEV_TX_OK;
 }
@@ -1360,59 +1425,59 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 * Free ring elements from starting at tx_cons until "done"
 *
 * NB: the hardware will tell us about partial completion of multi-part
- * buffers; these are deferred until completion.
+ * buffers so make sure not to free skb to early.
 */
static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
{
 	struct net_device *dev = sky2->netdev;
 	struct pci_dev *pdev = sky2->hw->pdev;
-	u16 nxt, put;
-	unsigned i;
+	unsigned idx;
 
 	BUG_ON(done >= TX_RING_SIZE);
 
-	if (unlikely(netif_msg_tx_done(sky2)))
-		printk(KERN_DEBUG "%s: tx done, up to %u\n",
-		       dev->name, done);
-
-	for (put = sky2->tx_cons; put != done; put = nxt) {
-		struct tx_ring_info *re = sky2->tx_ring + put;
-		struct sk_buff *skb = re->skb;
-
-		nxt = re->idx;
-		BUG_ON(nxt >= TX_RING_SIZE);
-		prefetch(sky2->tx_ring + nxt);
-
-		/* Check for partial status */
-		if (tx_dist(put, done) < tx_dist(put, nxt))
+	for (idx = sky2->tx_cons; idx != done;
+	     idx = RING_NEXT(idx, TX_RING_SIZE)) {
+		struct sky2_tx_le *le = sky2->tx_le + idx;
+		struct tx_ring_info *re = sky2->tx_ring + idx;
+
+		switch(le->opcode & ~HW_OWNER) {
+		case OP_LARGESEND:
+		case OP_PACKET:
+			pci_unmap_single(pdev,
+					 pci_unmap_addr(re, mapaddr),
+					 pci_unmap_len(re, maplen),
+					 PCI_DMA_TODEVICE);
 			break;
-
-		skb = re->skb;
-		pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr),
-				 skb_headlen(skb), PCI_DMA_TODEVICE);
-
-		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-			struct tx_ring_info *fre;
-			fre = sky2->tx_ring + RING_NEXT(put + i, TX_RING_SIZE);
-			pci_unmap_page(pdev, pci_unmap_addr(fre, mapaddr),
-				       skb_shinfo(skb)->frags[i].size,
+		case OP_BUFFER:
+			pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr),
+				       pci_unmap_len(re, maplen),
 				       PCI_DMA_TODEVICE);
+			break;
 		}
 
-		dev_kfree_skb(skb);
+		if (le->ctrl & EOP) {
+			if (unlikely(netif_msg_tx_done(sky2)))
+				printk(KERN_DEBUG "%s: tx done %u\n",
+				       dev->name, idx);
+			dev_kfree_skb(re->skb);
+		}
+
+		le->opcode = 0;	/* paranoia */
 	}
 
-	sky2->tx_cons = put;
+	sky2->tx_cons = idx;
 	if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
 		netif_wake_queue(dev);
}
 
/* Cleanup all untransmitted buffers, assume transmitter not running */
-static void sky2_tx_clean(struct sky2_port *sky2)
+static void sky2_tx_clean(struct net_device *dev)
{
-	spin_lock_bh(&sky2->tx_lock);
+	struct sky2_port *sky2 = netdev_priv(dev);
+
+	netif_tx_lock_bh(dev);
 	sky2_tx_complete(sky2, sky2->tx_prod);
-	spin_unlock_bh(&sky2->tx_lock);
+	netif_tx_unlock_bh(dev);
}
 
/* Network shutdown */
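The reworked completion loop walks every list element, keys the unmap off the stored opcode, and frees the skb only on the element that carries EOP; the old code instead stashed the index of the next frame in re->idx, which required per-skb bookkeeping across slots. A toy model of the new pattern (ring contents invented, not driver code):

#include <stdio.h>

#define RING 8
#define EOP  0x80

struct le { unsigned char ctrl; int skb_id; };

int main(void)
{
	/* frame 1 occupies slots 0-2 (EOP on slot 2), frame 2 slot 3 */
	struct le ring[RING] = {
		{0, 1}, {0, 1}, {EOP, 1}, {EOP, 2},
	};
	unsigned cons = 0, done = 4;

	for (; cons != done; cons = (cons + 1) % RING) {
		/* a per-element unmap would happen here regardless */
		if (ring[cons].ctrl & EOP)
			printf("free skb %d at slot %u\n",
			       ring[cons].skb_id, cons);
	}
	return 0;
}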
@@ -1443,6 +1508,13 @@ static int sky2_down(struct net_device *dev)
 	sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
 		     RB_RST_SET | RB_DIS_OP_MD);
 
+	/* WA for dev. #4.209 */
+	if (hw->chip_id == CHIP_ID_YUKON_EC_U
+	    && hw->chip_rev == CHIP_REV_YU_EC_U_A1)
+		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
+			     sky2->speed != SPEED_1000 ?
+			     TX_STFW_ENA : TX_STFW_DIS);
+
 	ctrl = gma_read16(hw, port, GM_GP_CTRL);
 	ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA);
 	gma_write16(hw, port, GM_GP_CTRL, ctrl);
@@ -1489,7 +1561,7 @@ static int sky2_down(struct net_device *dev)
 
 	synchronize_irq(hw->pdev->irq);
 
-	sky2_tx_clean(sky2);
+	sky2_tx_clean(dev);
 	sky2_rx_clean(sky2);
 
 	pci_free_consistent(hw->pdev, RX_LE_BYTES,
@@ -1624,22 +1696,33 @@ static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
 		return -1;
 	}
 
-	if (hw->chip_id != CHIP_ID_YUKON_FE &&
-	    gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) {
-		printk(KERN_ERR PFX "%s: master/slave fault",
-		       sky2->netdev->name);
-		return -1;
-	}
-
 	if (!(aux & PHY_M_PS_SPDUP_RES)) {
 		printk(KERN_ERR PFX "%s: speed/duplex mismatch",
 		       sky2->netdev->name);
 		return -1;
 	}
 
-	sky2->duplex = (aux & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
-
 	sky2->speed = sky2_phy_speed(hw, aux);
+	if (sky2->speed == SPEED_1000) {
+		u16 ctl2 = gm_phy_read(hw, port, PHY_MARV_1000T_CTRL);
+		u16 lpa2 = gm_phy_read(hw, port, PHY_MARV_1000T_STAT);
+		if (lpa2 & PHY_B_1000S_MSF) {
+			printk(KERN_ERR PFX "%s: master/slave fault",
+			       sky2->netdev->name);
+			return -1;
+		}
+
+		if ((ctl2 & PHY_M_1000C_AFD) && (lpa2 & PHY_B_1000S_LP_FD))
+			sky2->duplex = DUPLEX_FULL;
+		else
+			sky2->duplex = DUPLEX_HALF;
+	} else {
+		u16 adv = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV);
+		if ((aux & adv) & PHY_AN_FULL)
+			sky2->duplex = DUPLEX_FULL;
+		else
+			sky2->duplex = DUPLEX_HALF;
+	}
 
 	/* Pause bits are offset (9..8) */
 	if (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)
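Duplex is now resolved from what both ends actually negotiated rather than from the PHY's resolved-duplex status bit: at gigabit the 1000BASE-T control/status register pair decides, otherwise the ANDed autoneg advertisement does. A minimal sketch of the gigabit case -- the bit values are quoted from the Marvell PHY definitions as assumptions, not taken from this diff:

#include <stdio.h>

#define PHY_M_1000C_AFD   0x0200  /* we advertise 1000BASE-T FD (assumed bit) */
#define PHY_B_1000S_LP_FD 0x0800  /* link partner offers 1000FD (assumed bit) */

int main(void)
{
	unsigned ctl2 = PHY_M_1000C_AFD;   /* our 1000T control register */
	unsigned lpa2 = PHY_B_1000S_LP_FD; /* partner's 1000T status */
	int full = (ctl2 & PHY_M_1000C_AFD) && (lpa2 & PHY_B_1000S_LP_FD);

	printf("1000 Mb/s duplex = %s\n", full ? "full" : "half");
	return 0;
}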
@@ -1730,31 +1813,22 @@ static void sky2_tx_timeout(struct net_device *dev)
 	} else if (report != sky2->tx_cons) {
 		printk(KERN_INFO PFX "status report lost?\n");
 
-		spin_lock_bh(&sky2->tx_lock);
+		netif_tx_lock_bh(dev);
 		sky2_tx_complete(sky2, report);
-		spin_unlock_bh(&sky2->tx_lock);
+		netif_tx_unlock_bh(dev);
 	} else {
 		printk(KERN_INFO PFX "hardware hung? flushing\n");
 
 		sky2_write32(hw, Q_ADDR(txq, Q_CSR), BMU_STOP);
 		sky2_write32(hw, Y2_QADDR(txq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
 
-		sky2_tx_clean(sky2);
+		sky2_tx_clean(dev);
 
 		sky2_qset(hw, txq);
 		sky2_prefetch_init(hw, txq, sky2->tx_le_map, TX_RING_SIZE - 1);
 	}
}
 
-
-/* Want receive buffer size to be multiple of 64 bits
- * and incl room for vlan and truncation
- */
-static inline unsigned sky2_buf_size(int mtu)
-{
-	return ALIGN(mtu + ETH_HLEN + VLAN_HLEN, 8) + 8;
-}
-
static int sky2_change_mtu(struct net_device *dev, int new_mtu)
{
 	struct sky2_port *sky2 = netdev_priv(dev);
@@ -1789,7 +1863,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
 	sky2_rx_clean(sky2);
 
 	dev->mtu = new_mtu;
-	sky2->rx_bufsize = sky2_buf_size(new_mtu);
+
 	mode = DATA_BLIND_VAL(DATA_BLIND_DEF) |
 		GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
 
@@ -1815,16 +1889,100 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
 	return err;
}
 
+/* For small just reuse existing skb for next receive */
+static struct sk_buff *receive_copy(struct sky2_port *sky2,
+				    const struct rx_ring_info *re,
+				    unsigned length)
+{
+	struct sk_buff *skb;
+
+	skb = netdev_alloc_skb(sky2->netdev, length + 2);
+	if (likely(skb)) {
+		skb_reserve(skb, 2);
+		pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr,
+					    length, PCI_DMA_FROMDEVICE);
+		memcpy(skb->data, re->skb->data, length);
+		skb->ip_summed = re->skb->ip_summed;
+		skb->csum = re->skb->csum;
+		pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
+					       length, PCI_DMA_FROMDEVICE);
+		re->skb->ip_summed = CHECKSUM_NONE;
+		__skb_put(skb, length);
+	}
+	return skb;
+}
+
+/* Adjust length of skb with fragments to match received data */
+static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
+			  unsigned int length)
+{
+	int i, num_frags;
+	unsigned int size;
+
+	/* put header into skb */
+	size = min(length, hdr_space);
+	skb->tail += size;
+	skb->len += size;
+	length -= size;
+
+	num_frags = skb_shinfo(skb)->nr_frags;
+	for (i = 0; i < num_frags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		if (length == 0) {
+			/* don't need this page */
+			__free_page(frag->page);
+			--skb_shinfo(skb)->nr_frags;
+		} else {
+			size = min(length, (unsigned) PAGE_SIZE);
+
+			frag->size = size;
+			skb->data_len += size;
+			skb->truesize += size;
+			skb->len += size;
+			length -= size;
+		}
+	}
+}
+
+/* Normal packet - take skb from ring element and put in a new one */
+static struct sk_buff *receive_new(struct sky2_port *sky2,
+				   struct rx_ring_info *re,
+				   unsigned int length)
+{
+	struct sk_buff *skb, *nskb;
+	unsigned hdr_space = sky2->rx_data_size;
+
+	pr_debug(PFX "receive new length=%d\n", length);
+
+	/* Don't be tricky about reusing pages (yet) */
+	nskb = sky2_rx_alloc(sky2);
+	if (unlikely(!nskb))
+		return NULL;
+
+	skb = re->skb;
+	sky2_rx_unmap_skb(sky2->hw->pdev, re);
+
+	prefetch(skb->data);
+	re->skb = nskb;
+	sky2_rx_map_skb(sky2->hw->pdev, re, hdr_space);
+
+	if (skb_shinfo(skb)->nr_frags)
+		skb_put_frags(skb, hdr_space, length);
+	else
+		skb_put(skb, hdr_space);
+	return skb;
+}
+
/*
 * Receive one packet.
- * For small packets or errors, just reuse existing skb.
 * For larger packets, get new buffer.
 */
static struct sk_buff *sky2_receive(struct net_device *dev,
				    u16 length, u32 status)
{
 	struct sky2_port *sky2 = netdev_priv(dev);
-	struct ring_info *re = sky2->rx_ring + sky2->rx_next;
+	struct rx_ring_info *re = sky2->rx_ring + sky2->rx_next;
 	struct sk_buff *skb = NULL;
 
 	if (unlikely(netif_msg_rx_status(sky2)))
@@ -1843,40 +2001,12 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
 	if (length > dev->mtu + ETH_HLEN)
 		goto oversize;
 
-	if (length < copybreak) {
-		skb = netdev_alloc_skb(dev, length + 2);
-		if (!skb)
-			goto resubmit;
-
-		skb_reserve(skb, 2);
-		pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->mapaddr,
-					    length, PCI_DMA_FROMDEVICE);
-		memcpy(skb->data, re->skb->data, length);
-		skb->ip_summed = re->skb->ip_summed;
-		skb->csum = re->skb->csum;
-		pci_dma_sync_single_for_device(sky2->hw->pdev, re->mapaddr,
-					       length, PCI_DMA_FROMDEVICE);
-	} else {
-		struct sk_buff *nskb;
-
-		nskb = sky2_alloc_skb(dev, sky2->rx_bufsize, GFP_ATOMIC);
-		if (!nskb)
-			goto resubmit;
-
-		skb = re->skb;
-		re->skb = nskb;
-		pci_unmap_single(sky2->hw->pdev, re->mapaddr,
-				 sky2->rx_bufsize, PCI_DMA_FROMDEVICE);
-		prefetch(skb->data);
-
-		re->mapaddr = pci_map_single(sky2->hw->pdev, nskb->data,
-					     sky2->rx_bufsize, PCI_DMA_FROMDEVICE);
-	}
-
-	skb_put(skb, length);
resubmit:
-	re->skb->ip_summed = CHECKSUM_NONE;
-	sky2_rx_add(sky2, re->mapaddr);
+	sky2_rx_submit(sky2, re);
 
 	return skb;
 
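Together with receive_copy()/receive_new() above, the receive path now makes a copy-versus-swap decision at copybreak (its default was lowered to 128 earlier in this patch): small frames are memcpy'd so the already-mapped ring skb can be resubmitted untouched, while larger frames hand their skb up the stack and a fresh one is mapped in its place. A toy decision sketch, not driver code:

#include <stdio.h>

#define COPYBREAK 128	/* mirrors the module parameter default */

static const char *receive(unsigned len)
{
	return len < COPYBREAK ? "copy into new skb, reuse ring buffer"
			       : "hand off ring skb, map a fresh one";
}

int main(void)
{
	printf("64 byte frame:   %s\n", receive(64));
	printf("1500 byte frame: %s\n", receive(1500));
	return 0;
}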
@@ -1909,9 +2039,9 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
 	struct sky2_port *sky2 = netdev_priv(dev);
 
 	if (netif_running(dev)) {
-		spin_lock(&sky2->tx_lock);
+		netif_tx_lock(dev);
 		sky2_tx_complete(sky2, last);
-		spin_unlock(&sky2->tx_lock);
+		netif_tx_unlock(dev);
 	}
}
 
@@ -2082,7 +2212,7 @@ static void sky2_hw_intr(struct sky2_hw *hw)
 
 		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 		sky2_pci_write16(hw, PCI_STATUS,
-				      pci_err | PCI_STATUS_ERROR_BITS);
+				 pci_err | PCI_STATUS_ERROR_BITS);
 		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 	}
 
@@ -2090,7 +2220,8 @@ static void sky2_hw_intr(struct sky2_hw *hw)
 		/* PCI-Express uncorrectable Error occurred */
 		u32 pex_err;
 
-		pex_err = sky2_pci_read32(hw, PEX_UNC_ERR_STAT);
+		pex_err = sky2_pci_read32(hw,
+					  hw->err_cap + PCI_ERR_UNCOR_STATUS);
 
 		if (net_ratelimit())
 			printk(KERN_ERR PFX "%s: pci express error (0x%x)\n",
@@ -2098,15 +2229,20 @@ static void sky2_hw_intr(struct sky2_hw *hw)
 
 		/* clear the interrupt */
 		sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
-		sky2_pci_write32(hw, PEX_UNC_ERR_STAT,
-				 0xffffffffUL);
+		sky2_pci_write32(hw,
+				 hw->err_cap + PCI_ERR_UNCOR_STATUS,
+				 0xffffffffUL);
 		sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 
-		if (pex_err & PEX_FATAL_ERRORS) {
+
+		/* In case of fatal error mask off to keep from getting stuck */
+		if (pex_err & (PCI_ERR_UNC_POISON_TLP | PCI_ERR_UNC_FCP
+			       | PCI_ERR_UNC_DLP)) {
 			u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK);
 			hwmsk &= ~Y2_IS_PCI_EXP;
 			sky2_write32(hw, B0_HWE_IMSK, hwmsk);
 		}
+
 	}
 
 	if (status & Y2_HWE_L1_MASK)
@@ -2287,6 +2423,7 @@ static int sky2_reset(struct sky2_hw *hw)
 	u16 status;
 	u8 t8;
 	int i;
+	u32 msk;
 
 	sky2_write8(hw, B0_CTST, CS_RST_CLR);
 
@@ -2327,9 +2464,13 @@ static int sky2_reset(struct sky2_hw *hw)
 	sky2_write8(hw, B0_CTST, CS_MRST_CLR);
 
 	/* clear any PEX errors */
-	if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP))
-		sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL);
-
+	if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP)) {
+		hw->err_cap = pci_find_ext_capability(hw->pdev, PCI_EXT_CAP_ID_ERR);
+		if (hw->err_cap)
+			sky2_pci_write32(hw,
+					 hw->err_cap + PCI_ERR_UNCOR_STATUS,
+					 0xffffffffUL);
+	}
 
 	hw->pmd_type = sky2_read8(hw, B2_PMD_TYP);
 	hw->ports = 1;
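The old fixed PEX_UNC_ERR_STAT offset (0x104) only worked because AER happened to be the first PCIe extended capability (at 0x100) on these chips, with the uncorrectable-status register 4 bytes in. The lookup now goes through pci_find_ext_capability(); the sketch below walks a made-up config-space snapshot the way that helper does (header layout: bits 15:0 capability id, 19:16 version, 31:20 next pointer):

#include <stdio.h>
#include <stdint.h>

#define PCI_EXT_CAP_ID_ERR 0x0001	/* AER */

static uint32_t config_read32(const uint32_t *cfg, unsigned off)
{
	return cfg[off / 4];
}

int main(void)
{
	uint32_t cfg[1024 / 4] = {0};
	/* fake device: AER at 0x100, version 1, no next capability */
	cfg[0x100 / 4] = (1 << 16) | PCI_EXT_CAP_ID_ERR;

	unsigned pos = 0x100;
	while (pos) {
		uint32_t hdr = config_read32(cfg, pos);
		if ((hdr & 0xffff) == PCI_EXT_CAP_ID_ERR) {
			/* PCI_ERR_UNCOR_STATUS lives at pos + 0x04 */
			printf("AER capability at 0x%x\n", pos);
			break;
		}
		pos = hdr >> 20;	/* next capability pointer */
	}
	return 0;
}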
@@ -2386,7 +2527,10 @@ static int sky2_reset(struct sky2_hw *hw)
 		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53);
 	}
 
-	sky2_write32(hw, B0_HWE_IMSK, Y2_HWE_ALL_MASK);
+	msk = Y2_HWE_ALL_MASK;
+	if (!hw->err_cap)
+		msk &= ~Y2_IS_PCI_EXP;
+	sky2_write32(hw, B0_HWE_IMSK, msk);
 
 	for (i = 0; i < hw->ports; i++)
 		sky2_gmac_reset(hw, i);
@@ -3102,7 +3246,6 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
 	sky2->hw = hw;
 	sky2->msg_enable = netif_msg_init(debug, default_msg);
 
-	spin_lock_init(&sky2->tx_lock);
 	/* Auto speed and flow control */
 	sky2->autoneg = AUTONEG_ENABLE;
 	sky2->tx_pause = 1;
@@ -3115,13 +3258,11 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
 	spin_lock_init(&sky2->phy_lock);
 	sky2->tx_pending = TX_DEF_PENDING;
 	sky2->rx_pending = RX_DEF_PENDING;
-	sky2->rx_bufsize = sky2_buf_size(ETH_DATA_LEN);
 
 	hw->dev[port] = dev;
 
 	sky2->port = port;
 
-	dev->features |= NETIF_F_LLTX;
 	if (hw->chip_id != CHIP_ID_YUKON_EC_U)
 		dev->features |= NETIF_F_TSO;
 	if (highmem)
@@ -3316,6 +3457,14 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
 	if (!dev)
 		goto err_out_free_pci;
 
+	if (!disable_msi && pci_enable_msi(pdev) == 0) {
+		err = sky2_test_msi(hw);
+		if (err == -EOPNOTSUPP)
+			pci_disable_msi(pdev);
+		else if (err)
+			goto err_out_free_netdev;
+	}
+
 	err = register_netdev(dev);
 	if (err) {
 		printk(KERN_ERR PFX "%s: cannot register net device\n",
@@ -3323,6 +3472,14 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
 		goto err_out_free_netdev;
 	}
 
+	err = request_irq(pdev->irq, sky2_intr, IRQF_SHARED, dev->name, hw);
+	if (err) {
+		printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
+		       pci_name(pdev), pdev->irq);
+		goto err_out_unregister;
+	}
+	sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
+
 	sky2_show_addr(dev);
 
 	if (hw->ports > 1 && (dev1 = sky2_init_netdev(hw, 1, using_dac))) {
@@ -3337,23 +3494,6 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
 		}
 	}
 
-	if (!disable_msi && pci_enable_msi(pdev) == 0) {
-		err = sky2_test_msi(hw);
-		if (err == -EOPNOTSUPP)
-			pci_disable_msi(pdev);
-		else if (err)
-			goto err_out_unregister;
-	}
-
-	err = request_irq(pdev->irq, sky2_intr, IRQF_SHARED, DRV_NAME, hw);
-	if (err) {
-		printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
-		       pci_name(pdev), pdev->irq);
-		goto err_out_unregister;
-	}
-
-	sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
-
 	setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) hw);
 	sky2_idle_start(hw);
 
3359 3499
@@ -3363,10 +3503,6 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
3363 3503
3364err_out_unregister: 3504err_out_unregister:
3365 pci_disable_msi(pdev); 3505 pci_disable_msi(pdev);
3366 if (dev1) {
3367 unregister_netdev(dev1);
3368 free_netdev(dev1);
3369 }
3370 unregister_netdev(dev); 3506 unregister_netdev(dev);
3371err_out_free_netdev: 3507err_out_free_netdev:
3372 free_netdev(dev); 3508 free_netdev(dev);
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 4c13c371bc21..f66109a96d95 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -4,24 +4,17 @@
 #ifndef _SKY2_H
 #define _SKY2_H
 
-/* PCI config registers */
+#define ETH_JUMBO_MTU		9000	/* Maximum MTU supported */
+
+/* PCI device specific config registers */
 enum {
 	PCI_DEV_REG1	= 0x40,
 	PCI_DEV_REG2	= 0x44,
-	PCI_DEV_STATUS	= 0x7c,
 	PCI_DEV_REG3	= 0x80,
 	PCI_DEV_REG4	= 0x84,
 	PCI_DEV_REG5	= 0x88,
};
 
-enum {
-	PEX_DEV_CAP	= 0xe4,
-	PEX_DEV_CTRL	= 0xe8,
-	PEX_DEV_STA	= 0xea,
-	PEX_LNK_STAT	= 0xf2,
-	PEX_UNC_ERR_STAT= 0x104,
-};
-
/* Yukon-2 */
enum pci_dev_reg_1 {
 	PCI_Y2_PIG_ENA	 = 1<<31, /* Enable Plug-in-Go (YUKON-2) */
@@ -70,39 +63,6 @@ enum pci_dev_reg_4 {
 				 PCI_STATUS_REC_MASTER_ABORT | \
 				 PCI_STATUS_REC_TARGET_ABORT | \
 				 PCI_STATUS_PARITY)
-
-enum pex_dev_ctrl {
-	PEX_DC_MAX_RRS_MSK	= 7<<12, /* Bit 14..12:	Max. Read Request Size */
-	PEX_DC_EN_NO_SNOOP	= 1<<11, /* Enable No Snoop */
-	PEX_DC_EN_AUX_POW	= 1<<10, /* Enable AUX Power */
-	PEX_DC_EN_PHANTOM	= 1<<9,	/* Enable Phantom Functions */
-	PEX_DC_EN_EXT_TAG	= 1<<8,	/* Enable Extended Tag Field */
-	PEX_DC_MAX_PLS_MSK	= 7<<5,	/* Bit  7.. 5:	Max. Payload Size Mask */
-	PEX_DC_EN_REL_ORD	= 1<<4,	/* Enable Relaxed Ordering */
-	PEX_DC_EN_UNS_RQ_RP	= 1<<3,	/* Enable Unsupported Request Reporting */
-	PEX_DC_EN_FAT_ER_RP	= 1<<2,	/* Enable Fatal Error Reporting */
-	PEX_DC_EN_NFA_ER_RP	= 1<<1,	/* Enable Non-Fatal Error Reporting */
-	PEX_DC_EN_COR_ER_RP	= 1<<0,	/* Enable Correctable Error Reporting */
-};
-#define PEX_DC_MAX_RD_RQ_SIZE(x)	(((x)<<12) & PEX_DC_MAX_RRS_MSK)
-
-/* PEX_UNC_ERR_STAT	PEX Uncorrectable Errors Status Register (Yukon-2) */
-enum pex_err {
-	PEX_UNSUP_REQ	= 1<<20, /* Unsupported Request Error */
-
-	PEX_MALFOR_TLP	= 1<<18, /* Malformed TLP */
-
-	PEX_UNEXP_COMP	= 1<<16, /* Unexpected Completion */
-
-	PEX_COMP_TO	= 1<<14, /* Completion Timeout */
-	PEX_FLOW_CTRL_P	= 1<<13, /* Flow Control Protocol Error */
-	PEX_POIS_TLP	= 1<<12, /* Poisoned TLP */
-
-	PEX_DATA_LINK_P	= 1<<4,	/* Data Link Protocol Error */
-	PEX_FATAL_ERRORS= (PEX_MALFOR_TLP | PEX_FLOW_CTRL_P | PEX_DATA_LINK_P),
-};
-
-
enum csr_regs {
 	B0_RAP		= 0x0000,
 	B0_CTST		= 0x0004,
@@ -1816,12 +1776,14 @@ struct sky2_status_le {
struct tx_ring_info {
 	struct sk_buff	*skb;
 	DECLARE_PCI_UNMAP_ADDR(mapaddr);
-	u16		idx;
+	DECLARE_PCI_UNMAP_ADDR(maplen);
};
 
-struct ring_info {
+struct rx_ring_info {
 	struct sk_buff	*skb;
-	dma_addr_t	mapaddr;
+	dma_addr_t	data_addr;
+	DECLARE_PCI_UNMAP_ADDR(data_size);
+	dma_addr_t	frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT];
};
 
struct sky2_port {
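The frag_addr[] bound is worth sanity-checking against the BUG_ON in sky2_rx_start(): with 4 KiB pages (an assumption here, PAGE_SHIFT varies by architecture) the array holds ETH_JUMBO_MTU >> PAGE_SHIFT entries:

#include <stdio.h>

#define PAGE_SHIFT	12	/* 4 KiB pages assumed */
#define ETH_JUMBO_MTU	9000

int main(void)
{
	/* frag_addr[] in struct rx_ring_info gets this many slots */
	printf("frag_addr entries = %d\n", ETH_JUMBO_MTU >> PAGE_SHIFT);
	return 0;	/* prints 2, covering rx_nfrags for a jumbo MTU */
}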
@@ -1831,7 +1793,6 @@ struct sky2_port {
 	u32		     msg_enable;
 	spinlock_t	     phy_lock;
 
-	spinlock_t	     tx_lock ____cacheline_aligned_in_smp;
 	struct tx_ring_info  *tx_ring;
 	struct sky2_tx_le    *tx_le;
 	u16		     tx_cons;		/* next le to check */
@@ -1841,13 +1802,15 @@ struct sky2_port {
 	u16		     tx_last_mss;
 	u32		     tx_tcpsum;
 
-	struct ring_info     *rx_ring ____cacheline_aligned_in_smp;
+	struct rx_ring_info  *rx_ring ____cacheline_aligned_in_smp;
 	struct sky2_rx_le    *rx_le;
 	u32		     rx_addr64;
 	u16		     rx_next;		/* next re to check */
 	u16		     rx_put;		/* next le index to use */
 	u16		     rx_pending;
-	u16		     rx_bufsize;
+	u16		     rx_data_size;
+	u16		     rx_nfrags;
+
#ifdef SKY2_VLAN_TAG_USED
 	u16		     rx_tag;
 	struct vlan_group    *vlgrp;
@@ -1873,6 +1836,7 @@ struct sky2_hw {
 	struct net_device    *dev[2];
 
 	int		     pm_cap;
+	int		     err_cap;
 	u8		     chip_id;
 	u8		     chip_rev;
 	u8		     pmd_type;
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index cc240adb7269..1397fc55cf68 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -317,7 +317,7 @@ spider_net_init_chain(struct spider_net_card *card,
 					  SPIDER_NET_DESCR_SIZE,
 					  direction);
 
-		if (buf == DMA_ERROR_CODE)
+		if (pci_dma_mapping_error(buf))
 			goto iommu_error;
 
 		descr->bus_addr = buf;
@@ -420,7 +420,7 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
 	buf = pci_map_single(card->pdev, descr->skb->data,
 			     SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
 	descr->buf_addr = buf;
-	if (buf == DMA_ERROR_CODE) {
+	if (pci_dma_mapping_error(buf)) {
 		dev_kfree_skb_any(descr->skb);
 		if (netif_msg_rx_err(card) && net_ratelimit())
 			pr_err("Could not iommu-map rx buffer\n");
@@ -649,7 +649,7 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
 	dma_addr_t buf;
 
 	buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
-	if (buf == DMA_ERROR_CODE) {
+	if (pci_dma_mapping_error(buf)) {
 		if (netif_msg_tx_err(card) && net_ratelimit())
 			pr_err("could not iommu-map packet (%p, %i). "
 			       "Dropping packet\n", skb->data, skb->len);
diff --git a/drivers/net/wan/pc300.h b/drivers/net/wan/pc300.h
index 2024b26b99e6..63e9fcf31fb8 100644
--- a/drivers/net/wan/pc300.h
+++ b/drivers/net/wan/pc300.h
@@ -100,6 +100,7 @@
#define	_PC300_H
 
#include <linux/hdlc.h>
+#include <net/syncppp.h>
#include "hd64572.h"
#include "pc300-falc-lh.h"
 