Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 594
1 file changed, 316 insertions(+), 278 deletions(-)
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 6526214f69d9..5edcbfe93065 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -144,11 +144,13 @@ struct myri10ge_tx_buf { | |||
144 | char *req_bytes; | 144 | char *req_bytes; |
145 | struct myri10ge_tx_buffer_state *info; | 145 | struct myri10ge_tx_buffer_state *info; |
146 | int mask; /* number of transmit slots -1 */ | 146 | int mask; /* number of transmit slots -1 */ |
147 | int boundary; /* boundary transmits cannot cross */ | ||
148 | int req ____cacheline_aligned; /* transmit slots submitted */ | 147 | int req ____cacheline_aligned; /* transmit slots submitted */ |
149 | int pkt_start; /* packets started */ | 148 | int pkt_start; /* packets started */ |
149 | int stop_queue; | ||
150 | int linearized; | ||
150 | int done ____cacheline_aligned; /* transmit slots completed */ | 151 | int done ____cacheline_aligned; /* transmit slots completed */ |
151 | int pkt_done; /* packets completed */ | 152 | int pkt_done; /* packets completed */ |
153 | int wake_queue; | ||
152 | }; | 154 | }; |
153 | 155 | ||
154 | struct myri10ge_rx_done { | 156 | struct myri10ge_rx_done { |
@@ -160,29 +162,49 @@ struct myri10ge_rx_done { | |||
160 | struct net_lro_desc lro_desc[MYRI10GE_MAX_LRO_DESCRIPTORS]; | 162 | struct net_lro_desc lro_desc[MYRI10GE_MAX_LRO_DESCRIPTORS]; |
161 | }; | 163 | }; |
162 | 164 | ||
163 | struct myri10ge_priv { | 165 | struct myri10ge_slice_netstats { |
164 | int running; /* running? */ | 166 | unsigned long rx_packets; |
165 | int csum_flag; /* rx_csums? */ | 167 | unsigned long tx_packets; |
168 | unsigned long rx_bytes; | ||
169 | unsigned long tx_bytes; | ||
170 | unsigned long rx_dropped; | ||
171 | unsigned long tx_dropped; | ||
172 | }; | ||
173 | |||
174 | struct myri10ge_slice_state { | ||
166 | struct myri10ge_tx_buf tx; /* transmit ring */ | 175 | struct myri10ge_tx_buf tx; /* transmit ring */ |
167 | struct myri10ge_rx_buf rx_small; | 176 | struct myri10ge_rx_buf rx_small; |
168 | struct myri10ge_rx_buf rx_big; | 177 | struct myri10ge_rx_buf rx_big; |
169 | struct myri10ge_rx_done rx_done; | 178 | struct myri10ge_rx_done rx_done; |
179 | struct net_device *dev; | ||
180 | struct napi_struct napi; | ||
181 | struct myri10ge_priv *mgp; | ||
182 | struct myri10ge_slice_netstats stats; | ||
183 | __be32 __iomem *irq_claim; | ||
184 | struct mcp_irq_data *fw_stats; | ||
185 | dma_addr_t fw_stats_bus; | ||
186 | int watchdog_tx_done; | ||
187 | int watchdog_tx_req; | ||
188 | }; | ||
189 | |||
190 | struct myri10ge_priv { | ||
191 | struct myri10ge_slice_state ss; | ||
192 | int tx_boundary; /* boundary transmits cannot cross */ | ||
193 | int running; /* running? */ | ||
194 | int csum_flag; /* rx_csums? */ | ||
170 | int small_bytes; | 195 | int small_bytes; |
171 | int big_bytes; | 196 | int big_bytes; |
172 | struct net_device *dev; | 197 | struct net_device *dev; |
173 | struct napi_struct napi; | ||
174 | struct net_device_stats stats; | 198 | struct net_device_stats stats; |
199 | spinlock_t stats_lock; | ||
175 | u8 __iomem *sram; | 200 | u8 __iomem *sram; |
176 | int sram_size; | 201 | int sram_size; |
177 | unsigned long board_span; | 202 | unsigned long board_span; |
178 | unsigned long iomem_base; | 203 | unsigned long iomem_base; |
179 | __be32 __iomem *irq_claim; | ||
180 | __be32 __iomem *irq_deassert; | 204 | __be32 __iomem *irq_deassert; |
181 | char *mac_addr_string; | 205 | char *mac_addr_string; |
182 | struct mcp_cmd_response *cmd; | 206 | struct mcp_cmd_response *cmd; |
183 | dma_addr_t cmd_bus; | 207 | dma_addr_t cmd_bus; |
184 | struct mcp_irq_data *fw_stats; | ||
185 | dma_addr_t fw_stats_bus; | ||
186 | struct pci_dev *pdev; | 208 | struct pci_dev *pdev; |
187 | int msi_enabled; | 209 | int msi_enabled; |
188 | u32 link_state; | 210 | u32 link_state; |
@@ -191,17 +213,12 @@ struct myri10ge_priv { | |||
191 | __be32 __iomem *intr_coal_delay_ptr; | 213 | __be32 __iomem *intr_coal_delay_ptr; |
192 | int mtrr; | 214 | int mtrr; |
193 | int wc_enabled; | 215 | int wc_enabled; |
194 | int wake_queue; | ||
195 | int stop_queue; | ||
196 | int down_cnt; | 216 | int down_cnt; |
197 | wait_queue_head_t down_wq; | 217 | wait_queue_head_t down_wq; |
198 | struct work_struct watchdog_work; | 218 | struct work_struct watchdog_work; |
199 | struct timer_list watchdog_timer; | 219 | struct timer_list watchdog_timer; |
200 | int watchdog_tx_done; | ||
201 | int watchdog_tx_req; | ||
202 | int watchdog_pause; | ||
203 | int watchdog_resets; | 220 | int watchdog_resets; |
204 | int tx_linearized; | 221 | int watchdog_pause; |
205 | int pause; | 222 | int pause; |
206 | char *fw_name; | 223 | char *fw_name; |
207 | char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE]; | 224 | char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE]; |
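
The hunks above move the per-ring and per-interrupt state out of struct myri10ge_priv into a new struct myri10ge_slice_state, which carries its own struct myri10ge_slice_netstats counters. A minimal standalone sketch of how such per-slice counters would be summed into device-wide totals follows; the helper, the totals struct, and the num_slices loop are illustrative assumptions (this patch still drives a single slice, mgp->ss), not code from the driver.

/*
 * Illustration only, not driver code: sum per-slice counters into
 * device-wide totals.  Field names mirror the struct added above;
 * the aggregation helper itself is hypothetical.
 */
#include <stdio.h>

struct myri10ge_slice_netstats {
        unsigned long rx_packets;
        unsigned long tx_packets;
        unsigned long rx_bytes;
        unsigned long tx_bytes;
        unsigned long rx_dropped;
        unsigned long tx_dropped;
};

struct net_stats_totals {
        unsigned long rx_packets, tx_packets;
        unsigned long rx_bytes, tx_bytes;
        unsigned long rx_dropped, tx_dropped;
};

/* Sum per-slice counters; with a single slice this is just a copy. */
static void myri10ge_sum_slice_stats(struct net_stats_totals *out,
                                     const struct myri10ge_slice_netstats *slice,
                                     int num_slices)
{
        int i;

        out->rx_packets = out->tx_packets = 0;
        out->rx_bytes = out->tx_bytes = 0;
        out->rx_dropped = out->tx_dropped = 0;
        for (i = 0; i < num_slices; i++) {
                out->rx_packets += slice[i].rx_packets;
                out->tx_packets += slice[i].tx_packets;
                out->rx_bytes   += slice[i].rx_bytes;
                out->tx_bytes   += slice[i].tx_bytes;
                out->rx_dropped += slice[i].rx_dropped;
                out->tx_dropped += slice[i].tx_dropped;
        }
}

int main(void)
{
        struct myri10ge_slice_netstats slices[2] = {
                { .rx_packets = 10, .tx_packets = 7, .rx_bytes = 15000,
                  .tx_bytes = 9000, .rx_dropped = 1, .tx_dropped = 0 },
                { .rx_packets = 4,  .tx_packets = 2, .rx_bytes = 6000,
                  .tx_bytes = 3000, .rx_dropped = 0, .tx_dropped = 1 },
        };
        struct net_stats_totals t;

        myri10ge_sum_slice_stats(&t, slices, 2);
        printf("rx %lu pkts / %lu bytes, tx %lu pkts / %lu bytes\n",
               t.rx_packets, t.rx_bytes, t.tx_packets, t.tx_bytes);
        return 0;
}

Keeping the counters per slice lets each slice update them without sharing a cacheline with the others; the new stats_lock added to struct myri10ge_priv presumably serializes only the roll-up.
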
@@ -643,7 +660,7 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp) | |||
643 | } | 660 | } |
644 | dev_info(&mgp->pdev->dev, | 661 | dev_info(&mgp->pdev->dev, |
645 | "Successfully adopted running firmware\n"); | 662 | "Successfully adopted running firmware\n"); |
646 | if (mgp->tx.boundary == 4096) { | 663 | if (mgp->tx_boundary == 4096) { |
647 | dev_warn(&mgp->pdev->dev, | 664 | dev_warn(&mgp->pdev->dev, |
648 | "Using firmware currently running on NIC" | 665 | "Using firmware currently running on NIC" |
649 | ". For optimal\n"); | 666 | ". For optimal\n"); |
@@ -654,7 +671,7 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp) | |||
654 | } | 671 | } |
655 | 672 | ||
656 | mgp->fw_name = "adopted"; | 673 | mgp->fw_name = "adopted"; |
657 | mgp->tx.boundary = 2048; | 674 | mgp->tx_boundary = 2048; |
658 | return status; | 675 | return status; |
659 | } | 676 | } |
660 | 677 | ||
@@ -780,7 +797,7 @@ static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type) | |||
780 | * transfers took to complete. | 797 | * transfers took to complete. |
781 | */ | 798 | */ |
782 | 799 | ||
783 | len = mgp->tx.boundary; | 800 | len = mgp->tx_boundary; |
784 | 801 | ||
785 | cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus); | 802 | cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus); |
786 | cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus); | 803 | cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus); |
@@ -842,17 +859,17 @@ static int myri10ge_reset(struct myri10ge_priv *mgp) | |||
842 | 859 | ||
843 | /* Now exchange information about interrupts */ | 860 | /* Now exchange information about interrupts */ |
844 | 861 | ||
845 | bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry); | 862 | bytes = myri10ge_max_intr_slots * sizeof(*mgp->ss.rx_done.entry); |
846 | memset(mgp->rx_done.entry, 0, bytes); | 863 | memset(mgp->ss.rx_done.entry, 0, bytes); |
847 | cmd.data0 = (u32) bytes; | 864 | cmd.data0 = (u32) bytes; |
848 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0); | 865 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0); |
849 | cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->rx_done.bus); | 866 | cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->ss.rx_done.bus); |
850 | cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->rx_done.bus); | 867 | cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->ss.rx_done.bus); |
851 | status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA, &cmd, 0); | 868 | status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA, &cmd, 0); |
852 | 869 | ||
853 | status |= | 870 | status |= |
854 | myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0); | 871 | myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0); |
855 | mgp->irq_claim = (__iomem __be32 *) (mgp->sram + cmd.data0); | 872 | mgp->ss.irq_claim = (__iomem __be32 *) (mgp->sram + cmd.data0); |
856 | status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET, | 873 | status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET, |
857 | &cmd, 0); | 874 | &cmd, 0); |
858 | mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0); | 875 | mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0); |
@@ -866,17 +883,17 @@ static int myri10ge_reset(struct myri10ge_priv *mgp) | |||
866 | } | 883 | } |
867 | put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr); | 884 | put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr); |
868 | 885 | ||
869 | memset(mgp->rx_done.entry, 0, bytes); | 886 | memset(mgp->ss.rx_done.entry, 0, bytes); |
870 | 887 | ||
871 | /* reset mcp/driver shared state back to 0 */ | 888 | /* reset mcp/driver shared state back to 0 */ |
872 | mgp->tx.req = 0; | 889 | mgp->ss.tx.req = 0; |
873 | mgp->tx.done = 0; | 890 | mgp->ss.tx.done = 0; |
874 | mgp->tx.pkt_start = 0; | 891 | mgp->ss.tx.pkt_start = 0; |
875 | mgp->tx.pkt_done = 0; | 892 | mgp->ss.tx.pkt_done = 0; |
876 | mgp->rx_big.cnt = 0; | 893 | mgp->ss.rx_big.cnt = 0; |
877 | mgp->rx_small.cnt = 0; | 894 | mgp->ss.rx_small.cnt = 0; |
878 | mgp->rx_done.idx = 0; | 895 | mgp->ss.rx_done.idx = 0; |
879 | mgp->rx_done.cnt = 0; | 896 | mgp->ss.rx_done.cnt = 0; |
880 | mgp->link_changes = 0; | 897 | mgp->link_changes = 0; |
881 | status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr); | 898 | status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr); |
882 | myri10ge_change_pause(mgp, mgp->pause); | 899 | myri10ge_change_pause(mgp, mgp->pause); |
@@ -1028,9 +1045,10 @@ myri10ge_unmap_rx_page(struct pci_dev *pdev, | |||
1028 | * page into an skb */ | 1045 | * page into an skb */ |
1029 | 1046 | ||
1030 | static inline int | 1047 | static inline int |
1031 | myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx, | 1048 | myri10ge_rx_done(struct myri10ge_slice_state *ss, struct myri10ge_rx_buf *rx, |
1032 | int bytes, int len, __wsum csum) | 1049 | int bytes, int len, __wsum csum) |
1033 | { | 1050 | { |
1051 | struct myri10ge_priv *mgp = ss->mgp; | ||
1034 | struct sk_buff *skb; | 1052 | struct sk_buff *skb; |
1035 | struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME]; | 1053 | struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME]; |
1036 | int i, idx, hlen, remainder; | 1054 | int i, idx, hlen, remainder; |
@@ -1060,11 +1078,10 @@ myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx, | |||
1060 | rx_frags[0].page_offset += MXGEFW_PAD; | 1078 | rx_frags[0].page_offset += MXGEFW_PAD; |
1061 | rx_frags[0].size -= MXGEFW_PAD; | 1079 | rx_frags[0].size -= MXGEFW_PAD; |
1062 | len -= MXGEFW_PAD; | 1080 | len -= MXGEFW_PAD; |
1063 | lro_receive_frags(&mgp->rx_done.lro_mgr, rx_frags, | 1081 | lro_receive_frags(&ss->rx_done.lro_mgr, rx_frags, |
1064 | len, len, | 1082 | len, len, |
1065 | /* opaque, will come back in get_frag_header */ | 1083 | /* opaque, will come back in get_frag_header */ |
1066 | (void *)(__force unsigned long)csum, | 1084 | (void *)(__force unsigned long)csum, csum); |
1067 | csum); | ||
1068 | return 1; | 1085 | return 1; |
1069 | } | 1086 | } |
1070 | 1087 | ||
@@ -1104,10 +1121,11 @@ myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx, | |||
1104 | return 1; | 1121 | return 1; |
1105 | } | 1122 | } |
1106 | 1123 | ||
1107 | static inline void myri10ge_tx_done(struct myri10ge_priv *mgp, int mcp_index) | 1124 | static inline void |
1125 | myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index) | ||
1108 | { | 1126 | { |
1109 | struct pci_dev *pdev = mgp->pdev; | 1127 | struct pci_dev *pdev = ss->mgp->pdev; |
1110 | struct myri10ge_tx_buf *tx = &mgp->tx; | 1128 | struct myri10ge_tx_buf *tx = &ss->tx; |
1111 | struct sk_buff *skb; | 1129 | struct sk_buff *skb; |
1112 | int idx, len; | 1130 | int idx, len; |
1113 | 1131 | ||
@@ -1125,8 +1143,8 @@ static inline void myri10ge_tx_done(struct myri10ge_priv *mgp, int mcp_index) | |||
1125 | len = pci_unmap_len(&tx->info[idx], len); | 1143 | len = pci_unmap_len(&tx->info[idx], len); |
1126 | pci_unmap_len_set(&tx->info[idx], len, 0); | 1144 | pci_unmap_len_set(&tx->info[idx], len, 0); |
1127 | if (skb) { | 1145 | if (skb) { |
1128 | mgp->stats.tx_bytes += skb->len; | 1146 | ss->stats.tx_bytes += skb->len; |
1129 | mgp->stats.tx_packets++; | 1147 | ss->stats.tx_packets++; |
1130 | dev_kfree_skb_irq(skb); | 1148 | dev_kfree_skb_irq(skb); |
1131 | if (len) | 1149 | if (len) |
1132 | pci_unmap_single(pdev, | 1150 | pci_unmap_single(pdev, |
@@ -1142,16 +1160,18 @@ static inline void myri10ge_tx_done(struct myri10ge_priv *mgp, int mcp_index) | |||
1142 | } | 1160 | } |
1143 | } | 1161 | } |
1144 | /* start the queue if we've stopped it */ | 1162 | /* start the queue if we've stopped it */ |
1145 | if (netif_queue_stopped(mgp->dev) | 1163 | if (netif_queue_stopped(ss->dev) |
1146 | && tx->req - tx->done < (tx->mask >> 1)) { | 1164 | && tx->req - tx->done < (tx->mask >> 1)) { |
1147 | mgp->wake_queue++; | 1165 | tx->wake_queue++; |
1148 | netif_wake_queue(mgp->dev); | 1166 | netif_wake_queue(ss->dev); |
1149 | } | 1167 | } |
1150 | } | 1168 | } |
1151 | 1169 | ||
1152 | static inline int myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int budget) | 1170 | static inline int |
1171 | myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget) | ||
1153 | { | 1172 | { |
1154 | struct myri10ge_rx_done *rx_done = &mgp->rx_done; | 1173 | struct myri10ge_rx_done *rx_done = &ss->rx_done; |
1174 | struct myri10ge_priv *mgp = ss->mgp; | ||
1155 | unsigned long rx_bytes = 0; | 1175 | unsigned long rx_bytes = 0; |
1156 | unsigned long rx_packets = 0; | 1176 | unsigned long rx_packets = 0; |
1157 | unsigned long rx_ok; | 1177 | unsigned long rx_ok; |
@@ -1167,11 +1187,11 @@ static inline int myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int budget) | |||
1167 | rx_done->entry[idx].length = 0; | 1187 | rx_done->entry[idx].length = 0; |
1168 | checksum = csum_unfold(rx_done->entry[idx].checksum); | 1188 | checksum = csum_unfold(rx_done->entry[idx].checksum); |
1169 | if (length <= mgp->small_bytes) | 1189 | if (length <= mgp->small_bytes) |
1170 | rx_ok = myri10ge_rx_done(mgp, &mgp->rx_small, | 1190 | rx_ok = myri10ge_rx_done(ss, &ss->rx_small, |
1171 | mgp->small_bytes, | 1191 | mgp->small_bytes, |
1172 | length, checksum); | 1192 | length, checksum); |
1173 | else | 1193 | else |
1174 | rx_ok = myri10ge_rx_done(mgp, &mgp->rx_big, | 1194 | rx_ok = myri10ge_rx_done(ss, &ss->rx_big, |
1175 | mgp->big_bytes, | 1195 | mgp->big_bytes, |
1176 | length, checksum); | 1196 | length, checksum); |
1177 | rx_packets += rx_ok; | 1197 | rx_packets += rx_ok; |
@@ -1182,25 +1202,25 @@ static inline int myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int budget) | |||
1182 | } | 1202 | } |
1183 | rx_done->idx = idx; | 1203 | rx_done->idx = idx; |
1184 | rx_done->cnt = cnt; | 1204 | rx_done->cnt = cnt; |
1185 | mgp->stats.rx_packets += rx_packets; | 1205 | ss->stats.rx_packets += rx_packets; |
1186 | mgp->stats.rx_bytes += rx_bytes; | 1206 | ss->stats.rx_bytes += rx_bytes; |
1187 | 1207 | ||
1188 | if (myri10ge_lro) | 1208 | if (myri10ge_lro) |
1189 | lro_flush_all(&rx_done->lro_mgr); | 1209 | lro_flush_all(&rx_done->lro_mgr); |
1190 | 1210 | ||
1191 | /* restock receive rings if needed */ | 1211 | /* restock receive rings if needed */ |
1192 | if (mgp->rx_small.fill_cnt - mgp->rx_small.cnt < myri10ge_fill_thresh) | 1212 | if (ss->rx_small.fill_cnt - ss->rx_small.cnt < myri10ge_fill_thresh) |
1193 | myri10ge_alloc_rx_pages(mgp, &mgp->rx_small, | 1213 | myri10ge_alloc_rx_pages(mgp, &ss->rx_small, |
1194 | mgp->small_bytes + MXGEFW_PAD, 0); | 1214 | mgp->small_bytes + MXGEFW_PAD, 0); |
1195 | if (mgp->rx_big.fill_cnt - mgp->rx_big.cnt < myri10ge_fill_thresh) | 1215 | if (ss->rx_big.fill_cnt - ss->rx_big.cnt < myri10ge_fill_thresh) |
1196 | myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 0); | 1216 | myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0); |
1197 | 1217 | ||
1198 | return work_done; | 1218 | return work_done; |
1199 | } | 1219 | } |
1200 | 1220 | ||
1201 | static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp) | 1221 | static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp) |
1202 | { | 1222 | { |
1203 | struct mcp_irq_data *stats = mgp->fw_stats; | 1223 | struct mcp_irq_data *stats = mgp->ss.fw_stats; |
1204 | 1224 | ||
1205 | if (unlikely(stats->stats_updated)) { | 1225 | if (unlikely(stats->stats_updated)) { |
1206 | unsigned link_up = ntohl(stats->link_up); | 1226 | unsigned link_up = ntohl(stats->link_up); |
@@ -1227,9 +1247,9 @@ static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp) | |||
1227 | } | 1247 | } |
1228 | } | 1248 | } |
1229 | if (mgp->rdma_tags_available != | 1249 | if (mgp->rdma_tags_available != |
1230 | ntohl(mgp->fw_stats->rdma_tags_available)) { | 1250 | ntohl(stats->rdma_tags_available)) { |
1231 | mgp->rdma_tags_available = | 1251 | mgp->rdma_tags_available = |
1232 | ntohl(mgp->fw_stats->rdma_tags_available); | 1252 | ntohl(stats->rdma_tags_available); |
1233 | printk(KERN_WARNING "myri10ge: %s: RDMA timed out! " | 1253 | printk(KERN_WARNING "myri10ge: %s: RDMA timed out! " |
1234 | "%d tags left\n", mgp->dev->name, | 1254 | "%d tags left\n", mgp->dev->name, |
1235 | mgp->rdma_tags_available); | 1255 | mgp->rdma_tags_available); |
@@ -1242,26 +1262,27 @@ static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp) | |||
1242 | 1262 | ||
1243 | static int myri10ge_poll(struct napi_struct *napi, int budget) | 1263 | static int myri10ge_poll(struct napi_struct *napi, int budget) |
1244 | { | 1264 | { |
1245 | struct myri10ge_priv *mgp = | 1265 | struct myri10ge_slice_state *ss = |
1246 | container_of(napi, struct myri10ge_priv, napi); | 1266 | container_of(napi, struct myri10ge_slice_state, napi); |
1247 | struct net_device *netdev = mgp->dev; | 1267 | struct net_device *netdev = ss->mgp->dev; |
1248 | int work_done; | 1268 | int work_done; |
1249 | 1269 | ||
1250 | /* process as many rx events as NAPI will allow */ | 1270 | /* process as many rx events as NAPI will allow */ |
1251 | work_done = myri10ge_clean_rx_done(mgp, budget); | 1271 | work_done = myri10ge_clean_rx_done(ss, budget); |
1252 | 1272 | ||
1253 | if (work_done < budget) { | 1273 | if (work_done < budget) { |
1254 | netif_rx_complete(netdev, napi); | 1274 | netif_rx_complete(netdev, napi); |
1255 | put_be32(htonl(3), mgp->irq_claim); | 1275 | put_be32(htonl(3), ss->irq_claim); |
1256 | } | 1276 | } |
1257 | return work_done; | 1277 | return work_done; |
1258 | } | 1278 | } |
1259 | 1279 | ||
1260 | static irqreturn_t myri10ge_intr(int irq, void *arg) | 1280 | static irqreturn_t myri10ge_intr(int irq, void *arg) |
1261 | { | 1281 | { |
1262 | struct myri10ge_priv *mgp = arg; | 1282 | struct myri10ge_slice_state *ss = arg; |
1263 | struct mcp_irq_data *stats = mgp->fw_stats; | 1283 | struct myri10ge_priv *mgp = ss->mgp; |
1264 | struct myri10ge_tx_buf *tx = &mgp->tx; | 1284 | struct mcp_irq_data *stats = ss->fw_stats; |
1285 | struct myri10ge_tx_buf *tx = &ss->tx; | ||
1265 | u32 send_done_count; | 1286 | u32 send_done_count; |
1266 | int i; | 1287 | int i; |
1267 | 1288 | ||
@@ -1272,7 +1293,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg) | |||
1272 | /* low bit indicates receives are present, so schedule | 1293 | /* low bit indicates receives are present, so schedule |
1273 | * napi poll handler */ | 1294 | * napi poll handler */ |
1274 | if (stats->valid & 1) | 1295 | if (stats->valid & 1) |
1275 | netif_rx_schedule(mgp->dev, &mgp->napi); | 1296 | netif_rx_schedule(ss->dev, &ss->napi); |
1276 | 1297 | ||
1277 | if (!mgp->msi_enabled) { | 1298 | if (!mgp->msi_enabled) { |
1278 | put_be32(0, mgp->irq_deassert); | 1299 | put_be32(0, mgp->irq_deassert); |
@@ -1289,7 +1310,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg) | |||
1289 | /* check for transmit completes and receives */ | 1310 | /* check for transmit completes and receives */ |
1290 | send_done_count = ntohl(stats->send_done_count); | 1311 | send_done_count = ntohl(stats->send_done_count); |
1291 | if (send_done_count != tx->pkt_done) | 1312 | if (send_done_count != tx->pkt_done) |
1292 | myri10ge_tx_done(mgp, (int)send_done_count); | 1313 | myri10ge_tx_done(ss, (int)send_done_count); |
1293 | if (unlikely(i > myri10ge_max_irq_loops)) { | 1314 | if (unlikely(i > myri10ge_max_irq_loops)) { |
1294 | printk(KERN_WARNING "myri10ge: %s: irq stuck?\n", | 1315 | printk(KERN_WARNING "myri10ge: %s: irq stuck?\n", |
1295 | mgp->dev->name); | 1316 | mgp->dev->name); |
@@ -1304,7 +1325,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg) | |||
1304 | 1325 | ||
1305 | myri10ge_check_statblock(mgp); | 1326 | myri10ge_check_statblock(mgp); |
1306 | 1327 | ||
1307 | put_be32(htonl(3), mgp->irq_claim + 1); | 1328 | put_be32(htonl(3), ss->irq_claim + 1); |
1308 | return (IRQ_HANDLED); | 1329 | return (IRQ_HANDLED); |
1309 | } | 1330 | } |
1310 | 1331 | ||
@@ -1409,10 +1430,10 @@ myri10ge_get_ringparam(struct net_device *netdev, | |||
1409 | { | 1430 | { |
1410 | struct myri10ge_priv *mgp = netdev_priv(netdev); | 1431 | struct myri10ge_priv *mgp = netdev_priv(netdev); |
1411 | 1432 | ||
1412 | ring->rx_mini_max_pending = mgp->rx_small.mask + 1; | 1433 | ring->rx_mini_max_pending = mgp->ss.rx_small.mask + 1; |
1413 | ring->rx_max_pending = mgp->rx_big.mask + 1; | 1434 | ring->rx_max_pending = mgp->ss.rx_big.mask + 1; |
1414 | ring->rx_jumbo_max_pending = 0; | 1435 | ring->rx_jumbo_max_pending = 0; |
1415 | ring->tx_max_pending = mgp->rx_small.mask + 1; | 1436 | ring->tx_max_pending = mgp->ss.rx_small.mask + 1; |
1416 | ring->rx_mini_pending = ring->rx_mini_max_pending; | 1437 | ring->rx_mini_pending = ring->rx_mini_max_pending; |
1417 | ring->rx_pending = ring->rx_max_pending; | 1438 | ring->rx_pending = ring->rx_max_pending; |
1418 | ring->rx_jumbo_pending = ring->rx_jumbo_max_pending; | 1439 | ring->rx_jumbo_pending = ring->rx_jumbo_max_pending; |
@@ -1452,7 +1473,7 @@ static int myri10ge_set_tso(struct net_device *netdev, u32 tso_enabled) | |||
1452 | return 0; | 1473 | return 0; |
1453 | } | 1474 | } |
1454 | 1475 | ||
1455 | static const char myri10ge_gstrings_stats[][ETH_GSTRING_LEN] = { | 1476 | static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = { |
1456 | "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", | 1477 | "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", |
1457 | "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", | 1478 | "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", |
1458 | "rx_length_errors", "rx_over_errors", "rx_crc_errors", | 1479 | "rx_length_errors", "rx_over_errors", "rx_crc_errors", |
@@ -1462,28 +1483,39 @@ static const char myri10ge_gstrings_stats[][ETH_GSTRING_LEN] = { | |||
1462 | /* device-specific stats */ | 1483 | /* device-specific stats */ |
1463 | "tx_boundary", "WC", "irq", "MSI", | 1484 | "tx_boundary", "WC", "irq", "MSI", |
1464 | "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs", | 1485 | "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs", |
1465 | "serial_number", "tx_pkt_start", "tx_pkt_done", | 1486 | "serial_number", "watchdog_resets", |
1466 | "tx_req", "tx_done", "rx_small_cnt", "rx_big_cnt", | ||
1467 | "wake_queue", "stop_queue", "watchdog_resets", "tx_linearized", | ||
1468 | "link_changes", "link_up", "dropped_link_overflow", | 1487 | "link_changes", "link_up", "dropped_link_overflow", |
1469 | "dropped_link_error_or_filtered", | 1488 | "dropped_link_error_or_filtered", |
1470 | "dropped_pause", "dropped_bad_phy", "dropped_bad_crc32", | 1489 | "dropped_pause", "dropped_bad_phy", "dropped_bad_crc32", |
1471 | "dropped_unicast_filtered", "dropped_multicast_filtered", | 1490 | "dropped_unicast_filtered", "dropped_multicast_filtered", |
1472 | "dropped_runt", "dropped_overrun", "dropped_no_small_buffer", | 1491 | "dropped_runt", "dropped_overrun", "dropped_no_small_buffer", |
1473 | "dropped_no_big_buffer", "LRO aggregated", "LRO flushed", | 1492 | "dropped_no_big_buffer" |
1493 | }; | ||
1494 | |||
1495 | static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = { | ||
1496 | "----------- slice ---------", | ||
1497 | "tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done", | ||
1498 | "rx_small_cnt", "rx_big_cnt", | ||
1499 | "wake_queue", "stop_queue", "tx_linearized", "LRO aggregated", | ||
1500 | "LRO flushed", | ||
1474 | "LRO avg aggr", "LRO no_desc" | 1501 | "LRO avg aggr", "LRO no_desc" |
1475 | }; | 1502 | }; |
1476 | 1503 | ||
1477 | #define MYRI10GE_NET_STATS_LEN 21 | 1504 | #define MYRI10GE_NET_STATS_LEN 21 |
1478 | #define MYRI10GE_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_stats) | 1505 | #define MYRI10GE_MAIN_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_main_stats) |
1506 | #define MYRI10GE_SLICE_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_slice_stats) | ||
1479 | 1507 | ||
1480 | static void | 1508 | static void |
1481 | myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data) | 1509 | myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data) |
1482 | { | 1510 | { |
1483 | switch (stringset) { | 1511 | switch (stringset) { |
1484 | case ETH_SS_STATS: | 1512 | case ETH_SS_STATS: |
1485 | memcpy(data, *myri10ge_gstrings_stats, | 1513 | memcpy(data, *myri10ge_gstrings_main_stats, |
1486 | sizeof(myri10ge_gstrings_stats)); | 1514 | sizeof(myri10ge_gstrings_main_stats)); |
1515 | data += sizeof(myri10ge_gstrings_main_stats); | ||
1516 | memcpy(data, *myri10ge_gstrings_slice_stats, | ||
1517 | sizeof(myri10ge_gstrings_slice_stats)); | ||
1518 | data += sizeof(myri10ge_gstrings_slice_stats); | ||
1487 | break; | 1519 | break; |
1488 | } | 1520 | } |
1489 | } | 1521 | } |
@@ -1492,7 +1524,7 @@ static int myri10ge_get_sset_count(struct net_device *netdev, int sset) | |||
1492 | { | 1524 | { |
1493 | switch (sset) { | 1525 | switch (sset) { |
1494 | case ETH_SS_STATS: | 1526 | case ETH_SS_STATS: |
1495 | return MYRI10GE_STATS_LEN; | 1527 | return MYRI10GE_MAIN_STATS_LEN + MYRI10GE_SLICE_STATS_LEN; |
1496 | default: | 1528 | default: |
1497 | return -EOPNOTSUPP; | 1529 | return -EOPNOTSUPP; |
1498 | } | 1530 | } |
@@ -1503,12 +1535,13 @@ myri10ge_get_ethtool_stats(struct net_device *netdev, | |||
1503 | struct ethtool_stats *stats, u64 * data) | 1535 | struct ethtool_stats *stats, u64 * data) |
1504 | { | 1536 | { |
1505 | struct myri10ge_priv *mgp = netdev_priv(netdev); | 1537 | struct myri10ge_priv *mgp = netdev_priv(netdev); |
1538 | struct myri10ge_slice_state *ss; | ||
1506 | int i; | 1539 | int i; |
1507 | 1540 | ||
1508 | for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++) | 1541 | for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++) |
1509 | data[i] = ((unsigned long *)&mgp->stats)[i]; | 1542 | data[i] = ((unsigned long *)&mgp->stats)[i]; |
1510 | 1543 | ||
1511 | data[i++] = (unsigned int)mgp->tx.boundary; | 1544 | data[i++] = (unsigned int)mgp->tx_boundary; |
1512 | data[i++] = (unsigned int)mgp->wc_enabled; | 1545 | data[i++] = (unsigned int)mgp->wc_enabled; |
1513 | data[i++] = (unsigned int)mgp->pdev->irq; | 1546 | data[i++] = (unsigned int)mgp->pdev->irq; |
1514 | data[i++] = (unsigned int)mgp->msi_enabled; | 1547 | data[i++] = (unsigned int)mgp->msi_enabled; |
@@ -1516,40 +1549,44 @@ myri10ge_get_ethtool_stats(struct net_device *netdev, | |||
1516 | data[i++] = (unsigned int)mgp->write_dma; | 1549 | data[i++] = (unsigned int)mgp->write_dma; |
1517 | data[i++] = (unsigned int)mgp->read_write_dma; | 1550 | data[i++] = (unsigned int)mgp->read_write_dma; |
1518 | data[i++] = (unsigned int)mgp->serial_number; | 1551 | data[i++] = (unsigned int)mgp->serial_number; |
1519 | data[i++] = (unsigned int)mgp->tx.pkt_start; | ||
1520 | data[i++] = (unsigned int)mgp->tx.pkt_done; | ||
1521 | data[i++] = (unsigned int)mgp->tx.req; | ||
1522 | data[i++] = (unsigned int)mgp->tx.done; | ||
1523 | data[i++] = (unsigned int)mgp->rx_small.cnt; | ||
1524 | data[i++] = (unsigned int)mgp->rx_big.cnt; | ||
1525 | data[i++] = (unsigned int)mgp->wake_queue; | ||
1526 | data[i++] = (unsigned int)mgp->stop_queue; | ||
1527 | data[i++] = (unsigned int)mgp->watchdog_resets; | 1552 | data[i++] = (unsigned int)mgp->watchdog_resets; |
1528 | data[i++] = (unsigned int)mgp->tx_linearized; | ||
1529 | data[i++] = (unsigned int)mgp->link_changes; | 1553 | data[i++] = (unsigned int)mgp->link_changes; |
1530 | data[i++] = (unsigned int)ntohl(mgp->fw_stats->link_up); | 1554 | |
1531 | data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_link_overflow); | 1555 | /* firmware stats are useful only in the first slice */ |
1532 | data[i++] = | 1556 | ss = &mgp->ss; |
1533 | (unsigned int)ntohl(mgp->fw_stats->dropped_link_error_or_filtered); | 1557 | data[i++] = (unsigned int)ntohl(ss->fw_stats->link_up); |
1534 | data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_pause); | 1558 | data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_link_overflow); |
1535 | data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_bad_phy); | ||
1536 | data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_bad_crc32); | ||
1537 | data[i++] = | 1559 | data[i++] = |
1538 | (unsigned int)ntohl(mgp->fw_stats->dropped_unicast_filtered); | 1560 | (unsigned int)ntohl(ss->fw_stats->dropped_link_error_or_filtered); |
1561 | data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_pause); | ||
1562 | data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_phy); | ||
1563 | data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_crc32); | ||
1564 | data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_unicast_filtered); | ||
1539 | data[i++] = | 1565 | data[i++] = |
1540 | (unsigned int)ntohl(mgp->fw_stats->dropped_multicast_filtered); | 1566 | (unsigned int)ntohl(ss->fw_stats->dropped_multicast_filtered); |
1541 | data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_runt); | 1567 | data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_runt); |
1542 | data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_overrun); | 1568 | data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_overrun); |
1543 | data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_small_buffer); | 1569 | data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_small_buffer); |
1544 | data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_big_buffer); | 1570 | data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_big_buffer); |
1545 | data[i++] = mgp->rx_done.lro_mgr.stats.aggregated; | 1571 | |
1546 | data[i++] = mgp->rx_done.lro_mgr.stats.flushed; | 1572 | data[i++] = 0; |
1547 | if (mgp->rx_done.lro_mgr.stats.flushed) | 1573 | data[i++] = (unsigned int)ss->tx.pkt_start; |
1548 | data[i++] = mgp->rx_done.lro_mgr.stats.aggregated / | 1574 | data[i++] = (unsigned int)ss->tx.pkt_done; |
1549 | mgp->rx_done.lro_mgr.stats.flushed; | 1575 | data[i++] = (unsigned int)ss->tx.req; |
1576 | data[i++] = (unsigned int)ss->tx.done; | ||
1577 | data[i++] = (unsigned int)ss->rx_small.cnt; | ||
1578 | data[i++] = (unsigned int)ss->rx_big.cnt; | ||
1579 | data[i++] = (unsigned int)ss->tx.wake_queue; | ||
1580 | data[i++] = (unsigned int)ss->tx.stop_queue; | ||
1581 | data[i++] = (unsigned int)ss->tx.linearized; | ||
1582 | data[i++] = ss->rx_done.lro_mgr.stats.aggregated; | ||
1583 | data[i++] = ss->rx_done.lro_mgr.stats.flushed; | ||
1584 | if (ss->rx_done.lro_mgr.stats.flushed) | ||
1585 | data[i++] = ss->rx_done.lro_mgr.stats.aggregated / | ||
1586 | ss->rx_done.lro_mgr.stats.flushed; | ||
1550 | else | 1587 | else |
1551 | data[i++] = 0; | 1588 | data[i++] = 0; |
1552 | data[i++] = mgp->rx_done.lro_mgr.stats.no_desc; | 1589 | data[i++] = ss->rx_done.lro_mgr.stats.no_desc; |
1553 | } | 1590 | } |
1554 | 1591 | ||
1555 | static void myri10ge_set_msglevel(struct net_device *netdev, u32 value) | 1592 | static void myri10ge_set_msglevel(struct net_device *netdev, u32 value) |
@@ -1585,19 +1622,17 @@ static const struct ethtool_ops myri10ge_ethtool_ops = { | |||
1585 | .get_msglevel = myri10ge_get_msglevel | 1622 | .get_msglevel = myri10ge_get_msglevel |
1586 | }; | 1623 | }; |
1587 | 1624 | ||
1588 | static int myri10ge_allocate_rings(struct net_device *dev) | 1625 | static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss) |
1589 | { | 1626 | { |
1590 | struct myri10ge_priv *mgp; | 1627 | struct myri10ge_priv *mgp = ss->mgp; |
1591 | struct myri10ge_cmd cmd; | 1628 | struct myri10ge_cmd cmd; |
1629 | struct net_device *dev = mgp->dev; | ||
1592 | int tx_ring_size, rx_ring_size; | 1630 | int tx_ring_size, rx_ring_size; |
1593 | int tx_ring_entries, rx_ring_entries; | 1631 | int tx_ring_entries, rx_ring_entries; |
1594 | int i, status; | 1632 | int i, status; |
1595 | size_t bytes; | 1633 | size_t bytes; |
1596 | 1634 | ||
1597 | mgp = netdev_priv(dev); | ||
1598 | |||
1599 | /* get ring sizes */ | 1635 | /* get ring sizes */ |
1600 | |||
1601 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0); | 1636 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0); |
1602 | tx_ring_size = cmd.data0; | 1637 | tx_ring_size = cmd.data0; |
1603 | status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0); | 1638 | status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0); |
@@ -1607,144 +1642,142 @@ static int myri10ge_allocate_rings(struct net_device *dev) | |||
1607 | 1642 | ||
1608 | tx_ring_entries = tx_ring_size / sizeof(struct mcp_kreq_ether_send); | 1643 | tx_ring_entries = tx_ring_size / sizeof(struct mcp_kreq_ether_send); |
1609 | rx_ring_entries = rx_ring_size / sizeof(struct mcp_dma_addr); | 1644 | rx_ring_entries = rx_ring_size / sizeof(struct mcp_dma_addr); |
1610 | mgp->tx.mask = tx_ring_entries - 1; | 1645 | ss->tx.mask = tx_ring_entries - 1; |
1611 | mgp->rx_small.mask = mgp->rx_big.mask = rx_ring_entries - 1; | 1646 | ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1; |
1612 | 1647 | ||
1613 | status = -ENOMEM; | 1648 | status = -ENOMEM; |
1614 | 1649 | ||
1615 | /* allocate the host shadow rings */ | 1650 | /* allocate the host shadow rings */ |
1616 | 1651 | ||
1617 | bytes = 8 + (MYRI10GE_MAX_SEND_DESC_TSO + 4) | 1652 | bytes = 8 + (MYRI10GE_MAX_SEND_DESC_TSO + 4) |
1618 | * sizeof(*mgp->tx.req_list); | 1653 | * sizeof(*ss->tx.req_list); |
1619 | mgp->tx.req_bytes = kzalloc(bytes, GFP_KERNEL); | 1654 | ss->tx.req_bytes = kzalloc(bytes, GFP_KERNEL); |
1620 | if (mgp->tx.req_bytes == NULL) | 1655 | if (ss->tx.req_bytes == NULL) |
1621 | goto abort_with_nothing; | 1656 | goto abort_with_nothing; |
1622 | 1657 | ||
1623 | /* ensure req_list entries are aligned to 8 bytes */ | 1658 | /* ensure req_list entries are aligned to 8 bytes */ |
1624 | mgp->tx.req_list = (struct mcp_kreq_ether_send *) | 1659 | ss->tx.req_list = (struct mcp_kreq_ether_send *) |
1625 | ALIGN((unsigned long)mgp->tx.req_bytes, 8); | 1660 | ALIGN((unsigned long)ss->tx.req_bytes, 8); |
1626 | 1661 | ||
1627 | bytes = rx_ring_entries * sizeof(*mgp->rx_small.shadow); | 1662 | bytes = rx_ring_entries * sizeof(*ss->rx_small.shadow); |
1628 | mgp->rx_small.shadow = kzalloc(bytes, GFP_KERNEL); | 1663 | ss->rx_small.shadow = kzalloc(bytes, GFP_KERNEL); |
1629 | if (mgp->rx_small.shadow == NULL) | 1664 | if (ss->rx_small.shadow == NULL) |
1630 | goto abort_with_tx_req_bytes; | 1665 | goto abort_with_tx_req_bytes; |
1631 | 1666 | ||
1632 | bytes = rx_ring_entries * sizeof(*mgp->rx_big.shadow); | 1667 | bytes = rx_ring_entries * sizeof(*ss->rx_big.shadow); |
1633 | mgp->rx_big.shadow = kzalloc(bytes, GFP_KERNEL); | 1668 | ss->rx_big.shadow = kzalloc(bytes, GFP_KERNEL); |
1634 | if (mgp->rx_big.shadow == NULL) | 1669 | if (ss->rx_big.shadow == NULL) |
1635 | goto abort_with_rx_small_shadow; | 1670 | goto abort_with_rx_small_shadow; |
1636 | 1671 | ||
1637 | /* allocate the host info rings */ | 1672 | /* allocate the host info rings */ |
1638 | 1673 | ||
1639 | bytes = tx_ring_entries * sizeof(*mgp->tx.info); | 1674 | bytes = tx_ring_entries * sizeof(*ss->tx.info); |
1640 | mgp->tx.info = kzalloc(bytes, GFP_KERNEL); | 1675 | ss->tx.info = kzalloc(bytes, GFP_KERNEL); |
1641 | if (mgp->tx.info == NULL) | 1676 | if (ss->tx.info == NULL) |
1642 | goto abort_with_rx_big_shadow; | 1677 | goto abort_with_rx_big_shadow; |
1643 | 1678 | ||
1644 | bytes = rx_ring_entries * sizeof(*mgp->rx_small.info); | 1679 | bytes = rx_ring_entries * sizeof(*ss->rx_small.info); |
1645 | mgp->rx_small.info = kzalloc(bytes, GFP_KERNEL); | 1680 | ss->rx_small.info = kzalloc(bytes, GFP_KERNEL); |
1646 | if (mgp->rx_small.info == NULL) | 1681 | if (ss->rx_small.info == NULL) |
1647 | goto abort_with_tx_info; | 1682 | goto abort_with_tx_info; |
1648 | 1683 | ||
1649 | bytes = rx_ring_entries * sizeof(*mgp->rx_big.info); | 1684 | bytes = rx_ring_entries * sizeof(*ss->rx_big.info); |
1650 | mgp->rx_big.info = kzalloc(bytes, GFP_KERNEL); | 1685 | ss->rx_big.info = kzalloc(bytes, GFP_KERNEL); |
1651 | if (mgp->rx_big.info == NULL) | 1686 | if (ss->rx_big.info == NULL) |
1652 | goto abort_with_rx_small_info; | 1687 | goto abort_with_rx_small_info; |
1653 | 1688 | ||
1654 | /* Fill the receive rings */ | 1689 | /* Fill the receive rings */ |
1655 | mgp->rx_big.cnt = 0; | 1690 | ss->rx_big.cnt = 0; |
1656 | mgp->rx_small.cnt = 0; | 1691 | ss->rx_small.cnt = 0; |
1657 | mgp->rx_big.fill_cnt = 0; | 1692 | ss->rx_big.fill_cnt = 0; |
1658 | mgp->rx_small.fill_cnt = 0; | 1693 | ss->rx_small.fill_cnt = 0; |
1659 | mgp->rx_small.page_offset = MYRI10GE_ALLOC_SIZE; | 1694 | ss->rx_small.page_offset = MYRI10GE_ALLOC_SIZE; |
1660 | mgp->rx_big.page_offset = MYRI10GE_ALLOC_SIZE; | 1695 | ss->rx_big.page_offset = MYRI10GE_ALLOC_SIZE; |
1661 | mgp->rx_small.watchdog_needed = 0; | 1696 | ss->rx_small.watchdog_needed = 0; |
1662 | mgp->rx_big.watchdog_needed = 0; | 1697 | ss->rx_big.watchdog_needed = 0; |
1663 | myri10ge_alloc_rx_pages(mgp, &mgp->rx_small, | 1698 | myri10ge_alloc_rx_pages(mgp, &ss->rx_small, |
1664 | mgp->small_bytes + MXGEFW_PAD, 0); | 1699 | mgp->small_bytes + MXGEFW_PAD, 0); |
1665 | 1700 | ||
1666 | if (mgp->rx_small.fill_cnt < mgp->rx_small.mask + 1) { | 1701 | if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) { |
1667 | printk(KERN_ERR "myri10ge: %s: alloced only %d small bufs\n", | 1702 | printk(KERN_ERR "myri10ge: %s: alloced only %d small bufs\n", |
1668 | dev->name, mgp->rx_small.fill_cnt); | 1703 | dev->name, ss->rx_small.fill_cnt); |
1669 | goto abort_with_rx_small_ring; | 1704 | goto abort_with_rx_small_ring; |
1670 | } | 1705 | } |
1671 | 1706 | ||
1672 | myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 0); | 1707 | myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0); |
1673 | if (mgp->rx_big.fill_cnt < mgp->rx_big.mask + 1) { | 1708 | if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) { |
1674 | printk(KERN_ERR "myri10ge: %s: alloced only %d big bufs\n", | 1709 | printk(KERN_ERR "myri10ge: %s: alloced only %d big bufs\n", |
1675 | dev->name, mgp->rx_big.fill_cnt); | 1710 | dev->name, ss->rx_big.fill_cnt); |
1676 | goto abort_with_rx_big_ring; | 1711 | goto abort_with_rx_big_ring; |
1677 | } | 1712 | } |
1678 | 1713 | ||
1679 | return 0; | 1714 | return 0; |
1680 | 1715 | ||
1681 | abort_with_rx_big_ring: | 1716 | abort_with_rx_big_ring: |
1682 | for (i = mgp->rx_big.cnt; i < mgp->rx_big.fill_cnt; i++) { | 1717 | for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) { |
1683 | int idx = i & mgp->rx_big.mask; | 1718 | int idx = i & ss->rx_big.mask; |
1684 | myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_big.info[idx], | 1719 | myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx], |
1685 | mgp->big_bytes); | 1720 | mgp->big_bytes); |
1686 | put_page(mgp->rx_big.info[idx].page); | 1721 | put_page(ss->rx_big.info[idx].page); |
1687 | } | 1722 | } |
1688 | 1723 | ||
1689 | abort_with_rx_small_ring: | 1724 | abort_with_rx_small_ring: |
1690 | for (i = mgp->rx_small.cnt; i < mgp->rx_small.fill_cnt; i++) { | 1725 | for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) { |
1691 | int idx = i & mgp->rx_small.mask; | 1726 | int idx = i & ss->rx_small.mask; |
1692 | myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_small.info[idx], | 1727 | myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx], |
1693 | mgp->small_bytes + MXGEFW_PAD); | 1728 | mgp->small_bytes + MXGEFW_PAD); |
1694 | put_page(mgp->rx_small.info[idx].page); | 1729 | put_page(ss->rx_small.info[idx].page); |
1695 | } | 1730 | } |
1696 | 1731 | ||
1697 | kfree(mgp->rx_big.info); | 1732 | kfree(ss->rx_big.info); |
1698 | 1733 | ||
1699 | abort_with_rx_small_info: | 1734 | abort_with_rx_small_info: |
1700 | kfree(mgp->rx_small.info); | 1735 | kfree(ss->rx_small.info); |
1701 | 1736 | ||
1702 | abort_with_tx_info: | 1737 | abort_with_tx_info: |
1703 | kfree(mgp->tx.info); | 1738 | kfree(ss->tx.info); |
1704 | 1739 | ||
1705 | abort_with_rx_big_shadow: | 1740 | abort_with_rx_big_shadow: |
1706 | kfree(mgp->rx_big.shadow); | 1741 | kfree(ss->rx_big.shadow); |
1707 | 1742 | ||
1708 | abort_with_rx_small_shadow: | 1743 | abort_with_rx_small_shadow: |
1709 | kfree(mgp->rx_small.shadow); | 1744 | kfree(ss->rx_small.shadow); |
1710 | 1745 | ||
1711 | abort_with_tx_req_bytes: | 1746 | abort_with_tx_req_bytes: |
1712 | kfree(mgp->tx.req_bytes); | 1747 | kfree(ss->tx.req_bytes); |
1713 | mgp->tx.req_bytes = NULL; | 1748 | ss->tx.req_bytes = NULL; |
1714 | mgp->tx.req_list = NULL; | 1749 | ss->tx.req_list = NULL; |
1715 | 1750 | ||
1716 | abort_with_nothing: | 1751 | abort_with_nothing: |
1717 | return status; | 1752 | return status; |
1718 | } | 1753 | } |
1719 | 1754 | ||
1720 | static void myri10ge_free_rings(struct net_device *dev) | 1755 | static void myri10ge_free_rings(struct myri10ge_slice_state *ss) |
1721 | { | 1756 | { |
1722 | struct myri10ge_priv *mgp; | 1757 | struct myri10ge_priv *mgp = ss->mgp; |
1723 | struct sk_buff *skb; | 1758 | struct sk_buff *skb; |
1724 | struct myri10ge_tx_buf *tx; | 1759 | struct myri10ge_tx_buf *tx; |
1725 | int i, len, idx; | 1760 | int i, len, idx; |
1726 | 1761 | ||
1727 | mgp = netdev_priv(dev); | 1762 | for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) { |
1728 | 1763 | idx = i & ss->rx_big.mask; | |
1729 | for (i = mgp->rx_big.cnt; i < mgp->rx_big.fill_cnt; i++) { | 1764 | if (i == ss->rx_big.fill_cnt - 1) |
1730 | idx = i & mgp->rx_big.mask; | 1765 | ss->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE; |
1731 | if (i == mgp->rx_big.fill_cnt - 1) | 1766 | myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx], |
1732 | mgp->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE; | ||
1733 | myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_big.info[idx], | ||
1734 | mgp->big_bytes); | 1767 | mgp->big_bytes); |
1735 | put_page(mgp->rx_big.info[idx].page); | 1768 | put_page(ss->rx_big.info[idx].page); |
1736 | } | 1769 | } |
1737 | 1770 | ||
1738 | for (i = mgp->rx_small.cnt; i < mgp->rx_small.fill_cnt; i++) { | 1771 | for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) { |
1739 | idx = i & mgp->rx_small.mask; | 1772 | idx = i & ss->rx_small.mask; |
1740 | if (i == mgp->rx_small.fill_cnt - 1) | 1773 | if (i == ss->rx_small.fill_cnt - 1) |
1741 | mgp->rx_small.info[idx].page_offset = | 1774 | ss->rx_small.info[idx].page_offset = |
1742 | MYRI10GE_ALLOC_SIZE; | 1775 | MYRI10GE_ALLOC_SIZE; |
1743 | myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_small.info[idx], | 1776 | myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx], |
1744 | mgp->small_bytes + MXGEFW_PAD); | 1777 | mgp->small_bytes + MXGEFW_PAD); |
1745 | put_page(mgp->rx_small.info[idx].page); | 1778 | put_page(ss->rx_small.info[idx].page); |
1746 | } | 1779 | } |
1747 | tx = &mgp->tx; | 1780 | tx = &ss->tx; |
1748 | while (tx->done != tx->req) { | 1781 | while (tx->done != tx->req) { |
1749 | idx = tx->done & tx->mask; | 1782 | idx = tx->done & tx->mask; |
1750 | skb = tx->info[idx].skb; | 1783 | skb = tx->info[idx].skb; |
@@ -1755,7 +1788,7 @@ static void myri10ge_free_rings(struct net_device *dev) | |||
1755 | len = pci_unmap_len(&tx->info[idx], len); | 1788 | len = pci_unmap_len(&tx->info[idx], len); |
1756 | pci_unmap_len_set(&tx->info[idx], len, 0); | 1789 | pci_unmap_len_set(&tx->info[idx], len, 0); |
1757 | if (skb) { | 1790 | if (skb) { |
1758 | mgp->stats.tx_dropped++; | 1791 | ss->stats.tx_dropped++; |
1759 | dev_kfree_skb_any(skb); | 1792 | dev_kfree_skb_any(skb); |
1760 | if (len) | 1793 | if (len) |
1761 | pci_unmap_single(mgp->pdev, | 1794 | pci_unmap_single(mgp->pdev, |
@@ -1770,19 +1803,19 @@ static void myri10ge_free_rings(struct net_device *dev) | |||
1770 | PCI_DMA_TODEVICE); | 1803 | PCI_DMA_TODEVICE); |
1771 | } | 1804 | } |
1772 | } | 1805 | } |
1773 | kfree(mgp->rx_big.info); | 1806 | kfree(ss->rx_big.info); |
1774 | 1807 | ||
1775 | kfree(mgp->rx_small.info); | 1808 | kfree(ss->rx_small.info); |
1776 | 1809 | ||
1777 | kfree(mgp->tx.info); | 1810 | kfree(ss->tx.info); |
1778 | 1811 | ||
1779 | kfree(mgp->rx_big.shadow); | 1812 | kfree(ss->rx_big.shadow); |
1780 | 1813 | ||
1781 | kfree(mgp->rx_small.shadow); | 1814 | kfree(ss->rx_small.shadow); |
1782 | 1815 | ||
1783 | kfree(mgp->tx.req_bytes); | 1816 | kfree(ss->tx.req_bytes); |
1784 | mgp->tx.req_bytes = NULL; | 1817 | ss->tx.req_bytes = NULL; |
1785 | mgp->tx.req_list = NULL; | 1818 | ss->tx.req_list = NULL; |
1786 | } | 1819 | } |
1787 | 1820 | ||
1788 | static int myri10ge_request_irq(struct myri10ge_priv *mgp) | 1821 | static int myri10ge_request_irq(struct myri10ge_priv *mgp) |
@@ -1881,13 +1914,11 @@ myri10ge_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr, | |||
1881 | 1914 | ||
1882 | static int myri10ge_open(struct net_device *dev) | 1915 | static int myri10ge_open(struct net_device *dev) |
1883 | { | 1916 | { |
1884 | struct myri10ge_priv *mgp; | 1917 | struct myri10ge_priv *mgp = netdev_priv(dev); |
1885 | struct myri10ge_cmd cmd; | 1918 | struct myri10ge_cmd cmd; |
1886 | struct net_lro_mgr *lro_mgr; | 1919 | struct net_lro_mgr *lro_mgr; |
1887 | int status, big_pow2; | 1920 | int status, big_pow2; |
1888 | 1921 | ||
1889 | mgp = netdev_priv(dev); | ||
1890 | |||
1891 | if (mgp->running != MYRI10GE_ETH_STOPPED) | 1922 | if (mgp->running != MYRI10GE_ETH_STOPPED) |
1892 | return -EBUSY; | 1923 | return -EBUSY; |
1893 | 1924 | ||
@@ -1924,16 +1955,16 @@ static int myri10ge_open(struct net_device *dev) | |||
1924 | /* get the lanai pointers to the send and receive rings */ | 1955 | /* get the lanai pointers to the send and receive rings */ |
1925 | 1956 | ||
1926 | status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET, &cmd, 0); | 1957 | status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET, &cmd, 0); |
1927 | mgp->tx.lanai = | 1958 | mgp->ss.tx.lanai = |
1928 | (struct mcp_kreq_ether_send __iomem *)(mgp->sram + cmd.data0); | 1959 | (struct mcp_kreq_ether_send __iomem *)(mgp->sram + cmd.data0); |
1929 | 1960 | ||
1930 | status |= | 1961 | status |= |
1931 | myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET, &cmd, 0); | 1962 | myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET, &cmd, 0); |
1932 | mgp->rx_small.lanai = | 1963 | mgp->ss.rx_small.lanai = |
1933 | (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0); | 1964 | (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0); |
1934 | 1965 | ||
1935 | status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0); | 1966 | status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0); |
1936 | mgp->rx_big.lanai = | 1967 | mgp->ss.rx_big.lanai = |
1937 | (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0); | 1968 | (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0); |
1938 | 1969 | ||
1939 | if (status != 0) { | 1970 | if (status != 0) { |
@@ -1945,15 +1976,15 @@ static int myri10ge_open(struct net_device *dev) | |||
1945 | } | 1976 | } |
1946 | 1977 | ||
1947 | if (myri10ge_wcfifo && mgp->wc_enabled) { | 1978 | if (myri10ge_wcfifo && mgp->wc_enabled) { |
1948 | mgp->tx.wc_fifo = (u8 __iomem *) mgp->sram + MXGEFW_ETH_SEND_4; | 1979 | mgp->ss.tx.wc_fifo = (u8 __iomem *) mgp->sram + MXGEFW_ETH_SEND_4; |
1949 | mgp->rx_small.wc_fifo = | 1980 | mgp->ss.rx_small.wc_fifo = |
1950 | (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_SMALL; | 1981 | (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_SMALL; |
1951 | mgp->rx_big.wc_fifo = | 1982 | mgp->ss.rx_big.wc_fifo = |
1952 | (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_BIG; | 1983 | (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_BIG; |
1953 | } else { | 1984 | } else { |
1954 | mgp->tx.wc_fifo = NULL; | 1985 | mgp->ss.tx.wc_fifo = NULL; |
1955 | mgp->rx_small.wc_fifo = NULL; | 1986 | mgp->ss.rx_small.wc_fifo = NULL; |
1956 | mgp->rx_big.wc_fifo = NULL; | 1987 | mgp->ss.rx_big.wc_fifo = NULL; |
1957 | } | 1988 | } |
1958 | 1989 | ||
1959 | /* Firmware needs the big buff size as a power of 2. Lie and | 1990 | /* Firmware needs the big buff size as a power of 2. Lie and |
@@ -1970,7 +2001,7 @@ static int myri10ge_open(struct net_device *dev) | |||
1970 | mgp->big_bytes = big_pow2; | 2001 | mgp->big_bytes = big_pow2; |
1971 | } | 2002 | } |
1972 | 2003 | ||
1973 | status = myri10ge_allocate_rings(dev); | 2004 | status = myri10ge_allocate_rings(&mgp->ss); |
1974 | if (status != 0) | 2005 | if (status != 0) |
1975 | goto abort_with_irq; | 2006 | goto abort_with_irq; |
1976 | 2007 | ||
@@ -1989,12 +2020,12 @@ static int myri10ge_open(struct net_device *dev) | |||
1989 | goto abort_with_rings; | 2020 | goto abort_with_rings; |
1990 | } | 2021 | } |
1991 | 2022 | ||
1992 | cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->fw_stats_bus); | 2023 | cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->ss.fw_stats_bus); |
1993 | cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->fw_stats_bus); | 2024 | cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->ss.fw_stats_bus); |
1994 | cmd.data2 = sizeof(struct mcp_irq_data); | 2025 | cmd.data2 = sizeof(struct mcp_irq_data); |
1995 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0); | 2026 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0); |
1996 | if (status == -ENOSYS) { | 2027 | if (status == -ENOSYS) { |
1997 | dma_addr_t bus = mgp->fw_stats_bus; | 2028 | dma_addr_t bus = mgp->ss.fw_stats_bus; |
1998 | bus += offsetof(struct mcp_irq_data, send_done_count); | 2029 | bus += offsetof(struct mcp_irq_data, send_done_count); |
1999 | cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus); | 2030 | cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus); |
2000 | cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus); | 2031 | cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus); |
@@ -2015,20 +2046,20 @@ static int myri10ge_open(struct net_device *dev) | |||
2015 | mgp->link_state = ~0U; | 2046 | mgp->link_state = ~0U; |
2016 | mgp->rdma_tags_available = 15; | 2047 | mgp->rdma_tags_available = 15; |
2017 | 2048 | ||
2018 | lro_mgr = &mgp->rx_done.lro_mgr; | 2049 | lro_mgr = &mgp->ss.rx_done.lro_mgr; |
2019 | lro_mgr->dev = dev; | 2050 | lro_mgr->dev = dev; |
2020 | lro_mgr->features = LRO_F_NAPI; | 2051 | lro_mgr->features = LRO_F_NAPI; |
2021 | lro_mgr->ip_summed = CHECKSUM_COMPLETE; | 2052 | lro_mgr->ip_summed = CHECKSUM_COMPLETE; |
2022 | lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY; | 2053 | lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY; |
2023 | lro_mgr->max_desc = MYRI10GE_MAX_LRO_DESCRIPTORS; | 2054 | lro_mgr->max_desc = MYRI10GE_MAX_LRO_DESCRIPTORS; |
2024 | lro_mgr->lro_arr = mgp->rx_done.lro_desc; | 2055 | lro_mgr->lro_arr = mgp->ss.rx_done.lro_desc; |
2025 | lro_mgr->get_frag_header = myri10ge_get_frag_header; | 2056 | lro_mgr->get_frag_header = myri10ge_get_frag_header; |
2026 | lro_mgr->max_aggr = myri10ge_lro_max_pkts; | 2057 | lro_mgr->max_aggr = myri10ge_lro_max_pkts; |
2027 | lro_mgr->frag_align_pad = 2; | 2058 | lro_mgr->frag_align_pad = 2; |
2028 | if (lro_mgr->max_aggr > MAX_SKB_FRAGS) | 2059 | if (lro_mgr->max_aggr > MAX_SKB_FRAGS) |
2029 | lro_mgr->max_aggr = MAX_SKB_FRAGS; | 2060 | lro_mgr->max_aggr = MAX_SKB_FRAGS; |
2030 | 2061 | ||
2031 | napi_enable(&mgp->napi); /* must happen prior to any irq */ | 2062 | napi_enable(&mgp->ss.napi); /* must happen prior to any irq */ |
2032 | 2063 | ||
2033 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0); | 2064 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0); |
2034 | if (status) { | 2065 | if (status) { |
@@ -2037,8 +2068,8 @@ static int myri10ge_open(struct net_device *dev) | |||
2037 | goto abort_with_rings; | 2068 | goto abort_with_rings; |
2038 | } | 2069 | } |
2039 | 2070 | ||
2040 | mgp->wake_queue = 0; | 2071 | mgp->ss.tx.wake_queue = 0; |
2041 | mgp->stop_queue = 0; | 2072 | mgp->ss.tx.stop_queue = 0; |
2042 | mgp->running = MYRI10GE_ETH_RUNNING; | 2073 | mgp->running = MYRI10GE_ETH_RUNNING; |
2043 | mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ; | 2074 | mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ; |
2044 | add_timer(&mgp->watchdog_timer); | 2075 | add_timer(&mgp->watchdog_timer); |
@@ -2046,7 +2077,7 @@ static int myri10ge_open(struct net_device *dev) | |||
2046 | return 0; | 2077 | return 0; |
2047 | 2078 | ||
2048 | abort_with_rings: | 2079 | abort_with_rings: |
2049 | myri10ge_free_rings(dev); | 2080 | myri10ge_free_rings(&mgp->ss); |
2050 | 2081 | ||
2051 | abort_with_irq: | 2082 | abort_with_irq: |
2052 | myri10ge_free_irq(mgp); | 2083 | myri10ge_free_irq(mgp); |
@@ -2058,21 +2089,19 @@ abort_with_nothing: | |||
2058 | 2089 | ||
2059 | static int myri10ge_close(struct net_device *dev) | 2090 | static int myri10ge_close(struct net_device *dev) |
2060 | { | 2091 | { |
2061 | struct myri10ge_priv *mgp; | 2092 | struct myri10ge_priv *mgp = netdev_priv(dev); |
2062 | struct myri10ge_cmd cmd; | 2093 | struct myri10ge_cmd cmd; |
2063 | int status, old_down_cnt; | 2094 | int status, old_down_cnt; |
2064 | 2095 | ||
2065 | mgp = netdev_priv(dev); | ||
2066 | |||
2067 | if (mgp->running != MYRI10GE_ETH_RUNNING) | 2096 | if (mgp->running != MYRI10GE_ETH_RUNNING) |
2068 | return 0; | 2097 | return 0; |
2069 | 2098 | ||
2070 | if (mgp->tx.req_bytes == NULL) | 2099 | if (mgp->ss.tx.req_bytes == NULL) |
2071 | return 0; | 2100 | return 0; |
2072 | 2101 | ||
2073 | del_timer_sync(&mgp->watchdog_timer); | 2102 | del_timer_sync(&mgp->watchdog_timer); |
2074 | mgp->running = MYRI10GE_ETH_STOPPING; | 2103 | mgp->running = MYRI10GE_ETH_STOPPING; |
2075 | napi_disable(&mgp->napi); | 2104 | napi_disable(&mgp->ss.napi); |
2076 | netif_carrier_off(dev); | 2105 | netif_carrier_off(dev); |
2077 | netif_stop_queue(dev); | 2106 | netif_stop_queue(dev); |
2078 | old_down_cnt = mgp->down_cnt; | 2107 | old_down_cnt = mgp->down_cnt; |
@@ -2088,7 +2117,7 @@ static int myri10ge_close(struct net_device *dev) | |||
2088 | 2117 | ||
2089 | netif_tx_disable(dev); | 2118 | netif_tx_disable(dev); |
2090 | myri10ge_free_irq(mgp); | 2119 | myri10ge_free_irq(mgp); |
2091 | myri10ge_free_rings(dev); | 2120 | myri10ge_free_rings(&mgp->ss); |
2092 | 2121 | ||
2093 | mgp->running = MYRI10GE_ETH_STOPPED; | 2122 | mgp->running = MYRI10GE_ETH_STOPPED; |
2094 | return 0; | 2123 | return 0; |
@@ -2184,7 +2213,7 @@ myri10ge_submit_req_wc(struct myri10ge_tx_buf *tx, | |||
2184 | 2213 | ||
2185 | /* | 2214 | /* |
2186 | * Transmit a packet. We need to split the packet so that a single | 2215 | * Transmit a packet. We need to split the packet so that a single |
2187 | * segment does not cross myri10ge->tx.boundary, so this makes segment | 2216 | * segment does not cross myri10ge->tx_boundary, so this makes segment |
2188 | * counting tricky. So rather than try to count segments up front, we | 2217 | * counting tricky. So rather than try to count segments up front, we |
2189 | * just give up if there are too few segments to hold a reasonably | 2218 | * just give up if there are too few segments to hold a reasonably |
2190 | * fragmented packet currently available. If we run | 2219 | * fragmented packet currently available. If we run |
@@ -2195,8 +2224,9 @@ myri10ge_submit_req_wc(struct myri10ge_tx_buf *tx, | |||
2195 | static int myri10ge_xmit(struct sk_buff *skb, struct net_device *dev) | 2224 | static int myri10ge_xmit(struct sk_buff *skb, struct net_device *dev) |
2196 | { | 2225 | { |
2197 | struct myri10ge_priv *mgp = netdev_priv(dev); | 2226 | struct myri10ge_priv *mgp = netdev_priv(dev); |
2227 | struct myri10ge_slice_state *ss; | ||
2198 | struct mcp_kreq_ether_send *req; | 2228 | struct mcp_kreq_ether_send *req; |
2199 | struct myri10ge_tx_buf *tx = &mgp->tx; | 2229 | struct myri10ge_tx_buf *tx; |
2200 | struct skb_frag_struct *frag; | 2230 | struct skb_frag_struct *frag; |
2201 | dma_addr_t bus; | 2231 | dma_addr_t bus; |
2202 | u32 low; | 2232 | u32 low; |
@@ -2207,6 +2237,9 @@ static int myri10ge_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2207 | int cum_len, seglen, boundary, rdma_count; | 2237 | int cum_len, seglen, boundary, rdma_count; |
2208 | u8 flags, odd_flag; | 2238 | u8 flags, odd_flag; |
2209 | 2239 | ||
2240 | /* always transmit through slot 0 */ | ||
2241 | ss = &mgp->ss; | ||
2242 | tx = &ss->tx; | ||
2210 | again: | 2243 | again: |
2211 | req = tx->req_list; | 2244 | req = tx->req_list; |
2212 | avail = tx->mask - 1 - (tx->req - tx->done); | 2245 | avail = tx->mask - 1 - (tx->req - tx->done); |
@@ -2221,7 +2254,7 @@ again: | |||
2221 | 2254 | ||
2222 | if ((unlikely(avail < max_segments))) { | 2255 | if ((unlikely(avail < max_segments))) { |
2223 | /* we are out of transmit resources */ | 2256 | /* we are out of transmit resources */ |
2224 | mgp->stop_queue++; | 2257 | tx->stop_queue++; |
2225 | netif_stop_queue(dev); | 2258 | netif_stop_queue(dev); |
2226 | return 1; | 2259 | return 1; |
2227 | } | 2260 | } |
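
The driver comment before myri10ge_xmit() and the hunk above describe the strategy: rather than counting exactly how many send descriptors a fragmented skb will need, the transmit path only checks that the ring has room for a worst-case packet and stops the queue (bumping the per-ring stop_queue counter) if it does not. A minimal, self-contained sketch of that ring-occupancy check follows; the names ring_avail, tx_has_room and MAX_SEND_DESC are illustrative stand-ins, not the driver's identifiers.

#include <stdio.h>

#define MAX_SEND_DESC 12            /* worst-case descriptors per packet (illustrative) */

struct tx_ring {
    int mask;                       /* number of slots - 1 (slot count is a power of two) */
    int req;                        /* slots submitted so far (free-running) */
    int done;                       /* slots completed so far (free-running) */
};

/* Free slots, keeping one slot unused so req == done only when the ring is empty. */
static int ring_avail(const struct tx_ring *tx)
{
    return tx->mask - 1 - (tx->req - tx->done);
}

/* Returns 0 if the caller should stop the queue and retry later. */
static int tx_has_room(const struct tx_ring *tx)
{
    return ring_avail(tx) >= MAX_SEND_DESC;
}

int main(void)
{
    struct tx_ring tx = { .mask = 255, .req = 256, .done = 10 };

    printf("avail=%d, has room: %s\n", ring_avail(&tx),
           tx_has_room(&tx) ? "yes" : "no (stop queue)");
    return 0;
}
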
@@ -2283,7 +2316,7 @@ again: | |||
2283 | if (skb_padto(skb, ETH_ZLEN)) { | 2316 | if (skb_padto(skb, ETH_ZLEN)) { |
2284 | /* The packet is gone, so we must | 2317 | /* The packet is gone, so we must |
2285 | * return 0 */ | 2318 | * return 0 */ |
2286 | mgp->stats.tx_dropped += 1; | 2319 | ss->stats.tx_dropped += 1; |
2287 | return 0; | 2320 | return 0; |
2288 | } | 2321 | } |
2289 | /* adjust the len to account for the zero pad | 2322 | /* adjust the len to account for the zero pad |
@@ -2325,7 +2358,7 @@ again: | |||
2325 | 2358 | ||
2326 | while (1) { | 2359 | while (1) { |
2327 | /* Break the SKB or Fragment up into pieces which | 2360 | /* Break the SKB or Fragment up into pieces which |
2328 | * do not cross mgp->tx.boundary */ | 2361 | * do not cross mgp->tx_boundary */ |
2329 | low = MYRI10GE_LOWPART_TO_U32(bus); | 2362 | low = MYRI10GE_LOWPART_TO_U32(bus); |
2330 | high_swapped = htonl(MYRI10GE_HIGHPART_TO_U32(bus)); | 2363 | high_swapped = htonl(MYRI10GE_HIGHPART_TO_U32(bus)); |
2331 | while (len) { | 2364 | while (len) { |
@@ -2335,7 +2368,8 @@ again: | |||
2335 | if (unlikely(count == max_segments)) | 2368 | if (unlikely(count == max_segments)) |
2336 | goto abort_linearize; | 2369 | goto abort_linearize; |
2337 | 2370 | ||
2338 | boundary = (low + tx->boundary) & ~(tx->boundary - 1); | 2371 | boundary = |
2372 | (low + mgp->tx_boundary) & ~(mgp->tx_boundary - 1); | ||
2339 | seglen = boundary - low; | 2373 | seglen = boundary - low; |
2340 | if (seglen > len) | 2374 | if (seglen > len) |
2341 | seglen = len; | 2375 | seglen = len; |
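
The hunk above clips each DMA segment at the next tx_boundary multiple with (low + boundary) & ~(boundary - 1), i.e. it rounds the low 32 bits of the bus address up to the next aligned address and limits the segment length to that point. A small worked sketch (standalone C, with made-up addresses) shows how a buffer gets split so that no piece crosses a 4096-byte boundary.

#include <stdio.h>
#include <stdint.h>

/* Split [addr, addr + len) into pieces that never cross a 'boundary'-aligned address,
 * mirroring the (low + boundary) & ~(boundary - 1) arithmetic in the driver. */
static void split_at_boundary(uint32_t low, int len, uint32_t boundary)
{
    while (len) {
        /* next boundary-aligned address strictly above 'low' */
        uint32_t next = (low + boundary) & ~(boundary - 1);
        int seglen = (int)(next - low);

        if (seglen > len)
            seglen = len;

        printf("segment: addr=0x%08x len=%d\n", low, seglen);
        low += seglen;
        len -= seglen;
    }
}

int main(void)
{
    /* Example: a 6000-byte buffer starting 512 bytes before a 4 KB boundary
     * is emitted as segments of 512, 4096 and 1392 bytes. */
    split_at_boundary(0x0001fe00, 6000, 4096);
    return 0;
}
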
@@ -2419,7 +2453,7 @@ again: | |||
2419 | myri10ge_submit_req_wc(tx, tx->req_list, count); | 2453 | myri10ge_submit_req_wc(tx, tx->req_list, count); |
2420 | tx->pkt_start++; | 2454 | tx->pkt_start++; |
2421 | if ((avail - count) < MXGEFW_MAX_SEND_DESC) { | 2455 | if ((avail - count) < MXGEFW_MAX_SEND_DESC) { |
2422 | mgp->stop_queue++; | 2456 | tx->stop_queue++; |
2423 | netif_stop_queue(dev); | 2457 | netif_stop_queue(dev); |
2424 | } | 2458 | } |
2425 | dev->trans_start = jiffies; | 2459 | dev->trans_start = jiffies; |
@@ -2461,12 +2495,12 @@ abort_linearize: | |||
2461 | if (skb_linearize(skb)) | 2495 | if (skb_linearize(skb)) |
2462 | goto drop; | 2496 | goto drop; |
2463 | 2497 | ||
2464 | mgp->tx_linearized++; | 2498 | tx->linearized++; |
2465 | goto again; | 2499 | goto again; |
2466 | 2500 | ||
2467 | drop: | 2501 | drop: |
2468 | dev_kfree_skb_any(skb); | 2502 | dev_kfree_skb_any(skb); |
2469 | mgp->stats.tx_dropped += 1; | 2503 | ss->stats.tx_dropped += 1; |
2470 | return 0; | 2504 | return 0; |
2471 | 2505 | ||
2472 | } | 2506 | } |
@@ -2474,7 +2508,7 @@ drop: | |||
2474 | static int myri10ge_sw_tso(struct sk_buff *skb, struct net_device *dev) | 2508 | static int myri10ge_sw_tso(struct sk_buff *skb, struct net_device *dev) |
2475 | { | 2509 | { |
2476 | struct sk_buff *segs, *curr; | 2510 | struct sk_buff *segs, *curr; |
2477 | struct myri10ge_priv *mgp = dev->priv; | 2511 | struct myri10ge_priv *mgp = netdev_priv(dev); |
2478 | int status; | 2512 | int status; |
2479 | 2513 | ||
2480 | segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6); | 2514 | segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6); |
@@ -2514,14 +2548,13 @@ static struct net_device_stats *myri10ge_get_stats(struct net_device *dev) | |||
2514 | 2548 | ||
2515 | static void myri10ge_set_multicast_list(struct net_device *dev) | 2549 | static void myri10ge_set_multicast_list(struct net_device *dev) |
2516 | { | 2550 | { |
2551 | struct myri10ge_priv *mgp = netdev_priv(dev); | ||
2517 | struct myri10ge_cmd cmd; | 2552 | struct myri10ge_cmd cmd; |
2518 | struct myri10ge_priv *mgp; | ||
2519 | struct dev_mc_list *mc_list; | 2553 | struct dev_mc_list *mc_list; |
2520 | __be32 data[2] = { 0, 0 }; | 2554 | __be32 data[2] = { 0, 0 }; |
2521 | int err; | 2555 | int err; |
2522 | DECLARE_MAC_BUF(mac); | 2556 | DECLARE_MAC_BUF(mac); |
2523 | 2557 | ||
2524 | mgp = netdev_priv(dev); | ||
2525 | /* can be called from atomic contexts, | 2558 | /* can be called from atomic contexts, |
2526 | * pass 1 to force atomicity in myri10ge_send_cmd() */ | 2559 | * pass 1 to force atomicity in myri10ge_send_cmd() */ |
2527 | myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1); | 2560 | myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1); |
@@ -2723,9 +2756,9 @@ static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp) | |||
2723 | * already been enabled, then it must use a firmware image which works | 2756 | * already been enabled, then it must use a firmware image which works |
2724 | * around unaligned completion packets (myri10ge_ethp_z8e.dat), and it | 2757 | * around unaligned completion packets (myri10ge_ethp_z8e.dat), and it |
2725 | * should also ensure that it never gives the device a Read-DMA which is | 2758 | * should also ensure that it never gives the device a Read-DMA which is |
2726 | * larger than 2KB by setting the tx.boundary to 2KB. If ECRC is | 2759 | * larger than 2KB by setting the tx_boundary to 2KB. If ECRC is |
2727 | * enabled, then the driver should use the aligned (myri10ge_eth_z8e.dat) | 2760 | * enabled, then the driver should use the aligned (myri10ge_eth_z8e.dat) |
2728 | * firmware image, and set tx.boundary to 4KB. | 2761 | * firmware image, and set tx_boundary to 4KB. |
2729 | */ | 2762 | */ |
2730 | 2763 | ||
2731 | static void myri10ge_firmware_probe(struct myri10ge_priv *mgp) | 2764 | static void myri10ge_firmware_probe(struct myri10ge_priv *mgp) |
@@ -2734,7 +2767,7 @@ static void myri10ge_firmware_probe(struct myri10ge_priv *mgp) | |||
2734 | struct device *dev = &pdev->dev; | 2767 | struct device *dev = &pdev->dev; |
2735 | int status; | 2768 | int status; |
2736 | 2769 | ||
2737 | mgp->tx.boundary = 4096; | 2770 | mgp->tx_boundary = 4096; |
2738 | /* | 2771 | /* |
2739 | * Verify the max read request size was set to 4KB | 2772 | * Verify the max read request size was set to 4KB |
2740 | * before trying the test with 4KB. | 2773 | * before trying the test with 4KB. |
@@ -2746,7 +2779,7 @@ static void myri10ge_firmware_probe(struct myri10ge_priv *mgp) | |||
2746 | } | 2779 | } |
2747 | if (status != 4096) { | 2780 | if (status != 4096) { |
2748 | dev_warn(dev, "Max Read Request size != 4096 (%d)\n", status); | 2781 | dev_warn(dev, "Max Read Request size != 4096 (%d)\n", status); |
2749 | mgp->tx.boundary = 2048; | 2782 | mgp->tx_boundary = 2048; |
2750 | } | 2783 | } |
2751 | /* | 2784 | /* |
2752 | * load the optimized firmware (which assumes aligned PCIe | 2785 | * load the optimized firmware (which assumes aligned PCIe |
@@ -2779,7 +2812,7 @@ static void myri10ge_firmware_probe(struct myri10ge_priv *mgp) | |||
2779 | "Please install up to date fw\n"); | 2812 | "Please install up to date fw\n"); |
2780 | abort: | 2813 | abort: |
2781 | /* fall back to using the unaligned firmware */ | 2814 | /* fall back to using the unaligned firmware */ |
2782 | mgp->tx.boundary = 2048; | 2815 | mgp->tx_boundary = 2048; |
2783 | mgp->fw_name = myri10ge_fw_unaligned; | 2816 | mgp->fw_name = myri10ge_fw_unaligned; |
2784 | 2817 | ||
2785 | } | 2818 | } |
@@ -2800,7 +2833,7 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp) | |||
2800 | if (link_width < 8) { | 2833 | if (link_width < 8) { |
2801 | dev_info(&mgp->pdev->dev, "PCIE x%d Link\n", | 2834 | dev_info(&mgp->pdev->dev, "PCIE x%d Link\n", |
2802 | link_width); | 2835 | link_width); |
2803 | mgp->tx.boundary = 4096; | 2836 | mgp->tx_boundary = 4096; |
2804 | mgp->fw_name = myri10ge_fw_aligned; | 2837 | mgp->fw_name = myri10ge_fw_aligned; |
2805 | } else { | 2838 | } else { |
2806 | myri10ge_firmware_probe(mgp); | 2839 | myri10ge_firmware_probe(mgp); |
@@ -2809,12 +2842,12 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp) | |||
2809 | if (myri10ge_force_firmware == 1) { | 2842 | if (myri10ge_force_firmware == 1) { |
2810 | dev_info(&mgp->pdev->dev, | 2843 | dev_info(&mgp->pdev->dev, |
2811 | "Assuming aligned completions (forced)\n"); | 2844 | "Assuming aligned completions (forced)\n"); |
2812 | mgp->tx.boundary = 4096; | 2845 | mgp->tx_boundary = 4096; |
2813 | mgp->fw_name = myri10ge_fw_aligned; | 2846 | mgp->fw_name = myri10ge_fw_aligned; |
2814 | } else { | 2847 | } else { |
2815 | dev_info(&mgp->pdev->dev, | 2848 | dev_info(&mgp->pdev->dev, |
2816 | "Assuming unaligned completions (forced)\n"); | 2849 | "Assuming unaligned completions (forced)\n"); |
2817 | mgp->tx.boundary = 2048; | 2850 | mgp->tx_boundary = 2048; |
2818 | mgp->fw_name = myri10ge_fw_unaligned; | 2851 | mgp->fw_name = myri10ge_fw_unaligned; |
2819 | } | 2852 | } |
2820 | } | 2853 | } |
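
The comment before myri10ge_firmware_probe() and the two hunks above tie the firmware choice to the largest Read-DMA the NIC may issue: when completions arrive aligned (or the aligned image is forced), the driver can use myri10ge_eth_z8e.dat with a 4 KB tx_boundary; otherwise it falls back to myri10ge_ethp_z8e.dat and a 2 KB tx_boundary so no read ever exceeds 2 KB. A condensed, hedged sketch of that decision follows; force_firmware and probe_ok are stand-ins for the module parameter and the DMA benchmark result, and the PCIe link-width and chipset checks the driver also performs are omitted.

#include <stdio.h>

#define FW_ALIGNED   "myri10ge_eth_z8e.dat"   /* assumes aligned PCIe completions */
#define FW_UNALIGNED "myri10ge_ethp_z8e.dat"  /* works around unaligned completions */

struct fw_choice {
    int tx_boundary;        /* largest Read-DMA a send segment may span */
    const char *fw_name;
};

/*
 * force_firmware: 0 = probe, 1 = force aligned, anything else = force unaligned.
 * probe_ok: result of the (simplified) DMA test with 4 KB reads.
 */
static struct fw_choice select_firmware(int force_firmware, int probe_ok)
{
    struct fw_choice c;

    if (force_firmware == 1 || (force_firmware == 0 && probe_ok)) {
        c.tx_boundary = 4096;          /* aligned completions: allow 4 KB reads */
        c.fw_name = FW_ALIGNED;
    } else {
        c.tx_boundary = 2048;          /* play it safe: never issue a read > 2 KB */
        c.fw_name = FW_UNALIGNED;
    }
    return c;
}

int main(void)
{
    struct fw_choice c = select_firmware(0, 0);

    printf("tx_boundary=%d fw=%s\n", c.tx_boundary, c.fw_name);
    return 0;
}
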
@@ -2931,6 +2964,7 @@ static void myri10ge_watchdog(struct work_struct *work) | |||
2931 | { | 2964 | { |
2932 | struct myri10ge_priv *mgp = | 2965 | struct myri10ge_priv *mgp = |
2933 | container_of(work, struct myri10ge_priv, watchdog_work); | 2966 | container_of(work, struct myri10ge_priv, watchdog_work); |
2967 | struct myri10ge_tx_buf *tx; | ||
2934 | u32 reboot; | 2968 | u32 reboot; |
2935 | int status; | 2969 | int status; |
2936 | u16 cmd, vendor; | 2970 | u16 cmd, vendor; |
@@ -2980,15 +3014,16 @@ static void myri10ge_watchdog(struct work_struct *work) | |||
2980 | 3014 | ||
2981 | printk(KERN_ERR "myri10ge: %s: device timeout, resetting\n", | 3015 | printk(KERN_ERR "myri10ge: %s: device timeout, resetting\n", |
2982 | mgp->dev->name); | 3016 | mgp->dev->name); |
3017 | tx = &mgp->ss.tx; | ||
2983 | printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n", | 3018 | printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n", |
2984 | mgp->dev->name, mgp->tx.req, mgp->tx.done, | 3019 | mgp->dev->name, tx->req, tx->done, |
2985 | mgp->tx.pkt_start, mgp->tx.pkt_done, | 3020 | tx->pkt_start, tx->pkt_done, |
2986 | (int)ntohl(mgp->fw_stats->send_done_count)); | 3021 | (int)ntohl(mgp->ss.fw_stats->send_done_count)); |
2987 | msleep(2000); | 3022 | msleep(2000); |
2988 | printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n", | 3023 | printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n", |
2989 | mgp->dev->name, mgp->tx.req, mgp->tx.done, | 3024 | mgp->dev->name, tx->req, tx->done, |
2990 | mgp->tx.pkt_start, mgp->tx.pkt_done, | 3025 | tx->pkt_start, tx->pkt_done, |
2991 | (int)ntohl(mgp->fw_stats->send_done_count)); | 3026 | (int)ntohl(mgp->ss.fw_stats->send_done_count)); |
2992 | } | 3027 | } |
2993 | rtnl_lock(); | 3028 | rtnl_lock(); |
2994 | myri10ge_close(mgp->dev); | 3029 | myri10ge_close(mgp->dev); |
@@ -3011,28 +3046,31 @@ static void myri10ge_watchdog(struct work_struct *work) | |||
3011 | static void myri10ge_watchdog_timer(unsigned long arg) | 3046 | static void myri10ge_watchdog_timer(unsigned long arg) |
3012 | { | 3047 | { |
3013 | struct myri10ge_priv *mgp; | 3048 | struct myri10ge_priv *mgp; |
3049 | struct myri10ge_slice_state *ss; | ||
3014 | u32 rx_pause_cnt; | 3050 | u32 rx_pause_cnt; |
3015 | 3051 | ||
3016 | mgp = (struct myri10ge_priv *)arg; | 3052 | mgp = (struct myri10ge_priv *)arg; |
3017 | 3053 | ||
3018 | if (mgp->rx_small.watchdog_needed) { | 3054 | rx_pause_cnt = ntohl(mgp->ss.fw_stats->dropped_pause); |
3019 | myri10ge_alloc_rx_pages(mgp, &mgp->rx_small, | 3055 | |
3056 | ss = &mgp->ss; | ||
3057 | if (ss->rx_small.watchdog_needed) { | ||
3058 | myri10ge_alloc_rx_pages(mgp, &ss->rx_small, | ||
3020 | mgp->small_bytes + MXGEFW_PAD, 1); | 3059 | mgp->small_bytes + MXGEFW_PAD, 1); |
3021 | if (mgp->rx_small.fill_cnt - mgp->rx_small.cnt >= | 3060 | if (ss->rx_small.fill_cnt - ss->rx_small.cnt >= |
3022 | myri10ge_fill_thresh) | 3061 | myri10ge_fill_thresh) |
3023 | mgp->rx_small.watchdog_needed = 0; | 3062 | ss->rx_small.watchdog_needed = 0; |
3024 | } | 3063 | } |
3025 | if (mgp->rx_big.watchdog_needed) { | 3064 | if (ss->rx_big.watchdog_needed) { |
3026 | myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 1); | 3065 | myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 1); |
3027 | if (mgp->rx_big.fill_cnt - mgp->rx_big.cnt >= | 3066 | if (ss->rx_big.fill_cnt - ss->rx_big.cnt >= |
3028 | myri10ge_fill_thresh) | 3067 | myri10ge_fill_thresh) |
3029 | mgp->rx_big.watchdog_needed = 0; | 3068 | ss->rx_big.watchdog_needed = 0; |
3030 | } | 3069 | } |
3031 | rx_pause_cnt = ntohl(mgp->fw_stats->dropped_pause); | ||
3032 | 3070 | ||
3033 | if (mgp->tx.req != mgp->tx.done && | 3071 | if (ss->tx.req != ss->tx.done && |
3034 | mgp->tx.done == mgp->watchdog_tx_done && | 3072 | ss->tx.done == ss->watchdog_tx_done && |
3035 | mgp->watchdog_tx_req != mgp->watchdog_tx_done) { | 3073 | ss->watchdog_tx_req != ss->watchdog_tx_done) { |
3036 | /* nic seems like it might be stuck.. */ | 3074 | /* nic seems like it might be stuck.. */ |
3037 | if (rx_pause_cnt != mgp->watchdog_pause) { | 3075 | if (rx_pause_cnt != mgp->watchdog_pause) { |
3038 | if (net_ratelimit()) | 3076 | if (net_ratelimit()) |
@@ -3047,8 +3085,8 @@ static void myri10ge_watchdog_timer(unsigned long arg) | |||
3047 | /* rearm timer */ | 3085 | /* rearm timer */ |
3048 | mod_timer(&mgp->watchdog_timer, | 3086 | mod_timer(&mgp->watchdog_timer, |
3049 | jiffies + myri10ge_watchdog_timeout * HZ); | 3087 | jiffies + myri10ge_watchdog_timeout * HZ); |
3050 | mgp->watchdog_tx_done = mgp->tx.done; | 3088 | ss->watchdog_tx_done = ss->tx.done; |
3051 | mgp->watchdog_tx_req = mgp->tx.req; | 3089 | ss->watchdog_tx_req = ss->tx.req; |
3052 | mgp->watchdog_pause = rx_pause_cnt; | 3090 | mgp->watchdog_pause = rx_pause_cnt; |
3053 | } | 3091 | } |
3054 | 3092 | ||
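
The watchdog timer hunks above treat the NIC as possibly stuck only when the ring has outstanding requests (req != done), completions have not advanced since the previous tick, and the previous tick already had work pending; the pause-frame counter then distinguishes a genuinely wedged NIC from one that is merely flow-controlled. A compact sketch of that test, under simplified field names, is below (standalone C, not the driver's code).

#include <stdio.h>

struct tx_watch {
    int req, done;                  /* free-running slot counters */
    int last_req, last_done;        /* snapshot taken on the previous watchdog tick */
    unsigned int pause, last_pause; /* pause frames reported by the firmware */
};

/* 0 = healthy, 1 = stalled by flow control, 2 = schedule a reset. */
static int watchdog_check(struct tx_watch *w)
{
    int verdict = 0;

    if (w->req != w->done &&             /* work is outstanding...             */
        w->done == w->last_done &&       /* ...but nothing completed lately... */
        w->last_req != w->last_done)     /* ...and it was outstanding before   */
        verdict = (w->pause != w->last_pause) ? 1 : 2;

    /* re-arm: remember this tick's state for the next comparison */
    w->last_req = w->req;
    w->last_done = w->done;
    w->last_pause = w->pause;
    return verdict;
}

int main(void)
{
    struct tx_watch w = { .req = 40, .done = 30, .last_req = 38, .last_done = 30,
                          .pause = 7, .last_pause = 7 };

    printf("verdict=%d\n", watchdog_check(&w));   /* 2: no progress, no pause frames */
    return 0;
}
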
@@ -3072,7 +3110,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3072 | 3110 | ||
3073 | mgp = netdev_priv(netdev); | 3111 | mgp = netdev_priv(netdev); |
3074 | mgp->dev = netdev; | 3112 | mgp->dev = netdev; |
3075 | netif_napi_add(netdev, &mgp->napi, myri10ge_poll, myri10ge_napi_weight); | 3113 | netif_napi_add(netdev, &mgp->ss.napi, myri10ge_poll, myri10ge_napi_weight); |
3076 | mgp->pdev = pdev; | 3114 | mgp->pdev = pdev; |
3077 | mgp->csum_flag = MXGEFW_FLAGS_CKSUM; | 3115 | mgp->csum_flag = MXGEFW_FLAGS_CKSUM; |
3078 | mgp->pause = myri10ge_flow_control; | 3116 | mgp->pause = myri10ge_flow_control; |
@@ -3118,9 +3156,9 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3118 | if (mgp->cmd == NULL) | 3156 | if (mgp->cmd == NULL) |
3119 | goto abort_with_netdev; | 3157 | goto abort_with_netdev; |
3120 | 3158 | ||
3121 | mgp->fw_stats = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->fw_stats), | 3159 | mgp->ss.fw_stats = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->ss.fw_stats), |
3122 | &mgp->fw_stats_bus, GFP_KERNEL); | 3160 | &mgp->ss.fw_stats_bus, GFP_KERNEL); |
3123 | if (mgp->fw_stats == NULL) | 3161 | if (mgp->ss.fw_stats == NULL) |
3124 | goto abort_with_cmd; | 3162 | goto abort_with_cmd; |
3125 | 3163 | ||
3126 | mgp->board_span = pci_resource_len(pdev, 0); | 3164 | mgp->board_span = pci_resource_len(pdev, 0); |
@@ -3160,12 +3198,12 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3160 | netdev->dev_addr[i] = mgp->mac_addr[i]; | 3198 | netdev->dev_addr[i] = mgp->mac_addr[i]; |
3161 | 3199 | ||
3162 | /* allocate rx done ring */ | 3200 | /* allocate rx done ring */ |
3163 | bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry); | 3201 | bytes = myri10ge_max_intr_slots * sizeof(*mgp->ss.rx_done.entry); |
3164 | mgp->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes, | 3202 | mgp->ss.rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes, |
3165 | &mgp->rx_done.bus, GFP_KERNEL); | 3203 | &mgp->ss.rx_done.bus, GFP_KERNEL); |
3166 | if (mgp->rx_done.entry == NULL) | 3204 | if (mgp->ss.rx_done.entry == NULL) |
3167 | goto abort_with_ioremap; | 3205 | goto abort_with_ioremap; |
3168 | memset(mgp->rx_done.entry, 0, bytes); | 3206 | memset(mgp->ss.rx_done.entry, 0, bytes); |
3169 | 3207 | ||
3170 | myri10ge_select_firmware(mgp); | 3208 | myri10ge_select_firmware(mgp); |
3171 | 3209 | ||
@@ -3225,7 +3263,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3225 | } | 3263 | } |
3226 | dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n", | 3264 | dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n", |
3227 | (mgp->msi_enabled ? "MSI" : "xPIC"), | 3265 | (mgp->msi_enabled ? "MSI" : "xPIC"), |
3228 | netdev->irq, mgp->tx.boundary, mgp->fw_name, | 3266 | netdev->irq, mgp->tx_boundary, mgp->fw_name, |
3229 | (mgp->wc_enabled ? "Enabled" : "Disabled")); | 3267 | (mgp->wc_enabled ? "Enabled" : "Disabled")); |
3230 | 3268 | ||
3231 | return 0; | 3269 | return 0; |
@@ -3237,9 +3275,9 @@ abort_with_firmware: | |||
3237 | myri10ge_dummy_rdma(mgp, 0); | 3275 | myri10ge_dummy_rdma(mgp, 0); |
3238 | 3276 | ||
3239 | abort_with_rx_done: | 3277 | abort_with_rx_done: |
3240 | bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry); | 3278 | bytes = myri10ge_max_intr_slots * sizeof(*mgp->ss.rx_done.entry); |
3241 | dma_free_coherent(&pdev->dev, bytes, | 3279 | dma_free_coherent(&pdev->dev, bytes, |
3242 | mgp->rx_done.entry, mgp->rx_done.bus); | 3280 | mgp->ss.rx_done.entry, mgp->ss.rx_done.bus); |
3243 | 3281 | ||
3244 | abort_with_ioremap: | 3282 | abort_with_ioremap: |
3245 | iounmap(mgp->sram); | 3283 | iounmap(mgp->sram); |
@@ -3249,8 +3287,8 @@ abort_with_wc: | |||
3249 | if (mgp->mtrr >= 0) | 3287 | if (mgp->mtrr >= 0) |
3250 | mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); | 3288 | mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); |
3251 | #endif | 3289 | #endif |
3252 | dma_free_coherent(&pdev->dev, sizeof(*mgp->fw_stats), | 3290 | dma_free_coherent(&pdev->dev, sizeof(*mgp->ss.fw_stats), |
3253 | mgp->fw_stats, mgp->fw_stats_bus); | 3291 | mgp->ss.fw_stats, mgp->ss.fw_stats_bus); |
3254 | 3292 | ||
3255 | abort_with_cmd: | 3293 | abort_with_cmd: |
3256 | dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), | 3294 | dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), |
@@ -3288,9 +3326,9 @@ static void myri10ge_remove(struct pci_dev *pdev) | |||
3288 | /* avoid a memory leak */ | 3326 | /* avoid a memory leak */ |
3289 | pci_restore_state(pdev); | 3327 | pci_restore_state(pdev); |
3290 | 3328 | ||
3291 | bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry); | 3329 | bytes = myri10ge_max_intr_slots * sizeof(*mgp->ss.rx_done.entry); |
3292 | dma_free_coherent(&pdev->dev, bytes, | 3330 | dma_free_coherent(&pdev->dev, bytes, |
3293 | mgp->rx_done.entry, mgp->rx_done.bus); | 3331 | mgp->ss.rx_done.entry, mgp->ss.rx_done.bus); |
3294 | 3332 | ||
3295 | iounmap(mgp->sram); | 3333 | iounmap(mgp->sram); |
3296 | 3334 | ||
@@ -3298,8 +3336,8 @@ static void myri10ge_remove(struct pci_dev *pdev) | |||
3298 | if (mgp->mtrr >= 0) | 3336 | if (mgp->mtrr >= 0) |
3299 | mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); | 3337 | mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); |
3300 | #endif | 3338 | #endif |
3301 | dma_free_coherent(&pdev->dev, sizeof(*mgp->fw_stats), | 3339 | dma_free_coherent(&pdev->dev, sizeof(*mgp->ss.fw_stats), |
3302 | mgp->fw_stats, mgp->fw_stats_bus); | 3340 | mgp->ss.fw_stats, mgp->ss.fw_stats_bus); |
3303 | 3341 | ||
3304 | dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), | 3342 | dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), |
3305 | mgp->cmd, mgp->cmd_bus); | 3343 | mgp->cmd, mgp->cmd_bus); |