Diffstat (limited to 'drivers/net')
-rw-r--r-- | drivers/net/mv643xx_eth.c | 549
-rw-r--r-- | drivers/net/mv643xx_eth.h |  19
2 files changed, 158 insertions, 410 deletions
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 098f3a28837c..260be8048d3c 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -10,7 +10,7 @@
10 | * | 10 | * |
11 | * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org> | 11 | * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org> |
12 | * | 12 | * |
13 | * Copyright (C) 2004-2005 MontaVista Software, Inc. | 13 | * Copyright (C) 2004-2006 MontaVista Software, Inc. |
14 | * Dale Farnsworth <dale@farnsworth.org> | 14 | * Dale Farnsworth <dale@farnsworth.org> |
15 | * | 15 | * |
16 | * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com> | 16 | * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com> |
@@ -554,7 +554,7 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
554 | /* UDP change : We may need this */ | 554 | /* UDP change : We may need this */ |
555 | if ((eth_int_cause_ext & 0x0000ffff) && | 555 | if ((eth_int_cause_ext & 0x0000ffff) && |
556 | (mv643xx_eth_free_tx_queue(dev, eth_int_cause_ext) == 0) && | 556 | (mv643xx_eth_free_tx_queue(dev, eth_int_cause_ext) == 0) && |
557 | (mp->tx_ring_size > mp->tx_desc_count + MAX_DESCS_PER_SKB)) | 557 | (mp->tx_ring_size - mp->tx_desc_count > MAX_DESCS_PER_SKB)) |
558 | netif_wake_queue(dev); | 558 | netif_wake_queue(dev); |
559 | #ifdef MV643XX_NAPI | 559 | #ifdef MV643XX_NAPI |
560 | } else { | 560 | } else { |
@@ -598,7 +598,7 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
598 | mv643xx_eth_update_pscr(dev, &cmd); | 598 | mv643xx_eth_update_pscr(dev, &cmd); |
599 | if (!netif_carrier_ok(dev)) { | 599 | if (!netif_carrier_ok(dev)) { |
600 | netif_carrier_on(dev); | 600 | netif_carrier_on(dev); |
601 | if (mp->tx_ring_size > mp->tx_desc_count + | 601 | if (mp->tx_ring_size - mp->tx_desc_count > |
602 | MAX_DESCS_PER_SKB) { | 602 | MAX_DESCS_PER_SKB) { |
603 | netif_wake_queue(dev); | 603 | netif_wake_queue(dev); |
604 | /* Start TX queue */ | 604 | /* Start TX queue */ |
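
Note: both interrupt-handler hunks above (and the mv643xx_tx() hunk below) rewrite the wake-queue test so it reads as a free-descriptor count: the queue is only woken when the descriptors still unused exceed the worst case a single skb can need. A minimal sketch of that test, using the fields shown in the patch and assuming MAX_DESCS_PER_SKB bounds the descriptors one packet may consume:

    /* Illustrative helper, not part of the patch: true when a worst-case
     * skb (linear head plus maximum fragments) still fits in the tx ring. */
    static int tx_ring_has_room(struct mv643xx_private *mp)
    {
            return mp->tx_ring_size - mp->tx_desc_count > MAX_DESCS_PER_SKB;
    }
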
@@ -777,9 +777,6 @@ static void ether_init_tx_desc_ring(struct mv643xx_private *mp)
777 | 777 | ||
778 | mp->tx_curr_desc_q = 0; | 778 | mp->tx_curr_desc_q = 0; |
779 | mp->tx_used_desc_q = 0; | 779 | mp->tx_used_desc_q = 0; |
780 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX | ||
781 | mp->tx_first_desc_q = 0; | ||
782 | #endif | ||
783 | 780 | ||
784 | mp->tx_desc_area_size = tx_desc_num * sizeof(struct eth_tx_desc); | 781 | mp->tx_desc_area_size = tx_desc_num * sizeof(struct eth_tx_desc); |
785 | 782 | ||
@@ -1085,8 +1082,7 @@ static void mv643xx_tx(struct net_device *dev)
1085 | } | 1082 | } |
1086 | 1083 | ||
1087 | if (netif_queue_stopped(dev) && | 1084 | if (netif_queue_stopped(dev) && |
1088 | mp->tx_ring_size > | 1085 | mp->tx_ring_size - mp->tx_desc_count > MAX_DESCS_PER_SKB) |
1089 | mp->tx_desc_count + MAX_DESCS_PER_SKB) | ||
1090 | netif_wake_queue(dev); | 1086 | netif_wake_queue(dev); |
1091 | } | 1087 | } |
1092 | 1088 | ||
@@ -1133,7 +1129,10 @@ static int mv643xx_poll(struct net_device *dev, int *budget)
1133 | } | 1129 | } |
1134 | #endif | 1130 | #endif |
1135 | 1131 | ||
1136 | /* Hardware can't handle unaligned fragments smaller than 9 bytes. | 1132 | /** |
1133 | * has_tiny_unaligned_frags - check if skb has any small, unaligned fragments | ||
1134 | * | ||
1135 | * Hardware can't handle unaligned fragments smaller than 9 bytes. | ||
1137 | * This helper function detects that case. | 1136 | * This helper function detects that case. |
1138 | */ | 1137 | */ |
1139 | 1138 | ||
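
Note: the kernel-doc header added above describes has_tiny_unaligned_frags(); the function body itself is outside this hunk. For reference, a sketch of such a check under the constraint the comment states (a fragment of 8 bytes or less must not start on a misaligned address), using the 2.6-era skb_frag_t fields:

    static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
    {
            unsigned int frag;

            for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
                    skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];

                    /* small fragment that is not 8-byte aligned */
                    if (fragp->size <= 8 && fragp->page_offset & 0x7)
                            return 1;
            }
            return 0;
    }
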
@@ -1150,223 +1149,170 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
1150 | return 0; | 1149 | return 0; |
1151 | } | 1150 | } |
1152 | 1151 | ||
1152 | /** | ||
1153 | * eth_alloc_tx_desc_index - return the index of the next available tx desc | ||
1154 | */ | ||
1155 | static int eth_alloc_tx_desc_index(struct mv643xx_private *mp) | ||
1156 | { | ||
1157 | int tx_desc_curr; | ||
1158 | |||
1159 | tx_desc_curr = mp->tx_curr_desc_q; | ||
1153 | 1160 | ||
1154 | /* | 1161 | BUG_ON(mp->tx_desc_count >= mp->tx_ring_size); |
1155 | * mv643xx_eth_start_xmit | 1162 | mp->tx_desc_count++; |
1156 | * | 1163 | |
1157 | * This function is queues a packet in the Tx descriptor for | 1164 | mp->tx_curr_desc_q = (tx_desc_curr + 1) % mp->tx_ring_size; |
1158 | * required port. | 1165 | |
1159 | * | 1166 | BUG_ON(mp->tx_curr_desc_q == mp->tx_used_desc_q); |
1160 | * Input : skb - a pointer to socket buffer | 1167 | |
1161 | * dev - a pointer to the required port | 1168 | return tx_desc_curr; |
1169 | } | ||
1170 | |||
1171 | /** | ||
1172 | * eth_tx_fill_frag_descs - fill tx hw descriptors for an skb's fragments. | ||
1162 | * | 1173 | * |
1163 | * Output : zero upon success | 1174 | * Ensure the data for each fragment to be transmitted is mapped properly, |
1175 | * then fill in descriptors in the tx hw queue. | ||
1164 | */ | 1176 | */ |
1165 | static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) | 1177 | static void eth_tx_fill_frag_descs(struct mv643xx_private *mp, |
1178 | struct sk_buff *skb) | ||
1166 | { | 1179 | { |
1167 | struct mv643xx_private *mp = netdev_priv(dev); | 1180 | int frag; |
1181 | int tx_index; | ||
1182 | struct eth_tx_desc *desc; | ||
1168 | struct net_device_stats *stats = &mp->stats; | 1183 | struct net_device_stats *stats = &mp->stats; |
1169 | ETH_FUNC_RET_STATUS status; | ||
1170 | unsigned long flags; | ||
1171 | struct pkt_info pkt_info; | ||
1172 | 1184 | ||
1173 | if (netif_queue_stopped(dev)) { | 1185 | for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { |
1174 | printk(KERN_ERR | 1186 | skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; |
1175 | "%s: Tried sending packet when interface is stopped\n", | 1187 | |
1176 | dev->name); | 1188 | tx_index = eth_alloc_tx_desc_index(mp); |
1177 | return 1; | 1189 | desc = &mp->p_tx_desc_area[tx_index]; |
1190 | |||
1191 | desc->cmd_sts = ETH_BUFFER_OWNED_BY_DMA; | ||
1192 | /* Last Frag enables interrupt and frees the skb */ | ||
1193 | if (frag == (skb_shinfo(skb)->nr_frags - 1)) { | ||
1194 | desc->cmd_sts |= ETH_ZERO_PADDING | | ||
1195 | ETH_TX_LAST_DESC | | ||
1196 | ETH_TX_ENABLE_INTERRUPT; | ||
1197 | mp->tx_skb[tx_index] = skb; | ||
1198 | } else | ||
1199 | mp->tx_skb[tx_index] = 0; | ||
1200 | |||
1201 | desc = &mp->p_tx_desc_area[tx_index]; | ||
1202 | desc->l4i_chk = 0; | ||
1203 | desc->byte_cnt = this_frag->size; | ||
1204 | desc->buf_ptr = dma_map_page(NULL, this_frag->page, | ||
1205 | this_frag->page_offset, | ||
1206 | this_frag->size, | ||
1207 | DMA_TO_DEVICE); | ||
1208 | stats->tx_bytes += this_frag->size; | ||
1178 | } | 1209 | } |
1210 | } | ||
1179 | 1211 | ||
1180 | /* This is a hard error, log it. */ | 1212 | /** |
1181 | if ((mp->tx_ring_size - mp->tx_desc_count) <= | 1213 | * eth_tx_submit_descs_for_skb - submit data from an skb to the tx hw |
1182 | (skb_shinfo(skb)->nr_frags + 1)) { | 1214 | * |
1183 | netif_stop_queue(dev); | 1215 | * Ensure the data for an skb to be transmitted is mapped properly, |
1184 | printk(KERN_ERR | 1216 | * then fill in descriptors in the tx hw queue and start the hardware. |
1185 | "%s: Bug in mv643xx_eth - Trying to transmit when" | 1217 | */ |
1186 | " queue full !\n", dev->name); | 1218 | static int eth_tx_submit_descs_for_skb(struct mv643xx_private *mp, |
1187 | return 1; | 1219 | struct sk_buff *skb) |
1188 | } | 1220 | { |
1221 | int tx_index; | ||
1222 | struct eth_tx_desc *desc; | ||
1223 | u32 cmd_sts; | ||
1224 | int length; | ||
1225 | int tx_bytes = 0; | ||
1189 | 1226 | ||
1190 | /* Paranoid check - this shouldn't happen */ | 1227 | cmd_sts = ETH_TX_FIRST_DESC | ETH_GEN_CRC | ETH_BUFFER_OWNED_BY_DMA; |
1191 | if (skb == NULL) { | ||
1192 | stats->tx_dropped++; | ||
1193 | printk(KERN_ERR "mv64320_eth paranoid check failed\n"); | ||
1194 | return 1; | ||
1195 | } | ||
1196 | 1228 | ||
1197 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX | 1229 | tx_index = eth_alloc_tx_desc_index(mp); |
1198 | if (has_tiny_unaligned_frags(skb)) { | 1230 | desc = &mp->p_tx_desc_area[tx_index]; |
1199 | if ((skb_linearize(skb, GFP_ATOMIC) != 0)) { | 1231 | |
1200 | stats->tx_dropped++; | 1232 | if (skb_shinfo(skb)->nr_frags) { |
1201 | printk(KERN_DEBUG "%s: failed to linearize tiny " | 1233 | eth_tx_fill_frag_descs(mp, skb); |
1202 | "unaligned fragment\n", dev->name); | 1234 | |
1203 | return 1; | 1235 | length = skb_headlen(skb); |
1204 | } | 1236 | mp->tx_skb[tx_index] = 0; |
1237 | } else { | ||
1238 | cmd_sts |= ETH_ZERO_PADDING | | ||
1239 | ETH_TX_LAST_DESC | | ||
1240 | ETH_TX_ENABLE_INTERRUPT; | ||
1241 | length = skb->len; | ||
1242 | mp->tx_skb[tx_index] = skb; | ||
1205 | } | 1243 | } |
1206 | 1244 | ||
1207 | spin_lock_irqsave(&mp->lock, flags); | 1245 | desc->byte_cnt = length; |
1246 | desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE); | ||
1247 | tx_bytes += length; | ||
1208 | 1248 | ||
1209 | if (!skb_shinfo(skb)->nr_frags) { | 1249 | if (skb->ip_summed == CHECKSUM_HW) { |
1210 | if (skb->ip_summed != CHECKSUM_HW) { | 1250 | BUG_ON(skb->protocol != ETH_P_IP); |
1211 | /* Errata BTS #50, IHL must be 5 if no HW checksum */ | 1251 | |
1212 | pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | | 1252 | cmd_sts |= ETH_GEN_TCP_UDP_CHECKSUM | |
1213 | ETH_TX_FIRST_DESC | | 1253 | ETH_GEN_IP_V_4_CHECKSUM | |
1214 | ETH_TX_LAST_DESC | | 1254 | skb->nh.iph->ihl << ETH_TX_IHL_SHIFT; |
1215 | 5 << ETH_TX_IHL_SHIFT; | 1255 | |
1216 | pkt_info.l4i_chk = 0; | 1256 | switch (skb->nh.iph->protocol) { |
1217 | } else { | 1257 | case IPPROTO_UDP: |
1218 | pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | | 1258 | cmd_sts |= ETH_UDP_FRAME; |
1219 | ETH_TX_FIRST_DESC | | 1259 | desc->l4i_chk = skb->h.uh->check; |
1220 | ETH_TX_LAST_DESC | | 1260 | break; |
1221 | ETH_GEN_TCP_UDP_CHECKSUM | | 1261 | case IPPROTO_TCP: |
1222 | ETH_GEN_IP_V_4_CHECKSUM | | 1262 | desc->l4i_chk = skb->h.th->check; |
1223 | skb->nh.iph->ihl << ETH_TX_IHL_SHIFT; | 1263 | break; |
1224 | /* CPU already calculated pseudo header checksum. */ | 1264 | default: |
1225 | if ((skb->protocol == ETH_P_IP) && | 1265 | BUG(); |
1226 | (skb->nh.iph->protocol == IPPROTO_UDP) ) { | ||
1227 | pkt_info.cmd_sts |= ETH_UDP_FRAME; | ||
1228 | pkt_info.l4i_chk = skb->h.uh->check; | ||
1229 | } else if ((skb->protocol == ETH_P_IP) && | ||
1230 | (skb->nh.iph->protocol == IPPROTO_TCP)) | ||
1231 | pkt_info.l4i_chk = skb->h.th->check; | ||
1232 | else { | ||
1233 | printk(KERN_ERR | ||
1234 | "%s: chksum proto != IPv4 TCP or UDP\n", | ||
1235 | dev->name); | ||
1236 | spin_unlock_irqrestore(&mp->lock, flags); | ||
1237 | return 1; | ||
1238 | } | ||
1239 | } | 1266 | } |
1240 | pkt_info.byte_cnt = skb->len; | ||
1241 | pkt_info.buf_ptr = dma_map_single(NULL, skb->data, skb->len, | ||
1242 | DMA_TO_DEVICE); | ||
1243 | pkt_info.return_info = skb; | ||
1244 | status = eth_port_send(mp, &pkt_info); | ||
1245 | if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL)) | ||
1246 | printk(KERN_ERR "%s: Error on transmitting packet\n", | ||
1247 | dev->name); | ||
1248 | stats->tx_bytes += pkt_info.byte_cnt; | ||
1249 | } else { | 1267 | } else { |
1250 | unsigned int frag; | 1268 | /* Errata BTS #50, IHL must be 5 if no HW checksum */ |
1251 | 1269 | cmd_sts |= 5 << ETH_TX_IHL_SHIFT; | |
1252 | /* first frag which is skb header */ | 1270 | desc->l4i_chk = 0; |
1253 | pkt_info.byte_cnt = skb_headlen(skb); | 1271 | } |
1254 | pkt_info.buf_ptr = dma_map_single(NULL, skb->data, | ||
1255 | skb_headlen(skb), | ||
1256 | DMA_TO_DEVICE); | ||
1257 | pkt_info.l4i_chk = 0; | ||
1258 | pkt_info.return_info = 0; | ||
1259 | |||
1260 | if (skb->ip_summed != CHECKSUM_HW) | ||
1261 | /* Errata BTS #50, IHL must be 5 if no HW checksum */ | ||
1262 | pkt_info.cmd_sts = ETH_TX_FIRST_DESC | | ||
1263 | 5 << ETH_TX_IHL_SHIFT; | ||
1264 | else { | ||
1265 | pkt_info.cmd_sts = ETH_TX_FIRST_DESC | | ||
1266 | ETH_GEN_TCP_UDP_CHECKSUM | | ||
1267 | ETH_GEN_IP_V_4_CHECKSUM | | ||
1268 | skb->nh.iph->ihl << ETH_TX_IHL_SHIFT; | ||
1269 | /* CPU already calculated pseudo header checksum. */ | ||
1270 | if ((skb->protocol == ETH_P_IP) && | ||
1271 | (skb->nh.iph->protocol == IPPROTO_UDP)) { | ||
1272 | pkt_info.cmd_sts |= ETH_UDP_FRAME; | ||
1273 | pkt_info.l4i_chk = skb->h.uh->check; | ||
1274 | } else if ((skb->protocol == ETH_P_IP) && | ||
1275 | (skb->nh.iph->protocol == IPPROTO_TCP)) | ||
1276 | pkt_info.l4i_chk = skb->h.th->check; | ||
1277 | else { | ||
1278 | printk(KERN_ERR | ||
1279 | "%s: chksum proto != IPv4 TCP or UDP\n", | ||
1280 | dev->name); | ||
1281 | spin_unlock_irqrestore(&mp->lock, flags); | ||
1282 | return 1; | ||
1283 | } | ||
1284 | } | ||
1285 | 1272 | ||
1286 | status = eth_port_send(mp, &pkt_info); | 1273 | /* ensure all other descriptors are written before first cmd_sts */ |
1287 | if (status != ETH_OK) { | 1274 | wmb(); |
1288 | if ((status == ETH_ERROR)) | 1275 | desc->cmd_sts = cmd_sts; |
1289 | printk(KERN_ERR | ||
1290 | "%s: Error on transmitting packet\n", | ||
1291 | dev->name); | ||
1292 | if (status == ETH_QUEUE_FULL) | ||
1293 | printk("Error on Queue Full \n"); | ||
1294 | if (status == ETH_QUEUE_LAST_RESOURCE) | ||
1295 | printk("Tx resource error \n"); | ||
1296 | } | ||
1297 | stats->tx_bytes += pkt_info.byte_cnt; | ||
1298 | |||
1299 | /* Check for the remaining frags */ | ||
1300 | for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { | ||
1301 | skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; | ||
1302 | pkt_info.l4i_chk = 0x0000; | ||
1303 | pkt_info.cmd_sts = 0x00000000; | ||
1304 | |||
1305 | /* Last Frag enables interrupt and frees the skb */ | ||
1306 | if (frag == (skb_shinfo(skb)->nr_frags - 1)) { | ||
1307 | pkt_info.cmd_sts |= ETH_TX_ENABLE_INTERRUPT | | ||
1308 | ETH_TX_LAST_DESC; | ||
1309 | pkt_info.return_info = skb; | ||
1310 | } else { | ||
1311 | pkt_info.return_info = 0; | ||
1312 | } | ||
1313 | pkt_info.l4i_chk = 0; | ||
1314 | pkt_info.byte_cnt = this_frag->size; | ||
1315 | 1276 | ||
1316 | pkt_info.buf_ptr = dma_map_page(NULL, this_frag->page, | 1277 | /* ensure all descriptors are written before poking hardware */ |
1317 | this_frag->page_offset, | 1278 | wmb(); |
1318 | this_frag->size, | 1279 | mv643xx_eth_port_enable_tx(mp->port_num, mp->port_tx_queue_command); |
1319 | DMA_TO_DEVICE); | ||
1320 | 1280 | ||
1321 | status = eth_port_send(mp, &pkt_info); | 1281 | return tx_bytes; |
1282 | } | ||
1322 | 1283 | ||
1323 | if (status != ETH_OK) { | 1284 | /** |
1324 | if ((status == ETH_ERROR)) | 1285 | * mv643xx_eth_start_xmit - queue an skb to the hardware for transmission |
1325 | printk(KERN_ERR "%s: Error on " | 1286 | * |
1326 | "transmitting packet\n", | 1287 | */ |
1327 | dev->name); | 1288 | static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) |
1289 | { | ||
1290 | struct mv643xx_private *mp = netdev_priv(dev); | ||
1291 | struct net_device_stats *stats = &mp->stats; | ||
1292 | unsigned long flags; | ||
1328 | 1293 | ||
1329 | if (status == ETH_QUEUE_LAST_RESOURCE) | 1294 | BUG_ON(netif_queue_stopped(dev)); |
1330 | printk("Tx resource error \n"); | 1295 | BUG_ON(skb == NULL); |
1296 | BUG_ON(mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB); | ||
1331 | 1297 | ||
1332 | if (status == ETH_QUEUE_FULL) | 1298 | if (has_tiny_unaligned_frags(skb)) { |
1333 | printk("Queue is full \n"); | 1299 | if ((skb_linearize(skb, GFP_ATOMIC) != 0)) { |
1334 | } | 1300 | stats->tx_dropped++; |
1335 | stats->tx_bytes += pkt_info.byte_cnt; | 1301 | printk(KERN_DEBUG "%s: failed to linearize tiny " |
1302 | "unaligned fragment\n", dev->name); | ||
1303 | return 1; | ||
1336 | } | 1304 | } |
1337 | } | 1305 | } |
1338 | #else | ||
1339 | spin_lock_irqsave(&mp->lock, flags); | ||
1340 | |||
1341 | pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | ETH_TX_FIRST_DESC | | ||
1342 | ETH_TX_LAST_DESC; | ||
1343 | pkt_info.l4i_chk = 0; | ||
1344 | pkt_info.byte_cnt = skb->len; | ||
1345 | pkt_info.buf_ptr = dma_map_single(NULL, skb->data, skb->len, | ||
1346 | DMA_TO_DEVICE); | ||
1347 | pkt_info.return_info = skb; | ||
1348 | status = eth_port_send(mp, &pkt_info); | ||
1349 | if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL)) | ||
1350 | printk(KERN_ERR "%s: Error on transmitting packet\n", | ||
1351 | dev->name); | ||
1352 | stats->tx_bytes += pkt_info.byte_cnt; | ||
1353 | #endif | ||
1354 | 1306 | ||
1355 | /* Check if TX queue can handle another skb. If not, then | 1307 | spin_lock_irqsave(&mp->lock, flags); |
1356 | * signal higher layers to stop requesting TX | ||
1357 | */ | ||
1358 | if (mp->tx_ring_size <= (mp->tx_desc_count + MAX_DESCS_PER_SKB)) | ||
1359 | /* | ||
1360 | * Stop getting skb's from upper layers. | ||
1361 | * Getting skb's from upper layers will be enabled again after | ||
1362 | * packets are released. | ||
1363 | */ | ||
1364 | netif_stop_queue(dev); | ||
1365 | 1308 | ||
1366 | /* Update statistics and start of transmittion time */ | 1309 | stats->tx_bytes = eth_tx_submit_descs_for_skb(mp, skb); |
1367 | stats->tx_packets++; | 1310 | stats->tx_packets++; |
1368 | dev->trans_start = jiffies; | 1311 | dev->trans_start = jiffies; |
1369 | 1312 | ||
1313 | if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB) | ||
1314 | netif_stop_queue(dev); | ||
1315 | |||
1370 | spin_unlock_irqrestore(&mp->lock, flags); | 1316 | spin_unlock_irqrestore(&mp->lock, flags); |
1371 | 1317 | ||
1372 | return 0; /* success */ | 1318 | return 0; /* success */ |
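
Note: the rewritten xmit path above can BUG_ON() a full ring because mv643xx_eth_start_xmit() stops the queue whenever fewer than MAX_DESCS_PER_SKB descriptors remain, and one packet never needs more than one descriptor for its linear head plus one per page fragment. A small sketch of that per-skb accounting (hypothetical helper, names otherwise as in the patch):

    /* Descriptors one skb consumes in this driver: the linear head plus
     * one per paged fragment; never more than MAX_DESCS_PER_SKB. */
    static int descs_needed(struct sk_buff *skb)
    {
            return 1 + skb_shinfo(skb)->nr_frags;
    }

This is why the queue-stop threshold above compares against MAX_DESCS_PER_SKB rather than the exact fragment count of the next packet.
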
@@ -1812,22 +1758,6 @@ MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
1812 | * to the Rx descriptor ring to enable the reuse of this source. | 1758 | * to the Rx descriptor ring to enable the reuse of this source. |
1813 | * Return Rx resource is done using the eth_rx_return_buff API. | 1759 | * Return Rx resource is done using the eth_rx_return_buff API. |
1814 | * | 1760 | * |
1815 | * Transmit operation: | ||
1816 | * The eth_port_send API supports Scatter-Gather which enables to | ||
1817 | * send a packet spanned over multiple buffers. This means that | ||
1818 | * for each packet info structure given by the user and put into | ||
1819 | * the Tx descriptors ring, will be transmitted only if the 'LAST' | ||
1820 | * bit will be set in the packet info command status field. This | ||
1821 | * API also consider restriction regarding buffer alignments and | ||
1822 | * sizes. | ||
1823 | * The user must return a Tx resource after ensuring the buffer | ||
1824 | * has been transmitted to enable the Tx ring indexes to update. | ||
1825 | * | ||
1826 | * BOARD LAYOUT | ||
1827 | * This device is on-board. No jumper diagram is necessary. | ||
1828 | * | ||
1829 | * EXTERNAL INTERFACE | ||
1830 | * | ||
1831 | * Prior to calling the initialization routine eth_port_init() the user | 1761 | * Prior to calling the initialization routine eth_port_init() the user |
1832 | * must set the following fields under mv643xx_private struct: | 1762 | * must set the following fields under mv643xx_private struct: |
1833 | * port_num User Ethernet port number. | 1763 | * port_num User Ethernet port number. |
@@ -1881,7 +1811,6 @@ static void eth_port_set_filter_table_entry(int table, unsigned char entry);
1881 | static void eth_port_init(struct mv643xx_private *mp) | 1811 | static void eth_port_init(struct mv643xx_private *mp) |
1882 | { | 1812 | { |
1883 | mp->rx_resource_err = 0; | 1813 | mp->rx_resource_err = 0; |
1884 | mp->tx_resource_err = 0; | ||
1885 | 1814 | ||
1886 | eth_port_reset(mp->port_num); | 1815 | eth_port_reset(mp->port_num); |
1887 | 1816 | ||
@@ -2673,166 +2602,6 @@ static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location,
2673 | } | 2602 | } |
2674 | 2603 | ||
2675 | /* | 2604 | /* |
2676 | * eth_port_send - Send an Ethernet packet | ||
2677 | * | ||
2678 | * DESCRIPTION: | ||
2679 | * This routine send a given packet described by p_pktinfo parameter. It | ||
2680 | * supports transmitting of a packet spaned over multiple buffers. The | ||
2681 | * routine updates 'curr' and 'first' indexes according to the packet | ||
2682 | * segment passed to the routine. In case the packet segment is first, | ||
2683 | * the 'first' index is update. In any case, the 'curr' index is updated. | ||
2684 | * If the routine get into Tx resource error it assigns 'curr' index as | ||
2685 | * 'first'. This way the function can abort Tx process of multiple | ||
2686 | * descriptors per packet. | ||
2687 | * | ||
2688 | * INPUT: | ||
2689 | * struct mv643xx_private *mp Ethernet Port Control srtuct. | ||
2690 | * struct pkt_info *p_pkt_info User packet buffer. | ||
2691 | * | ||
2692 | * OUTPUT: | ||
2693 | * Tx ring 'curr' and 'first' indexes are updated. | ||
2694 | * | ||
2695 | * RETURN: | ||
2696 | * ETH_QUEUE_FULL in case of Tx resource error. | ||
2697 | * ETH_ERROR in case the routine can not access Tx desc ring. | ||
2698 | * ETH_QUEUE_LAST_RESOURCE if the routine uses the last Tx resource. | ||
2699 | * ETH_OK otherwise. | ||
2700 | * | ||
2701 | */ | ||
2702 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX | ||
2703 | /* | ||
2704 | * Modified to include the first descriptor pointer in case of SG | ||
2705 | */ | ||
2706 | static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp, | ||
2707 | struct pkt_info *p_pkt_info) | ||
2708 | { | ||
2709 | int tx_desc_curr, tx_desc_used, tx_first_desc, tx_next_desc; | ||
2710 | struct eth_tx_desc *current_descriptor; | ||
2711 | struct eth_tx_desc *first_descriptor; | ||
2712 | u32 command; | ||
2713 | |||
2714 | /* Do not process Tx ring in case of Tx ring resource error */ | ||
2715 | if (mp->tx_resource_err) | ||
2716 | return ETH_QUEUE_FULL; | ||
2717 | |||
2718 | /* | ||
2719 | * The hardware requires that each buffer that is <= 8 bytes | ||
2720 | * in length must be aligned on an 8 byte boundary. | ||
2721 | */ | ||
2722 | if (p_pkt_info->byte_cnt <= 8 && p_pkt_info->buf_ptr & 0x7) { | ||
2723 | printk(KERN_ERR | ||
2724 | "mv643xx_eth port %d: packet size <= 8 problem\n", | ||
2725 | mp->port_num); | ||
2726 | return ETH_ERROR; | ||
2727 | } | ||
2728 | |||
2729 | mp->tx_desc_count++; | ||
2730 | BUG_ON(mp->tx_desc_count > mp->tx_ring_size); | ||
2731 | |||
2732 | /* Get the Tx Desc ring indexes */ | ||
2733 | tx_desc_curr = mp->tx_curr_desc_q; | ||
2734 | tx_desc_used = mp->tx_used_desc_q; | ||
2735 | |||
2736 | current_descriptor = &mp->p_tx_desc_area[tx_desc_curr]; | ||
2737 | |||
2738 | tx_next_desc = (tx_desc_curr + 1) % mp->tx_ring_size; | ||
2739 | |||
2740 | current_descriptor->buf_ptr = p_pkt_info->buf_ptr; | ||
2741 | current_descriptor->byte_cnt = p_pkt_info->byte_cnt; | ||
2742 | current_descriptor->l4i_chk = p_pkt_info->l4i_chk; | ||
2743 | mp->tx_skb[tx_desc_curr] = p_pkt_info->return_info; | ||
2744 | |||
2745 | command = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC | | ||
2746 | ETH_BUFFER_OWNED_BY_DMA; | ||
2747 | if (command & ETH_TX_FIRST_DESC) { | ||
2748 | tx_first_desc = tx_desc_curr; | ||
2749 | mp->tx_first_desc_q = tx_first_desc; | ||
2750 | first_descriptor = current_descriptor; | ||
2751 | mp->tx_first_command = command; | ||
2752 | } else { | ||
2753 | tx_first_desc = mp->tx_first_desc_q; | ||
2754 | first_descriptor = &mp->p_tx_desc_area[tx_first_desc]; | ||
2755 | BUG_ON(first_descriptor == NULL); | ||
2756 | current_descriptor->cmd_sts = command; | ||
2757 | } | ||
2758 | |||
2759 | if (command & ETH_TX_LAST_DESC) { | ||
2760 | wmb(); | ||
2761 | first_descriptor->cmd_sts = mp->tx_first_command; | ||
2762 | |||
2763 | wmb(); | ||
2764 | mv643xx_eth_port_enable_tx(mp->port_num, mp->port_tx_queue_command); | ||
2765 | |||
2766 | /* | ||
2767 | * Finish Tx packet. Update first desc in case of Tx resource | ||
2768 | * error */ | ||
2769 | tx_first_desc = tx_next_desc; | ||
2770 | mp->tx_first_desc_q = tx_first_desc; | ||
2771 | } | ||
2772 | |||
2773 | /* Check for ring index overlap in the Tx desc ring */ | ||
2774 | if (tx_next_desc == tx_desc_used) { | ||
2775 | mp->tx_resource_err = 1; | ||
2776 | mp->tx_curr_desc_q = tx_first_desc; | ||
2777 | |||
2778 | return ETH_QUEUE_LAST_RESOURCE; | ||
2779 | } | ||
2780 | |||
2781 | mp->tx_curr_desc_q = tx_next_desc; | ||
2782 | |||
2783 | return ETH_OK; | ||
2784 | } | ||
2785 | #else | ||
2786 | static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp, | ||
2787 | struct pkt_info *p_pkt_info) | ||
2788 | { | ||
2789 | int tx_desc_curr; | ||
2790 | int tx_desc_used; | ||
2791 | struct eth_tx_desc *current_descriptor; | ||
2792 | unsigned int command_status; | ||
2793 | |||
2794 | /* Do not process Tx ring in case of Tx ring resource error */ | ||
2795 | if (mp->tx_resource_err) | ||
2796 | return ETH_QUEUE_FULL; | ||
2797 | |||
2798 | mp->tx_desc_count++; | ||
2799 | BUG_ON(mp->tx_desc_count > mp->tx_ring_size); | ||
2800 | |||
2801 | /* Get the Tx Desc ring indexes */ | ||
2802 | tx_desc_curr = mp->tx_curr_desc_q; | ||
2803 | tx_desc_used = mp->tx_used_desc_q; | ||
2804 | current_descriptor = &mp->p_tx_desc_area[tx_desc_curr]; | ||
2805 | |||
2806 | command_status = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC; | ||
2807 | current_descriptor->buf_ptr = p_pkt_info->buf_ptr; | ||
2808 | current_descriptor->byte_cnt = p_pkt_info->byte_cnt; | ||
2809 | mp->tx_skb[tx_desc_curr] = p_pkt_info->return_info; | ||
2810 | |||
2811 | /* Set last desc with DMA ownership and interrupt enable. */ | ||
2812 | wmb(); | ||
2813 | current_descriptor->cmd_sts = command_status | | ||
2814 | ETH_BUFFER_OWNED_BY_DMA | ETH_TX_ENABLE_INTERRUPT; | ||
2815 | |||
2816 | wmb(); | ||
2817 | mv643xx_eth_port_enable_tx(mp->port_num, mp->port_tx_queue_command); | ||
2818 | |||
2819 | /* Finish Tx packet. Update first desc in case of Tx resource error */ | ||
2820 | tx_desc_curr = (tx_desc_curr + 1) % mp->tx_ring_size; | ||
2821 | |||
2822 | /* Update the current descriptor */ | ||
2823 | mp->tx_curr_desc_q = tx_desc_curr; | ||
2824 | |||
2825 | /* Check for ring index overlap in the Tx desc ring */ | ||
2826 | if (tx_desc_curr == tx_desc_used) { | ||
2827 | mp->tx_resource_err = 1; | ||
2828 | return ETH_QUEUE_LAST_RESOURCE; | ||
2829 | } | ||
2830 | |||
2831 | return ETH_OK; | ||
2832 | } | ||
2833 | #endif | ||
2834 | |||
2835 | /* | ||
2836 | * eth_tx_return_desc - Free all used Tx descriptors | 2605 | * eth_tx_return_desc - Free all used Tx descriptors |
2837 | * | 2606 | * |
2838 | * DESCRIPTION: | 2607 | * DESCRIPTION: |
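
Note: the removed eth_port_send() variants were the last transmit-side users of the pkt_info indirection; after this patch, callers fill hardware descriptors directly. Roughly, the change in calling convention (old above, new below; names as in the patch):

    /* old: build a pkt_info per buffer and submit it */
    pkt_info.buf_ptr  = dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE);
    pkt_info.byte_cnt = skb->len;
    pkt_info.cmd_sts  = ETH_TX_FIRST_DESC | ETH_TX_LAST_DESC | ETH_TX_ENABLE_INTERRUPT;
    status = eth_port_send(mp, &pkt_info);

    /* new: one call maps the skb, fills the descriptors and starts the queue */
    stats->tx_bytes = eth_tx_submit_descs_for_skb(mp, skb);
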
@@ -2858,7 +2627,6 @@ static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
2858 | struct pkt_info *p_pkt_info) | 2627 | struct pkt_info *p_pkt_info) |
2859 | { | 2628 | { |
2860 | int tx_desc_used; | 2629 | int tx_desc_used; |
2861 | int tx_busy_desc; | ||
2862 | struct eth_tx_desc *p_tx_desc_used; | 2630 | struct eth_tx_desc *p_tx_desc_used; |
2863 | unsigned int command_status; | 2631 | unsigned int command_status; |
2864 | unsigned long flags; | 2632 | unsigned long flags; |
@@ -2866,33 +2634,23 @@ static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
2866 | 2634 | ||
2867 | spin_lock_irqsave(&mp->lock, flags); | 2635 | spin_lock_irqsave(&mp->lock, flags); |
2868 | 2636 | ||
2869 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX | 2637 | BUG_ON(mp->tx_desc_count < 0); |
2870 | tx_busy_desc = mp->tx_first_desc_q; | 2638 | if (mp->tx_desc_count == 0) { |
2871 | #else | 2639 | /* no more tx descs in use */ |
2872 | tx_busy_desc = mp->tx_curr_desc_q; | 2640 | err = ETH_ERROR; |
2873 | #endif | 2641 | goto out; |
2642 | } | ||
2874 | 2643 | ||
2875 | /* Get the Tx Desc ring indexes */ | 2644 | /* Get the Tx Desc ring indexes */ |
2876 | tx_desc_used = mp->tx_used_desc_q; | 2645 | tx_desc_used = mp->tx_used_desc_q; |
2877 | 2646 | ||
2878 | p_tx_desc_used = &mp->p_tx_desc_area[tx_desc_used]; | 2647 | p_tx_desc_used = &mp->p_tx_desc_area[tx_desc_used]; |
2879 | 2648 | ||
2880 | /* Sanity check */ | 2649 | BUG_ON(p_tx_desc_used == NULL); |
2881 | if (p_tx_desc_used == NULL) { | ||
2882 | err = ETH_ERROR; | ||
2883 | goto out; | ||
2884 | } | ||
2885 | |||
2886 | /* Stop release. About to overlap the current available Tx descriptor */ | ||
2887 | if (tx_desc_used == tx_busy_desc && !mp->tx_resource_err) { | ||
2888 | err = ETH_ERROR; | ||
2889 | goto out; | ||
2890 | } | ||
2891 | 2650 | ||
2892 | command_status = p_tx_desc_used->cmd_sts; | 2651 | command_status = p_tx_desc_used->cmd_sts; |
2893 | |||
2894 | /* Still transmitting... */ | ||
2895 | if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) { | 2652 | if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) { |
2653 | /* Still transmitting... */ | ||
2896 | err = ETH_ERROR; | 2654 | err = ETH_ERROR; |
2897 | goto out; | 2655 | goto out; |
2898 | } | 2656 | } |
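
Note: with tx_resource_err gone, eth_tx_return_desc() now stops reclaiming purely on tx_desc_count and the descriptor ownership bit. A sketch of a caller-side reclaim loop in the style of mv643xx_tx() (simplified; the real code also unmaps DMA buffers and handles error statistics):

    static void reclaim_sent_descs(struct mv643xx_private *mp)
    {
            struct pkt_info pkt_info;

            /* returns ETH_ERROR once no used descriptors remain or the next
             * descriptor is still owned by the DMA engine */
            while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
                    if (pkt_info.return_info)
                            dev_kfree_skb_irq(pkt_info.return_info);
            }
    }
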
@@ -2907,9 +2665,6 @@ static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
2907 | /* Update the next descriptor to release. */ | 2665 | /* Update the next descriptor to release. */ |
2908 | mp->tx_used_desc_q = (tx_desc_used + 1) % mp->tx_ring_size; | 2666 | mp->tx_used_desc_q = (tx_desc_used + 1) % mp->tx_ring_size; |
2909 | 2667 | ||
2910 | /* Any Tx return cancels the Tx resource error status */ | ||
2911 | mp->tx_resource_err = 0; | ||
2912 | |||
2913 | BUG_ON(mp->tx_desc_count == 0); | 2668 | BUG_ON(mp->tx_desc_count == 0); |
2914 | mp->tx_desc_count--; | 2669 | mp->tx_desc_count--; |
2915 | 2670 | ||
diff --git a/drivers/net/mv643xx_eth.h b/drivers/net/mv643xx_eth.h
index a553054e8da7..8768e1ba45df 100644
--- a/drivers/net/mv643xx_eth.h
+++ b/drivers/net/mv643xx_eth.h
@@ -330,7 +330,6 @@ struct mv643xx_private {
330 | u32 tx_sram_size; /* Size of tx sram area */ | 330 | u32 tx_sram_size; /* Size of tx sram area */ |
331 | 331 | ||
332 | int rx_resource_err; /* Rx ring resource error flag */ | 332 | int rx_resource_err; /* Rx ring resource error flag */ |
333 | int tx_resource_err; /* Tx ring resource error flag */ | ||
334 | 333 | ||
335 | /* Tx/Rx rings managment indexes fields. For driver use */ | 334 | /* Tx/Rx rings managment indexes fields. For driver use */ |
336 | 335 | ||
@@ -339,10 +338,6 @@ struct mv643xx_private {
339 | 338 | ||
340 | /* Next available and first returning Tx resource */ | 339 | /* Next available and first returning Tx resource */ |
341 | int tx_curr_desc_q, tx_used_desc_q; | 340 | int tx_curr_desc_q, tx_used_desc_q; |
342 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX | ||
343 | int tx_first_desc_q; | ||
344 | u32 tx_first_command; | ||
345 | #endif | ||
346 | 341 | ||
347 | #ifdef MV643XX_TX_FAST_REFILL | 342 | #ifdef MV643XX_TX_FAST_REFILL |
348 | u32 tx_clean_threshold; | 343 | u32 tx_clean_threshold; |
@@ -350,12 +345,12 @@ struct mv643xx_private {
350 | 345 | ||
351 | struct eth_rx_desc *p_rx_desc_area; | 346 | struct eth_rx_desc *p_rx_desc_area; |
352 | dma_addr_t rx_desc_dma; | 347 | dma_addr_t rx_desc_dma; |
353 | unsigned int rx_desc_area_size; | 348 | int rx_desc_area_size; |
354 | struct sk_buff **rx_skb; | 349 | struct sk_buff **rx_skb; |
355 | 350 | ||
356 | struct eth_tx_desc *p_tx_desc_area; | 351 | struct eth_tx_desc *p_tx_desc_area; |
357 | dma_addr_t tx_desc_dma; | 352 | dma_addr_t tx_desc_dma; |
358 | unsigned int tx_desc_area_size; | 353 | int tx_desc_area_size; |
359 | struct sk_buff **tx_skb; | 354 | struct sk_buff **tx_skb; |
360 | 355 | ||
361 | struct work_struct tx_timeout_task; | 356 | struct work_struct tx_timeout_task; |
@@ -367,13 +362,13 @@ struct mv643xx_private {
367 | struct mv643xx_mib_counters mib_counters; | 362 | struct mv643xx_mib_counters mib_counters; |
368 | spinlock_t lock; | 363 | spinlock_t lock; |
369 | /* Size of Tx Ring per queue */ | 364 | /* Size of Tx Ring per queue */ |
370 | unsigned int tx_ring_size; | 365 | int tx_ring_size; |
371 | /* Number of tx descriptors in use */ | 366 | /* Number of tx descriptors in use */ |
372 | unsigned int tx_desc_count; | 367 | int tx_desc_count; |
373 | /* Size of Rx Ring per queue */ | 368 | /* Size of Rx Ring per queue */ |
374 | unsigned int rx_ring_size; | 369 | int rx_ring_size; |
375 | /* Number of rx descriptors in use */ | 370 | /* Number of rx descriptors in use */ |
376 | unsigned int rx_desc_count; | 371 | int rx_desc_count; |
377 | 372 | ||
378 | /* | 373 | /* |
379 | * rx_task used to fill RX ring out of bottom half context | 374 | * rx_task used to fill RX ring out of bottom half context |
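
Note: changing the ring counters from unsigned int to int is what makes the new underflow checks in the .c file meaningful; with unsigned counters a decrement past zero would wrap to a large value and a "< 0" test could never fire. A minimal illustration (hypothetical helper):

    static void check_tx_desc_count(struct mv643xx_private *mp)
    {
            /* only useful now that tx_desc_count is a signed int */
            BUG_ON(mp->tx_desc_count < 0);
            BUG_ON(mp->tx_desc_count > mp->tx_ring_size);
    }
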
@@ -416,8 +411,6 @@ static void eth_port_read_smi_reg(unsigned int eth_port_num,
416 | static void eth_clear_mib_counters(unsigned int eth_port_num); | 411 | static void eth_clear_mib_counters(unsigned int eth_port_num); |
417 | 412 | ||
418 | /* Port data flow control routines */ | 413 | /* Port data flow control routines */ |
419 | static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp, | ||
420 | struct pkt_info *p_pkt_info); | ||
421 | static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp, | 414 | static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp, |
422 | struct pkt_info *p_pkt_info); | 415 | struct pkt_info *p_pkt_info); |
423 | static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp, | 416 | static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp, |