Diffstat (limited to 'drivers/net/netxen/netxen_nic_init.c')

 drivers/net/netxen/netxen_nic_init.c | 191 ++++++++++++++-------
 1 file changed, 130 insertions(+), 61 deletions(-)
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 7b5124057664..01ab31b34a85 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -262,17 +262,30 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
 			rds_ring->max_rx_desc_count =
 				adapter->max_rx_desc_count;
 			rds_ring->flags = RCV_DESC_NORMAL;
-			rds_ring->dma_size = RX_DMA_MAP_LEN;
-			rds_ring->skb_size = MAX_RX_BUFFER_LENGTH;
+			if (adapter->ahw.cut_through) {
+				rds_ring->dma_size =
+					NX_CT_DEFAULT_RX_BUF_LEN;
+				rds_ring->skb_size =
+					NX_CT_DEFAULT_RX_BUF_LEN;
+			} else {
+				rds_ring->dma_size = RX_DMA_MAP_LEN;
+				rds_ring->skb_size =
+					MAX_RX_BUFFER_LENGTH;
+			}
 			break;
 
 		case RCV_DESC_JUMBO:
 			rds_ring->max_rx_desc_count =
 				adapter->max_jumbo_rx_desc_count;
 			rds_ring->flags = RCV_DESC_JUMBO;
-			rds_ring->dma_size = RX_JUMBO_DMA_MAP_LEN;
+			if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+				rds_ring->dma_size =
+					NX_P3_RX_JUMBO_BUF_MAX_LEN;
+			else
+				rds_ring->dma_size =
+					NX_P2_RX_JUMBO_BUF_MAX_LEN;
 			rds_ring->skb_size =
-				MAX_RX_JUMBO_BUFFER_LENGTH;
+				rds_ring->dma_size + NET_IP_ALIGN;
 			break;
 
 		case RCV_RING_LRO:
@@ -294,6 +307,7 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
 			goto err_out;
 		}
 		memset(rds_ring->rx_buf_arr, 0, RCV_BUFFSIZE);
+		INIT_LIST_HEAD(&rds_ring->free_list);
 		rds_ring->begin_alloc = 0;
 		/*
 		 * Now go through all of them, set reference handles
@@ -302,6 +316,8 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
 		num_rx_bufs = rds_ring->max_rx_desc_count;
 		rx_buf = rds_ring->rx_buf_arr;
 		for (i = 0; i < num_rx_bufs; i++) {
+			list_add_tail(&rx_buf->list,
+					&rds_ring->free_list);
 			rx_buf->ref_handle = i;
 			rx_buf->state = NETXEN_BUFFER_FREE;
 			rx_buf++;
@@ -1137,15 +1153,47 @@ int netxen_receive_peg_ready(struct netxen_adapter *adapter)
 	return 0;
 }
 
+static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter,
+		struct nx_host_rds_ring *rds_ring, u16 index, u16 cksum)
+{
+	struct netxen_rx_buffer *buffer;
+	struct sk_buff *skb;
+
+	buffer = &rds_ring->rx_buf_arr[index];
+
+	pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
+			PCI_DMA_FROMDEVICE);
+
+	skb = buffer->skb;
+	if (!skb)
+		goto no_skb;
+
+	if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) {
+		adapter->stats.csummed++;
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	} else
+		skb->ip_summed = CHECKSUM_NONE;
+
+	skb->dev = adapter->netdev;
+
+	buffer->skb = NULL;
+
+no_skb:
+	buffer->state = NETXEN_BUFFER_FREE;
+	buffer->lro_current_frags = 0;
+	buffer->lro_expected_frags = 0;
+	list_add_tail(&buffer->list, &rds_ring->free_list);
+	return skb;
+}
+
 /*
  * netxen_process_rcv() send the received packet to the protocol stack.
  * and if the number of receives exceeds RX_BUFFERS_REFILL, then we
 * invoke the routine to send more rx buffers to the Phantom...
 */
 static void netxen_process_rcv(struct netxen_adapter *adapter, int ctxid,
-		struct status_desc *desc)
+		struct status_desc *desc, struct status_desc *frag_desc)
 {
-	struct pci_dev *pdev = adapter->pdev;
 	struct net_device *netdev = adapter->netdev;
 	u64 sts_data = le64_to_cpu(desc->status_desc_data);
 	int index = netxen_get_sts_refhandle(sts_data);
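The new netxen_process_rxbuf() helper consolidates what the receive path previously open-coded: unmap the DMA buffer, translate the hardware checksum verdict, and recycle the buffer onto the free list. A sketch of just the checksum translation, assuming (as the diff implies) that STATUS_CKSUM_OK means the hardware already verified the checksum; the helper name here is hypothetical:

#include <linux/skbuff.h>

/* Sketch: map the hardware checksum status to the stack's view.
 * rx_csum mirrors the adapter's rx-checksum-offload flag; hw_status
 * is what netxen_get_sts_status() extracts from the descriptor. */
static void netxen_set_csum_sketch(struct sk_buff *skb, int rx_csum,
				   u16 hw_status)
{
	if (rx_csum && hw_status == STATUS_CKSUM_OK)
		skb->ip_summed = CHECKSUM_UNNECESSARY;	/* hw verified it */
	else
		skb->ip_summed = CHECKSUM_NONE;		/* stack re-checks */
}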
@@ -1154,8 +1202,8 @@ static void netxen_process_rcv(struct netxen_adapter *adapter, int ctxid,
 	struct sk_buff *skb;
 	u32 length = netxen_get_sts_totallength(sts_data);
 	u32 desc_ctx;
+	u16 pkt_offset = 0, cksum;
 	struct nx_host_rds_ring *rds_ring;
-	int ret;
 
 	desc_ctx = netxen_get_sts_type(sts_data);
 	if (unlikely(desc_ctx >= NUM_RCV_DESC_RINGS)) {
@@ -1191,41 +1239,52 @@ static void netxen_process_rcv(struct netxen_adapter *adapter, int ctxid,
 		}
 	}
 
-	pci_unmap_single(pdev, buffer->dma, rds_ring->dma_size,
-			PCI_DMA_FROMDEVICE);
-
-	skb = (struct sk_buff *)buffer->skb;
+	cksum = netxen_get_sts_status(sts_data);
 
-	if (likely(adapter->rx_csum &&
-	    netxen_get_sts_status(sts_data) == STATUS_CKSUM_OK)) {
-		adapter->stats.csummed++;
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-	} else
-		skb->ip_summed = CHECKSUM_NONE;
+	skb = netxen_process_rxbuf(adapter, rds_ring, index, cksum);
+	if (!skb)
+		return;
 
-	skb->dev = netdev;
 	if (desc_ctx == RCV_DESC_LRO_CTXID) {
 		/* True length was only available on the last pkt */
 		skb_put(skb, buffer->lro_length);
 	} else {
-		skb_put(skb, length);
+		if (length > rds_ring->skb_size)
+			skb_put(skb, rds_ring->skb_size);
+		else
+			skb_put(skb, length);
+
+		pkt_offset = netxen_get_sts_pkt_offset(sts_data);
+		if (pkt_offset)
+			skb_pull(skb, pkt_offset);
 	}
 
 	skb->protocol = eth_type_trans(skb, netdev);
 
-	ret = netif_receive_skb(skb);
-	netdev->last_rx = jiffies;
-
 	/*
-	 * We just consumed one buffer so post a buffer.
+	 * rx buffer chaining is disabled, walk and free
+	 * any spurious rx buffer chain.
 	 */
-	buffer->skb = NULL;
-	buffer->state = NETXEN_BUFFER_FREE;
-	buffer->lro_current_frags = 0;
-	buffer->lro_expected_frags = 0;
+	if (frag_desc) {
+		u16 i, nr_frags = desc->nr_frags;
+
+		dev_kfree_skb_any(skb);
+		for (i = 0; i < nr_frags; i++) {
+			index = frag_desc->frag_handles[i];
+			skb = netxen_process_rxbuf(adapter,
+					rds_ring, index, cksum);
+			if (skb)
+				dev_kfree_skb_any(skb);
+		}
+		adapter->stats.rxdropped++;
+	} else {
 
-	adapter->stats.no_rcv++;
-	adapter->stats.rxbytes += length;
+		netif_receive_skb(skb);
+		netdev->last_rx = jiffies;
+
+		adapter->stats.no_rcv++;
+		adapter->stats.rxbytes += length;
+	}
 }
 
 /* Process Receive status ring */
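In the non-LRO path, the hunk above also clamps the hardware-reported length to what the skb was sized for and strips any firmware-inserted header at the front of the buffer. The same logic expressed as a hypothetical helper (min_t and the skb calls are standard kernel API; the function name is mine):

#include <linux/kernel.h>
#include <linux/skbuff.h>

/* Sketch of the non-LRO fix-up: never skb_put() more bytes than the
 * skb can hold, then skb_pull() past any offset the firmware prepends
 * before the real packet data. */
static void netxen_trim_rx_skb(struct sk_buff *skb, u32 length,
			       u32 skb_size, u16 pkt_offset)
{
	skb_put(skb, min_t(u32, length, skb_size));
	if (pkt_offset)
		skb_pull(skb, pkt_offset);
}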
@@ -1233,9 +1292,11 @@ u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max)
 {
 	struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctxid]);
 	struct status_desc *desc_head = recv_ctx->rcv_status_desc_head;
-	struct status_desc *desc;	/* used to read status desc here */
+	struct status_desc *desc, *frag_desc;
 	u32 consumer = recv_ctx->status_rx_consumer;
 	int count = 0, ring;
+	u64 sts_data;
+	u16 opcode;
 
 	while (count < max) {
 		desc = &desc_head[consumer];
@@ -1244,9 +1305,26 @@ u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max)
 				netxen_get_sts_owner(desc));
 			break;
 		}
-		netxen_process_rcv(adapter, ctxid, desc);
+
+		sts_data = le64_to_cpu(desc->status_desc_data);
+		opcode = netxen_get_sts_opcode(sts_data);
+		frag_desc = NULL;
+		if (opcode == NETXEN_NIC_RXPKT_DESC) {
+			if (desc->nr_frags) {
+				consumer = get_next_index(consumer,
+						adapter->max_rx_desc_count);
+				frag_desc = &desc_head[consumer];
+				netxen_set_sts_owner(frag_desc,
+						STATUS_OWNER_PHANTOM);
+			}
+		}
+
+		netxen_process_rcv(adapter, ctxid, desc, frag_desc);
+
 		netxen_set_sts_owner(desc, STATUS_OWNER_PHANTOM);
-		consumer = (consumer + 1) & (adapter->max_rx_desc_count - 1);
+
+		consumer = get_next_index(consumer,
+				adapter->max_rx_desc_count);
 		count++;
 	}
 	for (ring = 0; ring < adapter->max_rds_rings; ring++)
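get_next_index() replaces the open-coded wrap of the status-ring consumer here. Its actual definition lives elsewhere in the driver, so the following is only a presumed shape inferred from the expression it supersedes; the real helper may equally use a compare-and-reset instead of a mask:

/* Presumed shape of get_next_index(): advance a ring index with
 * wraparound, valid when ring_size is a power of two (as the replaced
 * "(consumer + 1) & (max_rx_desc_count - 1)" implies). */
static inline u32 get_next_index_sketch(u32 index, u32 ring_size)
{
	return (index + 1) & (ring_size - 1);
}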
@@ -1348,36 +1426,31 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
 	int index = 0;
 	netxen_ctx_msg msg = 0;
 	dma_addr_t dma;
+	struct list_head *head;
 
 	rds_ring = &recv_ctx->rds_rings[ringid];
 
 	producer = rds_ring->producer;
 	index = rds_ring->begin_alloc;
-	buffer = &rds_ring->rx_buf_arr[index];
+	head = &rds_ring->free_list;
+
 	/* We can start writing rx descriptors into the phantom memory. */
-	while (buffer->state == NETXEN_BUFFER_FREE) {
+	while (!list_empty(head)) {
+
 		skb = dev_alloc_skb(rds_ring->skb_size);
 		if (unlikely(!skb)) {
-			/*
-			 * TODO
-			 * We need to schedule the posting of buffers to the pegs.
-			 */
 			rds_ring->begin_alloc = index;
-			DPRINTK(ERR, "netxen_post_rx_buffers: "
-				" allocated only %d buffers\n", count);
 			break;
 		}
 
+		buffer = list_entry(head->next, struct netxen_rx_buffer, list);
+		list_del(&buffer->list);
+
 		count++;	/* now there should be no failure */
 		pdesc = &rds_ring->desc_head[producer];
 
-#if defined(XGB_DEBUG)
-		*(unsigned long *)(skb->head) = 0xc0debabe;
-		if (skb_is_nonlinear(skb)) {
-			printk("Allocated SKB @%p is nonlinear\n");
-		}
-#endif
-		skb_reserve(skb, 2);
+		if (!adapter->ahw.cut_through)
+			skb_reserve(skb, 2);
 		/* This will be setup when we receive the
 		 * buffer after it has been filled  FSL  TBD TBD
 		 * skb->dev = netdev;
@@ -1395,7 +1468,6 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
 		producer =
 			get_next_index(producer, rds_ring->max_rx_desc_count);
 		index = get_next_index(index, rds_ring->max_rx_desc_count);
-		buffer = &rds_ring->rx_buf_arr[index];
 	}
 	/* if we did allocate buffers, then write the count to Phantom */
 	if (count) {
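The skb_reserve(skb, 2) that survives in the legacy (non-cut-through) path is the usual IP-header alignment trick: with a 14-byte Ethernet header, shifting the data pointer by two bytes lands the IP header on a 4-byte boundary. A hedged sketch using NET_IP_ALIGN, which equals 2 on most architectures, though the driver hard-codes the literal 2; the function name is illustrative:

#include <linux/skbuff.h>

/* Sketch: allocate an rx skb, aligning the eventual IP header unless
 * cut-through mode requires the buffer to start unshifted. */
static struct sk_buff *netxen_alloc_rx_skb_sketch(unsigned int size,
						  bool cut_through)
{
	struct sk_buff *skb = dev_alloc_skb(size);

	if (skb && !cut_through)
		skb_reserve(skb, NET_IP_ALIGN);	/* usually 2 bytes */
	return skb;
}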
@@ -1439,32 +1511,29 @@ static void netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
 	struct netxen_rx_buffer *buffer;
 	int count = 0;
 	int index = 0;
+	struct list_head *head;
 
 	rds_ring = &recv_ctx->rds_rings[ringid];
 
 	producer = rds_ring->producer;
 	index = rds_ring->begin_alloc;
-	buffer = &rds_ring->rx_buf_arr[index];
+	head = &rds_ring->free_list;
 	/* We can start writing rx descriptors into the phantom memory. */
-	while (buffer->state == NETXEN_BUFFER_FREE) {
+	while (!list_empty(head)) {
+
 		skb = dev_alloc_skb(rds_ring->skb_size);
 		if (unlikely(!skb)) {
-			/*
-			 * We need to schedule the posting of buffers to the pegs.
-			 */
 			rds_ring->begin_alloc = index;
-			DPRINTK(ERR, "netxen_post_rx_buffers_nodb: "
-				" allocated only %d buffers\n", count);
 			break;
 		}
+
+		buffer = list_entry(head->next, struct netxen_rx_buffer, list);
+		list_del(&buffer->list);
+
 		count++;	/* now there should be no failure */
 		pdesc = &rds_ring->desc_head[producer];
-		skb_reserve(skb, 2);
-		/*
-		 * This will be setup when we receive the
-		 * buffer after it has been filled
-		 * skb->dev = netdev;
-		 */
+		if (!adapter->ahw.cut_through)
+			skb_reserve(skb, 2);
 		buffer->skb = skb;
 		buffer->state = NETXEN_BUFFER_BUSY;
 		buffer->dma = pci_map_single(pdev, skb->data,