 drivers/net/qlge/qlge.h      |  15
 drivers/net/qlge/qlge_main.c | 273
 2 files changed, 214 insertions(+), 74 deletions(-)
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 421471790601..bc7a2e43c62e 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -56,7 +56,8 @@
 	MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
 #define SMALL_BUFFER_SIZE 512
 #define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2)
-#define LARGE_BUFFER_SIZE	PAGE_SIZE
+#define LARGE_BUFFER_MAX_SIZE 8192
+#define LARGE_BUFFER_MIN_SIZE 2048
 #define MAX_SPLIT_SIZE 1023
 #define QLGE_SB_PAD 32
 
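The single PAGE_SIZE large-buffer size gives way to two sizes picked from the MTU. A quick user-space sketch of the selection rule (the 1500-byte threshold appears in the ql_configure_rings() hunk below; qlge_pick_buf_len() is a hypothetical name, not a driver symbol):

#include <stdio.h>

#define LARGE_BUFFER_MAX_SIZE 8192
#define LARGE_BUFFER_MIN_SIZE 2048

/* Hypothetical helper mirroring the driver's rule: jumbo MTU gets
 * 8KB receive buffers, standard MTU gets 2KB. */
static unsigned int qlge_pick_buf_len(int mtu)
{
	return (mtu > 1500) ? LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
}

int main(void)
{
	printf("mtu 1500 -> %u\n", qlge_pick_buf_len(1500));	/* 2048 */
	printf("mtu 9000 -> %u\n", qlge_pick_buf_len(9000));	/* 8192 */
	return 0;
}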
@@ -1201,9 +1202,17 @@ struct tx_ring_desc {
 	struct tx_ring_desc *next;
 };
 
+struct page_chunk {
+	struct page *page;	/* master page */
+	char *va;		/* virt addr for this chunk */
+	u64 map;		/* mapping for master */
+	unsigned int offset;	/* offset for this chunk */
+	unsigned int last_flag;	/* flag set for last chunk in page */
+};
+
 struct bq_desc {
 	union {
-		struct page *lbq_page;
+		struct page_chunk pg_chunk;
 		struct sk_buff *skb;
 	} p;
 	__le64 *addr;
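The new struct page_chunk lets several receive buffers share one higher-order allocation: each descriptor records its own va/offset into the master page, and last_flag marks the chunk that ends the block. A standalone model of that bookkeeping, assuming the 8192/2048 sizes defined above (struct page_chunk_model and the malloc() stand-in are illustrative only):

#include <stdio.h>
#include <stdlib.h>

struct page_chunk_model {
	char *va;		/* virt addr for this chunk */
	unsigned int offset;	/* offset for this chunk */
	unsigned int last_flag;	/* set on the final chunk */
};

int main(void)
{
	const unsigned int block = 8192, buf = 2048;
	char *master = malloc(block);	/* stands in for alloc_pages() */
	struct page_chunk_model c;

	if (!master)
		return 1;
	for (c.offset = 0; c.offset < block; c.offset += buf) {
		c.va = master + c.offset;
		c.last_flag = (c.offset + buf == block);
		printf("chunk: va=%p offset=%u last=%u\n",
		       (void *)c.va, c.offset, c.last_flag);
	}
	free(master);
	return 0;
}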
@@ -1272,6 +1281,7 @@ struct rx_ring {
 	dma_addr_t lbq_base_dma;
 	void *lbq_base_indirect;
 	dma_addr_t lbq_base_indirect_dma;
+	struct page_chunk pg_chunk; /* current page for chunks */
 	struct bq_desc *lbq;	/* array of control blocks */
 	void __iomem *lbq_prod_idx_db_reg;	/* PCI doorbell mem area + 0x18 */
 	u32 lbq_prod_idx;	/* current sw prod idx */
@@ -1526,6 +1536,7 @@ struct ql_adapter {
 
 	struct rx_ring rx_ring[MAX_RX_RINGS];
 	struct tx_ring tx_ring[MAX_TX_RINGS];
+	unsigned int lbq_buf_order;
 
 	int rx_csum;
 	u32 default_rx_queue;
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 817613919b51..34242fbcadff 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -1025,6 +1025,11 @@ end:
 	return status;
 }
 
+static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
+{
+	return PAGE_SIZE << qdev->lbq_buf_order;
+}
+
 /* Get the next large buffer. */
 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
 {
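ql_lbq_block_size() just scales PAGE_SIZE by the stored allocation order. A tiny sketch of the arithmetic, assuming a 4KB page (PAGE_SIZE is architecture-dependent in the kernel):

#include <stdio.h>

#define ASSUMED_PAGE_SIZE 4096u	/* assumption for illustration */

int main(void)
{
	for (unsigned int order = 0; order <= 2; order++)
		printf("order %u -> block size %u\n",
		       order, ASSUMED_PAGE_SIZE << order);
	/* order 0 -> 4096, order 1 -> 8192 (jumbo case), order 2 -> 16384 */
	return 0;
}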
@@ -1036,6 +1041,28 @@ static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
 	return lbq_desc;
 }
 
+static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
+					  struct rx_ring *rx_ring)
+{
+	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
+
+	pci_dma_sync_single_for_cpu(qdev->pdev,
+				    pci_unmap_addr(lbq_desc, mapaddr),
+				    rx_ring->lbq_buf_size,
+				    PCI_DMA_FROMDEVICE);
+
+	/* If it's the last chunk of our master page then
+	 * we unmap it.
+	 */
+	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
+					== ql_lbq_block_size(qdev))
+		pci_unmap_page(qdev->pdev,
+			       lbq_desc->p.pg_chunk.map,
+			       ql_lbq_block_size(qdev),
+			       PCI_DMA_FROMDEVICE);
+	return lbq_desc;
+}
+
 /* Get the next small buffer. */
 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
 {
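Note the DMA discipline here: the master block is mapped once, each consumed chunk is synced for the CPU individually, and the whole block is unmapped only when the chunk touching the end of the block is consumed. A user-space trace of that lifecycle (print statements stand in for the PCI DMA calls; sizes assume the jumbo case):

#include <stdio.h>

int main(void)
{
	const unsigned int block = 8192, buf = 2048;

	printf("pci_map_page: whole %u-byte block\n", block);
	for (unsigned int off = 0; off < block; off += buf) {
		printf("  sync_for_cpu: offset %u, len %u\n", off, buf);
		if (off + buf == block)
			printf("pci_unmap_page: whole block\n");
	}
	return 0;
}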
@@ -1063,6 +1090,53 @@ static void ql_write_cq_idx(struct rx_ring *rx_ring)
 	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
 }
 
+static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
+			     struct bq_desc *lbq_desc)
+{
+	if (!rx_ring->pg_chunk.page) {
+		u64 map;
+		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
+						     GFP_ATOMIC,
+						     qdev->lbq_buf_order);
+		if (unlikely(!rx_ring->pg_chunk.page)) {
+			QPRINTK(qdev, DRV, ERR,
+				"page allocation failed.\n");
+			return -ENOMEM;
+		}
+		rx_ring->pg_chunk.offset = 0;
+		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
+				   0, ql_lbq_block_size(qdev),
+				   PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(qdev->pdev, map)) {
+			__free_pages(rx_ring->pg_chunk.page,
+				     qdev->lbq_buf_order);
+			QPRINTK(qdev, DRV, ERR,
+				"PCI mapping failed.\n");
+			return -ENOMEM;
+		}
+		rx_ring->pg_chunk.map = map;
+		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
+	}
+
+	/* Copy the current master pg_chunk info
+	 * to the current descriptor.
+	 */
+	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
+
+	/* Adjust the master page chunk for next
+	 * buffer get.
+	 */
+	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
+	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
+		rx_ring->pg_chunk.page = NULL;
+		lbq_desc->p.pg_chunk.last_flag = 1;
+	} else {
+		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
+		get_page(rx_ring->pg_chunk.page);
+		lbq_desc->p.pg_chunk.last_flag = 0;
+	}
+	return 0;
+}
 /* Process (refill) a large buffer queue. */
 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 {
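The reference counting in ql_get_next_chunk() is the subtle part: alloc_pages() supplies one reference, which is conceptually handed to the final chunk, and get_page() adds one reference for every earlier chunk, so each descriptor later drops exactly one reference via put_page(). A user-space model of the count, assuming an 8KB block of 2KB chunks:

#include <stdio.h>

int main(void)
{
	const unsigned int block = 8192, buf = 2048;
	unsigned int refs = 1;		/* alloc_pages() starts at one */
	unsigned int offset = 0;

	while (offset < block) {
		offset += buf;
		if (offset < block)
			refs++;		/* get_page() per non-final chunk */
	}
	printf("chunks handed out: %u, references taken: %u\n",
	       block / buf, refs);	/* 4 and 4: one ref per chunk */
	return 0;
}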
@@ -1072,39 +1146,28 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 	u64 map;
 	int i;
 
-	while (rx_ring->lbq_free_cnt > 16) {
+	while (rx_ring->lbq_free_cnt > 32) {
 		for (i = 0; i < 16; i++) {
 			QPRINTK(qdev, RX_STATUS, DEBUG,
 				"lbq: try cleaning clean_idx = %d.\n",
 				clean_idx);
 			lbq_desc = &rx_ring->lbq[clean_idx];
-			if (lbq_desc->p.lbq_page == NULL) {
-				QPRINTK(qdev, RX_STATUS, DEBUG,
-					"lbq: getting new page for index %d.\n",
-					lbq_desc->index);
-				lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
-				if (lbq_desc->p.lbq_page == NULL) {
-					rx_ring->lbq_clean_idx = clean_idx;
-					QPRINTK(qdev, RX_STATUS, ERR,
-						"Couldn't get a page.\n");
-					return;
-				}
-				map = pci_map_page(qdev->pdev,
-						   lbq_desc->p.lbq_page,
-						   0, PAGE_SIZE,
-						   PCI_DMA_FROMDEVICE);
-				if (pci_dma_mapping_error(qdev->pdev, map)) {
-					rx_ring->lbq_clean_idx = clean_idx;
-					put_page(lbq_desc->p.lbq_page);
-					lbq_desc->p.lbq_page = NULL;
-					QPRINTK(qdev, RX_STATUS, ERR,
-						"PCI mapping failed.\n");
-					return;
-				}
+			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
+				QPRINTK(qdev, IFUP, ERR,
+					"Could not get a page chunk.\n");
+				return;
+			}
+
+			map = lbq_desc->p.pg_chunk.map +
+				lbq_desc->p.pg_chunk.offset;
 			pci_unmap_addr_set(lbq_desc, mapaddr, map);
-			pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
+			pci_unmap_len_set(lbq_desc, maplen,
+					  rx_ring->lbq_buf_size);
 			*lbq_desc->addr = cpu_to_le64(map);
-			}
+
+			pci_dma_sync_single_for_device(qdev->pdev, map,
+						       rx_ring->lbq_buf_size,
+						       PCI_DMA_FROMDEVICE);
 			clean_idx++;
 			if (clean_idx == rx_ring->lbq_len)
 				clean_idx = 0;
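The refill loop now waits until more than 32 descriptors are free (up from 16) and still tops up 16 at a time, wrapping clean_idx at the ring length. A minimal sketch of that batching, with made-up ring values:

#include <stdio.h>

int main(void)
{
	unsigned int lbq_len = 64, clean_idx = 60, free_cnt = 40;

	while (free_cnt > 32) {
		for (int i = 0; i < 16; i++) {
			/* refill the descriptor at clean_idx here */
			clean_idx++;
			if (clean_idx == lbq_len)
				clean_idx = 0;
		}
		free_cnt -= 16;
	}
	printf("clean_idx=%u free_cnt=%u\n", clean_idx, free_cnt); /* 12, 24 */
	return 0;
}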
@@ -1480,27 +1543,24 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
 		 * chain it to the header buffer's skb and let
 		 * it rip.
 		 */
-		lbq_desc = ql_get_curr_lbuf(rx_ring);
-		pci_unmap_page(qdev->pdev,
-			       pci_unmap_addr(lbq_desc,
-					      mapaddr),
-			       pci_unmap_len(lbq_desc, maplen),
-			       PCI_DMA_FROMDEVICE);
+		lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
 		QPRINTK(qdev, RX_STATUS, DEBUG,
-			"Chaining page to skb.\n");
-		skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
-				   0, length);
+			"Chaining page at offset = %d, "
+			"for %d bytes to skb.\n",
+			lbq_desc->p.pg_chunk.offset, length);
+		skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
+				   lbq_desc->p.pg_chunk.offset,
+				   length);
 		skb->len += length;
 		skb->data_len += length;
 		skb->truesize += length;
-		lbq_desc->p.lbq_page = NULL;
 	} else {
 		/*
 		 * The headers and data are in a single large buffer. We
 		 * copy it to a new skb and let it go. This can happen with
 		 * jumbo mtu on a non-TCP/UDP frame.
 		 */
-		lbq_desc = ql_get_curr_lbuf(rx_ring);
+		lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
 		skb = netdev_alloc_skb(qdev->ndev, length);
 		if (skb == NULL) {
 			QPRINTK(qdev, PROBE, DEBUG,
@@ -1515,13 +1575,14 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
 		skb_reserve(skb, NET_IP_ALIGN);
 		QPRINTK(qdev, RX_STATUS, DEBUG,
 			"%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
-		skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
-				   0, length);
+		skb_fill_page_desc(skb, 0,
+				   lbq_desc->p.pg_chunk.page,
+				   lbq_desc->p.pg_chunk.offset,
+				   length);
 		skb->len += length;
 		skb->data_len += length;
 		skb->truesize += length;
 		length -= length;
-		lbq_desc->p.lbq_page = NULL;
 		__pskb_pull_tail(skb,
 				 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
 				 VLAN_ETH_HLEN : ETH_HLEN);
@@ -1538,8 +1599,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
 		 * frames.  If the MTU goes up we could
 		 *  eventually be in trouble.
 		 */
-		int size, offset, i = 0;
-		__le64 *bq, bq_array[8];
+		int size, i = 0;
 		sbq_desc = ql_get_curr_sbuf(rx_ring);
 		pci_unmap_single(qdev->pdev,
 				 pci_unmap_addr(sbq_desc, mapaddr),
@@ -1558,37 +1618,25 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
 			QPRINTK(qdev, RX_STATUS, DEBUG,
 				"%d bytes of headers & data in chain of large.\n", length);
 			skb = sbq_desc->p.skb;
-			bq = &bq_array[0];
-			memcpy(bq, skb->data, sizeof(bq_array));
 			sbq_desc->p.skb = NULL;
 			skb_reserve(skb, NET_IP_ALIGN);
-		} else {
-			QPRINTK(qdev, RX_STATUS, DEBUG,
-				"Headers in small, %d bytes of data in chain of large.\n", length);
-			bq = (__le64 *)sbq_desc->p.skb->data;
 		}
 		while (length > 0) {
-			lbq_desc = ql_get_curr_lbuf(rx_ring);
-			pci_unmap_page(qdev->pdev,
-				       pci_unmap_addr(lbq_desc,
-						      mapaddr),
-				       pci_unmap_len(lbq_desc,
-						     maplen),
-				       PCI_DMA_FROMDEVICE);
-			size = (length < PAGE_SIZE) ? length : PAGE_SIZE;
-			offset = 0;
+			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+			size = (length < rx_ring->lbq_buf_size) ? length :
+				rx_ring->lbq_buf_size;
 
 			QPRINTK(qdev, RX_STATUS, DEBUG,
 				"Adding page %d to skb for %d bytes.\n",
 				i, size);
-			skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page,
-					   offset, size);
+			skb_fill_page_desc(skb, i,
+					   lbq_desc->p.pg_chunk.page,
+					   lbq_desc->p.pg_chunk.offset,
+					   size);
 			skb->len += size;
 			skb->data_len += size;
 			skb->truesize += size;
 			length -= size;
-			lbq_desc->p.lbq_page = NULL;
-			bq++;
 			i++;
 		}
 		__pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
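The chaining loop doles the remaining frame length out in lbq_buf_size pieces, so only the final page fragment is short. A quick model with an assumed 5000-byte frame and 2KB chunks:

#include <stdio.h>

int main(void)
{
	int length = 5000;			/* assumed frame size */
	const int lbq_buf_size = 2048;
	int i = 0;

	while (length > 0) {
		int size = (length < lbq_buf_size) ? length : lbq_buf_size;
		printf("page desc %d: %d bytes\n", i, size);
		length -= size;
		i++;
	}	/* 2048 + 2048 + 904 = 5000 */
	return 0;
}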
@@ -2305,20 +2353,29 @@ err:
 
 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 {
-	int i;
 	struct bq_desc *lbq_desc;
 
-	for (i = 0; i < rx_ring->lbq_len; i++) {
-		lbq_desc = &rx_ring->lbq[i];
-		if (lbq_desc->p.lbq_page) {
+	uint32_t curr_idx, clean_idx;
+
+	curr_idx = rx_ring->lbq_curr_idx;
+	clean_idx = rx_ring->lbq_clean_idx;
+	while (curr_idx != clean_idx) {
+		lbq_desc = &rx_ring->lbq[curr_idx];
+
+		if (lbq_desc->p.pg_chunk.last_flag) {
 			pci_unmap_page(qdev->pdev,
-				       pci_unmap_addr(lbq_desc, mapaddr),
-				       pci_unmap_len(lbq_desc, maplen),
+				       lbq_desc->p.pg_chunk.map,
+				       ql_lbq_block_size(qdev),
 				       PCI_DMA_FROMDEVICE);
+			lbq_desc->p.pg_chunk.last_flag = 0;
+		}
 
-			put_page(lbq_desc->p.lbq_page);
-			lbq_desc->p.lbq_page = NULL;
-		}
+		put_page(lbq_desc->p.pg_chunk.page);
+		lbq_desc->p.pg_chunk.page = NULL;
+
+		if (++curr_idx == rx_ring->lbq_len)
+			curr_idx = 0;
+
 	}
 }
 
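Freeing now walks only the in-flight span of the ring, from lbq_curr_idx up to lbq_clean_idx with wraparound, instead of iterating all lbq_len entries. A sketch of the walk with made-up indexes:

#include <stdio.h>

int main(void)
{
	unsigned int lbq_len = 8, curr_idx = 6, clean_idx = 3;

	while (curr_idx != clean_idx) {
		printf("put_page on descriptor %u\n", curr_idx);
		if (++curr_idx == lbq_len)
			curr_idx = 0;
	}	/* visits 6, 7, 0, 1, 2 */
	return 0;
}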
@@ -2616,6 +2673,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 	/* Set up the shadow registers for this ring. */
 	rx_ring->prod_idx_sh_reg = shadow_reg;
 	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
+	*rx_ring->prod_idx_sh_reg = 0;
 	shadow_reg += sizeof(u64);
 	shadow_reg_dma += sizeof(u64);
 	rx_ring->lbq_base_indirect = shadow_reg;
@@ -3496,6 +3554,10 @@ static int ql_configure_rings(struct ql_adapter *qdev)
 	struct rx_ring *rx_ring;
 	struct tx_ring *tx_ring;
 	int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
+	unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
+		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
+
+	qdev->lbq_buf_order = get_order(lbq_buf_len);
 
 	/* In a perfect world we have one RSS ring for each CPU
 	 * and each has it's own vector.  To do that we ask for
@@ -3543,7 +3605,10 @@ static int ql_configure_rings(struct ql_adapter *qdev)
 			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
 			rx_ring->lbq_size =
 			    rx_ring->lbq_len * sizeof(__le64);
-			rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
+			rx_ring->lbq_buf_size = (u16)lbq_buf_len;
+			QPRINTK(qdev, IFUP, DEBUG,
+				"lbq_buf_size %d, order = %d\n",
+				rx_ring->lbq_buf_size, qdev->lbq_buf_order);
 			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
 			rx_ring->sbq_size =
 			    rx_ring->sbq_len * sizeof(__le64);
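get_order() maps the chosen buffer length to the smallest allocation order whose block covers it. A user-space equivalent, assuming a 4KB page size:

#include <stdio.h>

/* Stand-in for the kernel's get_order(): smallest order such that
 * (page size << order) covers the requested length. */
static unsigned int order_for(unsigned int len)
{
	unsigned int order = 0;
	while ((4096u << order) < len)
		order++;
	return order;
}

int main(void)
{
	printf("2048 -> order %u\n", order_for(2048));	/* 0 */
	printf("8192 -> order %u\n", order_for(8192));	/* 1 */
	return 0;
}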
@@ -3593,14 +3658,63 @@ error_up:
 	return err;
 }
 
+static int ql_change_rx_buffers(struct ql_adapter *qdev)
+{
+	struct rx_ring *rx_ring;
+	int i, status;
+	u32 lbq_buf_len;
+
+	/* Wait for an outstanding reset to complete. */
+	if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
+		int i = 3;
+		while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Waiting for adapter UP...\n");
+			ssleep(1);
+		}
+
+		if (!i) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Timed out waiting for adapter UP\n");
+			return -ETIMEDOUT;
+		}
+	}
+
+	status = ql_adapter_down(qdev);
+	if (status)
+		goto error;
+
+	/* Get the new rx buffer size. */
+	lbq_buf_len = (qdev->ndev->mtu > 1500) ?
+		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
+	qdev->lbq_buf_order = get_order(lbq_buf_len);
+
+	for (i = 0; i < qdev->rss_ring_count; i++) {
+		rx_ring = &qdev->rx_ring[i];
+		/* Set the new size. */
+		rx_ring->lbq_buf_size = lbq_buf_len;
+	}
+
+	status = ql_adapter_up(qdev);
+	if (status)
+		goto error;
+
+	return status;
+error:
+	QPRINTK(qdev, IFUP, ALERT,
+		"Driver up/down cycle failed, closing device.\n");
+	set_bit(QL_ADAPTER_UP, &qdev->flags);
+	dev_close(qdev->ndev);
+	return status;
+}
+
 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
 {
 	struct ql_adapter *qdev = netdev_priv(ndev);
+	int status;
 
 	if (ndev->mtu == 1500 && new_mtu == 9000) {
 		QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
-		queue_delayed_work(qdev->workqueue,
-				&qdev->mpi_port_cfg_work, 0);
 	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
 		QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
 	} else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
@@ -3608,8 +3722,23 @@ static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
 		return 0;
 	} else
 		return -EINVAL;
+
+	queue_delayed_work(qdev->workqueue,
+			   &qdev->mpi_port_cfg_work, 3*HZ);
+
+	if (!netif_running(qdev->ndev)) {
+		ndev->mtu = new_mtu;
+		return 0;
+	}
+
 	ndev->mtu = new_mtu;
-	return 0;
+	status = ql_change_rx_buffers(qdev);
+	if (status) {
+		QPRINTK(qdev, IFUP, ERR,
+			"Changing MTU failed.\n");
+	}
+
+	return status;
 }
 
 static struct net_device_stats *qlge_get_stats(struct net_device
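For reference, the MTU hook accepts only the 1500 <-> 9000 transitions (and the same-value no-ops); everything else is rejected before any ring work happens. A compact model of that gate (qlge_mtu_ok() is a hypothetical name, not a driver function):

#include <stdio.h>

static int qlge_mtu_ok(int cur, int new_mtu)
{
	if (cur == 1500 && new_mtu == 9000)
		return 1;
	if (cur == 9000 && new_mtu == 1500)
		return 1;
	if ((cur == 1500 && new_mtu == 1500) ||
	    (cur == 9000 && new_mtu == 9000))
		return 1;
	return 0;	/* anything else gets -EINVAL */
}

int main(void)
{
	printf("1500 -> 9000: %d\n", qlge_mtu_ok(1500, 9000));	/* 1 */
	printf("1500 -> 4000: %d\n", qlge_mtu_ok(1500, 4000));	/* 0 */
	return 0;
}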