Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/qlge/qlge.h      | 11
-rw-r--r--  drivers/net/qlge/qlge_main.c | 55
2 files changed, 15 insertions(+), 51 deletions(-)
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 71cc48799b59..76ef2bc297cc 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -818,15 +818,6 @@ struct tx_doorbell_context {
 };
 
 /* DATA STRUCTURES SHARED WITH HARDWARE. */
-
-struct bq_element {
-	u32 addr_lo;
-#define BQ_END 0x00000001
-#define BQ_CONT 0x00000002
-#define BQ_MASK 0x00000003
-	u32 addr_hi;
-} __attribute((packed));
-
 struct tx_buf_desc {
 	__le64 addr;
 	__le32 len;
@@ -1139,7 +1130,7 @@ struct bq_desc {
 		struct page *lbq_page;
 		struct sk_buff *skb;
 	} p;
-	struct bq_element *bq;
+	__le64 *addr;
 	int index;
 	DECLARE_PCI_UNMAP_ADDR(mapaddr);
 	DECLARE_PCI_UNMAP_LEN(maplen);
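
The hunks above drop the hardware-shared struct bq_element (a packed addr_lo/addr_hi pair plus BQ_* flag bits) and make each buffer-queue entry a bare little-endian 64-bit address reached through bq_desc->addr. The standalone sketch below is not driver code; it only illustrates, with userspace stand-ins (uint64_t and <endian.h> htole32/htole64 in place of the kernel's __le64, cpu_to_le32 and cpu_to_le64), that one 64-bit little-endian store lays down the same bytes the old split assignment did.

/* Illustrative only -- not part of the patch. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct old_bq_element {			/* the layout being removed */
	uint32_t addr_lo;
	uint32_t addr_hi;
} __attribute__((packed));

int main(void)
{
	uint64_t map = 0x123456789abcdef0ULL;	/* a pretend DMA bus address */
	struct old_bq_element old = {
		.addr_lo = htole32((uint32_t)map),		/* old: low half */
		.addr_hi = htole32((uint32_t)(map >> 32)),	/* old: high half */
	};
	uint64_t cur = htole64(map);		/* new: single 64-bit store */

	printf("entry bytes identical: %s\n",
	       memcmp(&old, &cur, sizeof(cur)) == 0 ? "yes" : "no");
	return 0;
}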
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index d7894aa2ebe3..f4c016012f18 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -874,7 +874,6 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 {
 	int clean_idx = rx_ring->lbq_clean_idx;
 	struct bq_desc *lbq_desc;
-	struct bq_element *bq;
 	u64 map;
 	int i;
 
@@ -884,7 +883,6 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
884 "lbq: try cleaning clean_idx = %d.\n", 883 "lbq: try cleaning clean_idx = %d.\n",
885 clean_idx); 884 clean_idx);
886 lbq_desc = &rx_ring->lbq[clean_idx]; 885 lbq_desc = &rx_ring->lbq[clean_idx];
887 bq = lbq_desc->bq;
888 if (lbq_desc->p.lbq_page == NULL) { 886 if (lbq_desc->p.lbq_page == NULL) {
889 QPRINTK(qdev, RX_STATUS, DEBUG, 887 QPRINTK(qdev, RX_STATUS, DEBUG,
890 "lbq: getting new page for index %d.\n", 888 "lbq: getting new page for index %d.\n",
@@ -906,10 +904,7 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 				}
 				pci_unmap_addr_set(lbq_desc, mapaddr, map);
 				pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
-				bq->addr_lo =	/*lbq_desc->addr_lo = */
-				    cpu_to_le32(map);
-				bq->addr_hi =	/*lbq_desc->addr_hi = */
-				    cpu_to_le32(map >> 32);
+				*lbq_desc->addr = cpu_to_le64(map);
 			}
 			clean_idx++;
 			if (clean_idx == rx_ring->lbq_len)
@@ -934,7 +929,6 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 {
 	int clean_idx = rx_ring->sbq_clean_idx;
 	struct bq_desc *sbq_desc;
-	struct bq_element *bq;
 	u64 map;
 	int i;
 
@@ -944,7 +938,6 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 			QPRINTK(qdev, RX_STATUS, DEBUG,
 				"sbq: try cleaning clean_idx = %d.\n",
 				clean_idx);
-			bq = sbq_desc->bq;
 			if (sbq_desc->p.skb == NULL) {
 				QPRINTK(qdev, RX_STATUS, DEBUG,
 					"sbq: getting new skb for index %d.\n",
@@ -971,8 +964,7 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 				pci_unmap_addr_set(sbq_desc, mapaddr, map);
 				pci_unmap_len_set(sbq_desc, maplen,
 						  rx_ring->sbq_buf_size / 2);
-				bq->addr_lo = cpu_to_le32(map);
-				bq->addr_hi = cpu_to_le32(map >> 32);
 			}
 
 			clean_idx++;
@@ -1340,7 +1332,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
 		 * eventually be in trouble.
 		 */
 		int size, offset, i = 0;
-		struct bq_element *bq, bq_array[8];
+		__le64 *bq, bq_array[8];
 		sbq_desc = ql_get_curr_sbuf(rx_ring);
 		pci_unmap_single(qdev->pdev,
 				 pci_unmap_addr(sbq_desc, mapaddr),
@@ -1366,16 +1358,10 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
 		} else {
 			QPRINTK(qdev, RX_STATUS, DEBUG,
 				"Headers in small, %d bytes of data in chain of large.\n", length);
-			bq = (struct bq_element *)sbq_desc->p.skb->data;
+			bq = (__le64 *)sbq_desc->p.skb->data;
 		}
 		while (length > 0) {
 			lbq_desc = ql_get_curr_lbuf(rx_ring);
-			if ((bq->addr_lo & ~BQ_MASK) != lbq_desc->bq->addr_lo) {
-				QPRINTK(qdev, RX_STATUS, ERR,
-					"Panic!!! bad large buffer address, expected 0x%.08x, got 0x%.08x.\n",
-					lbq_desc->bq->addr_lo, bq->addr_lo);
-				return NULL;
-			}
 			pci_unmap_page(qdev->pdev,
 				       pci_unmap_addr(lbq_desc,
 						      mapaddr),
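
With the BQ_* flag bits gone from the queue entry, this hunk also drops the per-entry cross-check against lbq_desc->bq->addr_lo: the headers-in-small/data-in-large path now simply treats the small buffer's payload as an array of __le64 bus addresses and consumes one large buffer per entry until the remaining length is used up. The standalone sketch below is not driver code; it assumes one page-sized chunk per chain entry, and PAGE_SZ and chain[] are made-up stand-ins for the real page size and the small buffer's payload.

/* Illustrative only -- not part of the patch. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ 4096

int main(void)
{
	uint64_t chain[] = {			/* addresses as the NIC stored them */
		htole64(0x10000000ULL),
		htole64(0x10001000ULL),
		htole64(0x10002000ULL),
	};
	const uint64_t *bq = chain;
	int length = 9000;			/* bytes still to place into the skb */

	while (length > 0) {
		int size = length < PAGE_SZ ? length : PAGE_SZ;

		printf("take %4d bytes from bus address 0x%llx\n",
		       size, (unsigned long long)le64toh(*bq));
		length -= size;
		bq++;				/* next chain entry */
	}
	return 0;
}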
@@ -2093,8 +2079,6 @@ static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
 			put_page(lbq_desc->p.lbq_page);
 			lbq_desc->p.lbq_page = NULL;
 		}
-		lbq_desc->bq->addr_lo = 0;
-		lbq_desc->bq->addr_hi = 0;
 	}
 }
 
@@ -2107,12 +2091,12 @@ static int ql_alloc_lbq_buffers(struct ql_adapter *qdev,
 	int i;
 	struct bq_desc *lbq_desc;
 	u64 map;
-	struct bq_element *bq = rx_ring->lbq_base;
+	__le64 *bq = rx_ring->lbq_base;
 
 	for (i = 0; i < rx_ring->lbq_len; i++) {
 		lbq_desc = &rx_ring->lbq[i];
 		memset(lbq_desc, 0, sizeof(lbq_desc));
-		lbq_desc->bq = bq;
+		lbq_desc->addr = bq;
 		lbq_desc->index = i;
 		lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
 		if (unlikely(!lbq_desc->p.lbq_page)) {
@@ -2129,8 +2113,7 @@ static int ql_alloc_lbq_buffers(struct ql_adapter *qdev,
 			}
 			pci_unmap_addr_set(lbq_desc, mapaddr, map);
 			pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
-			bq->addr_lo = cpu_to_le32(map);
-			bq->addr_hi = cpu_to_le32(map >> 32);
+			*lbq_desc->addr = cpu_to_le64(map);
 		}
 		bq++;
 	}
@@ -2159,13 +2142,6 @@ static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
 			dev_kfree_skb(sbq_desc->p.skb);
 			sbq_desc->p.skb = NULL;
 		}
-		if (sbq_desc->bq == NULL) {
-			QPRINTK(qdev, IFUP, ERR, "sbq_desc->bq %d is NULL.\n",
-				i);
-			return;
-		}
-		sbq_desc->bq->addr_lo = 0;
-		sbq_desc->bq->addr_hi = 0;
 	}
 }
 
@@ -2177,13 +2153,13 @@ static int ql_alloc_sbq_buffers(struct ql_adapter *qdev,
 	struct bq_desc *sbq_desc;
 	struct sk_buff *skb;
 	u64 map;
-	struct bq_element *bq = rx_ring->sbq_base;
+	__le64 *bq = rx_ring->sbq_base;
 
 	for (i = 0; i < rx_ring->sbq_len; i++) {
 		sbq_desc = &rx_ring->sbq[i];
 		memset(sbq_desc, 0, sizeof(sbq_desc));
 		sbq_desc->index = i;
-		sbq_desc->bq = bq;
+		sbq_desc->addr = bq;
 		skb = netdev_alloc_skb(qdev->ndev, rx_ring->sbq_buf_size);
 		if (unlikely(!skb)) {
 			/* Better luck next round */
@@ -2209,10 +2185,7 @@ static int ql_alloc_sbq_buffers(struct ql_adapter *qdev,
 		}
 		pci_unmap_addr_set(sbq_desc, mapaddr, map);
 		pci_unmap_len_set(sbq_desc, maplen, rx_ring->sbq_buf_size / 2);
-		bq->addr_lo =	/*sbq_desc->addr_lo = */
-		    cpu_to_le32(map);
-		bq->addr_hi =	/*sbq_desc->addr_hi = */
-		    cpu_to_le32(map >> 32);
+		*sbq_desc->addr = cpu_to_le64(map);
 		bq++;
 	}
 	return 0;
@@ -3356,11 +3329,11 @@ static int ql_configure_rings(struct ql_adapter *qdev)
 			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
 			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
 			rx_ring->lbq_size =
-			    rx_ring->lbq_len * sizeof(struct bq_element);
+			    rx_ring->lbq_len * sizeof(__le64);
 			rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
 			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
 			rx_ring->sbq_size =
-			    rx_ring->sbq_len * sizeof(struct bq_element);
+			    rx_ring->sbq_len * sizeof(__le64);
 			rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
 			rx_ring->type = DEFAULT_Q;
 		} else if (i < qdev->rss_ring_first_cq_id) {
@@ -3387,11 +3360,11 @@ static int ql_configure_rings(struct ql_adapter *qdev)
 			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
 			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
 			rx_ring->lbq_size =
-			    rx_ring->lbq_len * sizeof(struct bq_element);
+			    rx_ring->lbq_len * sizeof(__le64);
 			rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
 			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
 			rx_ring->sbq_size =
-			    rx_ring->sbq_len * sizeof(struct bq_element);
+			    rx_ring->sbq_len * sizeof(__le64);
 			rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
 			rx_ring->type = RX_Q;
 		}
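
The sizing hunks in ql_configure_rings swap sizeof(struct bq_element) for sizeof(__le64). Since the removed bq_element was two packed 32-bit words, each queue entry was already 8 bytes, so lbq_size and sbq_size come out the same; only the type changes. The quick standalone check below models __le64 with uint64_t, and the 512-entry length is illustrative, not the driver's NUM_LARGE_BUFFERS/NUM_SMALL_BUFFERS value.

/* Illustrative only -- confirms the queue-entry size is unchanged by the patch. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct old_bq_element {
	uint32_t addr_lo;
	uint32_t addr_hi;
} __attribute__((packed));

int main(void)
{
	static_assert(sizeof(struct old_bq_element) == sizeof(uint64_t),
		      "entry size unchanged");
	/* e.g. a 512-entry queue (made-up length) needs 512 * 8 bytes either way */
	printf("old: %zu bytes, new: %zu bytes\n",
	       512 * sizeof(struct old_bq_element), 512 * sizeof(uint64_t));
	return 0;
}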