Diffstat (limited to 'drivers/net/qlge')
-rw-r--r--  drivers/net/qlge/qlge.h         |  57
-rw-r--r--  drivers/net/qlge/qlge_dbg.c     |  13
-rw-r--r--  drivers/net/qlge/qlge_ethtool.c |   8
-rw-r--r--  drivers/net/qlge/qlge_main.c    | 116
4 files changed, 78 insertions(+), 116 deletions(-)
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index ba2e1c5b6bcf..459663a4023d 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -818,15 +818,6 @@ struct tx_doorbell_context {
 };
 
 /* DATA STRUCTURES SHARED WITH HARDWARE. */
-
-struct bq_element {
-	u32 addr_lo;
-#define BQ_END	0x00000001
-#define BQ_CONT	0x00000002
-#define BQ_MASK	0x00000003
-	u32 addr_hi;
-} __attribute((packed));
-
 struct tx_buf_desc {
 	__le64 addr;
 	__le32 len;
@@ -860,8 +851,8 @@ struct ob_mac_iocb_req {
 	__le16 frame_len;
 #define OB_MAC_IOCB_LEN_MASK 0x3ffff
 	__le16 reserved2;
-	__le32 tid;
-	__le32 txq_idx;
+	u32 tid;
+	u32 txq_idx;
 	__le32 reserved3;
 	__le16 vlan_tci;
 	__le16 reserved4;
@@ -880,8 +871,8 @@ struct ob_mac_iocb_rsp {
 	u8 flags2;	/* */
 	u8 flags3;	/* */
 #define OB_MAC_IOCB_RSP_B	0x80	/* */
-	__le32 tid;
-	__le32 txq_idx;
+	u32 tid;
+	u32 txq_idx;
 	__le32 reserved[13];
 } __attribute((packed));
 
@@ -903,8 +894,8 @@ struct ob_mac_tso_iocb_req {
 #define OB_MAC_TSO_IOCB_V	0x04
 	__le32 reserved1[2];
 	__le32 frame_len;
-	__le32 tid;
-	__le32 txq_idx;
+	u32 tid;
+	u32 txq_idx;
 	__le16 total_hdrs_len;
 	__le16 net_trans_offset;
 #define OB_MAC_TRANSPORT_HDR_SHIFT 6
@@ -925,8 +916,8 @@ struct ob_mac_tso_iocb_rsp {
 	u8 flags2;	/* */
 	u8 flags3;	/* */
 #define OB_MAC_TSO_IOCB_RSP_B	0x8000
-	__le32 tid;
-	__le32 txq_idx;
+	u32 tid;
+	u32 txq_idx;
 	__le32 reserved2[13];
 } __attribute((packed));
 
@@ -979,10 +970,11 @@ struct ib_mac_iocb_rsp {
 
 	__le16 reserved1;
 	__le32 reserved2[6];
-	__le32 flags4;
-#define IB_MAC_IOCB_RSP_HV	0x20000000	/* */
-#define IB_MAC_IOCB_RSP_HS	0x40000000	/* */
-#define IB_MAC_IOCB_RSP_HL	0x80000000	/* */
+	u8 reserved3[3];
+	u8 flags4;
+#define IB_MAC_IOCB_RSP_HV	0x20
+#define IB_MAC_IOCB_RSP_HS	0x40
+#define IB_MAC_IOCB_RSP_HL	0x80
 	__le32 hdr_len;	/* */
 	__le32 hdr_addr_lo;	/* */
 	__le32 hdr_addr_hi;	/* */
@@ -1126,7 +1118,7 @@ struct map_list {
 struct tx_ring_desc {
 	struct sk_buff *skb;
 	struct ob_mac_iocb_req *queue_entry;
-	int index;
+	u32 index;
 	struct oal oal;
 	struct map_list map[MAX_SKB_FRAGS + 1];
 	int map_cnt;
@@ -1138,8 +1130,8 @@ struct bq_desc {
 		struct page *lbq_page;
 		struct sk_buff *skb;
 	} p;
-	struct bq_element *bq;
-	int index;
+	__le64 *addr;
+	u32 index;
 	DECLARE_PCI_UNMAP_ADDR(mapaddr);
 	DECLARE_PCI_UNMAP_LEN(maplen);
 };
@@ -1189,7 +1181,7 @@ struct rx_ring {
 	u32 cq_size;
 	u32 cq_len;
 	u16 cq_id;
-	u32 *prod_idx_sh_reg;	/* Shadowed producer register. */
+	volatile __le32 *prod_idx_sh_reg;	/* Shadowed producer register. */
 	dma_addr_t prod_idx_sh_reg_dma;
 	void __iomem *cnsmr_idx_db_reg;	/* PCI doorbell mem area + 0 */
 	u32 cnsmr_idx;	/* current sw idx */
@@ -1467,21 +1459,6 @@ static inline void ql_write_db_reg(u32 val, void __iomem *addr)
 	mmiowb();
 }
 
-/*
- * Shadow Registers:
- * Outbound queues have a consumer index that is maintained by the chip.
- * Inbound queues have a producer index that is maintained by the chip.
- * For lower overhead, these registers are "shadowed" to host memory
- * which allows the device driver to track the queue progress without
- * PCI reads. When an entry is placed on an inbound queue, the chip will
- * update the relevant index register and then copy the value to the
- * shadow register in host memory.
- */
-static inline unsigned int ql_read_sh_reg(const volatile void *addr)
-{
-	return *(volatile unsigned int __force *)addr;
-}
-
 extern char qlge_driver_name[];
 extern const char qlge_driver_version[];
 extern const struct ethtool_ops qlge_ethtool_ops;
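Note: the comment removed above documented the shadow-register scheme: the chip
maintains its queue indices in device registers and copies them to host memory,
so the driver can track queue progress without costly PCI reads. With
prod_idx_sh_reg now typed volatile __le32 *, callers byte-swap at the point of
use instead of going through the deleted ql_read_sh_reg() helper. A minimal
sketch of the replacement pattern, using only names visible in this diff (the
helper name itself is hypothetical):

	/* Read the chip-written producer index from its host-memory shadow.
	 * The chip stores it little-endian, so convert on the way out. */
	static inline u32 ql_shadow_prod_idx(const struct rx_ring *rx_ring)
	{
		return le32_to_cpu(*rx_ring->prod_idx_sh_reg);
	}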
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index 47df304a02c8..3f5e02d2e4a9 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -821,14 +821,11 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
 		le16_to_cpu(ib_mac_rsp->vlan_id));
 
 	printk(KERN_ERR PFX "flags4 = %s%s%s.\n",
-		le32_to_cpu(ib_mac_rsp->
-			    flags4) & IB_MAC_IOCB_RSP_HV ? "HV " : "",
-		le32_to_cpu(ib_mac_rsp->
-			    flags4) & IB_MAC_IOCB_RSP_HS ? "HS " : "",
-		le32_to_cpu(ib_mac_rsp->
-			    flags4) & IB_MAC_IOCB_RSP_HL ? "HL " : "");
-
-	if (le32_to_cpu(ib_mac_rsp->flags4) & IB_MAC_IOCB_RSP_HV) {
+		ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "",
+		ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "",
+		ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? "HL " : "");
+
+	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
 		printk(KERN_ERR PFX "hdr length = %d.\n",
 		       le32_to_cpu(ib_mac_rsp->hdr_len));
 		printk(KERN_ERR PFX "hdr addr_hi = 0x%x.\n",
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index eefb81b13758..9d922e2ff226 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -56,9 +56,9 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
 	for (i = 1; i < qdev->rss_ring_first_cq_id; i++, rx_ring++) {
 		rx_ring = &qdev->rx_ring[i];
 		cqicb = (struct cqicb *)rx_ring;
-		cqicb->irq_delay = le16_to_cpu(qdev->tx_coalesce_usecs);
+		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
 		cqicb->pkt_delay =
-		    le16_to_cpu(qdev->tx_max_coalesced_frames);
+		    cpu_to_le16(qdev->tx_max_coalesced_frames);
 		cqicb->flags = FLAGS_LI;
 		status = ql_write_cfg(qdev, cqicb, sizeof(cqicb),
 				      CFG_LCQ, rx_ring->cq_id);
@@ -79,9 +79,9 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
 	     i++) {
 		rx_ring = &qdev->rx_ring[i];
 		cqicb = (struct cqicb *)rx_ring;
-		cqicb->irq_delay = le16_to_cpu(qdev->rx_coalesce_usecs);
+		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
 		cqicb->pkt_delay =
-		    le16_to_cpu(qdev->rx_max_coalesced_frames);
+		    cpu_to_le16(qdev->rx_max_coalesced_frames);
 		cqicb->flags = FLAGS_LI;
 		status = ql_write_cfg(qdev, cqicb, sizeof(cqicb),
 				      CFG_LCQ, rx_ring->cq_id);
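Note: the two hunks above fix the direction of the byte-order conversion. The
cqicb is consumed by the adapter, so host values must be converted *to*
little-endian with cpu_to_le16() before being stored, not converted from it.
On little-endian hosts both conversions compile to nothing, which is why the
reversed calls went unnoticed; on big-endian hosts they differ. (In passing:
sizeof(cqicb) in the unchanged context is the size of a pointer, not of
struct cqicb — a separate latent bug not addressed by this patch.) A sketch
of the discipline, assuming only the cqicb field touched above:

	/* Store: host byte order -> device (little-endian). */
	static void cqicb_set_irq_delay(struct cqicb *cqicb, u16 usecs)
	{
		cqicb->irq_delay = cpu_to_le16(usecs);
	}

	/* Load: device (little-endian) -> host byte order. */
	static u16 cqicb_get_irq_delay(const struct cqicb *cqicb)
	{
		return le16_to_cpu(cqicb->irq_delay);
	}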
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 718a7bd0cd1a..f4c016012f18 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -257,7 +257,7 @@ int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
 	{
 		status =
 			ql_wait_reg_rdy(qdev,
-				MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
+				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 		if (status)
 			goto exit;
 		ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
@@ -265,13 +265,13 @@ int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
 				MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
 		status =
 			ql_wait_reg_rdy(qdev,
-				MAC_ADDR_IDX, MAC_ADDR_MR, MAC_ADDR_E);
+				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
 		if (status)
 			goto exit;
 		*value++ = ql_read32(qdev, MAC_ADDR_DATA);
 		status =
 			ql_wait_reg_rdy(qdev,
-				MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
+				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 		if (status)
 			goto exit;
 		ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
@@ -279,14 +279,14 @@ int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
 				MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
 		status =
 			ql_wait_reg_rdy(qdev,
-				MAC_ADDR_IDX, MAC_ADDR_MR, MAC_ADDR_E);
+				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
 		if (status)
 			goto exit;
 		*value++ = ql_read32(qdev, MAC_ADDR_DATA);
 		if (type == MAC_ADDR_TYPE_CAM_MAC) {
 			status =
 				ql_wait_reg_rdy(qdev,
-					MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
+					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 			if (status)
 				goto exit;
 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
@@ -294,7 +294,7 @@ int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
 				MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
 			status =
 				ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
-						MAC_ADDR_MR, MAC_ADDR_E);
+						MAC_ADDR_MR, 0);
 			if (status)
 				goto exit;
 			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
@@ -344,7 +344,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
 
 		status =
 			ql_wait_reg_rdy(qdev,
-				MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
+				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 		if (status)
 			goto exit;
 		ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
@@ -353,7 +353,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
 		ql_write32(qdev, MAC_ADDR_DATA, lower);
 		status =
 			ql_wait_reg_rdy(qdev,
-				MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
+				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 		if (status)
 			goto exit;
 		ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
@@ -362,7 +362,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
 		ql_write32(qdev, MAC_ADDR_DATA, upper);
 		status =
 			ql_wait_reg_rdy(qdev,
-				MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
+				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 		if (status)
 			goto exit;
 		ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
@@ -400,7 +400,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
 
 		status =
 			ql_wait_reg_rdy(qdev,
-				MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
+				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 		if (status)
 			goto exit;
 		ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
@@ -431,13 +431,13 @@ int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
 	if (status)
 		goto exit;
 
-	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, RT_IDX_E);
+	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
 	if (status)
 		goto exit;
 
 	ql_write32(qdev, RT_IDX,
 		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
-	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, RT_IDX_E);
+	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
 	if (status)
 		goto exit;
 	*value = ql_read32(qdev, RT_DATA);
@@ -874,7 +874,6 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 {
 	int clean_idx = rx_ring->lbq_clean_idx;
 	struct bq_desc *lbq_desc;
-	struct bq_element *bq;
 	u64 map;
 	int i;
 
@@ -884,7 +883,6 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
884 "lbq: try cleaning clean_idx = %d.\n", 883 "lbq: try cleaning clean_idx = %d.\n",
885 clean_idx); 884 clean_idx);
886 lbq_desc = &rx_ring->lbq[clean_idx]; 885 lbq_desc = &rx_ring->lbq[clean_idx];
887 bq = lbq_desc->bq;
888 if (lbq_desc->p.lbq_page == NULL) { 886 if (lbq_desc->p.lbq_page == NULL) {
889 QPRINTK(qdev, RX_STATUS, DEBUG, 887 QPRINTK(qdev, RX_STATUS, DEBUG,
890 "lbq: getting new page for index %d.\n", 888 "lbq: getting new page for index %d.\n",
@@ -906,10 +904,7 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 			}
 			pci_unmap_addr_set(lbq_desc, mapaddr, map);
 			pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
-			bq->addr_lo =	/*lbq_desc->addr_lo = */
-			    cpu_to_le32(map);
-			bq->addr_hi =	/*lbq_desc->addr_hi = */
-			    cpu_to_le32(map >> 32);
+			*lbq_desc->addr = cpu_to_le64(map);
 		}
 		clean_idx++;
 		if (clean_idx == rx_ring->lbq_len)
@@ -934,7 +929,6 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 {
 	int clean_idx = rx_ring->sbq_clean_idx;
 	struct bq_desc *sbq_desc;
-	struct bq_element *bq;
 	u64 map;
 	int i;
 
@@ -944,7 +938,6 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 		QPRINTK(qdev, RX_STATUS, DEBUG,
 			"sbq: try cleaning clean_idx = %d.\n",
 			clean_idx);
-		bq = sbq_desc->bq;
 		if (sbq_desc->p.skb == NULL) {
 			QPRINTK(qdev, RX_STATUS, DEBUG,
 				"sbq: getting new skb for index %d.\n",
@@ -963,11 +956,15 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 						     sbq_desc->p.skb->data,
 						     rx_ring->sbq_buf_size /
 						     2, PCI_DMA_FROMDEVICE);
+			if (pci_dma_mapping_error(qdev->pdev, map)) {
+				QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
+				rx_ring->sbq_clean_idx = clean_idx;
+				return;
+			}
 			pci_unmap_addr_set(sbq_desc, mapaddr, map);
 			pci_unmap_len_set(sbq_desc, maplen,
 					  rx_ring->sbq_buf_size / 2);
-			bq->addr_lo = cpu_to_le32(map);
-			bq->addr_hi = cpu_to_le32(map >> 32);
+			*sbq_desc->addr = cpu_to_le64(map);
 		}
 
 		clean_idx++;
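Note: the added pci_dma_mapping_error() check above closes a real hole:
pci_map_single() can fail, and publishing an invalid DMA address to the chip
would be unrecoverable. On failure the code records how far the refill got
(sbq_clean_idx) and returns so a later pass can retry. A self-contained sketch
of the map-verify-publish order (the helper name is hypothetical; every other
name appears in this diff):

	/* Map a small-buffer skb, verify the mapping, and only then hand
	 * the address to the hardware via the __le64 queue element. */
	static int ql_refill_sbuf(struct ql_adapter *qdev,
				  struct rx_ring *rx_ring,
				  struct bq_desc *sbq_desc, int clean_idx)
	{
		u64 map = pci_map_single(qdev->pdev, sbq_desc->p.skb->data,
					 rx_ring->sbq_buf_size / 2,
					 PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			rx_ring->sbq_clean_idx = clean_idx; /* retry later */
			return -ENOMEM;
		}
		pci_unmap_addr_set(sbq_desc, mapaddr, map);
		pci_unmap_len_set(sbq_desc, maplen, rx_ring->sbq_buf_size / 2);
		*sbq_desc->addr = cpu_to_le64(map); /* publish valid address */
		return 0;
	}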
@@ -1303,6 +1300,11 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1303 "No skb available, drop the packet.\n"); 1300 "No skb available, drop the packet.\n");
1304 return NULL; 1301 return NULL;
1305 } 1302 }
1303 pci_unmap_page(qdev->pdev,
1304 pci_unmap_addr(lbq_desc,
1305 mapaddr),
1306 pci_unmap_len(lbq_desc, maplen),
1307 PCI_DMA_FROMDEVICE);
1306 skb_reserve(skb, NET_IP_ALIGN); 1308 skb_reserve(skb, NET_IP_ALIGN);
1307 QPRINTK(qdev, RX_STATUS, DEBUG, 1309 QPRINTK(qdev, RX_STATUS, DEBUG,
1308 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length); 1310 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
@@ -1330,7 +1332,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
 		 * eventually be in trouble.
 		 */
 		int size, offset, i = 0;
-		struct bq_element *bq, bq_array[8];
+		__le64 *bq, bq_array[8];
 		sbq_desc = ql_get_curr_sbuf(rx_ring);
 		pci_unmap_single(qdev->pdev,
 				 pci_unmap_addr(sbq_desc, mapaddr),
@@ -1356,16 +1358,10 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
 		} else {
 			QPRINTK(qdev, RX_STATUS, DEBUG,
 				"Headers in small, %d bytes of data in chain of large.\n", length);
-			bq = (struct bq_element *)sbq_desc->p.skb->data;
+			bq = (__le64 *)sbq_desc->p.skb->data;
 		}
 		while (length > 0) {
 			lbq_desc = ql_get_curr_lbuf(rx_ring);
-			if ((bq->addr_lo & ~BQ_MASK) != lbq_desc->bq->addr_lo) {
-				QPRINTK(qdev, RX_STATUS, ERR,
-					"Panic!!! bad large buffer address, expected 0x%.08x, got 0x%.08x.\n",
-					lbq_desc->bq->addr_lo, bq->addr_lo);
-				return NULL;
-			}
 			pci_unmap_page(qdev->pdev,
 				       pci_unmap_addr(lbq_desc,
 						      mapaddr),
@@ -1549,7 +1545,7 @@ static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
 {
 	struct ql_adapter *qdev = rx_ring->qdev;
-	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
+	u32 prod = le32_to_cpu(*rx_ring->prod_idx_sh_reg);
 	struct ob_mac_iocb_rsp *net_rsp = NULL;
 	int count = 0;
 
@@ -1575,7 +1571,7 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
 		}
 		count++;
 		ql_update_cq(rx_ring);
-		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
+		prod = le32_to_cpu(*rx_ring->prod_idx_sh_reg);
 	}
 	ql_write_cq_idx(rx_ring);
 	if (netif_queue_stopped(qdev->ndev) && net_rsp != NULL) {
@@ -1595,7 +1591,7 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
 {
 	struct ql_adapter *qdev = rx_ring->qdev;
-	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
+	u32 prod = le32_to_cpu(*rx_ring->prod_idx_sh_reg);
 	struct ql_net_rsp_iocb *net_rsp;
 	int count = 0;
 
@@ -1628,7 +1624,7 @@ static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
 		}
 		count++;
 		ql_update_cq(rx_ring);
-		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
+		prod = le32_to_cpu(*rx_ring->prod_idx_sh_reg);
 		if (count == budget)
 			break;
 	}
@@ -1791,7 +1787,7 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
 	 * Check the default queue and wake handler if active.
 	 */
 	rx_ring = &qdev->rx_ring[0];
-	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) {
+	if (le32_to_cpu(*rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) {
 		QPRINTK(qdev, INTR, INFO, "Waking handler for rx_ring[0].\n");
 		ql_disable_completion_interrupt(qdev, intr_context->intr);
 		queue_delayed_work_on(smp_processor_id(), qdev->q_workqueue,
@@ -1805,7 +1801,7 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
 	 */
 	for (i = 1; i < qdev->rx_ring_count; i++) {
 		rx_ring = &qdev->rx_ring[i];
-		if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
+		if (le32_to_cpu(*rx_ring->prod_idx_sh_reg) !=
 		    rx_ring->cnsmr_idx) {
 			QPRINTK(qdev, INTR, INFO,
 				"Waking handler for rx_ring[%d].\n", i);
@@ -1874,7 +1870,7 @@ static void ql_hw_csum_setup(struct sk_buff *skb,
 {
 	int len;
 	struct iphdr *iph = ip_hdr(skb);
-	u16 *check;
+	__sum16 *check;
 	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
 	mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
 	mac_iocb_ptr->net_trans_offset =
@@ -2083,8 +2079,6 @@ static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
 			put_page(lbq_desc->p.lbq_page);
 			lbq_desc->p.lbq_page = NULL;
 		}
-		lbq_desc->bq->addr_lo = 0;
-		lbq_desc->bq->addr_hi = 0;
 	}
 }
 
@@ -2097,12 +2091,12 @@ static int ql_alloc_lbq_buffers(struct ql_adapter *qdev,
 	int i;
 	struct bq_desc *lbq_desc;
 	u64 map;
-	struct bq_element *bq = rx_ring->lbq_base;
+	__le64 *bq = rx_ring->lbq_base;
 
 	for (i = 0; i < rx_ring->lbq_len; i++) {
 		lbq_desc = &rx_ring->lbq[i];
 		memset(lbq_desc, 0, sizeof(lbq_desc));
-		lbq_desc->bq = bq;
+		lbq_desc->addr = bq;
 		lbq_desc->index = i;
 		lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
 		if (unlikely(!lbq_desc->p.lbq_page)) {
@@ -2119,8 +2113,7 @@ static int ql_alloc_lbq_buffers(struct ql_adapter *qdev,
 			}
 			pci_unmap_addr_set(lbq_desc, mapaddr, map);
 			pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
-			bq->addr_lo = cpu_to_le32(map);
-			bq->addr_hi = cpu_to_le32(map >> 32);
+			*lbq_desc->addr = cpu_to_le64(map);
 		}
 		bq++;
 	}
@@ -2149,13 +2142,6 @@ static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
 			dev_kfree_skb(sbq_desc->p.skb);
 			sbq_desc->p.skb = NULL;
 		}
-		if (sbq_desc->bq == NULL) {
-			QPRINTK(qdev, IFUP, ERR, "sbq_desc->bq %d is NULL.\n",
-				i);
-			return;
-		}
-		sbq_desc->bq->addr_lo = 0;
-		sbq_desc->bq->addr_hi = 0;
 	}
 }
 
@@ -2167,13 +2153,13 @@ static int ql_alloc_sbq_buffers(struct ql_adapter *qdev,
 	struct bq_desc *sbq_desc;
 	struct sk_buff *skb;
 	u64 map;
-	struct bq_element *bq = rx_ring->sbq_base;
+	__le64 *bq = rx_ring->sbq_base;
 
 	for (i = 0; i < rx_ring->sbq_len; i++) {
 		sbq_desc = &rx_ring->sbq[i];
 		memset(sbq_desc, 0, sizeof(sbq_desc));
 		sbq_desc->index = i;
-		sbq_desc->bq = bq;
+		sbq_desc->addr = bq;
 		skb = netdev_alloc_skb(qdev->ndev, rx_ring->sbq_buf_size);
 		if (unlikely(!skb)) {
 			/* Better luck next round */
@@ -2199,10 +2185,7 @@ static int ql_alloc_sbq_buffers(struct ql_adapter *qdev,
 		}
 		pci_unmap_addr_set(sbq_desc, mapaddr, map);
 		pci_unmap_len_set(sbq_desc, maplen, rx_ring->sbq_buf_size / 2);
-		bq->addr_lo =	/*sbq_desc->addr_lo = */
-		    cpu_to_le32(map);
-		bq->addr_hi =	/*sbq_desc->addr_hi = */
-		    cpu_to_le32(map >> 32);
+		*sbq_desc->addr = cpu_to_le64(map);
 		bq++;
 	}
 	return 0;
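Note: the hunks above complete the switch from struct bq_element (two packed
32-bit words plus the BQ_* tag bits) to a bare __le64 per buffer-queue entry,
so publishing a buffer is a single store instead of a hand-split lo/hi pair.
A sketch, assuming only the bq_desc layout from this diff:

	/* Hand one mapped buffer to the chip's buffer queue.  The entry is
	 * a single little-endian 64-bit DMA address, so no lo/hi split. */
	static void ql_publish_buf(struct bq_desc *desc, u64 map)
	{
		*desc->addr = cpu_to_le64(map);
	}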
@@ -2481,7 +2464,8 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 	memset((void *)cqicb, 0, sizeof(struct cqicb));
 	cqicb->msix_vect = rx_ring->irq;
 
-	cqicb->len = cpu_to_le16(rx_ring->cq_len | LEN_V | LEN_CPP_CONT);
+	bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
+	cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
 
 	cqicb->addr_lo = cpu_to_le32(rx_ring->cq_base_dma);
 	cqicb->addr_hi = cpu_to_le32((u64) rx_ring->cq_base_dma >> 32);
@@ -2503,8 +2487,11 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 		    cpu_to_le32(rx_ring->lbq_base_indirect_dma);
 		cqicb->lbq_addr_hi =
 		    cpu_to_le32((u64) rx_ring->lbq_base_indirect_dma >> 32);
-		cqicb->lbq_buf_size = cpu_to_le32(rx_ring->lbq_buf_size);
-		bq_len = (u16) rx_ring->lbq_len;
+		bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
+		    (u16) rx_ring->lbq_buf_size;
+		cqicb->lbq_buf_size = cpu_to_le16(bq_len);
+		bq_len = (rx_ring->lbq_len == 65536) ? 0 :
+		    (u16) rx_ring->lbq_len;
 		cqicb->lbq_len = cpu_to_le16(bq_len);
 		rx_ring->lbq_prod_idx = rx_ring->lbq_len - 16;
 		rx_ring->lbq_curr_idx = 0;
@@ -2520,7 +2507,8 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 		    cpu_to_le32((u64) rx_ring->sbq_base_indirect_dma >> 32);
 		cqicb->sbq_buf_size =
 		    cpu_to_le16(((rx_ring->sbq_buf_size / 2) + 8) & 0xfffffff8);
-		bq_len = (u16) rx_ring->sbq_len;
+		bq_len = (rx_ring->sbq_len == 65536) ? 0 :
+		    (u16) rx_ring->sbq_len;
 		cqicb->sbq_len = cpu_to_le16(bq_len);
 		rx_ring->sbq_prod_idx = rx_ring->sbq_len - 16;
 		rx_ring->sbq_curr_idx = 0;
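Note: the length fields written above (cq_len, lbq_len, sbq_len) are 16 bits
wide in the control block, so a ring of 65536 entries is not directly
representable; the new code encodes 65536 as 0, which the hardware presumably
decodes as "maximum". The old casts would have truncated 65536 to 0 silently,
but the explicit test documents the intent, and the lbq_buf_size store also
changes from cpu_to_le32 to cpu_to_le16 to match its 16-bit field. A sketch of
the encoding (helper name hypothetical):

	/* Encode a queue length (1..65536) into a 16-bit LE field;
	 * 65536 wraps to 0 by convention. */
	static __le16 ql_encode_qlen(u32 len)
	{
		return cpu_to_le16((len == 65536) ? 0 : (u16)len);
	}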
@@ -3341,11 +3329,11 @@ static int ql_configure_rings(struct ql_adapter *qdev)
 		    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
 			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
 			rx_ring->lbq_size =
-			    rx_ring->lbq_len * sizeof(struct bq_element);
+			    rx_ring->lbq_len * sizeof(__le64);
 			rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
 			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
 			rx_ring->sbq_size =
-			    rx_ring->sbq_len * sizeof(struct bq_element);
+			    rx_ring->sbq_len * sizeof(__le64);
 			rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
 			rx_ring->type = DEFAULT_Q;
 		} else if (i < qdev->rss_ring_first_cq_id) {
@@ -3372,11 +3360,11 @@ static int ql_configure_rings(struct ql_adapter *qdev)
 		    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
 			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
 			rx_ring->lbq_size =
-			    rx_ring->lbq_len * sizeof(struct bq_element);
+			    rx_ring->lbq_len * sizeof(__le64);
 			rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
 			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
 			rx_ring->sbq_size =
-			    rx_ring->sbq_len * sizeof(struct bq_element);
+			    rx_ring->sbq_len * sizeof(__le64);
 			rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
 			rx_ring->type = RX_Q;
 		}
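Note: the ring-size arithmetic in this last pair of hunks is numerically
unchanged: the removed struct bq_element packed two u32 words, i.e. 8 bytes,
which equals sizeof(__le64). A compile-time restatement of that fact, as a
sketch (old_bq_element reproduces the deleted struct for comparison):

	struct old_bq_element {
		u32 addr_lo;
		u32 addr_hi;
	} __attribute((packed));

	static inline void ql_bq_size_check(void)
	{
		BUILD_BUG_ON(sizeof(struct old_bq_element) != sizeof(__le64));
	}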