author	Shreyas Bhatewara <sbhatewara@vmware.com>	2009-11-16 08:41:33 -0500
committer	David S. Miller <davem@davemloft.net>	2009-11-17 07:08:50 -0500
commit	115924b6bdc7cc6bf7da5b933b09281e1f4e17a9 (patch)
tree	8d883a13b6dc2b29caa77a6e178e921e4843db1c /drivers/net/vmxnet3/vmxnet3_drv.c
parent	649300b9278dc9fc9c7dfaaa3719ead70882e726 (diff)
net: Getting rid of the x86 dependency to build vmxnet3
This patch removes the config dependency on x86 for building the vmxnet3 driver, so the driver can now be built on big-endian architectures. Although vmxnet3 is only supported on x86 virtual machines, this code goes in to ensure correctness: once the code no longer depends on x86, it must not assume a little-endian architecture in any of its operations.

Signed-off-by: Shreyas Bhatewara <sbhatewara@vmware.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
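The conversion pattern at the heart of the patch is small: descriptor bitfields shared with the device are only ever read or written through whole 32-bit little-endian words, and individual fields are pulled out with a shift-and-mask helper (get_bitfield32() in the diff below). The following minimal user-space sketch shows that helper in isolation; the byte-order stub, the field layout and main() are assumptions made for the example and are not part of the driver.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's le32_to_cpu(): a no-op on a little-endian
 * host, a byte swap on a big-endian one. */
static uint32_t example_le32_to_cpu(uint32_t v) { return v; }

/* Same shape as get_bitfield32() in the patch: read 'size' bits at bit
 * position 'pos' out of a little-endian double word. */
static uint32_t example_get_bitfield32(const uint32_t *bitfield,
                                       uint32_t pos, uint32_t size)
{
        uint32_t temp = example_le32_to_cpu(*bitfield);
        uint32_t mask = ((1u << size) - 1) << pos;

        return (temp & mask) >> pos;
}

int main(void)
{
        /* Hypothetical descriptor word: a generation bit at position 31
         * and a 14-bit length field at position 0 (layout invented for
         * the example, not taken from the vmxnet3 ABI). */
        uint32_t dword = (1u << 31) | 1514u;

        printf("gen = %u\n", example_get_bitfield32(&dword, 31, 1));
        printf("len = %u\n", example_get_bitfield32(&dword, 0, 14));
        return 0;
}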
Diffstat (limited to 'drivers/net/vmxnet3/vmxnet3_drv.c')
-rw-r--r--	drivers/net/vmxnet3/vmxnet3_drv.c | 357
1 file changed, 256 insertions(+), 101 deletions(-)
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 004353a46af0..8f24fe5822f4 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -29,7 +29,6 @@
29char vmxnet3_driver_name[] = "vmxnet3"; 29char vmxnet3_driver_name[] = "vmxnet3";
30#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver" 30#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
31 31
32
33/* 32/*
34 * PCI Device ID Table 33 * PCI Device ID Table
35 * Last entry must be all 0s 34 * Last entry must be all 0s
@@ -151,11 +150,10 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter)
151 } 150 }
152} 151}
153 152
154
155static void 153static void
156vmxnet3_process_events(struct vmxnet3_adapter *adapter) 154vmxnet3_process_events(struct vmxnet3_adapter *adapter)
157{ 155{
158 u32 events = adapter->shared->ecr; 156 u32 events = le32_to_cpu(adapter->shared->ecr);
159 if (!events) 157 if (!events)
160 return; 158 return;
161 159
@@ -173,7 +171,7 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter)
173 if (adapter->tqd_start->status.stopped) { 171 if (adapter->tqd_start->status.stopped) {
174 printk(KERN_ERR "%s: tq error 0x%x\n", 172 printk(KERN_ERR "%s: tq error 0x%x\n",
175 adapter->netdev->name, 173 adapter->netdev->name,
176 adapter->tqd_start->status.error); 174 le32_to_cpu(adapter->tqd_start->status.error));
177 } 175 }
178 if (adapter->rqd_start->status.stopped) { 176 if (adapter->rqd_start->status.stopped) {
179 printk(KERN_ERR "%s: rq error 0x%x\n", 177 printk(KERN_ERR "%s: rq error 0x%x\n",
@@ -185,6 +183,106 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter)
185 } 183 }
186} 184}
187 185
186#ifdef __BIG_ENDIAN_BITFIELD
187/*
188 * The device expects the bitfields in shared structures to be written in
189 * little endian. When CPU is big endian, the following routines are used to
190 * correctly read and write into ABI.
191 * The general technique used here is : double word bitfields are defined in
192 * opposite order for big endian architecture. Then before reading them in
193 * driver the complete double word is translated using le32_to_cpu. Similarly
194 * After the driver writes into bitfields, cpu_to_le32 is used to translate the
195 * double words into required format.
196 * In order to avoid touching bits in shared structure more than once, temporary
197 * descriptors are used. These are passed as srcDesc to following functions.
198 */
199static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
200 struct Vmxnet3_RxDesc *dstDesc)
201{
202 u32 *src = (u32 *)srcDesc + 2;
203 u32 *dst = (u32 *)dstDesc + 2;
204 dstDesc->addr = le64_to_cpu(srcDesc->addr);
205 *dst = le32_to_cpu(*src);
206 dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
207}
208
209static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
210 struct Vmxnet3_TxDesc *dstDesc)
211{
212 int i;
213 u32 *src = (u32 *)(srcDesc + 1);
214 u32 *dst = (u32 *)(dstDesc + 1);
215
216 /* Working backwards so that the gen bit is set at the end. */
217 for (i = 2; i > 0; i--) {
218 src--;
219 dst--;
220 *dst = cpu_to_le32(*src);
221 }
222}
223
224
225static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
226 struct Vmxnet3_RxCompDesc *dstDesc)
227{
228 int i = 0;
229 u32 *src = (u32 *)srcDesc;
230 u32 *dst = (u32 *)dstDesc;
231 for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
232 *dst = le32_to_cpu(*src);
233 src++;
234 dst++;
235 }
236}
237
238
239/* Used to read bitfield values from double words. */
240static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
241{
242 u32 temp = le32_to_cpu(*bitfield);
243 u32 mask = ((1 << size) - 1) << pos;
244 temp &= mask;
245 temp >>= pos;
246 return temp;
247}
248
249
250
251#endif /* __BIG_ENDIAN_BITFIELD */
252
253#ifdef __BIG_ENDIAN_BITFIELD
254
255# define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
256 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
257 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
258# define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
259 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
260 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
261# define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
262 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
263 VMXNET3_TCD_GEN_SIZE)
264# define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
265 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
266# define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
267 (dstrcd) = (tmp); \
268 vmxnet3_RxCompToCPU((rcd), (tmp)); \
269 } while (0)
270# define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
271 (dstrxd) = (tmp); \
272 vmxnet3_RxDescToCPU((rxd), (tmp)); \
273 } while (0)
274
275#else
276
277# define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
278# define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
279# define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
280# define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
281# define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
282# define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
283
284#endif /* __BIG_ENDIAN_BITFIELD */
285
188 286
189static void 287static void
190vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi, 288vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
@@ -212,7 +310,7 @@ vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
212 310
213 /* no out of order completion */ 311 /* no out of order completion */
214 BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp); 312 BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
215 BUG_ON(tq->tx_ring.base[eop_idx].txd.eop != 1); 313 BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);
216 314
217 skb = tq->buf_info[eop_idx].skb; 315 skb = tq->buf_info[eop_idx].skb;
218 BUG_ON(skb == NULL); 316 BUG_ON(skb == NULL);
@@ -246,9 +344,10 @@ vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
246 union Vmxnet3_GenericDesc *gdesc; 344 union Vmxnet3_GenericDesc *gdesc;
247 345
248 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; 346 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
249 while (gdesc->tcd.gen == tq->comp_ring.gen) { 347 while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
250 completed += vmxnet3_unmap_pkt(gdesc->tcd.txdIdx, tq, 348 completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
251 adapter->pdev, adapter); 349 &gdesc->tcd), tq, adapter->pdev,
350 adapter);
252 351
253 vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring); 352 vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
254 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; 353 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
@@ -472,9 +571,9 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
472 } 571 }
473 572
474 BUG_ON(rbi->dma_addr == 0); 573 BUG_ON(rbi->dma_addr == 0);
475 gd->rxd.addr = rbi->dma_addr; 574 gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
476 gd->dword[2] = (ring->gen << VMXNET3_RXD_GEN_SHIFT) | val | 575 gd->dword[2] = cpu_to_le32((ring->gen << VMXNET3_RXD_GEN_SHIFT)
477 rbi->len; 576 | val | rbi->len);
478 577
479 num_allocated++; 578 num_allocated++;
480 vmxnet3_cmd_ring_adv_next2fill(ring); 579 vmxnet3_cmd_ring_adv_next2fill(ring);
@@ -531,10 +630,10 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
531 630
532 /* no need to map the buffer if headers are copied */ 631 /* no need to map the buffer if headers are copied */
533 if (ctx->copy_size) { 632 if (ctx->copy_size) {
534 ctx->sop_txd->txd.addr = tq->data_ring.basePA + 633 ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
535 tq->tx_ring.next2fill * 634 tq->tx_ring.next2fill *
536 sizeof(struct Vmxnet3_TxDataDesc); 635 sizeof(struct Vmxnet3_TxDataDesc));
537 ctx->sop_txd->dword[2] = dw2 | ctx->copy_size; 636 ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
538 ctx->sop_txd->dword[3] = 0; 637 ctx->sop_txd->dword[3] = 0;
539 638
540 tbi = tq->buf_info + tq->tx_ring.next2fill; 639 tbi = tq->buf_info + tq->tx_ring.next2fill;
@@ -542,7 +641,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
542 641
543 dev_dbg(&adapter->netdev->dev, 642 dev_dbg(&adapter->netdev->dev,
544 "txd[%u]: 0x%Lx 0x%x 0x%x\n", 643 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
545 tq->tx_ring.next2fill, ctx->sop_txd->txd.addr, 644 tq->tx_ring.next2fill,
645 le64_to_cpu(ctx->sop_txd->txd.addr),
546 ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]); 646 ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
547 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); 647 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
548 648
@@ -570,14 +670,14 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
570 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; 670 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
571 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); 671 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
572 672
573 gdesc->txd.addr = tbi->dma_addr; 673 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
574 gdesc->dword[2] = dw2 | buf_size; 674 gdesc->dword[2] = cpu_to_le32(dw2 | buf_size);
575 gdesc->dword[3] = 0; 675 gdesc->dword[3] = 0;
576 676
577 dev_dbg(&adapter->netdev->dev, 677 dev_dbg(&adapter->netdev->dev,
578 "txd[%u]: 0x%Lx 0x%x 0x%x\n", 678 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
579 tq->tx_ring.next2fill, gdesc->txd.addr, 679 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
580 gdesc->dword[2], gdesc->dword[3]); 680 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
581 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); 681 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
582 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; 682 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
583 683
@@ -599,14 +699,14 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
599 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; 699 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
600 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); 700 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
601 701
602 gdesc->txd.addr = tbi->dma_addr; 702 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
603 gdesc->dword[2] = dw2 | frag->size; 703 gdesc->dword[2] = cpu_to_le32(dw2 | frag->size);
604 gdesc->dword[3] = 0; 704 gdesc->dword[3] = 0;
605 705
606 dev_dbg(&adapter->netdev->dev, 706 dev_dbg(&adapter->netdev->dev,
607 "txd[%u]: 0x%llu %u %u\n", 707 "txd[%u]: 0x%llu %u %u\n",
608 tq->tx_ring.next2fill, gdesc->txd.addr, 708 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
609 gdesc->dword[2], gdesc->dword[3]); 709 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
610 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); 710 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
611 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; 711 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
612 } 712 }
@@ -751,6 +851,10 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
751 unsigned long flags; 851 unsigned long flags;
752 struct vmxnet3_tx_ctx ctx; 852 struct vmxnet3_tx_ctx ctx;
753 union Vmxnet3_GenericDesc *gdesc; 853 union Vmxnet3_GenericDesc *gdesc;
854#ifdef __BIG_ENDIAN_BITFIELD
855 /* Use temporary descriptor to avoid touching bits multiple times */
856 union Vmxnet3_GenericDesc tempTxDesc;
857#endif
754 858
755 /* conservatively estimate # of descriptors to use */ 859 /* conservatively estimate # of descriptors to use */
756 count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 860 count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
@@ -827,16 +931,22 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
827 vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter); 931 vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);
828 932
829 /* setup the EOP desc */ 933 /* setup the EOP desc */
830 ctx.eop_txd->dword[3] = VMXNET3_TXD_CQ | VMXNET3_TXD_EOP; 934 ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
831 935
832 /* setup the SOP desc */ 936 /* setup the SOP desc */
937#ifdef __BIG_ENDIAN_BITFIELD
938 gdesc = &tempTxDesc;
939 gdesc->dword[2] = ctx.sop_txd->dword[2];
940 gdesc->dword[3] = ctx.sop_txd->dword[3];
941#else
833 gdesc = ctx.sop_txd; 942 gdesc = ctx.sop_txd;
943#endif
834 if (ctx.mss) { 944 if (ctx.mss) {
835 gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size; 945 gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
836 gdesc->txd.om = VMXNET3_OM_TSO; 946 gdesc->txd.om = VMXNET3_OM_TSO;
837 gdesc->txd.msscof = ctx.mss; 947 gdesc->txd.msscof = ctx.mss;
838 tq->shared->txNumDeferred += (skb->len - gdesc->txd.hlen + 948 le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
839 ctx.mss - 1) / ctx.mss; 949 gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
840 } else { 950 } else {
841 if (skb->ip_summed == CHECKSUM_PARTIAL) { 951 if (skb->ip_summed == CHECKSUM_PARTIAL) {
842 gdesc->txd.hlen = ctx.eth_ip_hdr_size; 952 gdesc->txd.hlen = ctx.eth_ip_hdr_size;
@@ -847,7 +957,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
847 gdesc->txd.om = 0; 957 gdesc->txd.om = 0;
848 gdesc->txd.msscof = 0; 958 gdesc->txd.msscof = 0;
849 } 959 }
850 tq->shared->txNumDeferred++; 960 le32_add_cpu(&tq->shared->txNumDeferred, 1);
851 } 961 }
852 962
853 if (vlan_tx_tag_present(skb)) { 963 if (vlan_tx_tag_present(skb)) {
@@ -855,19 +965,27 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
855 gdesc->txd.tci = vlan_tx_tag_get(skb); 965 gdesc->txd.tci = vlan_tx_tag_get(skb);
856 } 966 }
857 967
858 wmb(); 968 /* finally flips the GEN bit of the SOP desc. */
859 969 gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
860 /* finally flips the GEN bit of the SOP desc */ 970 VMXNET3_TXD_GEN);
861 gdesc->dword[2] ^= VMXNET3_TXD_GEN; 971#ifdef __BIG_ENDIAN_BITFIELD
972 /* Finished updating in bitfields of Tx Desc, so write them in original
973 * place.
974 */
975 vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
976 (struct Vmxnet3_TxDesc *)ctx.sop_txd);
977 gdesc = ctx.sop_txd;
978#endif
862 dev_dbg(&adapter->netdev->dev, 979 dev_dbg(&adapter->netdev->dev,
863 "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n", 980 "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
864 (u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd - 981 (u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
865 tq->tx_ring.base), gdesc->txd.addr, gdesc->dword[2], 982 tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
866 gdesc->dword[3]); 983 le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
867 984
868 spin_unlock_irqrestore(&tq->tx_lock, flags); 985 spin_unlock_irqrestore(&tq->tx_lock, flags);
869 986
870 if (tq->shared->txNumDeferred >= tq->shared->txThreshold) { 987 if (le32_to_cpu(tq->shared->txNumDeferred) >=
988 le32_to_cpu(tq->shared->txThreshold)) {
871 tq->shared->txNumDeferred = 0; 989 tq->shared->txNumDeferred = 0;
872 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD, 990 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD,
873 tq->tx_ring.next2fill); 991 tq->tx_ring.next2fill);
@@ -889,9 +1007,8 @@ static netdev_tx_t
889vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 1007vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
890{ 1008{
891 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1009 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
892 struct vmxnet3_tx_queue *tq = &adapter->tx_queue;
893 1010
894 return vmxnet3_tq_xmit(skb, tq, adapter, netdev); 1011 return vmxnet3_tq_xmit(skb, &adapter->tx_queue, adapter, netdev);
895} 1012}
896 1013
897 1014
@@ -902,7 +1019,7 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
902{ 1019{
903 if (!gdesc->rcd.cnc && adapter->rxcsum) { 1020 if (!gdesc->rcd.cnc && adapter->rxcsum) {
904 /* typical case: TCP/UDP over IP and both csums are correct */ 1021 /* typical case: TCP/UDP over IP and both csums are correct */
905 if ((gdesc->dword[3] & VMXNET3_RCD_CSUM_OK) == 1022 if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
906 VMXNET3_RCD_CSUM_OK) { 1023 VMXNET3_RCD_CSUM_OK) {
907 skb->ip_summed = CHECKSUM_UNNECESSARY; 1024 skb->ip_summed = CHECKSUM_UNNECESSARY;
908 BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp)); 1025 BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
@@ -957,8 +1074,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
957 u32 num_rxd = 0; 1074 u32 num_rxd = 0;
958 struct Vmxnet3_RxCompDesc *rcd; 1075 struct Vmxnet3_RxCompDesc *rcd;
959 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx; 1076 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
960 1077#ifdef __BIG_ENDIAN_BITFIELD
961 rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd; 1078 struct Vmxnet3_RxDesc rxCmdDesc;
1079 struct Vmxnet3_RxCompDesc rxComp;
1080#endif
1081 vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
1082 &rxComp);
962 while (rcd->gen == rq->comp_ring.gen) { 1083 while (rcd->gen == rq->comp_ring.gen) {
963 struct vmxnet3_rx_buf_info *rbi; 1084 struct vmxnet3_rx_buf_info *rbi;
964 struct sk_buff *skb; 1085 struct sk_buff *skb;
@@ -976,11 +1097,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
976 1097
977 idx = rcd->rxdIdx; 1098 idx = rcd->rxdIdx;
978 ring_idx = rcd->rqID == rq->qid ? 0 : 1; 1099 ring_idx = rcd->rqID == rq->qid ? 0 : 1;
979 1100 vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
980 rxd = &rq->rx_ring[ring_idx].base[idx].rxd; 1101 &rxCmdDesc);
981 rbi = rq->buf_info[ring_idx] + idx; 1102 rbi = rq->buf_info[ring_idx] + idx;
982 1103
983 BUG_ON(rxd->addr != rbi->dma_addr || rxd->len != rbi->len); 1104 BUG_ON(rxd->addr != rbi->dma_addr ||
1105 rxd->len != rbi->len);
984 1106
985 if (unlikely(rcd->eop && rcd->err)) { 1107 if (unlikely(rcd->eop && rcd->err)) {
986 vmxnet3_rx_error(rq, rcd, ctx, adapter); 1108 vmxnet3_rx_error(rq, rcd, ctx, adapter);
@@ -1078,7 +1200,8 @@ rcd_done:
1078 } 1200 }
1079 1201
1080 vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring); 1202 vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
1081 rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd; 1203 vmxnet3_getRxComp(rcd,
1204 &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
1082 } 1205 }
1083 1206
1084 return num_rxd; 1207 return num_rxd;
@@ -1094,7 +1217,11 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
1094 1217
1095 for (ring_idx = 0; ring_idx < 2; ring_idx++) { 1218 for (ring_idx = 0; ring_idx < 2; ring_idx++) {
1096 for (i = 0; i < rq->rx_ring[ring_idx].size; i++) { 1219 for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
1097 rxd = &rq->rx_ring[ring_idx].base[i].rxd; 1220#ifdef __BIG_ENDIAN_BITFIELD
1221 struct Vmxnet3_RxDesc rxDesc;
1222#endif
1223 vmxnet3_getRxDesc(rxd,
1224 &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
1098 1225
1099 if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD && 1226 if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
1100 rq->buf_info[ring_idx][i].skb) { 1227 rq->buf_info[ring_idx][i].skb) {
@@ -1346,12 +1473,12 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
1346 err = request_irq(adapter->intr.msix_entries[0].vector, 1473 err = request_irq(adapter->intr.msix_entries[0].vector,
1347 vmxnet3_intr, 0, adapter->netdev->name, 1474 vmxnet3_intr, 0, adapter->netdev->name,
1348 adapter->netdev); 1475 adapter->netdev);
1349 } else 1476 } else if (adapter->intr.type == VMXNET3_IT_MSI) {
1350#endif
1351 if (adapter->intr.type == VMXNET3_IT_MSI) {
1352 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0, 1477 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
1353 adapter->netdev->name, adapter->netdev); 1478 adapter->netdev->name, adapter->netdev);
1354 } else { 1479 } else
1480#endif
1481 {
1355 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 1482 err = request_irq(adapter->pdev->irq, vmxnet3_intr,
1356 IRQF_SHARED, adapter->netdev->name, 1483 IRQF_SHARED, adapter->netdev->name,
1357 adapter->netdev); 1484 adapter->netdev);
@@ -1412,6 +1539,22 @@ vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
1412} 1539}
1413 1540
1414 1541
1542inline void set_flag_le16(__le16 *data, u16 flag)
1543{
1544 *data = cpu_to_le16(le16_to_cpu(*data) | flag);
1545}
1546
1547inline void set_flag_le64(__le64 *data, u64 flag)
1548{
1549 *data = cpu_to_le64(le64_to_cpu(*data) | flag);
1550}
1551
1552inline void reset_flag_le64(__le64 *data, u64 flag)
1553{
1554 *data = cpu_to_le64(le64_to_cpu(*data) & ~flag);
1555}
1556
1557
1415static void 1558static void
1416vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) 1559vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1417{ 1560{
@@ -1427,7 +1570,8 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1427 adapter->vlan_grp = grp; 1570 adapter->vlan_grp = grp;
1428 1571
1429 /* update FEATURES to device */ 1572 /* update FEATURES to device */
1430 devRead->misc.uptFeatures |= UPT1_F_RXVLAN; 1573 set_flag_le64(&devRead->misc.uptFeatures,
1574 UPT1_F_RXVLAN);
1431 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1575 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1432 VMXNET3_CMD_UPDATE_FEATURE); 1576 VMXNET3_CMD_UPDATE_FEATURE);
1433 /* 1577 /*
@@ -1450,7 +1594,7 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1450 struct Vmxnet3_DSDevRead *devRead = &shared->devRead; 1594 struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
1451 adapter->vlan_grp = NULL; 1595 adapter->vlan_grp = NULL;
1452 1596
1453 if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) { 1597 if (le64_to_cpu(devRead->misc.uptFeatures) & UPT1_F_RXVLAN) {
1454 int i; 1598 int i;
1455 1599
1456 for (i = 0; i < VMXNET3_VFT_SIZE; i++) { 1600 for (i = 0; i < VMXNET3_VFT_SIZE; i++) {
@@ -1463,7 +1607,8 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1463 VMXNET3_CMD_UPDATE_VLAN_FILTERS); 1607 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1464 1608
1465 /* update FEATURES to device */ 1609 /* update FEATURES to device */
1466 devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN; 1610 reset_flag_le64(&devRead->misc.uptFeatures,
1611 UPT1_F_RXVLAN);
1467 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1612 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1468 VMXNET3_CMD_UPDATE_FEATURE); 1613 VMXNET3_CMD_UPDATE_FEATURE);
1469 } 1614 }
@@ -1565,9 +1710,10 @@ vmxnet3_set_mc(struct net_device *netdev)
1565 new_table = vmxnet3_copy_mc(netdev); 1710 new_table = vmxnet3_copy_mc(netdev);
1566 if (new_table) { 1711 if (new_table) {
1567 new_mode |= VMXNET3_RXM_MCAST; 1712 new_mode |= VMXNET3_RXM_MCAST;
1568 rxConf->mfTableLen = netdev->mc_count * 1713 rxConf->mfTableLen = cpu_to_le16(
1569 ETH_ALEN; 1714 netdev->mc_count * ETH_ALEN);
1570 rxConf->mfTablePA = virt_to_phys(new_table); 1715 rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
1716 new_table));
1571 } else { 1717 } else {
1572 printk(KERN_INFO "%s: failed to copy mcast list" 1718 printk(KERN_INFO "%s: failed to copy mcast list"
1573 ", setting ALL_MULTI\n", netdev->name); 1719 ", setting ALL_MULTI\n", netdev->name);
@@ -1582,7 +1728,7 @@ vmxnet3_set_mc(struct net_device *netdev)
1582 } 1728 }
1583 1729
1584 if (new_mode != rxConf->rxMode) { 1730 if (new_mode != rxConf->rxMode) {
1585 rxConf->rxMode = new_mode; 1731 rxConf->rxMode = cpu_to_le32(new_mode);
1586 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1732 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1587 VMXNET3_CMD_UPDATE_RX_MODE); 1733 VMXNET3_CMD_UPDATE_RX_MODE);
1588 } 1734 }
@@ -1610,63 +1756,69 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
1610 memset(shared, 0, sizeof(*shared)); 1756 memset(shared, 0, sizeof(*shared));
1611 1757
1612 /* driver settings */ 1758 /* driver settings */
1613 shared->magic = VMXNET3_REV1_MAGIC; 1759 shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
1614 devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM; 1760 devRead->misc.driverInfo.version = cpu_to_le32(
1761 VMXNET3_DRIVER_VERSION_NUM);
1615 devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ? 1762 devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
1616 VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64); 1763 VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
1617 devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX; 1764 devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
1618 devRead->misc.driverInfo.vmxnet3RevSpt = 1; 1765 *((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
1619 devRead->misc.driverInfo.uptVerSpt = 1; 1766 *((u32 *)&devRead->misc.driverInfo.gos));
1767 devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
1768 devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
1620 1769
1621 devRead->misc.ddPA = virt_to_phys(adapter); 1770 devRead->misc.ddPA = cpu_to_le64(virt_to_phys(adapter));
1622 devRead->misc.ddLen = sizeof(struct vmxnet3_adapter); 1771 devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
1623 1772
1624 /* set up feature flags */ 1773 /* set up feature flags */
1625 if (adapter->rxcsum) 1774 if (adapter->rxcsum)
1626 devRead->misc.uptFeatures |= UPT1_F_RXCSUM; 1775 set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXCSUM);
1627 1776
1628 if (adapter->lro) { 1777 if (adapter->lro) {
1629 devRead->misc.uptFeatures |= UPT1_F_LRO; 1778 set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_LRO);
1630 devRead->misc.maxNumRxSG = 1 + MAX_SKB_FRAGS; 1779 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
1631 } 1780 }
1632 if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) 1781 if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX)
1633 && adapter->vlan_grp) { 1782 && adapter->vlan_grp) {
1634 devRead->misc.uptFeatures |= UPT1_F_RXVLAN; 1783 set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXVLAN);
1635 } 1784 }
1636 1785
1637 devRead->misc.mtu = adapter->netdev->mtu; 1786 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
1638 devRead->misc.queueDescPA = adapter->queue_desc_pa; 1787 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
1639 devRead->misc.queueDescLen = sizeof(struct Vmxnet3_TxQueueDesc) + 1788 devRead->misc.queueDescLen = cpu_to_le32(
1640 sizeof(struct Vmxnet3_RxQueueDesc); 1789 sizeof(struct Vmxnet3_TxQueueDesc) +
1790 sizeof(struct Vmxnet3_RxQueueDesc));
1641 1791
1642 /* tx queue settings */ 1792 /* tx queue settings */
1643 BUG_ON(adapter->tx_queue.tx_ring.base == NULL); 1793 BUG_ON(adapter->tx_queue.tx_ring.base == NULL);
1644 1794
1645 devRead->misc.numTxQueues = 1; 1795 devRead->misc.numTxQueues = 1;
1646 tqc = &adapter->tqd_start->conf; 1796 tqc = &adapter->tqd_start->conf;
1647 tqc->txRingBasePA = adapter->tx_queue.tx_ring.basePA; 1797 tqc->txRingBasePA = cpu_to_le64(adapter->tx_queue.tx_ring.basePA);
1648 tqc->dataRingBasePA = adapter->tx_queue.data_ring.basePA; 1798 tqc->dataRingBasePA = cpu_to_le64(adapter->tx_queue.data_ring.basePA);
1649 tqc->compRingBasePA = adapter->tx_queue.comp_ring.basePA; 1799 tqc->compRingBasePA = cpu_to_le64(adapter->tx_queue.comp_ring.basePA);
1650 tqc->ddPA = virt_to_phys(adapter->tx_queue.buf_info); 1800 tqc->ddPA = cpu_to_le64(virt_to_phys(
1651 tqc->txRingSize = adapter->tx_queue.tx_ring.size; 1801 adapter->tx_queue.buf_info));
1652 tqc->dataRingSize = adapter->tx_queue.data_ring.size; 1802 tqc->txRingSize = cpu_to_le32(adapter->tx_queue.tx_ring.size);
1653 tqc->compRingSize = adapter->tx_queue.comp_ring.size; 1803 tqc->dataRingSize = cpu_to_le32(adapter->tx_queue.data_ring.size);
1654 tqc->ddLen = sizeof(struct vmxnet3_tx_buf_info) * 1804 tqc->compRingSize = cpu_to_le32(adapter->tx_queue.comp_ring.size);
1655 tqc->txRingSize; 1805 tqc->ddLen = cpu_to_le32(sizeof(struct vmxnet3_tx_buf_info) *
1806 tqc->txRingSize);
1656 tqc->intrIdx = adapter->tx_queue.comp_ring.intr_idx; 1807 tqc->intrIdx = adapter->tx_queue.comp_ring.intr_idx;
1657 1808
1658 /* rx queue settings */ 1809 /* rx queue settings */
1659 devRead->misc.numRxQueues = 1; 1810 devRead->misc.numRxQueues = 1;
1660 rqc = &adapter->rqd_start->conf; 1811 rqc = &adapter->rqd_start->conf;
1661 rqc->rxRingBasePA[0] = adapter->rx_queue.rx_ring[0].basePA; 1812 rqc->rxRingBasePA[0] = cpu_to_le64(adapter->rx_queue.rx_ring[0].basePA);
1662 rqc->rxRingBasePA[1] = adapter->rx_queue.rx_ring[1].basePA; 1813 rqc->rxRingBasePA[1] = cpu_to_le64(adapter->rx_queue.rx_ring[1].basePA);
1663 rqc->compRingBasePA = adapter->rx_queue.comp_ring.basePA; 1814 rqc->compRingBasePA = cpu_to_le64(adapter->rx_queue.comp_ring.basePA);
1664 rqc->ddPA = virt_to_phys(adapter->rx_queue.buf_info); 1815 rqc->ddPA = cpu_to_le64(virt_to_phys(
1665 rqc->rxRingSize[0] = adapter->rx_queue.rx_ring[0].size; 1816 adapter->rx_queue.buf_info));
1666 rqc->rxRingSize[1] = adapter->rx_queue.rx_ring[1].size; 1817 rqc->rxRingSize[0] = cpu_to_le32(adapter->rx_queue.rx_ring[0].size);
1667 rqc->compRingSize = adapter->rx_queue.comp_ring.size; 1818 rqc->rxRingSize[1] = cpu_to_le32(adapter->rx_queue.rx_ring[1].size);
1668 rqc->ddLen = sizeof(struct vmxnet3_rx_buf_info) * 1819 rqc->compRingSize = cpu_to_le32(adapter->rx_queue.comp_ring.size);
1669 (rqc->rxRingSize[0] + rqc->rxRingSize[1]); 1820 rqc->ddLen = cpu_to_le32(sizeof(struct vmxnet3_rx_buf_info) *
1821 (rqc->rxRingSize[0] + rqc->rxRingSize[1]));
1670 rqc->intrIdx = adapter->rx_queue.comp_ring.intr_idx; 1822 rqc->intrIdx = adapter->rx_queue.comp_ring.intr_idx;
1671 1823
1672 /* intr settings */ 1824 /* intr settings */
@@ -1715,11 +1867,10 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
1715 1867
1716 vmxnet3_setup_driver_shared(adapter); 1868 vmxnet3_setup_driver_shared(adapter);
1717 1869
1718 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 1870 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
1719 VMXNET3_GET_ADDR_LO(adapter->shared_pa)); 1871 adapter->shared_pa));
1720 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 1872 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
1721 VMXNET3_GET_ADDR_HI(adapter->shared_pa)); 1873 adapter->shared_pa));
1722
1723 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1874 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1724 VMXNET3_CMD_ACTIVATE_DEV); 1875 VMXNET3_CMD_ACTIVATE_DEV);
1725 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); 1876 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
@@ -2425,7 +2576,7 @@ vmxnet3_suspend(struct device *device)
2425 memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN); 2576 memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
2426 pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */ 2577 pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
2427 2578
2428 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER; 2579 set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
2429 i++; 2580 i++;
2430 } 2581 }
2431 2582
@@ -2467,19 +2618,21 @@ vmxnet3_suspend(struct device *device)
2467 pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */ 2618 pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
2468 in_dev_put(in_dev); 2619 in_dev_put(in_dev);
2469 2620
2470 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER; 2621 set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
2471 i++; 2622 i++;
2472 } 2623 }
2473 2624
2474skip_arp: 2625skip_arp:
2475 if (adapter->wol & WAKE_MAGIC) 2626 if (adapter->wol & WAKE_MAGIC)
2476 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC; 2627 set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_MAGIC);
2477 2628
2478 pmConf->numFilters = i; 2629 pmConf->numFilters = i;
2479 2630
2480 adapter->shared->devRead.pmConfDesc.confVer = 1; 2631 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
2481 adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf); 2632 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
2482 adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf); 2633 *pmConf));
2634 adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
2635 pmConf));
2483 2636
2484 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2637 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2485 VMXNET3_CMD_UPDATE_PMCFG); 2638 VMXNET3_CMD_UPDATE_PMCFG);
@@ -2510,9 +2663,11 @@ vmxnet3_resume(struct device *device)
2510 pmConf = adapter->pm_conf; 2663 pmConf = adapter->pm_conf;
2511 memset(pmConf, 0, sizeof(*pmConf)); 2664 memset(pmConf, 0, sizeof(*pmConf));
2512 2665
2513 adapter->shared->devRead.pmConfDesc.confVer = 1; 2666 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
2514 adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf); 2667 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
2515 adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf); 2668 *pmConf));
2669 adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le32(virt_to_phys(
2670 pmConf));
2516 2671
2517 netif_device_attach(netdev); 2672 netif_device_attach(netdev);
2518 pci_set_power_state(pdev, PCI_D0); 2673 pci_set_power_state(pdev, PCI_D0);
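
As a closing illustration of the flag helpers added above (set_flag_le16, set_flag_le64, reset_flag_le64), the sketch below shows the read-modify-write pattern they encapsulate: convert the shared little-endian field to CPU order, change the flag, and convert back. The byte-order stubs, the flag value and main() are assumptions made for this example only, not values from the driver headers.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel's le64_to_cpu()/cpu_to_le64() on a
 * little-endian host; the real macros also handle big-endian CPUs. */
static uint64_t example_le64_to_cpu(uint64_t v) { return v; }
static uint64_t example_cpu_to_le64(uint64_t v) { return v; }

/* Mirrors set_flag_le64() from the patch. */
static void example_set_flag_le64(uint64_t *data, uint64_t flag)
{
        *data = example_cpu_to_le64(example_le64_to_cpu(*data) | flag);
}

/* Mirrors reset_flag_le64() from the patch. */
static void example_reset_flag_le64(uint64_t *data, uint64_t flag)
{
        *data = example_cpu_to_le64(example_le64_to_cpu(*data) & ~flag);
}

#define EXAMPLE_F_RXVLAN 0x4ULL   /* illustrative flag value only */

int main(void)
{
        uint64_t uptFeatures = 0;

        example_set_flag_le64(&uptFeatures, EXAMPLE_F_RXVLAN);
        printf("after set:   0x%llx\n", (unsigned long long)uptFeatures);

        example_reset_flag_le64(&uptFeatures, EXAMPLE_F_RXVLAN);
        printf("after reset: 0x%llx\n", (unsigned long long)uptFeatures);
        return 0;
}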