author		Shreyas Bhatewara <sbhatewara@vmware.com>	2009-11-16 08:41:33 -0500
committer	David S. Miller <davem@davemloft.net>	2009-11-17 07:08:50 -0500
commit		115924b6bdc7cc6bf7da5b933b09281e1f4e17a9 (patch)
tree		8d883a13b6dc2b29caa77a6e178e921e4843db1c /drivers
parent		649300b9278dc9fc9c7dfaaa3719ead70882e726 (diff)
net: Getting rid of the x86 dependency to build vmxnet3
This patch removes the config dependency on x86 for building the vmxnet3
driver, so the driver can now be built on big endian architectures.
Although vmxnet3 is not supported in VMs on architectures other than x86,
all of this code goes in to ensure correctness: once the code no longer
depends on x86, it must not assume a little endian architecture in any of
its operations.

Signed-off-by: Shreyas Bhatewara <sbhatewara@vmware.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
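The idea in miniature: on a little endian host, compilers allocate the
bitfields below LSB-first, so the raw dword already matches the device's
format; a big endian build declares the same fields in reverse order and
converts whole dwords with cpu_to_le32()/le32_to_cpu(). The following is a
hypothetical standalone userspace sketch (not part of the patch) of the
second dword of Vmxnet3_TxDesc:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical demo, not driver code: second dword of Vmxnet3_TxDesc
 * in the little endian layout. */
struct txd_dword2 {
	uint32_t len:14;
	uint32_t gen:1;		/* generation bit */
	uint32_t rsvd:1;
	uint32_t dtype:1;	/* descriptor type */
	uint32_t ext1:1;
	uint32_t msscof:14;	/* MSS, checksum offset, flags */
};

int main(void)
{
	struct txd_dword2 d = { .len = 1514, .gen = 1 };
	uint32_t raw;

	memcpy(&raw, &d, sizeof(raw));
	/* len occupies bits 0-13 (1514 = 0x5ea) and gen is bit 14
	 * (0x4000), so a little endian host prints 0x000045ea; a big
	 * endian build of the driver produces the same device-visible
	 * bytes by declaring the fields in the opposite order and
	 * passing the dword through cpu_to_le32(). */
	printf("raw dword = 0x%08x\n", raw);
	return 0;
}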
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/Kconfig                   |   2
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_defs.h    | 246
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c     | 357
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethtool.c |  10
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h     |  12
5 files changed, 439 insertions(+), 188 deletions(-)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index e012c2e0825a..6399abbdad6b 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -3235,7 +3235,7 @@ config VIRTIO_NET
 
 config VMXNET3
 	tristate "VMware VMXNET3 ethernet driver"
-	depends on PCI && X86 && INET
+	depends on PCI && INET
 	help
 	  This driver supports VMware's vmxnet3 virtual ethernet NIC.
 	  To compile this driver as a module, choose M here: the
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
index dc8ee4438a4f..b4889e6c4a57 100644
--- a/drivers/net/vmxnet3/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -90,23 +90,60 @@ enum {
 	VMXNET3_CMD_GET_CONF_INTR
 };
 
-struct Vmxnet3_TxDesc {
-	u64 addr;
+/*
+ *	Little Endian layout of bitfields -
+ *	Byte 0 :	7.....len.....0
+ *	Byte 1 :	rsvd gen 13.len.8
+ *	Byte 2 :	5.msscof.0 ext1 dtype
+ *	Byte 3 :	13...msscof...6
+ *
+ *	Big Endian layout of bitfields -
+ *	Byte 0:		13...msscof...6
+ *	Byte 1 :	5.msscof.0 ext1 dtype
+ *	Byte 2 :	rsvd gen 13.len.8
+ *	Byte 3 :	7.....len.....0
+ *
+ *	Thus, le32_to_cpu on the dword will allow the big endian driver to read
+ *	the bit fields correctly, and cpu_to_le32 will convert the bit fields
+ *	written by the big endian driver to the format required by the device.
+ */
 
-	u32 len:14;
-	u32 gen:1;      /* generation bit */
-	u32 rsvd:1;
-	u32 dtype:1;    /* descriptor type */
-	u32 ext1:1;
-	u32 msscof:14;  /* MSS, checksum offset, flags */
-
-	u32 hlen:10;    /* header len */
-	u32 om:2;       /* offload mode */
-	u32 eop:1;      /* End Of Packet */
-	u32 cq:1;       /* completion request */
-	u32 ext2:1;
-	u32 ti:1;       /* VLAN Tag Insertion */
-	u32 tci:16;     /* Tag to Insert */
+struct Vmxnet3_TxDesc {
+	__le64 addr;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	u32 msscof:14;  /* MSS, checksum offset, flags */
+	u32 ext1:1;
+	u32 dtype:1;    /* descriptor type */
+	u32 rsvd:1;
+	u32 gen:1;      /* generation bit */
+	u32 len:14;
+#else
+	u32 len:14;
+	u32 gen:1;      /* generation bit */
+	u32 rsvd:1;
+	u32 dtype:1;    /* descriptor type */
+	u32 ext1:1;
+	u32 msscof:14;  /* MSS, checksum offset, flags */
+#endif  /* __BIG_ENDIAN_BITFIELD */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	u32 tci:16;     /* Tag to Insert */
+	u32 ti:1;       /* VLAN Tag Insertion */
+	u32 ext2:1;
+	u32 cq:1;       /* completion request */
+	u32 eop:1;      /* End Of Packet */
+	u32 om:2;       /* offload mode */
+	u32 hlen:10;    /* header len */
+#else
+	u32 hlen:10;    /* header len */
+	u32 om:2;       /* offload mode */
+	u32 eop:1;      /* End Of Packet */
+	u32 cq:1;       /* completion request */
+	u32 ext2:1;
+	u32 ti:1;       /* VLAN Tag Insertion */
+	u32 tci:16;     /* Tag to Insert */
+#endif  /* __BIG_ENDIAN_BITFIELD */
 };
 
 /* TxDesc.OM values */
@@ -118,6 +155,8 @@ struct Vmxnet3_TxDesc {
 #define VMXNET3_TXD_EOP_SHIFT	12
 #define VMXNET3_TXD_CQ_SHIFT	13
 #define VMXNET3_TXD_GEN_SHIFT	14
+#define VMXNET3_TXD_EOP_DWORD_SHIFT	3
+#define VMXNET3_TXD_GEN_DWORD_SHIFT	2
 
 #define VMXNET3_TXD_CQ  (1 << VMXNET3_TXD_CQ_SHIFT)
 #define VMXNET3_TXD_EOP (1 << VMXNET3_TXD_EOP_SHIFT)
@@ -130,29 +169,40 @@ struct Vmxnet3_TxDataDesc {
 	u8 data[VMXNET3_HDR_COPY_SIZE];
 };
 
+#define VMXNET3_TCD_GEN_SHIFT	31
+#define VMXNET3_TCD_GEN_SIZE	1
+#define VMXNET3_TCD_TXIDX_SHIFT	0
+#define VMXNET3_TCD_TXIDX_SIZE	12
+#define VMXNET3_TCD_GEN_DWORD_SHIFT	3
 
 struct Vmxnet3_TxCompDesc {
 	u32 txdIdx:12;    /* Index of the EOP TxDesc */
 	u32 ext1:20;
 
-	u32 ext2;
-	u32 ext3;
+	__le32 ext2;
+	__le32 ext3;
 
 	u32 rsvd:24;
 	u32 type:7;       /* completion type */
 	u32 gen:1;        /* generation bit */
 };
 
-
 struct Vmxnet3_RxDesc {
-	u64 addr;
+	__le64 addr;
 
+#ifdef __BIG_ENDIAN_BITFIELD
+	u32 gen:1;        /* Generation bit */
+	u32 rsvd:15;
+	u32 dtype:1;      /* Descriptor type */
+	u32 btype:1;      /* Buffer Type */
+	u32 len:14;
+#else
 	u32 len:14;
 	u32 btype:1;      /* Buffer Type */
 	u32 dtype:1;      /* Descriptor type */
 	u32 rsvd:15;
 	u32 gen:1;        /* Generation bit */
-
+#endif
 	u32 ext1;
 };
 
@@ -164,8 +214,17 @@ struct Vmxnet3_RxDesc {
 #define VMXNET3_RXD_BTYPE_SHIFT	14
 #define VMXNET3_RXD_GEN_SHIFT	31
 
-
 struct Vmxnet3_RxCompDesc {
+#ifdef __BIG_ENDIAN_BITFIELD
+	u32 ext2:1;
+	u32 cnc:1;        /* Checksum Not Calculated */
+	u32 rssType:4;    /* RSS hash type used */
+	u32 rqID:10;      /* rx queue/ring ID */
+	u32 sop:1;        /* Start of Packet */
+	u32 eop:1;        /* End of Packet */
+	u32 ext1:2;
+	u32 rxdIdx:12;    /* Index of the RxDesc */
+#else
 	u32 rxdIdx:12;    /* Index of the RxDesc */
 	u32 ext1:2;
 	u32 eop:1;        /* End of Packet */
@@ -174,14 +233,36 @@ struct Vmxnet3_RxCompDesc {
 	u32 rssType:4;    /* RSS hash type used */
 	u32 cnc:1;        /* Checksum Not Calculated */
 	u32 ext2:1;
+#endif /* __BIG_ENDIAN_BITFIELD */
 
-	u32 rssHash;      /* RSS hash value */
+	__le32 rssHash;   /* RSS hash value */
 
+#ifdef __BIG_ENDIAN_BITFIELD
+	u32 tci:16;       /* Tag stripped */
+	u32 ts:1;         /* Tag is stripped */
+	u32 err:1;        /* Error */
+	u32 len:14;       /* data length */
+#else
 	u32 len:14;       /* data length */
 	u32 err:1;        /* Error */
 	u32 ts:1;         /* Tag is stripped */
 	u32 tci:16;       /* Tag stripped */
+#endif /* __BIG_ENDIAN_BITFIELD */
+
 
+#ifdef __BIG_ENDIAN_BITFIELD
+	u32 gen:1;        /* generation bit */
+	u32 type:7;       /* completion type */
+	u32 fcs:1;        /* Frame CRC correct */
+	u32 frg:1;        /* IP Fragment */
+	u32 v4:1;         /* IPv4 */
+	u32 v6:1;         /* IPv6 */
+	u32 ipc:1;        /* IP Checksum Correct */
+	u32 tcp:1;        /* TCP packet */
+	u32 udp:1;        /* UDP packet */
+	u32 tuc:1;        /* TCP/UDP Checksum Correct */
+	u32 csum:16;
+#else
 	u32 csum:16;
 	u32 tuc:1;        /* TCP/UDP Checksum Correct */
 	u32 udp:1;        /* UDP packet */
@@ -193,6 +274,7 @@ struct Vmxnet3_RxCompDesc {
 	u32 fcs:1;        /* Frame CRC correct */
 	u32 type:7;       /* completion type */
 	u32 gen:1;        /* generation bit */
+#endif /* __BIG_ENDIAN_BITFIELD */
 };
 
 /* fields in RxCompDesc we access via Vmxnet3_GenericDesc.dword[3] */
@@ -206,6 +288,8 @@ struct Vmxnet3_RxCompDesc {
 /* csum OK for TCP/UDP pkts over IP */
 #define VMXNET3_RCD_CSUM_OK (1 << VMXNET3_RCD_TUC_SHIFT | \
 			     1 << VMXNET3_RCD_IPC_SHIFT)
+#define VMXNET3_TXD_GEN_SIZE 1
+#define VMXNET3_TXD_EOP_SIZE 1
 
 /* value of RxCompDesc.rssType */
 enum {
@@ -219,9 +303,9 @@ enum {
 
 /* a union for accessing all cmd/completion descriptors */
 union Vmxnet3_GenericDesc {
-	u64 qword[2];
-	u32 dword[4];
-	u16 word[8];
+	__le64 qword[2];
+	__le32 dword[4];
+	__le16 word[8];
 	struct Vmxnet3_TxDesc txd;
 	struct Vmxnet3_RxDesc rxd;
 	struct Vmxnet3_TxCompDesc tcd;
@@ -287,18 +371,24 @@ enum {
 
 
 struct Vmxnet3_GOSInfo {
-	u32 gosBits:2;    /* 32-bit or 64-bit? */
-	u32 gosType:4;    /* which guest */
-	u32 gosVer:16;    /* gos version */
-	u32 gosMisc:10;   /* other info about gos */
+#ifdef __BIG_ENDIAN_BITFIELD
+	u32 gosMisc:10;   /* other info about gos */
+	u32 gosVer:16;    /* gos version */
+	u32 gosType:4;    /* which guest */
+	u32 gosBits:2;    /* 32-bit or 64-bit? */
+#else
+	u32 gosBits:2;    /* 32-bit or 64-bit? */
+	u32 gosType:4;    /* which guest */
+	u32 gosVer:16;    /* gos version */
+	u32 gosMisc:10;   /* other info about gos */
+#endif  /* __BIG_ENDIAN_BITFIELD */
 };
 
-
 struct Vmxnet3_DriverInfo {
-	u32 version;
+	__le32 version;
 	struct Vmxnet3_GOSInfo gos;
-	u32 vmxnet3RevSpt;
-	u32 uptVerSpt;
+	__le32 vmxnet3RevSpt;
+	__le32 uptVerSpt;
 };
 
 
@@ -315,42 +405,42 @@ struct Vmxnet3_DriverInfo {
 
 struct Vmxnet3_MiscConf {
 	struct Vmxnet3_DriverInfo driverInfo;
-	u64 uptFeatures;
-	u64 ddPA;            /* driver data PA */
-	u64 queueDescPA;     /* queue descriptor table PA */
-	u32 ddLen;           /* driver data len */
-	u32 queueDescLen;    /* queue desc. table len in bytes */
-	u32 mtu;
-	u16 maxNumRxSG;
+	__le64 uptFeatures;
+	__le64 ddPA;         /* driver data PA */
+	__le64 queueDescPA;  /* queue descriptor table PA */
+	__le32 ddLen;        /* driver data len */
+	__le32 queueDescLen; /* queue desc. table len in bytes */
+	__le32 mtu;
+	__le16 maxNumRxSG;
 	u8 numTxQueues;
 	u8 numRxQueues;
-	u32 reserved[4];
+	__le32 reserved[4];
 };
 
 
 struct Vmxnet3_TxQueueConf {
-	u64 txRingBasePA;
-	u64 dataRingBasePA;
-	u64 compRingBasePA;
-	u64 ddPA;            /* driver data */
-	u64 reserved;
-	u32 txRingSize;      /* # of tx desc */
-	u32 dataRingSize;    /* # of data desc */
-	u32 compRingSize;    /* # of comp desc */
-	u32 ddLen;           /* size of driver data */
+	__le64 txRingBasePA;
+	__le64 dataRingBasePA;
+	__le64 compRingBasePA;
+	__le64 ddPA;         /* driver data */
+	__le64 reserved;
+	__le32 txRingSize;   /* # of tx desc */
+	__le32 dataRingSize; /* # of data desc */
+	__le32 compRingSize; /* # of comp desc */
+	__le32 ddLen;        /* size of driver data */
 	u8 intrIdx;
 	u8 _pad[7];
 };
 
 
 struct Vmxnet3_RxQueueConf {
-	u64 rxRingBasePA[2];
-	u64 compRingBasePA;
-	u64 ddPA;            /* driver data */
-	u64 reserved;
-	u32 rxRingSize[2];   /* # of rx desc */
-	u32 compRingSize;    /* # of rx comp desc */
-	u32 ddLen;           /* size of driver data */
+	__le64 rxRingBasePA[2];
+	__le64 compRingBasePA;
+	__le64 ddPA;         /* driver data */
+	__le64 reserved;
+	__le32 rxRingSize[2];   /* # of rx desc */
+	__le32 compRingSize;    /* # of rx comp desc */
+	__le32 ddLen;           /* size of driver data */
 	u8 intrIdx;
 	u8 _pad[7];
 };
@@ -381,7 +471,7 @@ struct Vmxnet3_IntrConf {
 	u8 eventIntrIdx;
 	u8 modLevels[VMXNET3_MAX_INTRS];	/* moderation level for
 						 * each intr */
-	u32 reserved[3];
+	__le32 reserved[3];
 };
 
 /* one bit per VLAN ID, the size is in the units of u32 */
@@ -391,21 +481,21 @@ struct Vmxnet3_IntrConf {
 struct Vmxnet3_QueueStatus {
 	bool stopped;
 	u8 _pad[3];
-	u32 error;
+	__le32 error;
 };
 
 
 struct Vmxnet3_TxQueueCtrl {
-	u32 txNumDeferred;
-	u32 txThreshold;
-	u64 reserved;
+	__le32 txNumDeferred;
+	__le32 txThreshold;
+	__le64 reserved;
 };
 
 
 struct Vmxnet3_RxQueueCtrl {
 	bool updateRxProd;
 	u8 _pad[7];
-	u64 reserved;
+	__le64 reserved;
 };
 
 enum {
@@ -417,11 +507,11 @@ enum {
 };
 
 struct Vmxnet3_RxFilterConf {
-	u32 rxMode;       /* VMXNET3_RXM_xxx */
-	u16 mfTableLen;   /* size of the multicast filter table */
-	u16 _pad1;
-	u64 mfTablePA;    /* PA of the multicast filters table */
-	u32 vfTable[VMXNET3_VFT_SIZE];    /* vlan filter */
+	__le32 rxMode;       /* VMXNET3_RXM_xxx */
+	__le16 mfTableLen;   /* size of the multicast filter table */
+	__le16 _pad1;
+	__le64 mfTablePA;    /* PA of the multicast filters table */
+	__le32 vfTable[VMXNET3_VFT_SIZE];    /* vlan filter */
 };
 
 
@@ -444,7 +534,7 @@ struct Vmxnet3_PM_PktFilter {
 
 
 struct Vmxnet3_PMConf {
-	u16 wakeUpEvents;    /* VMXNET3_PM_WAKEUP_xxx */
+	__le16 wakeUpEvents; /* VMXNET3_PM_WAKEUP_xxx */
 	u8 numFilters;
 	u8 pad[5];
 	struct Vmxnet3_PM_PktFilter filters[VMXNET3_PM_MAX_FILTERS];
@@ -452,9 +542,9 @@ struct Vmxnet3_PMConf {
 
 
 struct Vmxnet3_VariableLenConfDesc {
-	u32 confVer;
-	u32 confLen;
-	u64 confPA;
+	__le32 confVer;
+	__le32 confLen;
+	__le64 confPA;
 };
 
 
@@ -491,12 +581,12 @@ struct Vmxnet3_DSDevRead {
 
 /* All structures in DriverShared are padded to multiples of 8 bytes */
 struct Vmxnet3_DriverShared {
-	u32 magic;
+	__le32 magic;
 	/* make devRead start at 64bit boundaries */
-	u32 pad;
+	__le32 pad;
 	struct Vmxnet3_DSDevRead devRead;
-	u32 ecr;
-	u32 reserved[5];
+	__le32 ecr;
+	__le32 reserved[5];
 };
 
 
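Note: the union Vmxnet3_GenericDesc above is what lets the driver fill a
descriptor through named bitfields and then flush or byte-swap it as raw
dwords. A hypothetical userspace sketch of that dual view (uint32_t/uint64_t
standing in for the kernel's u32/__le32 types, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical sketch, not driver code: one descriptor, two views. */
struct tx_desc {	/* little endian layout from the patch */
	uint64_t addr;
	uint32_t len:14, gen:1, rsvd:1, dtype:1, ext1:1, msscof:14;
	uint32_t hlen:10, om:2, eop:1, cq:1, ext2:1, ti:1, tci:16;
};

union generic_desc {
	uint64_t qword[2];
	uint32_t dword[4];
	struct tx_desc txd;
};

int main(void)
{
	union generic_desc gd = { .qword = { 0, 0 } };

	gd.txd.len = 60;
	gd.txd.eop = 1;
	gd.txd.cq = 1;
	/* eop lands at bit 12 and cq at bit 13 of dword[3], matching
	 * VMXNET3_TXD_EOP_SHIFT and VMXNET3_TXD_CQ_SHIFT, so on a
	 * little endian host dword[3] prints 0x00003000 and dword[2]
	 * prints 0x0000003c. */
	printf("dword[2]=0x%08x dword[3]=0x%08x\n", gd.dword[2], gd.dword[3]);
	return 0;
}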
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 004353a46af0..8f24fe5822f4 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -29,7 +29,6 @@
 char vmxnet3_driver_name[] = "vmxnet3";
 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
 
-
 /*
  * PCI Device ID Table
  * Last entry must be all 0s
@@ -151,11 +150,10 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter)
 	}
 }
 
-
 static void
 vmxnet3_process_events(struct vmxnet3_adapter *adapter)
 {
-	u32 events = adapter->shared->ecr;
+	u32 events = le32_to_cpu(adapter->shared->ecr);
 	if (!events)
 		return;
 
@@ -173,7 +171,7 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter)
 		if (adapter->tqd_start->status.stopped) {
 			printk(KERN_ERR "%s: tq error 0x%x\n",
 			       adapter->netdev->name,
-			       adapter->tqd_start->status.error);
+			       le32_to_cpu(adapter->tqd_start->status.error));
 		}
 		if (adapter->rqd_start->status.stopped) {
 			printk(KERN_ERR "%s: rq error 0x%x\n",
@@ -185,6 +183,106 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter)
 	}
 }
 
+#ifdef __BIG_ENDIAN_BITFIELD
+/*
+ * The device expects the bitfields in shared structures to be written in
+ * little endian. When the CPU is big endian, the following routines are used
+ * to correctly read and write into the ABI.
+ * The general technique used here is: double word bitfields are defined in
+ * the opposite order for the big endian architecture. Then, before reading
+ * them in the driver, the complete double word is translated using
+ * le32_to_cpu. Similarly, after the driver writes into the bitfields,
+ * cpu_to_le32 is used to translate the double words into the required format.
+ * In order to avoid touching bits in the shared structure more than once,
+ * temporary descriptors are used. These are passed as srcDesc to the
+ * following functions.
+ */
+static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
+				struct Vmxnet3_RxDesc *dstDesc)
+{
+	u32 *src = (u32 *)srcDesc + 2;
+	u32 *dst = (u32 *)dstDesc + 2;
+	dstDesc->addr = le64_to_cpu(srcDesc->addr);
+	*dst = le32_to_cpu(*src);
+	dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
+}
+
+static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
+			       struct Vmxnet3_TxDesc *dstDesc)
+{
+	int i;
+	u32 *src = (u32 *)(srcDesc + 1);
+	u32 *dst = (u32 *)(dstDesc + 1);
+
+	/* Working backwards so that the gen bit is set at the end. */
+	for (i = 2; i > 0; i--) {
+		src--;
+		dst--;
+		*dst = cpu_to_le32(*src);
+	}
+}
+
+
+static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
+				struct Vmxnet3_RxCompDesc *dstDesc)
+{
+	int i = 0;
+	u32 *src = (u32 *)srcDesc;
+	u32 *dst = (u32 *)dstDesc;
+	for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
+		*dst = le32_to_cpu(*src);
+		src++;
+		dst++;
+	}
+}
+
+
+/* Used to read bitfield values from double words. */
+static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
+{
+	u32 temp = le32_to_cpu(*bitfield);
+	u32 mask = ((1 << size) - 1) << pos;
+	temp &= mask;
+	temp >>= pos;
+	return temp;
+}
+
+
+
+#endif  /* __BIG_ENDIAN_BITFIELD */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+
+#  define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
+			txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
+			VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
+#  define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
+			txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
+			VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
+#  define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
+			VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
+			VMXNET3_TCD_GEN_SIZE)
+#  define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
+			VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
+#  define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
+			(dstrcd) = (tmp); \
+			vmxnet3_RxCompToCPU((rcd), (tmp)); \
+		} while (0)
+#  define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
+			(dstrxd) = (tmp); \
+			vmxnet3_RxDescToCPU((rxd), (tmp)); \
+		} while (0)
+
+#else
+
+#  define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
+#  define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
+#  define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
+#  define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
+#  define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
+#  define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
+
+#endif /* __BIG_ENDIAN_BITFIELD */
+
 
 static void
 vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
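Note: the *_DWORD_SHIFT constants used above are dword indexes into the
descriptor, not bit shifts, so VMXNET3_TCD_GET_GEN reads bit 31 of the
fourth dword of a TxCompDesc. The following is a hypothetical userspace
rendition of get_bitfield32 (uint32_t in place of u32/__le32, with the
le32_to_cpu step omitted since a little endian host needs no swap):

#include <stdint.h>
#include <stdio.h>

/* Standalone copy of the helper above; the kernel version first
 * converts *bitfield with le32_to_cpu() on big endian. */
static uint32_t get_bitfield32(const uint32_t *bitfield, uint32_t pos,
			       uint32_t size)
{
	uint32_t temp = *bitfield;
	uint32_t mask = ((1u << size) - 1) << pos;

	return (temp & mask) >> pos;
}

int main(void)
{
	/* dword 2 of a TxDesc holding len = 100 (bits 0-13) and
	 * gen = 1 (bit 14), as the little endian layout defines them. */
	uint32_t dword2 = 100u | (1u << 14);

	printf("len = %u\n", get_bitfield32(&dword2, 0, 14));	/* 100 */
	printf("gen = %u\n", get_bitfield32(&dword2, 14, 1));	/* 1 */
	return 0;
}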
@@ -212,7 +310,7 @@ vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
 
 	/* no out of order completion */
 	BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
-	BUG_ON(tq->tx_ring.base[eop_idx].txd.eop != 1);
+	BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);
 
 	skb = tq->buf_info[eop_idx].skb;
 	BUG_ON(skb == NULL);
@@ -246,9 +344,10 @@ vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
 	union Vmxnet3_GenericDesc *gdesc;
 
 	gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
-	while (gdesc->tcd.gen == tq->comp_ring.gen) {
-		completed += vmxnet3_unmap_pkt(gdesc->tcd.txdIdx, tq,
-					       adapter->pdev, adapter);
+	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
+		completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
+					       &gdesc->tcd), tq, adapter->pdev,
+					       adapter);
 
 		vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
 		gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
@@ -472,9 +571,9 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
 		}
 
 		BUG_ON(rbi->dma_addr == 0);
-		gd->rxd.addr = rbi->dma_addr;
-		gd->dword[2] = (ring->gen << VMXNET3_RXD_GEN_SHIFT) | val |
-				rbi->len;
+		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
+		gd->dword[2] = cpu_to_le32((ring->gen << VMXNET3_RXD_GEN_SHIFT)
+					   | val | rbi->len);
 
 		num_allocated++;
 		vmxnet3_cmd_ring_adv_next2fill(ring);
@@ -531,10 +630,10 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 
 	/* no need to map the buffer if headers are copied */
 	if (ctx->copy_size) {
-		ctx->sop_txd->txd.addr = tq->data_ring.basePA +
+		ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
 					tq->tx_ring.next2fill *
-					sizeof(struct Vmxnet3_TxDataDesc);
-		ctx->sop_txd->dword[2] = dw2 | ctx->copy_size;
+					sizeof(struct Vmxnet3_TxDataDesc));
+		ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
 		ctx->sop_txd->dword[3] = 0;
 
 		tbi = tq->buf_info + tq->tx_ring.next2fill;
@@ -542,7 +641,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 
 		dev_dbg(&adapter->netdev->dev,
 			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
-			tq->tx_ring.next2fill, ctx->sop_txd->txd.addr,
+			tq->tx_ring.next2fill,
+			le64_to_cpu(ctx->sop_txd->txd.addr),
 			ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
 		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
 
@@ -570,14 +670,14 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
 		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
 
-		gdesc->txd.addr = tbi->dma_addr;
-		gdesc->dword[2] = dw2 | buf_size;
+		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
+		gdesc->dword[2] = cpu_to_le32(dw2 | buf_size);
 		gdesc->dword[3] = 0;
 
 		dev_dbg(&adapter->netdev->dev,
 			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
-			tq->tx_ring.next2fill, gdesc->txd.addr,
-			gdesc->dword[2], gdesc->dword[3]);
+			tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
+			le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
 		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
 		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
 
@@ -599,14 +699,14 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 			gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
 			BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
 
-			gdesc->txd.addr = tbi->dma_addr;
-			gdesc->dword[2] = dw2 | frag->size;
+			gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
+			gdesc->dword[2] = cpu_to_le32(dw2 | frag->size);
 			gdesc->dword[3] = 0;
 
 			dev_dbg(&adapter->netdev->dev,
 				"txd[%u]: 0x%llu %u %u\n",
-				tq->tx_ring.next2fill, gdesc->txd.addr,
-				gdesc->dword[2], gdesc->dword[3]);
+				tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
+				le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
 			vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
 			dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
 	}
@@ -751,6 +851,10 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 	unsigned long flags;
 	struct vmxnet3_tx_ctx ctx;
 	union Vmxnet3_GenericDesc *gdesc;
+#ifdef __BIG_ENDIAN_BITFIELD
+	/* Use temporary descriptor to avoid touching bits multiple times */
+	union Vmxnet3_GenericDesc tempTxDesc;
+#endif
 
 	/* conservatively estimate # of descriptors to use */
 	count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
@@ -827,16 +931,22 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 	vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);
 
 	/* setup the EOP desc */
-	ctx.eop_txd->dword[3] = VMXNET3_TXD_CQ | VMXNET3_TXD_EOP;
+	ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
 
 	/* setup the SOP desc */
+#ifdef __BIG_ENDIAN_BITFIELD
+	gdesc = &tempTxDesc;
+	gdesc->dword[2] = ctx.sop_txd->dword[2];
+	gdesc->dword[3] = ctx.sop_txd->dword[3];
+#else
 	gdesc = ctx.sop_txd;
+#endif
 	if (ctx.mss) {
 		gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
 		gdesc->txd.om = VMXNET3_OM_TSO;
 		gdesc->txd.msscof = ctx.mss;
-		tq->shared->txNumDeferred += (skb->len - gdesc->txd.hlen +
-					     ctx.mss - 1) / ctx.mss;
+		le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
+			     gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
 	} else {
 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
 			gdesc->txd.hlen = ctx.eth_ip_hdr_size;
@@ -847,7 +957,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 			gdesc->txd.om = 0;
 			gdesc->txd.msscof = 0;
 		}
-		tq->shared->txNumDeferred++;
+		le32_add_cpu(&tq->shared->txNumDeferred, 1);
 	}
 
 	if (vlan_tx_tag_present(skb)) {
@@ -855,19 +965,27 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 		gdesc->txd.tci = vlan_tx_tag_get(skb);
 	}
 
-	wmb();
-
-	/* finally flips the GEN bit of the SOP desc */
-	gdesc->dword[2] ^= VMXNET3_TXD_GEN;
+	/* finally flips the GEN bit of the SOP desc. */
+	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
+				      VMXNET3_TXD_GEN);
+#ifdef __BIG_ENDIAN_BITFIELD
+	/* Finished updating in bitfields of Tx Desc, so write them in original
+	 * place.
+	 */
+	vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
+			   (struct Vmxnet3_TxDesc *)ctx.sop_txd);
+	gdesc = ctx.sop_txd;
+#endif
 	dev_dbg(&adapter->netdev->dev,
 		"txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
 		(u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
-		tq->tx_ring.base), gdesc->txd.addr, gdesc->dword[2],
-		gdesc->dword[3]);
+		tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
+		le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
 
 	spin_unlock_irqrestore(&tq->tx_lock, flags);
 
-	if (tq->shared->txNumDeferred >= tq->shared->txThreshold) {
+	if (le32_to_cpu(tq->shared->txNumDeferred) >=
+					le32_to_cpu(tq->shared->txThreshold)) {
 		tq->shared->txNumDeferred = 0;
 		VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD,
 				       tq->tx_ring.next2fill);
@@ -889,9 +1007,8 @@ static netdev_tx_t
 vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
-	struct vmxnet3_tx_queue *tq = &adapter->tx_queue;
 
-	return vmxnet3_tq_xmit(skb, tq, adapter, netdev);
+	return vmxnet3_tq_xmit(skb, &adapter->tx_queue, adapter, netdev);
 }
 
 
@@ -902,7 +1019,7 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
 {
 	if (!gdesc->rcd.cnc && adapter->rxcsum) {
 		/* typical case: TCP/UDP over IP and both csums are correct */
-		if ((gdesc->dword[3] & VMXNET3_RCD_CSUM_OK) ==
+		if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
 							VMXNET3_RCD_CSUM_OK) {
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
@@ -957,8 +1074,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 	u32 num_rxd = 0;
 	struct Vmxnet3_RxCompDesc *rcd;
 	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
-
-	rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd;
+#ifdef __BIG_ENDIAN_BITFIELD
+	struct Vmxnet3_RxDesc rxCmdDesc;
+	struct Vmxnet3_RxCompDesc rxComp;
+#endif
+	vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
+			  &rxComp);
 	while (rcd->gen == rq->comp_ring.gen) {
 		struct vmxnet3_rx_buf_info *rbi;
 		struct sk_buff *skb;
@@ -976,11 +1097,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 
 		idx = rcd->rxdIdx;
 		ring_idx = rcd->rqID == rq->qid ? 0 : 1;
-
-		rxd = &rq->rx_ring[ring_idx].base[idx].rxd;
+		vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
+				  &rxCmdDesc);
 		rbi = rq->buf_info[ring_idx] + idx;
 
-		BUG_ON(rxd->addr != rbi->dma_addr || rxd->len != rbi->len);
+		BUG_ON(rxd->addr != rbi->dma_addr ||
+		       rxd->len != rbi->len);
 
 		if (unlikely(rcd->eop && rcd->err)) {
 			vmxnet3_rx_error(rq, rcd, ctx, adapter);
@@ -1078,7 +1200,8 @@ rcd_done:
 		}
 
 		vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
-		rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd;
+		vmxnet3_getRxComp(rcd,
+		     &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
 	}
 
 	return num_rxd;
@@ -1094,7 +1217,11 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
 
 	for (ring_idx = 0; ring_idx < 2; ring_idx++) {
 		for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
-			rxd = &rq->rx_ring[ring_idx].base[i].rxd;
+#ifdef __BIG_ENDIAN_BITFIELD
+			struct Vmxnet3_RxDesc rxDesc;
+#endif
+			vmxnet3_getRxDesc(rxd,
+				&rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
 
 			if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
 					rq->buf_info[ring_idx][i].skb) {
@@ -1346,12 +1473,12 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
 		err = request_irq(adapter->intr.msix_entries[0].vector,
 				  vmxnet3_intr, 0, adapter->netdev->name,
 				  adapter->netdev);
-	} else
-#endif
-	if (adapter->intr.type == VMXNET3_IT_MSI) {
+	} else if (adapter->intr.type == VMXNET3_IT_MSI) {
 		err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
 				  adapter->netdev->name, adapter->netdev);
-	} else {
+	} else
+#endif
+	{
 		err = request_irq(adapter->pdev->irq, vmxnet3_intr,
 				  IRQF_SHARED, adapter->netdev->name,
 				  adapter->netdev);
@@ -1412,6 +1539,22 @@ vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
 }
 
 
+inline void set_flag_le16(__le16 *data, u16 flag)
+{
+	*data = cpu_to_le16(le16_to_cpu(*data) | flag);
+}
+
+inline void set_flag_le64(__le64 *data, u64 flag)
+{
+	*data = cpu_to_le64(le64_to_cpu(*data) | flag);
+}
+
+inline void reset_flag_le64(__le64 *data, u64 flag)
+{
+	*data = cpu_to_le64(le64_to_cpu(*data) & ~flag);
+}
+
+
 static void
 vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 {
@@ -1427,7 +1570,8 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 			adapter->vlan_grp = grp;
 
 			/* update FEATURES to device */
-			devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
+			set_flag_le64(&devRead->misc.uptFeatures,
+				      UPT1_F_RXVLAN);
 			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 					       VMXNET3_CMD_UPDATE_FEATURE);
 			/*
@@ -1450,7 +1594,7 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 		struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
 		adapter->vlan_grp = NULL;
 
-		if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) {
+		if (le64_to_cpu(devRead->misc.uptFeatures) & UPT1_F_RXVLAN) {
 			int i;
 
 			for (i = 0; i < VMXNET3_VFT_SIZE; i++) {
@@ -1463,7 +1607,8 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 					       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
 
 			/* update FEATURES to device */
-			devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
+			reset_flag_le64(&devRead->misc.uptFeatures,
+					UPT1_F_RXVLAN);
 			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 					       VMXNET3_CMD_UPDATE_FEATURE);
 		}
@@ -1565,9 +1710,10 @@ vmxnet3_set_mc(struct net_device *netdev)
 		new_table = vmxnet3_copy_mc(netdev);
 		if (new_table) {
 			new_mode |= VMXNET3_RXM_MCAST;
-			rxConf->mfTableLen = netdev->mc_count *
-					     ETH_ALEN;
-			rxConf->mfTablePA = virt_to_phys(new_table);
+			rxConf->mfTableLen = cpu_to_le16(
+					netdev->mc_count * ETH_ALEN);
+			rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
+					new_table));
 		} else {
 			printk(KERN_INFO "%s: failed to copy mcast list"
 			       ", setting ALL_MULTI\n", netdev->name);
@@ -1582,7 +1728,7 @@ vmxnet3_set_mc(struct net_device *netdev)
 	}
 
 	if (new_mode != rxConf->rxMode) {
-		rxConf->rxMode = new_mode;
+		rxConf->rxMode = cpu_to_le32(new_mode);
 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 				       VMXNET3_CMD_UPDATE_RX_MODE);
 	}
@@ -1610,63 +1756,69 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
 	memset(shared, 0, sizeof(*shared));
 
 	/* driver settings */
-	shared->magic = VMXNET3_REV1_MAGIC;
-	devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM;
+	shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
+	devRead->misc.driverInfo.version = cpu_to_le32(
+						VMXNET3_DRIVER_VERSION_NUM);
 	devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
 				VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
 	devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
-	devRead->misc.driverInfo.vmxnet3RevSpt = 1;
-	devRead->misc.driverInfo.uptVerSpt = 1;
+	*((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
+				*((u32 *)&devRead->misc.driverInfo.gos));
+	devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
+	devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
 
-	devRead->misc.ddPA = virt_to_phys(adapter);
-	devRead->misc.ddLen = sizeof(struct vmxnet3_adapter);
+	devRead->misc.ddPA = cpu_to_le64(virt_to_phys(adapter));
+	devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
 
 	/* set up feature flags */
 	if (adapter->rxcsum)
-		devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
+		set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXCSUM);
 
 	if (adapter->lro) {
-		devRead->misc.uptFeatures |= UPT1_F_LRO;
-		devRead->misc.maxNumRxSG = 1 + MAX_SKB_FRAGS;
+		set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_LRO);
+		devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
 	}
 	if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX)
 			&& adapter->vlan_grp) {
-		devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
+		set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXVLAN);
 	}
 
-	devRead->misc.mtu = adapter->netdev->mtu;
-	devRead->misc.queueDescPA = adapter->queue_desc_pa;
-	devRead->misc.queueDescLen = sizeof(struct Vmxnet3_TxQueueDesc) +
-				     sizeof(struct Vmxnet3_RxQueueDesc);
+	devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
+	devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
+	devRead->misc.queueDescLen = cpu_to_le32(
+				     sizeof(struct Vmxnet3_TxQueueDesc) +
+				     sizeof(struct Vmxnet3_RxQueueDesc));
 
 	/* tx queue settings */
 	BUG_ON(adapter->tx_queue.tx_ring.base == NULL);
 
 	devRead->misc.numTxQueues = 1;
 	tqc = &adapter->tqd_start->conf;
-	tqc->txRingBasePA = adapter->tx_queue.tx_ring.basePA;
-	tqc->dataRingBasePA = adapter->tx_queue.data_ring.basePA;
-	tqc->compRingBasePA = adapter->tx_queue.comp_ring.basePA;
-	tqc->ddPA = virt_to_phys(adapter->tx_queue.buf_info);
-	tqc->txRingSize = adapter->tx_queue.tx_ring.size;
-	tqc->dataRingSize = adapter->tx_queue.data_ring.size;
-	tqc->compRingSize = adapter->tx_queue.comp_ring.size;
-	tqc->ddLen = sizeof(struct vmxnet3_tx_buf_info) *
-		     tqc->txRingSize;
+	tqc->txRingBasePA = cpu_to_le64(adapter->tx_queue.tx_ring.basePA);
+	tqc->dataRingBasePA = cpu_to_le64(adapter->tx_queue.data_ring.basePA);
+	tqc->compRingBasePA = cpu_to_le64(adapter->tx_queue.comp_ring.basePA);
+	tqc->ddPA = cpu_to_le64(virt_to_phys(
+				adapter->tx_queue.buf_info));
+	tqc->txRingSize = cpu_to_le32(adapter->tx_queue.tx_ring.size);
+	tqc->dataRingSize = cpu_to_le32(adapter->tx_queue.data_ring.size);
+	tqc->compRingSize = cpu_to_le32(adapter->tx_queue.comp_ring.size);
+	tqc->ddLen = cpu_to_le32(sizeof(struct vmxnet3_tx_buf_info) *
+				 tqc->txRingSize);
 	tqc->intrIdx = adapter->tx_queue.comp_ring.intr_idx;
 
 	/* rx queue settings */
 	devRead->misc.numRxQueues = 1;
 	rqc = &adapter->rqd_start->conf;
-	rqc->rxRingBasePA[0] = adapter->rx_queue.rx_ring[0].basePA;
-	rqc->rxRingBasePA[1] = adapter->rx_queue.rx_ring[1].basePA;
-	rqc->compRingBasePA = adapter->rx_queue.comp_ring.basePA;
-	rqc->ddPA = virt_to_phys(adapter->rx_queue.buf_info);
-	rqc->rxRingSize[0] = adapter->rx_queue.rx_ring[0].size;
-	rqc->rxRingSize[1] = adapter->rx_queue.rx_ring[1].size;
-	rqc->compRingSize = adapter->rx_queue.comp_ring.size;
-	rqc->ddLen = sizeof(struct vmxnet3_rx_buf_info) *
-		     (rqc->rxRingSize[0] + rqc->rxRingSize[1]);
+	rqc->rxRingBasePA[0] = cpu_to_le64(adapter->rx_queue.rx_ring[0].basePA);
+	rqc->rxRingBasePA[1] = cpu_to_le64(adapter->rx_queue.rx_ring[1].basePA);
+	rqc->compRingBasePA = cpu_to_le64(adapter->rx_queue.comp_ring.basePA);
+	rqc->ddPA = cpu_to_le64(virt_to_phys(
+				adapter->rx_queue.buf_info));
+	rqc->rxRingSize[0] = cpu_to_le32(adapter->rx_queue.rx_ring[0].size);
+	rqc->rxRingSize[1] = cpu_to_le32(adapter->rx_queue.rx_ring[1].size);
+	rqc->compRingSize = cpu_to_le32(adapter->rx_queue.comp_ring.size);
+	rqc->ddLen = cpu_to_le32(sizeof(struct vmxnet3_rx_buf_info) *
+				 (rqc->rxRingSize[0] + rqc->rxRingSize[1]));
 	rqc->intrIdx = adapter->rx_queue.comp_ring.intr_idx;
 
 	/* intr settings */
@@ -1715,11 +1867,10 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
 
 	vmxnet3_setup_driver_shared(adapter);
 
-	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL,
-			       VMXNET3_GET_ADDR_LO(adapter->shared_pa));
-	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH,
-			       VMXNET3_GET_ADDR_HI(adapter->shared_pa));
-
+	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
+			       adapter->shared_pa));
+	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
+			       adapter->shared_pa));
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_ACTIVATE_DEV);
 	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
@@ -2425,7 +2576,7 @@ vmxnet3_suspend(struct device *device)
 		memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
 		pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
 
-		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
+		set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
 		i++;
 	}
 
@@ -2467,19 +2618,21 @@ vmxnet3_suspend(struct device *device)
 		pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
 		in_dev_put(in_dev);
 
-		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
+		set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
 		i++;
 	}
 
 skip_arp:
 	if (adapter->wol & WAKE_MAGIC)
-		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
+		set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_MAGIC);
 
 	pmConf->numFilters = i;
 
-	adapter->shared->devRead.pmConfDesc.confVer = 1;
-	adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf);
-	adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf);
+	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
+	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
+								*pmConf));
+	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
+								pmConf));
 
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_UPDATE_PMCFG);
@@ -2510,9 +2663,11 @@ vmxnet3_resume(struct device *device)
 	pmConf = adapter->pm_conf;
 	memset(pmConf, 0, sizeof(*pmConf));
 
-	adapter->shared->devRead.pmConfDesc.confVer = 1;
-	adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf);
-	adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf);
+	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
+	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
+								*pmConf));
+	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le32(virt_to_phys(
+								pmConf));
 
 	netif_device_attach(netdev);
 	pci_set_power_state(pdev, PCI_D0);
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index c2c15e4cafc7..3935c4493fb7 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -50,11 +50,13 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
 		adapter->rxcsum = val;
 		if (netif_running(netdev)) {
 			if (val)
-				adapter->shared->devRead.misc.uptFeatures |=
-								UPT1_F_RXCSUM;
+				set_flag_le64(
+				&adapter->shared->devRead.misc.uptFeatures,
+				UPT1_F_RXCSUM);
 			else
-				adapter->shared->devRead.misc.uptFeatures &=
-								~UPT1_F_RXCSUM;
+				reset_flag_le64(
+				&adapter->shared->devRead.misc.uptFeatures,
+				UPT1_F_RXCSUM);
 
 			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 					       VMXNET3_CMD_UPDATE_FEATURE);
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 445081686d5d..34f392f46fb1 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -330,14 +330,14 @@ struct vmxnet3_adapter {
 };
 
 #define VMXNET3_WRITE_BAR0_REG(adapter, reg, val)  \
-	writel((val), (adapter)->hw_addr0 + (reg))
+	writel(cpu_to_le32(val), (adapter)->hw_addr0 + (reg))
 #define VMXNET3_READ_BAR0_REG(adapter, reg)        \
-	readl((adapter)->hw_addr0 + (reg))
+	le32_to_cpu(readl((adapter)->hw_addr0 + (reg)))
 
 #define VMXNET3_WRITE_BAR1_REG(adapter, reg, val)  \
-	writel((val), (adapter)->hw_addr1 + (reg))
+	writel(cpu_to_le32(val), (adapter)->hw_addr1 + (reg))
 #define VMXNET3_READ_BAR1_REG(adapter, reg)        \
-	readl((adapter)->hw_addr1 + (reg))
+	le32_to_cpu(readl((adapter)->hw_addr1 + (reg)))
 
 #define VMXNET3_WAKE_QUEUE_THRESHOLD(tq)  (5)
 #define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \
@@ -353,6 +353,10 @@ struct vmxnet3_adapter {
 #define VMXNET3_MAX_ETH_HDR_SIZE    22
 #define VMXNET3_MAX_SKB_BUF_SIZE    (3*1024)
 
+void set_flag_le16(__le16 *data, u16 flag);
+void set_flag_le64(__le64 *data, u64 flag);
+void reset_flag_le64(__le64 *data, u64 flag);
+
 int
 vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter);
 