author		Al Viro <viro@ftp.linux.org.uk>	2008-03-16 18:22:24 -0400
committer	Jeff Garzik <jeff@garzik.org>	2008-03-26 00:18:44 -0400
commit		2f220e305b23ab277aa0f91e2a65978f5cc1a785
tree		005cab70e1afb7d3f6f8acf57ebec13125325235 /drivers
parent		eca1ad82bda0293339e1f8439dc9c8dba25ff088
skfp annotations
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/skfp/fplustm.c	| 12
-rw-r--r--	drivers/net/skfp/h/fplustm.h	| 20
-rw-r--r--	drivers/net/skfp/hwmtm.c	| 86
-rw-r--r--	drivers/net/skfp/skfddi.c	|  4
4 files changed, 60 insertions(+), 62 deletions(-)
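
The patch replaces the driver's home-grown byte-order macros (AIX_REVERSE, MDR_REVERSE) with the kernel's le32 helpers and retypes the shared DMA descriptor fields as __le32, so the byte order of every access is visible to sparse. As a rough illustration of the pattern being applied (the structure, field and bit names below are made up, not the driver's), a little-endian hardware descriptor is written with cpu_to_le32() and read back with le32_to_cpu():

	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* hypothetical descriptor that the device expects in little-endian order */
	struct demo_desc {
		__le32 ctrl;		/* control/status word */
		__le32 buf_addr;	/* DMA address of the data buffer */
	};

	#define DEMO_OWN	0x80000000u	/* made-up "owned by device" bit */

	static void demo_fill(struct demo_desc *d, u32 dma_addr, u32 len)
	{
		/* convert CPU values to the device's little-endian layout on store */
		d->buf_addr = cpu_to_le32(dma_addr);
		d->ctrl = cpu_to_le32(DEMO_OWN | len);
	}

	static int demo_done(const struct demo_desc *d)
	{
		/* convert back to CPU order before testing bits */
		return !(le32_to_cpu(d->ctrl) & DEMO_OWN);
	}

On a little-endian host both helpers compile away, so the conversion is purely an annotation there; on big-endian hosts they byte-swap, which is what the old macros did conditionally.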
diff --git a/drivers/net/skfp/fplustm.c b/drivers/net/skfp/fplustm.c
index 76dc8adc9441..6028bbb3b28a 100644
--- a/drivers/net/skfp/fplustm.c
+++ b/drivers/net/skfp/fplustm.c
@@ -401,18 +401,18 @@ static void copy_tx_mac(struct s_smc *smc, u_long td, struct fddi_mac *mac,
 /* int len ;	length of the frame including the FC */
 {
 	int i ;
-	u_int *p ;
+	__le32 *p ;
 
 	CHECK_NPP() ;
 	MARW(off) ;		/* set memory address reg for writes */
 
-	p = (u_int *) mac ;
+	p = (__le32 *) mac ;
 	for (i = (len + 3)/4 ; i ; i--) {
 		if (i == 1) {
 			/* last word, set the tag bit */
 			outpw(FM_A(FM_CMDREG2),FM_ISTTB) ;
 		}
-		write_mdr(smc,MDR_REVERSE(*p)) ;
+		write_mdr(smc,le32_to_cpu(*p)) ;
 		p++ ;
 	}
 
@@ -444,7 +444,7 @@ static void copy_tx_mac(struct s_smc *smc, u_long td, struct fddi_mac *mac,
  */
 static void directed_beacon(struct s_smc *smc)
 {
-	SK_LOC_DECL(u_int,a[2]) ;
+	SK_LOC_DECL(__le32,a[2]) ;
 
 	/*
 	 * set UNA in frame
@@ -458,9 +458,9 @@ static void directed_beacon(struct s_smc *smc)
 	CHECK_NPP() ;
 	/* set memory address reg for writes */
 	MARW(smc->hw.fp.fifo.rbc_ram_start+DBEACON_FRAME_OFF+4) ;
-	write_mdr(smc,MDR_REVERSE(a[0])) ;
+	write_mdr(smc,le32_to_cpu(a[0])) ;
 	outpw(FM_A(FM_CMDREG2),FM_ISTTB) ;	/* set the tag bit */
-	write_mdr(smc,MDR_REVERSE(a[1])) ;
+	write_mdr(smc,le32_to_cpu(a[1])) ;
 
 	outpw(FM_A(FM_SABC),smc->hw.fp.fifo.rbc_ram_start + DBEACON_FRAME_OFF) ;
 }
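
In copy_tx_mac() and directed_beacon() above, frame bytes are pushed to the on-chip FIFO one 32-bit word at a time. Walking the buffer through a __le32 pointer and converting each word with le32_to_cpu() produces the same value on little- and big-endian hosts, which is what MDR_REVERSE() previously provided only via a conditional macro. A minimal sketch of that load, under the same alignment assumption the driver makes (helper name is illustrative):

	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* load the next four frame bytes as the 32-bit value the MDR expects;
	 * assumes 'buf' is 32-bit aligned, as the driver's own cast does */
	static u32 demo_next_word(const void *buf)
	{
		const __le32 *p = buf;

		/* the first byte in memory ends up in bits 7:0 of the result */
		return le32_to_cpu(*p);
	}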
diff --git a/drivers/net/skfp/h/fplustm.h b/drivers/net/skfp/h/fplustm.h
index 98bbf654d12f..6d738e1e2393 100644
--- a/drivers/net/skfp/h/fplustm.h
+++ b/drivers/net/skfp/h/fplustm.h
@@ -50,12 +50,12 @@ struct err_st {
  * Transmit Descriptor struct
  */
 struct s_smt_fp_txd {
-	u_int txd_tbctrl ;		/* transmit buffer control */
-	u_int txd_txdscr ;		/* transmit frame status word */
-	u_int txd_tbadr ;		/* physical tx buffer address */
-	u_int txd_ntdadr ;		/* physical pointer to the next TxD */
+	__le32 txd_tbctrl ;		/* transmit buffer control */
+	__le32 txd_txdscr ;		/* transmit frame status word */
+	__le32 txd_tbadr ;		/* physical tx buffer address */
+	__le32 txd_ntdadr ;		/* physical pointer to the next TxD */
 #ifdef ENA_64BIT_SUP
-	u_int txd_tbadr_hi ;		/* physical tx buffer addr (high dword)*/
+	__le32 txd_tbadr_hi ;		/* physical tx buffer addr (high dword)*/
 #endif
 	char far *txd_virt ;		/* virtual pointer to the data frag */
 					/* virt pointer to the next TxD */
@@ -67,12 +67,12 @@ struct s_smt_fp_txd {
  * Receive Descriptor struct
  */
 struct s_smt_fp_rxd {
-	u_int rxd_rbctrl ;		/* receive buffer control */
-	u_int rxd_rfsw ;		/* receive frame status word */
-	u_int rxd_rbadr ;		/* physical rx buffer address */
-	u_int rxd_nrdadr ;		/* physical pointer to the next RxD */
+	__le32 rxd_rbctrl ;		/* receive buffer control */
+	__le32 rxd_rfsw ;		/* receive frame status word */
+	__le32 rxd_rbadr ;		/* physical rx buffer address */
+	__le32 rxd_nrdadr ;		/* physical pointer to the next RxD */
 #ifdef ENA_64BIT_SUP
-	u_int rxd_rbadr_hi ;		/* physical tx buffer addr (high dword)*/
+	__le32 rxd_rbadr_hi ;		/* physical tx buffer addr (high dword)*/
 #endif
 	char far *rxd_virt ;		/* virtual pointer to the data frag */
 					/* virt pointer to the next RxD */
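
Retyping the descriptor fields as __le32 is what lets sparse police the rest of the driver: __le32 is a bitwise type, so mixing it with host-order integers without a conversion draws a warning (endian checking had to be switched on explicitly in sparse runs of that era, e.g. with -D__CHECK_ENDIAN__). A hedged sketch, reusing the field names from the header above but not the driver's real code:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct demo_txd {
		__le32 txd_tbctrl;	/* same idea as s_smt_fp_txd above */
		__le32 txd_tbadr;
	};

	static void demo_set_addr(struct demo_txd *txd, u32 dma_addr)
	{
		txd->txd_tbadr = cpu_to_le32(dma_addr);	/* OK: converted store */
		/*
		 * A bare "txd->txd_tbadr = dma_addr;" would now make sparse
		 * report "incorrect type in assignment (different base types)".
		 */
	}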
diff --git a/drivers/net/skfp/hwmtm.c b/drivers/net/skfp/hwmtm.c
index 46e339315656..4218e97033c9 100644
--- a/drivers/net/skfp/hwmtm.c
+++ b/drivers/net/skfp/hwmtm.c
@@ -208,7 +208,7 @@ SMbuf* smt_get_mbuf(struct s_smc *smc);
 #if defined(NDIS_OS2) || defined(ODI2)
 #define CR_READ(var)	((var) & 0xffff0000 | ((var) & 0xffff))
 #else
-#define CR_READ(var)	(u_long)(var)
+#define CR_READ(var)	(__le32)(var)
 #endif
 
 #define IMASK_SLOW	(IS_PLINT1 | IS_PLINT2 | IS_TIMINT | IS_TOKEN | \
@@ -343,16 +343,16 @@ static u_long init_descr_ring(struct s_smc *smc,
 	for (i=count-1, d1=start; i ; i--) {
 		d2 = d1 ;
 		d1++ ;		/* descr is owned by the host */
-		d2->r.rxd_rbctrl = AIX_REVERSE(BMU_CHECK) ;
+		d2->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ;
 		d2->r.rxd_next = &d1->r ;
 		phys = mac_drv_virt2phys(smc,(void *)d1) ;
-		d2->r.rxd_nrdadr = AIX_REVERSE(phys) ;
+		d2->r.rxd_nrdadr = cpu_to_le32(phys) ;
 	}
 	DB_GEN("descr ring ends at = %x ",(void *)d1,0,3) ;
-	d1->r.rxd_rbctrl = AIX_REVERSE(BMU_CHECK) ;
+	d1->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ;
 	d1->r.rxd_next = &start->r ;
 	phys = mac_drv_virt2phys(smc,(void *)start) ;
-	d1->r.rxd_nrdadr = AIX_REVERSE(phys) ;
+	d1->r.rxd_nrdadr = cpu_to_le32(phys) ;
 
 	for (i=count, d1=start; i ; i--) {
 		DRV_BUF_FLUSH(&d1->r,DDI_DMA_SYNC_FORDEV) ;
@@ -376,7 +376,7 @@ static void init_txd_ring(struct s_smc *smc)
 	DB_GEN("Init async TxD ring, %d TxDs ",HWM_ASYNC_TXD_COUNT,0,3) ;
 	(void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
 		HWM_ASYNC_TXD_COUNT) ;
-	phys = AIX_REVERSE(ds->txd_ntdadr) ;
+	phys = le32_to_cpu(ds->txd_ntdadr) ;
 	ds++ ;
 	queue->tx_curr_put = queue->tx_curr_get = ds ;
 	ds-- ;
@@ -390,7 +390,7 @@ static void init_txd_ring(struct s_smc *smc)
 	DB_GEN("Init sync TxD ring, %d TxDs ",HWM_SYNC_TXD_COUNT,0,3) ;
 	(void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
 		HWM_SYNC_TXD_COUNT) ;
-	phys = AIX_REVERSE(ds->txd_ntdadr) ;
+	phys = le32_to_cpu(ds->txd_ntdadr) ;
 	ds++ ;
 	queue->tx_curr_put = queue->tx_curr_get = ds ;
 	queue->tx_free = HWM_SYNC_TXD_COUNT ;
@@ -412,7 +412,7 @@ static void init_rxd_ring(struct s_smc *smc)
 	DB_GEN("Init RxD ring, %d RxDs ",SMT_R1_RXD_COUNT,0,3) ;
 	(void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
 		SMT_R1_RXD_COUNT) ;
-	phys = AIX_REVERSE(ds->rxd_nrdadr) ;
+	phys = le32_to_cpu(ds->rxd_nrdadr) ;
 	ds++ ;
 	queue->rx_curr_put = queue->rx_curr_get = ds ;
 	queue->rx_free = SMT_R1_RXD_COUNT ;
@@ -607,12 +607,12 @@ static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue)
 	for (i = tx_used+queue->tx_free-1 ; i ; i-- ) {
 		t = t->txd_next ;
 	}
-	phys = AIX_REVERSE(t->txd_ntdadr) ;
+	phys = le32_to_cpu(t->txd_ntdadr) ;
 
 	t = queue->tx_curr_get ;
 	while (tx_used) {
 		DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ;
-		tbctrl = AIX_REVERSE(t->txd_tbctrl) ;
+		tbctrl = le32_to_cpu(t->txd_tbctrl) ;
 
 		if (tbctrl & BMU_OWN) {
 			if (tbctrl & BMU_STF) {
@@ -622,10 +622,10 @@ static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue)
 				/*
 				 * repair the descriptor
 				 */
-				t->txd_tbctrl &= AIX_REVERSE(~BMU_OWN) ;
+				t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ;
 			}
 		}
-		phys = AIX_REVERSE(t->txd_ntdadr) ;
+		phys = le32_to_cpu(t->txd_ntdadr) ;
 		DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
 		t = t->txd_next ;
 		tx_used-- ;
@@ -659,12 +659,12 @@ static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue)
 	for (i = SMT_R1_RXD_COUNT-1 ; i ; i-- ) {
 		r = r->rxd_next ;
 	}
-	phys = AIX_REVERSE(r->rxd_nrdadr) ;
+	phys = le32_to_cpu(r->rxd_nrdadr) ;
 
 	r = queue->rx_curr_get ;
 	while (rx_used) {
 		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
-		rbctrl = AIX_REVERSE(r->rxd_rbctrl) ;
+		rbctrl = le32_to_cpu(r->rxd_rbctrl) ;
 
 		if (rbctrl & BMU_OWN) {
 			if (rbctrl & BMU_STF) {
@@ -674,10 +674,10 @@ static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue)
 				/*
 				 * repair the descriptor
 				 */
-				r->rxd_rbctrl &= AIX_REVERSE(~BMU_OWN) ;
+				r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
 			}
 		}
-		phys = AIX_REVERSE(r->rxd_nrdadr) ;
+		phys = le32_to_cpu(r->rxd_nrdadr) ;
 		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
 		r = r->rxd_next ;
 		rx_used-- ;
@@ -1094,8 +1094,7 @@ void process_receive(struct s_smc *smc)
 	do {
 		DB_RX("Check RxD %x for OWN and EOF",(void *)r,0,5) ;
 		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
-		rbctrl = CR_READ(r->rxd_rbctrl) ;
-		rbctrl = AIX_REVERSE(rbctrl) ;
+		rbctrl = le32_to_cpu(CR_READ(r->rxd_rbctrl));
 
 		if (rbctrl & BMU_OWN) {
 			NDD_TRACE("RHxE",r,rfsw,rbctrl) ;
@@ -1118,7 +1117,7 @@ void process_receive(struct s_smc *smc)
 			smc->os.hwm.detec_count = 0 ;
 			goto rx_end ;
 		}
-		rfsw = AIX_REVERSE(r->rxd_rfsw) ;
+		rfsw = le32_to_cpu(r->rxd_rfsw) ;
 		if ((rbctrl & BMU_STF) != ((rbctrl & BMU_ST_BUF) <<5)) {
 			/*
 			 * The BMU_STF bit is deleted, 1 frame is
@@ -1151,7 +1150,7 @@ void process_receive(struct s_smc *smc)
 		/* may be next 2 DRV_BUF_FLUSH() can be skipped, because */
 		/* BMU_ST_BUF will not be changed by the ASIC */
 		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
-		while (rx_used && !(r->rxd_rbctrl & AIX_REVERSE(BMU_ST_BUF))) {
+		while (rx_used && !(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) {
 			DB_RX("Check STF bit in %x",(void *)r,0,5) ;
 			r = r->rxd_next ;
 			DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
@@ -1171,7 +1170,7 @@ void process_receive(struct s_smc *smc)
 		/*
 		 * ASIC Errata no. 7 (STF - Bit Bug)
 		 */
-		rxd->rxd_rbctrl &= AIX_REVERSE(~BMU_STF) ;
+		rxd->rxd_rbctrl &= cpu_to_le32(~BMU_STF) ;
 
 		for (r=rxd, i=frag_count ; i ; r=r->rxd_next, i--){
 			DB_RX("dma_complete for RxD %x",(void *)r,0,5) ;
@@ -1287,7 +1286,7 @@ void process_receive(struct s_smc *smc)
 		hwm_cpy_rxd2mb(rxd,data,len) ;
 #else
 		for (r=rxd, i=used_frags ; i ; r=r->rxd_next, i--){
-			n = AIX_REVERSE(r->rxd_rbctrl) & RD_LENGTH ;
+			n = le32_to_cpu(r->rxd_rbctrl) & RD_LENGTH ;
 			DB_RX("cp SMT frame to mb: len = %d",n,0,6) ;
 			memcpy(data,r->rxd_virt,n) ;
 			data += n ;
@@ -1426,14 +1425,14 @@ void hwm_rx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
 	int frame_status)
 {
 	struct s_smt_fp_rxd volatile *r ;
-	u_int rbctrl ;
+	__le32 rbctrl;
 
 	NDD_TRACE("RHfB",virt,len,frame_status) ;
 	DB_RX("hwm_rx_frag: len = %d, frame_status = %x\n",len,frame_status,2) ;
 	r = smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put ;
 	r->rxd_virt = virt ;
-	r->rxd_rbadr = AIX_REVERSE(phys) ;
-	rbctrl = AIX_REVERSE( (((u_long)frame_status &
+	r->rxd_rbadr = cpu_to_le32(phys) ;
+	rbctrl = cpu_to_le32( (((__u32)frame_status &
 		(FIRST_FRAG|LAST_FRAG))<<26) |
 		(((u_long) frame_status & FIRST_FRAG) << 21) |
 		BMU_OWN | BMU_CHECK | BMU_EN_IRQ_EOF | len) ;
@@ -1444,7 +1443,7 @@ void hwm_rx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
 	smc->hw.fp.rx_q[QUEUE_R1].rx_free-- ;
 	smc->hw.fp.rx_q[QUEUE_R1].rx_used++ ;
 	smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put = r->rxd_next ;
-	NDD_TRACE("RHfE",r,AIX_REVERSE(r->rxd_rbadr),0) ;
+	NDD_TRACE("RHfE",r,le32_to_cpu(r->rxd_rbadr),0) ;
 }
 
 /*
@@ -1494,15 +1493,15 @@ void mac_drv_clear_rx_queue(struct s_smc *smc)
 	while (queue->rx_used) {
 		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
 		DB_RX("switch OWN bit of RxD 0x%x ",r,0,5) ;
-		r->rxd_rbctrl &= AIX_REVERSE(~BMU_OWN) ;
+		r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
 		frag_count = 1 ;
 		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
 		r = r->rxd_next ;
 		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
 		while (r != queue->rx_curr_put &&
-			!(r->rxd_rbctrl & AIX_REVERSE(BMU_ST_BUF))) {
+			!(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) {
 			DB_RX("Check STF bit in %x",(void *)r,0,5) ;
-			r->rxd_rbctrl &= AIX_REVERSE(~BMU_OWN) ;
+			r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
 			DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
 			r = r->rxd_next ;
 			DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
@@ -1640,7 +1639,7 @@ void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
 {
 	struct s_smt_fp_txd volatile *t ;
 	struct s_smt_tx_queue *queue ;
-	u_int tbctrl ;
+	__le32 tbctrl ;
 
 	queue = smc->os.hwm.tx_p ;
 
@@ -1657,9 +1656,9 @@ void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
 	/* '*t' is already defined */
 	DB_TX("LAN_TX: TxD = %x, virt = %x ",t,virt,3) ;
 	t->txd_virt = virt ;
-	t->txd_txdscr = AIX_REVERSE(smc->os.hwm.tx_descr) ;
-	t->txd_tbadr = AIX_REVERSE(phys) ;
-	tbctrl = AIX_REVERSE((((u_long)frame_status &
+	t->txd_txdscr = cpu_to_le32(smc->os.hwm.tx_descr) ;
+	t->txd_tbadr = cpu_to_le32(phys) ;
+	tbctrl = cpu_to_le32((((__u32)frame_status &
 		(FIRST_FRAG|LAST_FRAG|EN_IRQ_EOF))<< 26) |
 		BMU_OWN|BMU_CHECK |len) ;
 	t->txd_tbctrl = tbctrl ;
@@ -1826,7 +1825,7 @@ void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc)
 	struct s_smt_tx_queue *queue ;
 	struct s_smt_fp_txd volatile *t ;
 	u_long phys ;
-	u_int tbctrl ;
+	__le32 tbctrl;
 
 	NDD_TRACE("THSB",mb,fc,0) ;
 	DB_TX("smt_send_mbuf: mb = 0x%x, fc = 0x%x",mb,fc,4) ;
@@ -1894,14 +1893,14 @@ void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc)
 		DB_TX("init TxD = 0x%x",(void *)t,0,5) ;
 		if (i == frag_count-1) {
 			frame_status |= LAST_FRAG ;
-			t->txd_txdscr = AIX_REVERSE(TX_DESCRIPTOR |
-				(((u_long)(mb->sm_len-1)&3) << 27)) ;
+			t->txd_txdscr = cpu_to_le32(TX_DESCRIPTOR |
+				(((__u32)(mb->sm_len-1)&3) << 27)) ;
 		}
 		t->txd_virt = virt[i] ;
 		phys = dma_master(smc, (void far *)virt[i],
 			frag_len[i], DMA_RD|SMT_BUF) ;
-		t->txd_tbadr = AIX_REVERSE(phys) ;
-		tbctrl = AIX_REVERSE((((u_long) frame_status &
+		t->txd_tbadr = cpu_to_le32(phys) ;
+		tbctrl = cpu_to_le32((((__u32)frame_status &
 			(FIRST_FRAG|LAST_FRAG)) << 26) |
 			BMU_OWN | BMU_CHECK | BMU_SMT_TX |frag_len[i]) ;
 		t->txd_tbctrl = tbctrl ;
@@ -1971,8 +1970,7 @@ static void mac_drv_clear_txd(struct s_smc *smc)
 		do {
 			DRV_BUF_FLUSH(t1,DDI_DMA_SYNC_FORCPU) ;
 			DB_TX("check OWN/EOF bit of TxD 0x%x",t1,0,5) ;
-			tbctrl = CR_READ(t1->txd_tbctrl) ;
-			tbctrl = AIX_REVERSE(tbctrl) ;
+			tbctrl = le32_to_cpu(CR_READ(t1->txd_tbctrl));
 
 			if (tbctrl & BMU_OWN || !queue->tx_used){
 				DB_TX("End of TxDs queue %d",i,0,4) ;
@@ -1984,7 +1982,7 @@ static void mac_drv_clear_txd(struct s_smc *smc)
 
 		t1 = queue->tx_curr_get ;
 		for (n = frag_count; n; n--) {
-			tbctrl = AIX_REVERSE(t1->txd_tbctrl) ;
+			tbctrl = le32_to_cpu(t1->txd_tbctrl) ;
 			dma_complete(smc,
 				(union s_fp_descr volatile *) t1,
 				(int) (DMA_RD |
@@ -2064,7 +2062,7 @@ void mac_drv_clear_tx_queue(struct s_smc *smc)
 		while (tx_used) {
 			DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ;
 			DB_TX("switch OWN bit of TxD 0x%x ",t,0,5) ;
-			t->txd_tbctrl &= AIX_REVERSE(~BMU_OWN) ;
+			t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ;
 			DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
 			t = t->txd_next ;
 			tx_used-- ;
@@ -2086,10 +2084,10 @@ void mac_drv_clear_tx_queue(struct s_smc *smc)
 		 * tx_curr_get and tx_curr_put to this position
 		 */
 		if (i == QUEUE_S) {
-			outpd(ADDR(B5_XS_DA),AIX_REVERSE(t->txd_ntdadr)) ;
+			outpd(ADDR(B5_XS_DA),le32_to_cpu(t->txd_ntdadr)) ;
 		}
 		else {
-			outpd(ADDR(B5_XA_DA),AIX_REVERSE(t->txd_ntdadr)) ;
+			outpd(ADDR(B5_XA_DA),le32_to_cpu(t->txd_ntdadr)) ;
 		}
 
 		queue->tx_curr_put = queue->tx_curr_get->txd_next ;
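
Several of the hunks above clear the OWN bit with t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) instead of converting the whole word first. That works because cpu_to_le32() of a constant folds at compile time and bitwise NOT commutes with a byte swap, so ~cpu_to_le32(x) equals cpu_to_le32(~x) and no run-time swap is needed. A small sketch of the idiom (bit name is a stand-in):

	#include <linux/types.h>
	#include <asm/byteorder.h>

	#define DEMO_OWN	0x80000000u	/* stand-in for BMU_OWN */

	/* hand the descriptor back to the host without a run-time byte swap */
	static void demo_clear_own(__le32 *ctrl)
	{
		/* ~cpu_to_le32(DEMO_OWN) is a compile-time __le32 constant */
		*ctrl &= ~cpu_to_le32(DEMO_OWN);
	}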
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
index 7cf9b9f35dee..a2b092bb3626 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/skfp/skfddi.c
@@ -495,7 +495,7 @@ static int skfp_open(struct net_device *dev)
 
 	PRINTK(KERN_INFO "entering skfp_open\n");
 	/* Register IRQ - support shared interrupts by passing device ptr */
-	err = request_irq(dev->irq, (void *) skfp_interrupt, IRQF_SHARED,
+	err = request_irq(dev->irq, skfp_interrupt, IRQF_SHARED,
 			  dev->name, dev);
 	if (err)
 		return err;
@@ -1644,7 +1644,7 @@ void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
 		// Get RIF length from Routing Control (RC) field.
 		cp = virt + FDDI_MAC_HDR_LEN;	// Point behind MAC header.
 
-		ri = ntohs(*((unsigned short *) cp));
+		ri = ntohs(*((__be16 *) cp));
 		RifLength = ri & FDDI_RCF_LEN_MASK;
 		if (len < (int) (FDDI_MAC_HDR_LEN + RifLength)) {
 			printk("fddi: Invalid RIF.\n");
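
The Routing Control word read in mac_drv_rx_complete() sits in the frame in network (big-endian) order, so the pointer is now typed __be16 and converted with ntohs(); the generated code is unchanged, only the annotation is new. A short sketch of the same idea (the mask is a stand-in, and like the driver it assumes the platform tolerates the unaligned load):

	#include <linux/types.h>
	#include <asm/byteorder.h>

	#define DEMO_RCF_LEN_MASK	0x001f	/* stand-in for FDDI_RCF_LEN_MASK */

	/* extract the RIF length from the big-endian RC field after the MAC header */
	static unsigned int demo_rif_length(const unsigned char *cp)
	{
		u16 ri = ntohs(*(const __be16 *)cp);	/* wire order -> host order */

		return ri & DEMO_RCF_LEN_MASK;
	}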