author     Stephen Hemminger <shemminger@vyatta.com>    2008-05-17 23:45:09 -0400
committer  Jeff Garzik <jgarzik@redhat.com>             2008-05-22 06:18:55 -0400
commit     789585e968f07653a29a9e829aed20386043636c
tree       ba503cf08560a6938844fae59349aba914ce684a
parent     40ba182e3ca9f019f299ce5052fcd7e4cf68d11b
sb1250: use netdev_alloc_skb
Use netdev_alloc_skb(). This sets skb->dev and allows arch-specific
allocation. Also simplify and clean up the alignment code.
Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: Maciej W. Rozycki <macro@linux-mips.org>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
-rw-r--r--   drivers/net/sb1250-mac.c   67
1 file changed, 31 insertions(+), 36 deletions(-)
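Before the full diff, here is a minimal sketch of the receive-buffer allocation pattern the patch moves to: netdev_alloc_skb() ties the skb to the net_device and lets the architecture override the allocation, and the driver-private ETHER_ALIGN constant gives way to the generic NET_IP_ALIGN. The wrapper name sbmac_alloc_rx_skb() below is hypothetical, introduced only to frame the snippet; in the driver this code lives inline in sbdma_add_rcvbuffer().

```c
#include <linux/cache.h>      /* SMP_CACHE_BYTES */
#include <linux/kernel.h>     /* PTR_ALIGN() */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define ENET_PACKET_SIZE 1518 /* as defined in sb1250-mac.c */

/* New form of the helper: round skb->data up to a power-of-two boundary
 * (typically a cache line), then reserve an extra fixed offset. */
static inline void sbdma_align_skb(struct sk_buff *skb,
                                   unsigned int power2, unsigned int offset)
{
        unsigned char *addr = skb->data;
        unsigned char *newaddr = PTR_ALIGN(addr, power2);

        skb_reserve(skb, newaddr - addr + offset);
}

/* Hypothetical wrapper, only to show the allocation pattern in isolation. */
static struct sk_buff *sbmac_alloc_rx_skb(struct net_device *dev)
{
        struct sk_buff *skb;

        /* netdev_alloc_skb() sets skb->dev and allows arch-specific
         * allocation; the real caller reports -ENOBUFS on failure. */
        skb = netdev_alloc_skb(dev, ENET_PACKET_SIZE +
                               SMP_CACHE_BYTES * 2 + NET_IP_ALIGN);
        if (!skb)
                return NULL;

        /* Cache-line align the buffer while keeping the IP header aligned. */
        sbdma_align_skb(skb, SMP_CACHE_BYTES, NET_IP_ALIGN);
        return skb;
}
```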
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 888b7dec9866..33bb18f810fb 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -179,8 +179,7 @@ enum sbmac_state {
 #define SBMAC_MAX_TXDESCR 256
 #define SBMAC_MAX_RXDESCR 256
 
-#define ETHER_ALIGN 2
 #define ETHER_ADDR_LEN 6
 #define ENET_PACKET_SIZE 1518
 /*#define ENET_PACKET_SIZE 9216 */
 
@@ -262,8 +261,6 @@ struct sbmac_softc {
         spinlock_t sbm_lock;     /* spin lock */
         int sbm_devflags;        /* current device flags */
 
-        int sbm_buffersize;
-
         /*
          * Controller-specific things
          */
@@ -305,10 +302,11 @@ struct sbmac_softc {
 static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan,
                           int txrx, int maxdescr);
 static void sbdma_channel_start(struct sbmacdma *d, int rxtx);
-static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *m);
+static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d,
+                               struct sk_buff *m);
 static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *m);
 static void sbdma_emptyring(struct sbmacdma *d);
-static void sbdma_fillring(struct sbmacdma *d);
+static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d);
 static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d,
                             int work_to_do, int poll);
 static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d,
@@ -777,16 +775,13 @@ static void sbdma_channel_stop(struct sbmacdma *d)
         d->sbdma_remptr = NULL;
 }
 
-static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset)
+static inline void sbdma_align_skb(struct sk_buff *skb,
+                                   unsigned int power2, unsigned int offset)
 {
-        unsigned long addr;
-        unsigned long newaddr;
-
-        addr = (unsigned long) skb->data;
-
-        newaddr = (addr + power2 - 1) & ~(power2 - 1);
+        unsigned char *addr = skb->data;
+        unsigned char *newaddr = PTR_ALIGN(addr, power2);
 
-        skb_reserve(skb,newaddr-addr+offset);
+        skb_reserve(skb, newaddr - addr + offset);
 }
 
 
@@ -797,7 +792,8 @@ static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset)
  *  this queues a buffer for inbound packets.
  *
  *  Input parameters:
- *         d - DMA channel descriptor
+ *         sc - softc structure
+ *         d - DMA channel descriptor
  *         sb - sk_buff to add, or NULL if we should allocate one
  *
  *  Return value:
@@ -806,8 +802,10 @@ static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset)
  ********************************************************************* */
 
 
-static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *sb)
+static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d,
+                               struct sk_buff *sb)
 {
+        struct net_device *dev = sc->sbm_dev;
         struct sbdmadscr *dsc;
         struct sbdmadscr *nextdsc;
         struct sk_buff *sb_new = NULL;
@@ -848,14 +846,16 @@ static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *sb)
          */
 
         if (sb == NULL) {
-                sb_new = dev_alloc_skb(ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN);
+                sb_new = netdev_alloc_skb(dev, ENET_PACKET_SIZE +
+                                          SMP_CACHE_BYTES * 2 +
+                                          NET_IP_ALIGN);
                 if (sb_new == NULL) {
                         pr_info("%s: sk_buff allocation failed\n",
                                 d->sbdma_eth->sbm_dev->name);
                         return -ENOBUFS;
                 }
 
-                sbdma_align_skb(sb_new, SMP_CACHE_BYTES, ETHER_ALIGN);
+                sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN);
         }
         else {
                 sb_new = sb;
@@ -874,10 +874,10 @@ static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *sb)
          * Do not interrupt per DMA transfer.
          */
         dsc->dscr_a = virt_to_phys(sb_new->data) |
-                      V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) | 0;
+                      V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) | 0;
 #else
         dsc->dscr_a = virt_to_phys(sb_new->data) |
-                      V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) |
+                      V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) |
                       M_DMA_DSCRA_INTERRUPT;
 #endif
 
@@ -1032,18 +1032,19 @@ static void sbdma_emptyring(struct sbmacdma *d)
  *  with sk_buffs
  *
  *  Input parameters:
- *         d - DMA channel
+ *         sc - softc structure
+ *         d - DMA channel
  *
  *  Return value:
  *         nothing
  ********************************************************************* */
 
-static void sbdma_fillring(struct sbmacdma *d)
+static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d)
 {
         int idx;
 
-        for (idx = 0; idx < SBMAC_MAX_RXDESCR-1; idx++) {
-                if (sbdma_add_rcvbuffer(d,NULL) != 0)
+        for (idx = 0; idx < SBMAC_MAX_RXDESCR - 1; idx++) {
+                if (sbdma_add_rcvbuffer(sc, d, NULL) != 0)
                         break;
         }
 }
@@ -1159,10 +1160,11 @@ again:
          * packet and put it right back on the receive ring.
          */
 
-        if (unlikely (sbdma_add_rcvbuffer(d,NULL) ==
-                      -ENOBUFS)) {
+        if (unlikely(sbdma_add_rcvbuffer(sc, d, NULL) ==
+                     -ENOBUFS)) {
                 dev->stats.rx_dropped++;
-                sbdma_add_rcvbuffer(d,sb); /* re-add old buffer */
+                /* Re-add old buffer */
+                sbdma_add_rcvbuffer(sc, d, sb);
                 /* No point in continuing at the moment */
                 printk(KERN_ERR "dropped packet (1)\n");
                 d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
@@ -1212,7 +1214,7 @@ again:
          * put it back on the receive ring.
          */
         dev->stats.rx_errors++;
-        sbdma_add_rcvbuffer(d,sb);
+        sbdma_add_rcvbuffer(sc, d, sb);
 }
 
 
@@ -1570,7 +1572,7 @@ static void sbmac_channel_start(struct sbmac_softc *s)
          * Fill the receive ring
          */
 
-        sbdma_fillring(&(s->sbm_rxdma));
+        sbdma_fillring(s, &(s->sbm_rxdma));
 
         /*
          * Turn on the rest of the bits in the enable register
@@ -2312,13 +2314,6 @@ static int sbmac_init(struct platform_device *pldev, long long base)
                 dev->dev_addr[i] = eaddr[i];
         }
 
-
-        /*
-         * Init packet size
-         */
-
-        sc->sbm_buffersize = ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN;
-
         /*
          * Initialize context (get pointers to registers and stuff), then
          * allocate the memory for the descriptor tables.
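A quick sanity check on the alignment rewrite: PTR_ALIGN(addr, power2) is expected to produce the same round-up that the removed open-coded arithmetic did, which is why sbdma_align_skb() keeps its behavior while losing the unsigned long casts. The user-space model below is an illustration only; PTR_ALIGN_MODEL mirrors the usual kernel round-up-to-multiple expression (an assumption, not the driver's code), and 128 stands in for SMP_CACHE_BYTES.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Model of PTR_ALIGN(p, a): round the pointer up to a multiple of a
 * (a power of two). Assumed equivalent to the kernel macro. */
#define PTR_ALIGN_MODEL(p, a) \
        ((unsigned char *)(((uintptr_t)(p) + (a) - 1) & ~((uintptr_t)(a) - 1)))

int main(void)
{
        static unsigned char buf[4096];
        const uintptr_t power2 = 128;   /* stands in for SMP_CACHE_BYTES */

        for (unsigned int off = 0; off < 1024; off++) {
                unsigned char *addr = buf + off;

                /* Removed open-coded version from the old sbdma_align_skb(). */
                uintptr_t old = ((uintptr_t)addr + power2 - 1) & ~(power2 - 1);

                /* New version, based on PTR_ALIGN(). */
                unsigned char *aligned = PTR_ALIGN_MODEL(addr, power2);

                assert((uintptr_t)aligned == old);
        }
        puts("PTR_ALIGN-based rounding matches the removed arithmetic");
        return 0;
}
```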