| author | Brian King <brking@linux.vnet.ibm.com> | 2007-08-17 10:16:56 -0400 |
|---|---|---|
| committer | David S. Miller <davem@sunset.davemloft.net> | 2007-10-10 19:50:46 -0400 |
| commit | 79ef4a4dd44cd4f9942975b0f625bd01549a2aa9 (patch) | |
| tree | 177d1e948721db288f2f13487e42d47cd9acef83 /drivers/net/ibmveth.c | |
| parent | 3449a2ab31681420515e242920e755262b4f41e9 (diff) | |
ibmveth: Remove use of bitfields
Removes the use of bitfields from the ibmveth driver. This results
in slightly smaller object code.
Signed-off-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net/ibmveth.c')
-rw-r--r-- | drivers/net/ibmveth.c | 90 |
1 files changed, 45 insertions, 45 deletions
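The pattern behind this change: instead of letting the compiler pack separate C bitfields into a descriptor word (whose layout is implementation-defined), the driver now builds that word itself by OR-ing explicit flag masks into a plain integer. Below is a minimal userspace sketch of the two styles; the field names, macro names, and bit positions are illustrative assumptions, not the driver's actual ibmveth.h definitions (that header's half of the change is outside this diffstat).

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch only: the field names and bit positions below are
 * assumptions for demonstration, not the real ibmveth.h layout. */

/* Before: the compiler packs individual bitfields into the word
 * (bitfield ordering is implementation-defined). */
union desc_bitfields {
	uint32_t raw;
	struct {
		uint32_t valid:1;
		uint32_t no_csum:1;
		uint32_t csum_good:1;
		uint32_t length:29;
	} fields;
};

/* After: a plain word built by OR-ing explicit flag masks. */
#define BUF_VALID	0x80000000u
#define BUF_NO_CSUM	0x40000000u
#define BUF_CSUM_GOOD	0x20000000u
#define BUF_LEN_MASK	0x1fffffffu

int main(void)
{
	union desc_bitfields old_style = { .raw = 0 };
	uint32_t len = 1514;
	uint32_t flags_len;

	/* Old style: three separate bitfield stores. */
	old_style.fields.valid = 1;
	old_style.fields.csum_good = 1;
	old_style.fields.length = len;

	/* New style: one store, and plain masks for the tests. */
	flags_len = BUF_VALID | BUF_CSUM_GOOD | (len & BUF_LEN_MASK);

	printf("valid=%d csum_good=%d len=%u\n",
	       (flags_len & BUF_VALID) ? 1 : 0,
	       (flags_len & BUF_CSUM_GOOD) ? 1 : 0,
	       flags_len & BUF_LEN_MASK);
	return 0;
}
```

In the mask style, setting valid-plus-length collapses to a single store rather than several read-modify-write bitfield accesses, which is consistent with the commit's note about slightly smaller object code.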
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index f6be7b29c845..2dff9f2800cd 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -132,19 +132,29 @@ struct ibmveth_stat ibmveth_stats[] = {
 };
 
 /* simple methods of getting data from the current rxq entry */
+static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
+{
+	return adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off;
+}
+
+static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
+{
+	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >> IBMVETH_RXQ_TOGGLE_SHIFT;
+}
+
 static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
 {
-	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].toggle == adapter->rx_queue.toggle);
+	return (ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle);
 }
 
 static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
 {
-	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].valid);
+	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID);
 }
 
 static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
 {
-	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].offset);
+	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK);
 }
 
 static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
@@ -154,7 +164,7 @@ static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
 
 static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
 {
-	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].csum_good);
+	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD);
 }
 
 /* setup the initial settings for a buffer pool */
@@ -254,9 +264,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 		correlator = ((u64)pool->index << 32) | index;
 		*(u64*)skb->data = correlator;
 
-		desc.desc = 0;
-		desc.fields.valid = 1;
-		desc.fields.length = pool->buff_size;
+		desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
 		desc.fields.address = dma_addr;
 
 		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
@@ -397,9 +405,8 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
 		return;
 	}
 
-	desc.desc = 0;
-	desc.fields.valid = 1;
-	desc.fields.length = adapter->rx_buff_pool[pool].buff_size;
+	desc.fields.flags_len = IBMVETH_BUF_VALID |
+		adapter->rx_buff_pool[pool].buff_size;
 	desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];
 
 	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
@@ -555,9 +562,7 @@ static int ibmveth_open(struct net_device *netdev)
 	memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
 	mac_address = mac_address >> 16;
 
-	rxq_desc.desc = 0;
-	rxq_desc.fields.valid = 1;
-	rxq_desc.fields.length = adapter->rx_queue.queue_len;
+	rxq_desc.fields.flags_len = IBMVETH_BUF_VALID | adapter->rx_queue.queue_len;
 	rxq_desc.fields.address = adapter->rx_queue.queue_dma;
 
 	ibmveth_debug_printk("buffer list @ 0x%p\n", adapter->buffer_list_addr);
@@ -704,7 +709,7 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
 				    void (*done) (struct net_device *, u32))
 {
 	struct ibmveth_adapter *adapter = dev->priv;
-	union ibmveth_illan_attributes set_attr, clr_attr, ret_attr;
+	u64 set_attr, clr_attr, ret_attr;
 	long ret;
 	int rc1 = 0, rc2 = 0;
 	int restart = 0;
@@ -716,21 +721,21 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
 		adapter->pool_config = 0;
 	}
 
-	set_attr.desc = 0;
-	clr_attr.desc = 0;
+	set_attr = 0;
+	clr_attr = 0;
 
 	if (data)
-		set_attr.fields.tcp_csum_offload_ipv4 = 1;
+		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
 	else
-		clr_attr.fields.tcp_csum_offload_ipv4 = 1;
+		clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
 
-	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr.desc);
+	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
 
-	if (ret == H_SUCCESS && !ret_attr.fields.active_trunk &&
-	    !ret_attr.fields.trunk_priority &&
-	    ret_attr.fields.csum_offload_padded_pkt_support) {
-		ret = h_illan_attributes(adapter->vdev->unit_address, clr_attr.desc,
-					 set_attr.desc, &ret_attr.desc);
+	if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
+	    !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
+	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
+		ret = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
+					 set_attr, &ret_attr);
 
 		if (ret != H_SUCCESS) {
 			rc1 = -EIO;
@@ -738,13 +743,13 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
 					     " %d rc=%ld\n", data, ret);
 
 			ret = h_illan_attributes(adapter->vdev->unit_address,
-						 set_attr.desc, clr_attr.desc, &ret_attr.desc);
+						 set_attr, clr_attr, &ret_attr);
 		} else
 			done(dev, data);
 	} else {
 		rc1 = -EIO;
 		ibmveth_error_printk("unable to change checksum offload settings."
-				     " %d rc=%ld ret_attr=%lx\n", data, ret, ret_attr.desc);
+				     " %d rc=%ld ret_attr=%lx\n", data, ret, ret_attr);
 	}
 
 	if (restart)
@@ -850,11 +855,9 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	unsigned int tx_send_failed = 0;
 	unsigned int tx_map_failed = 0;
 
-	desc.desc = 0;
-	desc.fields.length = skb->len;
+	desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len;
 	desc.fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
-					     desc.fields.length, DMA_TO_DEVICE);
-	desc.fields.valid = 1;
+					     skb->len, DMA_TO_DEVICE);
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 	    ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) {
@@ -866,8 +869,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		unsigned char *buf = skb_transport_header(skb) + skb->csum_offset;
 
-		desc.fields.no_csum = 1;
-		desc.fields.csum_good = 1;
+		desc.fields.flags_len |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);
 
 		/* Need to zero out the checksum */
 		buf[0] = 0;
@@ -893,7 +895,8 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	if(lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) {
 		ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc);
 		ibmveth_error_printk("tx: valid=%d, len=%d, address=0x%08x\n",
-				     desc.fields.valid, desc.fields.length, desc.fields.address);
+				     (desc.fields.flags_len & IBMVETH_BUF_VALID) ? 1 : 0,
+				     skb->len, desc.fields.address);
 		tx_send_failed++;
 		tx_dropped++;
 	} else {
@@ -903,7 +906,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	}
 
 	dma_unmap_single(&adapter->vdev->dev, desc.fields.address,
-			 desc.fields.length, DMA_TO_DEVICE);
+			 skb->len, DMA_TO_DEVICE);
 
 out:	spin_lock_irqsave(&adapter->stats_lock, flags);
 	adapter->stats.tx_dropped += tx_dropped;
@@ -1108,7 +1111,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
 	long ret;
 	struct net_device *netdev;
 	struct ibmveth_adapter *adapter;
-	union ibmveth_illan_attributes set_attr, ret_attr;
+	u64 set_attr, ret_attr;
 
 	unsigned char *mac_addr_p;
 	unsigned int *mcastFilterSize_p;
@@ -1202,23 +1205,20 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
 
 	ibmveth_debug_printk("registering netdev...\n");
 
-	ret = h_illan_attributes(dev->unit_address, 0, 0, &ret_attr.desc);
+	ret = h_illan_attributes(dev->unit_address, 0, 0, &ret_attr);
 
-	if (ret == H_SUCCESS && !ret_attr.fields.active_trunk &&
-	    !ret_attr.fields.trunk_priority &&
-	    ret_attr.fields.csum_offload_padded_pkt_support) {
-		set_attr.desc = 0;
-		set_attr.fields.tcp_csum_offload_ipv4 = 1;
+	if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
+	    !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
+	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
+		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
 
-		ret = h_illan_attributes(dev->unit_address, 0, set_attr.desc,
-					 &ret_attr.desc);
+		ret = h_illan_attributes(dev->unit_address, 0, set_attr, &ret_attr);
 
 		if (ret == H_SUCCESS) {
 			adapter->rx_csum = 1;
 			netdev->features |= NETIF_F_IP_CSUM;
 		} else
-			ret = h_illan_attributes(dev->unit_address, set_attr.desc,
-						 0, &ret_attr.desc);
+			ret = h_illan_attributes(dev->unit_address, set_attr, 0, &ret_attr);
 	}
 
 	rc = register_netdev(netdev);
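For reference, the new rxq helpers in the first hunk follow the usual mask-and-shift idiom: a single-bit field that must come out as 0 or 1 is masked and shifted down, a boolean test needs only the mask, and a multi-bit field that already sits at bit 0 is just masked. A toy version of that read side is sketched below; the constants are made up, standing in for the real IBMVETH_RXQ_* values defined in the ibmveth.h side of this change, which the diffstat above omits.

```c
#include <stdint.h>

/* Toy constants -- not the real IBMVETH_RXQ_* definitions. */
#define RXQ_TOGGLE		0x80000000u
#define RXQ_TOGGLE_SHIFT	31
#define RXQ_VALID		0x40000000u
#define RXQ_OFF_MASK		0x0000ffffu

/* Single bit that must end up as 0 or 1: mask, then shift down. */
static inline int rxq_toggle(uint32_t flags_off)
{
	return (flags_off & RXQ_TOGGLE) >> RXQ_TOGGLE_SHIFT;
}

/* Boolean test: masking alone is enough. */
static inline int rxq_buffer_valid(uint32_t flags_off)
{
	return flags_off & RXQ_VALID;
}

/* Multi-bit field already at bit 0: mask only, no shift. */
static inline int rxq_frame_offset(uint32_t flags_off)
{
	return flags_off & RXQ_OFF_MASK;
}
```

ibmveth_rxq_pending_buffer() then simply compares the extracted toggle bit against the toggle value the adapter expects, as the first hunk shows.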