author     Ben Hutchings <bhutchings@solarflare.com>  2008-09-01 07:47:12 -0400
committer  Jeff Garzik <jgarzik@redhat.com>           2008-09-03 09:53:45 -0400
commit     4d566063a799231b99d9a21128634ea78b89ab72 (patch)
tree       b1d55d0c266b27f2438aec5888abd30c02749e59 /drivers/net
parent     23d9e60b1ddc67ffedd77161ecff4895708088a4 (diff)
sfc: Removed forced inlining of long functions
gcc will automatically inline static functions with only one caller, and
may inline other functions depending on the kernel configuration and size
of the intermediate code.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
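As an illustration (not part of this patch): in kernels of this era,
"inline" could expand to __always_inline unless CONFIG_OPTIMIZE_INLINING
was enabled, so dropping the keyword hands the decision back to gcc. A
minimal before/after sketch, using a hypothetical helper name:

	/* Before: inlining forced at every call site, even for long bodies */
	static inline int efx_example_helper(int x)
	{
		return x * 2;
	}

	/* After: gcc still inlines a small static helper with a single
	 * caller on its own, but may keep a long function out of line */
	static int efx_example_helper(int x)
	{
		return x * 2;
	}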
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/sfc/efx.c    |  2
-rw-r--r--  drivers/net/sfc/falcon.c |  8
-rw-r--r--  drivers/net/sfc/rx.c     | 42
-rw-r--r--  drivers/net/sfc/tx.c     | 47
4 files changed, 49 insertions(+), 50 deletions(-)
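One way to check the compiler's decisions after a change like this is to
look for the symbol in the resulting object file (the object path and
symbol here assume the usual kbuild layout, for illustration only):

	$ nm drivers/net/sfc/rx.o | grep efx_init_rx_buffer_skb

No output suggests gcc inlined the function at every call site; a
lowercase 't' entry means an out-of-line local copy was kept.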
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 022dc366c362..9f1ac3a25911 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -158,7 +158,7 @@ static void efx_fini_channels(struct efx_nic *efx);
  * never be concurrently called more than once on the same channel,
  * though different channels may be being processed concurrently.
  */
-static inline int efx_process_channel(struct efx_channel *channel, int rx_quota)
+static int efx_process_channel(struct efx_channel *channel, int rx_quota)
 {
 	int rxdmaqs;
 	struct efx_rx_queue *rx_queue;
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index fb069712222f..9a13e5c8c9f3 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -815,8 +815,8 @@ void falcon_generate_event(struct efx_channel *channel, efx_qword_t *event)
  * Falcon batches TX completion events; the message we receive is of
  * the form "complete all TX events up to this index".
  */
-static inline void falcon_handle_tx_event(struct efx_channel *channel,
-					  efx_qword_t *event)
+static void falcon_handle_tx_event(struct efx_channel *channel,
+				   efx_qword_t *event)
 {
 	unsigned int tx_ev_desc_ptr;
 	unsigned int tx_ev_q_label;
@@ -952,8 +952,8 @@ static void falcon_handle_rx_bad_index(struct efx_rx_queue *rx_queue,
  * Also "is multicast" and "matches multicast filter" flags can be used to
  * discard non-matching multicast packets.
  */
-static inline int falcon_handle_rx_event(struct efx_channel *channel,
-					 const efx_qword_t *event)
+static int falcon_handle_rx_event(struct efx_channel *channel,
+				  const efx_qword_t *event)
 {
 	unsigned int rx_ev_q_label, rx_ev_desc_ptr, rx_ev_byte_cnt;
 	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 17aa81e66a89..fa1a62aacbae 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -212,8 +212,8 @@ void efx_lro_fini(struct net_lro_mgr *lro_mgr)
  * and populates a struct efx_rx_buffer with the relevant
  * information. Return a negative error code or 0 on success.
  */
-static inline int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
-					 struct efx_rx_buffer *rx_buf)
+static int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
+				  struct efx_rx_buffer *rx_buf)
 {
 	struct efx_nic *efx = rx_queue->efx;
 	struct net_device *net_dev = efx->net_dev;
@@ -252,8 +252,8 @@ static inline int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
  * and populates a struct efx_rx_buffer with the relevant
  * information. Return a negative error code or 0 on success.
  */
-static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
-					  struct efx_rx_buffer *rx_buf)
+static int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
+				   struct efx_rx_buffer *rx_buf)
 {
 	struct efx_nic *efx = rx_queue->efx;
 	int bytes, space, offset;
@@ -319,8 +319,8 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
  * and populates a struct efx_rx_buffer with the relevant
  * information.
  */
-static inline int efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
-				     struct efx_rx_buffer *new_rx_buf)
+static int efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
+			      struct efx_rx_buffer *new_rx_buf)
 {
 	int rc = 0;
 
@@ -340,8 +340,8 @@ static inline int efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
 	return rc;
 }
 
-static inline void efx_unmap_rx_buffer(struct efx_nic *efx,
-				       struct efx_rx_buffer *rx_buf)
+static void efx_unmap_rx_buffer(struct efx_nic *efx,
+				struct efx_rx_buffer *rx_buf)
 {
 	if (rx_buf->page) {
 		EFX_BUG_ON_PARANOID(rx_buf->skb);
@@ -357,8 +357,8 @@ static inline void efx_unmap_rx_buffer(struct efx_nic *efx,
 	}
 }
 
-static inline void efx_free_rx_buffer(struct efx_nic *efx,
-				      struct efx_rx_buffer *rx_buf)
+static void efx_free_rx_buffer(struct efx_nic *efx,
+			       struct efx_rx_buffer *rx_buf)
 {
 	if (rx_buf->page) {
 		__free_pages(rx_buf->page, efx->rx_buffer_order);
@@ -369,8 +369,8 @@ static inline void efx_free_rx_buffer(struct efx_nic *efx,
 	}
 }
 
-static inline void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
-				      struct efx_rx_buffer *rx_buf)
+static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
+			       struct efx_rx_buffer *rx_buf)
 {
 	efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
 	efx_free_rx_buffer(rx_queue->efx, rx_buf);
@@ -506,10 +506,10 @@ void efx_rx_work(struct work_struct *data)
 		efx_schedule_slow_fill(rx_queue, 1);
 }
 
-static inline void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
-					    struct efx_rx_buffer *rx_buf,
-					    int len, bool *discard,
-					    bool *leak_packet)
+static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
+				     struct efx_rx_buffer *rx_buf,
+				     int len, bool *discard,
+				     bool *leak_packet)
 {
 	struct efx_nic *efx = rx_queue->efx;
 	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
@@ -546,8 +546,8 @@ static inline void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
  * Handles driverlink veto, and passes the fragment up via
  * the appropriate LRO method
  */
-static inline void efx_rx_packet_lro(struct efx_channel *channel,
-				     struct efx_rx_buffer *rx_buf)
+static void efx_rx_packet_lro(struct efx_channel *channel,
+			      struct efx_rx_buffer *rx_buf)
 {
 	struct net_lro_mgr *lro_mgr = &channel->lro_mgr;
 	void *priv = channel;
@@ -574,9 +574,9 @@ static inline void efx_rx_packet_lro(struct efx_channel *channel,
 }
 
 /* Allocate and construct an SKB around a struct page.*/
-static inline struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
-					    struct efx_nic *efx,
-					    int hdr_len)
+static struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
+				     struct efx_nic *efx,
+				     int hdr_len)
 {
 	struct sk_buff *skb;
 
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 550856fab16c..0e9889ca68fc 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -47,7 +47,7 @@ void efx_stop_queue(struct efx_nic *efx)
  * We want to be able to nest calls to netif_stop_queue(), since each
  * channel can have an individual stop on the queue.
  */
-inline void efx_wake_queue(struct efx_nic *efx)
+void efx_wake_queue(struct efx_nic *efx)
 {
 	local_bh_disable();
 	if (atomic_dec_and_lock(&efx->netif_stop_count,
@@ -59,8 +59,8 @@ inline void efx_wake_queue(struct efx_nic *efx)
 	local_bh_enable();
 }
 
-static inline void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
-				      struct efx_tx_buffer *buffer)
+static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
+			       struct efx_tx_buffer *buffer)
 {
 	if (buffer->unmap_len) {
 		struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
@@ -110,8 +110,8 @@ static void efx_fini_tso(struct efx_tx_queue *tx_queue);
 static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
 			       struct efx_tso_header *tsoh);
 
-static inline void efx_tsoh_free(struct efx_tx_queue *tx_queue,
-				 struct efx_tx_buffer *buffer)
+static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
+			  struct efx_tx_buffer *buffer)
 {
 	if (buffer->tsoh) {
 		if (likely(!buffer->tsoh->unmap_len)) {
@@ -138,8 +138,8 @@ static inline void efx_tsoh_free(struct efx_tx_queue *tx_queue,
  * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
  * You must hold netif_tx_lock() to call this function.
  */
-static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
-				  const struct sk_buff *skb)
+static int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
+			   const struct sk_buff *skb)
 {
 	struct efx_nic *efx = tx_queue->efx;
 	struct pci_dev *pci_dev = efx->pci_dev;
@@ -305,8 +305,8 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
  * This removes packets from the TX queue, up to and including the
  * specified index.
  */
-static inline void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
-				       unsigned int index)
+static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
+				unsigned int index)
 {
 	struct efx_nic *efx = tx_queue->efx;
 	unsigned int stop_index, read_ptr;
@@ -578,7 +578,7 @@ struct tso_state {
  * Verify that our various assumptions about sk_buffs and the conditions
  * under which TSO will be attempted hold true.
  */
-static inline void efx_tso_check_safe(const struct sk_buff *skb)
+static void efx_tso_check_safe(const struct sk_buff *skb)
 {
 	EFX_BUG_ON_PARANOID(skb->protocol != htons(ETH_P_IP));
 	EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
@@ -772,8 +772,8 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
  * a single fragment, and we know it doesn't cross a page boundary. It
  * also allows us to not worry about end-of-packet etc.
  */
-static inline void efx_tso_put_header(struct efx_tx_queue *tx_queue,
-				      struct efx_tso_header *tsoh, unsigned len)
+static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
+			       struct efx_tso_header *tsoh, unsigned len)
 {
 	struct efx_tx_buffer *buffer;
 
@@ -826,7 +826,7 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
 
 
 /* Parse the SKB header and initialise state. */
-static inline void tso_start(struct tso_state *st, const struct sk_buff *skb)
+static void tso_start(struct tso_state *st, const struct sk_buff *skb)
 {
 	/* All ethernet/IP/TCP headers combined size is TCP header size
 	 * plus offset of TCP header relative to start of packet.
@@ -848,8 +848,8 @@ static inline void tso_start(struct tso_state *st, const struct sk_buff *skb)
 	st->unmap_single = false;
 }
 
-static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
-				   skb_frag_t *frag)
+static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
+			    skb_frag_t *frag)
 {
 	st->unmap_addr = pci_map_page(efx->pci_dev, frag->page,
 				      frag->page_offset, frag->size,
@@ -864,9 +864,8 @@ static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
 	return -ENOMEM;
 }
 
-static inline int
-tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
-		      const struct sk_buff *skb)
+static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
+				 const struct sk_buff *skb)
 {
 	int hl = st->header_len;
 	int len = skb_headlen(skb) - hl;
@@ -894,9 +893,9 @@ tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
  * of fragment or end-of-packet. Return 0 on success, 1 if not enough
  * space in @tx_queue.
  */
-static inline int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
-						const struct sk_buff *skb,
-						struct tso_state *st)
+static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
+					 const struct sk_buff *skb,
+					 struct tso_state *st)
 {
 	struct efx_tx_buffer *buffer;
 	int n, end_of_packet, rc;
@@ -946,9 +945,9 @@ static inline int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
  * Generate a new header and prepare for the new packet. Return 0 on
  * success, or -1 if failed to alloc header.
  */
-static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue,
-				       const struct sk_buff *skb,
-				       struct tso_state *st)
+static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
+				const struct sk_buff *skb,
+				struct tso_state *st)
 {
 	struct efx_tso_header *tsoh;
 	struct iphdr *tsoh_iph;