diff options
author | Bjorn Helgaas <bhelgaas@google.com> | 2016-03-23 14:47:23 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2016-03-23 14:52:03 -0400 |
commit | 5e82b4b2a0015ed0a659b22ef2ee3409a3c39e54 (patch) | |
tree | a847bd62a9bda0cc45a1b42614e3d375fc185e61 | |
parent | 9efc2f7dcd06e04d7b6a3032ae65bfd628b1aebe (diff) |
net: Fix typos and whitespace.
Fix typos. Capitalize CPU, NAPI, RCU consistently. Align structure
indentation. No functional change intended; only comment and whitespace
changes.
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | include/linux/netdevice.h | 215 |
1 file changed, 106 insertions, 109 deletions
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 3f6385d27b81..a675205df0f1 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -81,8 +81,8 @@ void netdev_set_default_ethtool_ops(struct net_device *dev, | |||
81 | * function. Real network devices commonly used with qdiscs should only return | 81 | * function. Real network devices commonly used with qdiscs should only return |
82 | * the driver transmit return codes though - when qdiscs are used, the actual | 82 | * the driver transmit return codes though - when qdiscs are used, the actual |
83 | * transmission happens asynchronously, so the value is not propagated to | 83 | * transmission happens asynchronously, so the value is not propagated to |
84 | * higher layers. Virtual network devices transmit synchronously, in this case | 84 | * higher layers. Virtual network devices transmit synchronously; in this case |
85 | * the driver transmit return codes are consumed by dev_queue_xmit(), all | 85 | * the driver transmit return codes are consumed by dev_queue_xmit(), and all |
86 | * others are propagated to higher layers. | 86 | * others are propagated to higher layers. |
87 | */ | 87 | */ |
88 | 88 | ||
@@ -129,7 +129,7 @@ static inline bool dev_xmit_complete(int rc) | |||
129 | } | 129 | } |
130 | 130 | ||
131 | /* | 131 | /* |
132 | * Compute the worst case header length according to the protocols | 132 | * Compute the worst-case header length according to the protocols |
133 | * used. | 133 | * used. |
134 | */ | 134 | */ |
135 | 135 | ||
@@ -246,7 +246,7 @@ struct hh_cache { | |||
246 | unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)]; | 246 | unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)]; |
247 | }; | 247 | }; |
248 | 248 | ||
249 | /* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much. | 249 | /* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much. |
250 | * Alternative is: | 250 | * Alternative is: |
251 | * dev->hard_header_len ? (dev->hard_header_len + | 251 | * dev->hard_header_len ? (dev->hard_header_len + |
252 | * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0 | 252 | * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0 |
@@ -272,7 +272,7 @@ struct header_ops { | |||
272 | }; | 272 | }; |
273 | 273 | ||
274 | /* These flag bits are private to the generic network queueing | 274 | /* These flag bits are private to the generic network queueing |
275 | * layer, they may not be explicitly referenced by any other | 275 | * layer; they may not be explicitly referenced by any other |
276 | * code. | 276 | * code. |
277 | */ | 277 | */ |
278 | 278 | ||
@@ -286,7 +286,7 @@ enum netdev_state_t { | |||
286 | 286 | ||
287 | 287 | ||
288 | /* | 288 | /* |
289 | * This structure holds at boot time configured netdevice settings. They | 289 | * This structure holds boot-time configured netdevice settings. They |
290 | * are then used in the device probing. | 290 | * are then used in the device probing. |
291 | */ | 291 | */ |
292 | struct netdev_boot_setup { | 292 | struct netdev_boot_setup { |
@@ -304,7 +304,7 @@ struct napi_struct { | |||
304 | /* The poll_list must only be managed by the entity which | 304 | /* The poll_list must only be managed by the entity which |
305 | * changes the state of the NAPI_STATE_SCHED bit. This means | 305 | * changes the state of the NAPI_STATE_SCHED bit. This means |
306 | * whoever atomically sets that bit can add this napi_struct | 306 | * whoever atomically sets that bit can add this napi_struct |
307 | * to the per-cpu poll_list, and whoever clears that bit | 307 | * to the per-CPU poll_list, and whoever clears that bit |
308 | * can remove from the list right before clearing the bit. | 308 | * can remove from the list right before clearing the bit. |
309 | */ | 309 | */ |
310 | struct list_head poll_list; | 310 | struct list_head poll_list; |
@@ -350,7 +350,7 @@ typedef enum gro_result gro_result_t; | |||
350 | * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in | 350 | * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in |
351 | * case skb->dev was changed by rx_handler. | 351 | * case skb->dev was changed by rx_handler. |
352 | * @RX_HANDLER_EXACT: Force exact delivery, no wildcard. | 352 | * @RX_HANDLER_EXACT: Force exact delivery, no wildcard. |
353 | * @RX_HANDLER_PASS: Do nothing, passe the skb as if no rx_handler was called. | 353 | * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called. |
354 | * | 354 | * |
355 | * rx_handlers are functions called from inside __netif_receive_skb(), to do | 355 | * rx_handlers are functions called from inside __netif_receive_skb(), to do |
356 | * special processing of the skb, prior to delivery to protocol handlers. | 356 | * special processing of the skb, prior to delivery to protocol handlers. |
@@ -365,19 +365,19 @@ typedef enum gro_result gro_result_t; | |||
365 | * Upon return, rx_handler is expected to tell __netif_receive_skb() what to | 365 | * Upon return, rx_handler is expected to tell __netif_receive_skb() what to |
366 | * do with the skb. | 366 | * do with the skb. |
367 | * | 367 | * |
368 | * If the rx_handler consumed to skb in some way, it should return | 368 | * If the rx_handler consumed the skb in some way, it should return |
369 | * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for | 369 | * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for |
370 | * the skb to be delivered in some other ways. | 370 | * the skb to be delivered in some other way. |
371 | * | 371 | * |
372 | * If the rx_handler changed skb->dev, to divert the skb to another | 372 | * If the rx_handler changed skb->dev, to divert the skb to another |
373 | * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the | 373 | * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the |
374 | * new device will be called if it exists. | 374 | * new device will be called if it exists. |
375 | * | 375 | * |
376 | * If the rx_handler consider the skb should be ignored, it should return | 376 | * If the rx_handler decides the skb should be ignored, it should return |
377 | * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that | 377 | * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that |
378 | * are registered on exact device (ptype->dev == skb->dev). | 378 | * are registered on exact device (ptype->dev == skb->dev). |
379 | * | 379 | * |
380 | * If the rx_handler didn't changed skb->dev, but want the skb to be normally | 380 | * If the rx_handler didn't change skb->dev, but wants the skb to be normally |
381 | * delivered, it should return RX_HANDLER_PASS. | 381 | * delivered, it should return RX_HANDLER_PASS. |
382 | * | 382 | * |
383 | * A device without a registered rx_handler will behave as if rx_handler | 383 | * A device without a registered rx_handler will behave as if rx_handler |
@@ -402,11 +402,11 @@ static inline bool napi_disable_pending(struct napi_struct *n) | |||
402 | } | 402 | } |
403 | 403 | ||
404 | /** | 404 | /** |
405 | * napi_schedule_prep - check if napi can be scheduled | 405 | * napi_schedule_prep - check if NAPI can be scheduled |
406 | * @n: napi context | 406 | * @n: NAPI context |
407 | * | 407 | * |
408 | * Test if NAPI routine is already running, and if not mark | 408 | * Test if NAPI routine is already running, and if not mark |
409 | * it as running. This is used as a condition variable | 409 | * it as running. This is used as a condition variable to |
410 | * insure only one NAPI poll instance runs. We also make | 410 | * ensure only one NAPI poll instance runs. We also make |
411 | * sure there is no pending NAPI disable. | 411 | * sure there is no pending NAPI disable. |
412 | */ | 412 | */ |
@@ -418,7 +418,7 @@ static inline bool napi_schedule_prep(struct napi_struct *n) | |||
418 | 418 | ||
419 | /** | 419 | /** |
420 | * napi_schedule - schedule NAPI poll | 420 | * napi_schedule - schedule NAPI poll |
421 | * @n: napi context | 421 | * @n: NAPI context |
422 | * | 422 | * |
423 | * Schedule NAPI poll routine to be called if it is not already | 423 | * Schedule NAPI poll routine to be called if it is not already |
424 | * running. | 424 | * running. |
@@ -431,7 +431,7 @@ static inline void napi_schedule(struct napi_struct *n) | |||
431 | 431 | ||
432 | /** | 432 | /** |
433 | * napi_schedule_irqoff - schedule NAPI poll | 433 | * napi_schedule_irqoff - schedule NAPI poll |
434 | * @n: napi context | 434 | * @n: NAPI context |
435 | * | 435 | * |
436 | * Variant of napi_schedule(), assuming hard irqs are masked. | 436 | * Variant of napi_schedule(), assuming hard irqs are masked. |
437 | */ | 437 | */ |
@@ -455,7 +455,7 @@ void __napi_complete(struct napi_struct *n); | |||
455 | void napi_complete_done(struct napi_struct *n, int work_done); | 455 | void napi_complete_done(struct napi_struct *n, int work_done); |
456 | /** | 456 | /** |
457 | * napi_complete - NAPI processing complete | 457 | * napi_complete - NAPI processing complete |
458 | * @n: napi context | 458 | * @n: NAPI context |
459 | * | 459 | * |
460 | * Mark NAPI processing as complete. | 460 | * Mark NAPI processing as complete. |
461 | * Consider using napi_complete_done() instead. | 461 | * Consider using napi_complete_done() instead. |
@@ -467,32 +467,32 @@ static inline void napi_complete(struct napi_struct *n) | |||
467 | 467 | ||
468 | /** | 468 | /** |
469 | * napi_hash_add - add a NAPI to global hashtable | 469 | * napi_hash_add - add a NAPI to global hashtable |
470 | * @napi: napi context | 470 | * @napi: NAPI context |
471 | * | 471 | * |
472 | * generate a new napi_id and store a @napi under it in napi_hash | 472 | * Generate a new napi_id and store a @napi under it in napi_hash. |
473 | * Used for busy polling (CONFIG_NET_RX_BUSY_POLL) | 473 | * Used for busy polling (CONFIG_NET_RX_BUSY_POLL). |
474 | * Note: This is normally automatically done from netif_napi_add(), | 474 | * Note: This is normally automatically done from netif_napi_add(), |
475 | * so might disappear in a future linux version. | 475 | * so might disappear in a future Linux version. |
476 | */ | 476 | */ |
477 | void napi_hash_add(struct napi_struct *napi); | 477 | void napi_hash_add(struct napi_struct *napi); |
478 | 478 | ||
479 | /** | 479 | /** |
480 | * napi_hash_del - remove a NAPI from global table | 480 | * napi_hash_del - remove a NAPI from global table |
481 | * @napi: napi context | 481 | * @napi: NAPI context |
482 | * | 482 | * |
483 | * Warning: caller must observe rcu grace period | 483 | * Warning: caller must observe RCU grace period |
484 | * before freeing memory containing @napi, if | 484 | * before freeing memory containing @napi, if |
485 | * this function returns true. | 485 | * this function returns true. |
486 | * Note: core networking stack automatically calls it | 486 | * Note: core networking stack automatically calls it |
487 | * from netif_napi_del() | 487 | * from netif_napi_del(). |
488 | * Drivers might want to call this helper to combine all | 488 | * Drivers might want to call this helper to combine all |
489 | * the needed rcu grace periods into a single one. | 489 | * the needed RCU grace periods into a single one. |
490 | */ | 490 | */ |
491 | bool napi_hash_del(struct napi_struct *napi); | 491 | bool napi_hash_del(struct napi_struct *napi); |
492 | 492 | ||
493 | /** | 493 | /** |
494 | * napi_disable - prevent NAPI from scheduling | 494 | * napi_disable - prevent NAPI from scheduling |
495 | * @n: napi context | 495 | * @n: NAPI context |
496 | * | 496 | * |
497 | * Stop NAPI from being scheduled on this context. | 497 | * Stop NAPI from being scheduled on this context. |
498 | * Waits till any outstanding processing completes. | 498 | * Waits till any outstanding processing completes. |
@@ -501,7 +501,7 @@ void napi_disable(struct napi_struct *n); | |||
501 | 501 | ||
502 | /** | 502 | /** |
503 | * napi_enable - enable NAPI scheduling | 503 | * napi_enable - enable NAPI scheduling |
504 | * @n: napi context | 504 | * @n: NAPI context |
505 | * | 505 | * |
506 | * Resume NAPI from being scheduled on this context. | 506 | * Resume NAPI from being scheduled on this context. |
507 | * Must be paired with napi_disable. | 507 | * Must be paired with napi_disable. |
@@ -516,7 +516,7 @@ static inline void napi_enable(struct napi_struct *n) | |||
516 | 516 | ||
517 | /** | 517 | /** |
518 | * napi_synchronize - wait until NAPI is not running | 518 | * napi_synchronize - wait until NAPI is not running |
519 | * @n: napi context | 519 | * @n: NAPI context |
520 | * | 520 | * |
521 | * Wait until NAPI is done being scheduled on this context. | 521 | * Wait until NAPI is done being scheduled on this context. |
522 | * Waits till any outstanding processing completes but | 522 | * Waits till any outstanding processing completes but |
@@ -559,7 +559,7 @@ enum netdev_queue_state_t { | |||
559 | 559 | ||
560 | struct netdev_queue { | 560 | struct netdev_queue { |
561 | /* | 561 | /* |
562 | * read mostly part | 562 | * read-mostly part |
563 | */ | 563 | */ |
564 | struct net_device *dev; | 564 | struct net_device *dev; |
565 | struct Qdisc __rcu *qdisc; | 565 | struct Qdisc __rcu *qdisc; |
@@ -571,7 +571,7 @@ struct netdev_queue { | |||
571 | int numa_node; | 571 | int numa_node; |
572 | #endif | 572 | #endif |
573 | /* | 573 | /* |
574 | * write mostly part | 574 | * write-mostly part |
575 | */ | 575 | */ |
576 | spinlock_t _xmit_lock ____cacheline_aligned_in_smp; | 576 | spinlock_t _xmit_lock ____cacheline_aligned_in_smp; |
577 | int xmit_lock_owner; | 577 | int xmit_lock_owner; |
@@ -648,11 +648,11 @@ struct rps_dev_flow_table { | |||
648 | /* | 648 | /* |
649 | * The rps_sock_flow_table contains mappings of flows to the last CPU | 649 | * The rps_sock_flow_table contains mappings of flows to the last CPU |
650 | * on which they were processed by the application (set in recvmsg). | 650 | * on which they were processed by the application (set in recvmsg). |
651 | * Each entry is a 32bit value. Upper part is the high order bits | 651 | * Each entry is a 32bit value. Upper part is the high-order bits |
652 | * of flow hash, lower part is cpu number. | 652 | * of flow hash, lower part is CPU number. |
653 | * rps_cpu_mask is used to partition the space, depending on number of | 653 | * rps_cpu_mask is used to partition the space, depending on number of |
654 | * possible cpus : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1 | 654 | * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1 |
655 | * For example, if 64 cpus are possible, rps_cpu_mask = 0x3f, | 655 | * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f, |
656 | * meaning we use 32-6=26 bits for the hash. | 656 | * meaning we use 32-6=26 bits for the hash. |
657 | */ | 657 | */ |
658 | struct rps_sock_flow_table { | 658 | struct rps_sock_flow_table { |
@@ -674,7 +674,7 @@ static inline void rps_record_sock_flow(struct rps_sock_flow_table *table, | |||
674 | unsigned int index = hash & table->mask; | 674 | unsigned int index = hash & table->mask; |
675 | u32 val = hash & ~rps_cpu_mask; | 675 | u32 val = hash & ~rps_cpu_mask; |
676 | 676 | ||
677 | /* We only give a hint, preemption can change cpu under us */ | 677 | /* We only give a hint, preemption can change CPU under us */ |
678 | val |= raw_smp_processor_id(); | 678 | val |= raw_smp_processor_id(); |
679 | 679 | ||
680 | if (table->ents[index] != val) | 680 | if (table->ents[index] != val) |
@@ -807,21 +807,21 @@ struct tc_to_netdev { | |||
807 | * optional and can be filled with a null pointer. | 807 | * optional and can be filled with a null pointer. |
808 | * | 808 | * |
809 | * int (*ndo_init)(struct net_device *dev); | 809 | * int (*ndo_init)(struct net_device *dev); |
810 | * This function is called once when network device is registered. | 810 | * This function is called once when a network device is registered. |
811 | * The network device can use this to any late stage initializaton | 811 | * The network device can use this for any late stage initialization |
812 | * or semantic validattion. It can fail with an error code which will | 812 | * or semantic validation. It can fail with an error code which will |
813 | * be propogated back to register_netdev | 813 | * be propagated back to register_netdev. |
814 | * | 814 | * |
815 | * void (*ndo_uninit)(struct net_device *dev); | 815 | * void (*ndo_uninit)(struct net_device *dev); |
816 | * This function is called when device is unregistered or when registration | 816 | * This function is called when device is unregistered or when registration |
817 | * fails. It is not called if init fails. | 817 | * fails. It is not called if init fails. |
818 | * | 818 | * |
819 | * int (*ndo_open)(struct net_device *dev); | 819 | * int (*ndo_open)(struct net_device *dev); |
820 | * This function is called when network device transistions to the up | 820 | * This function is called when a network device transitions to the up |
821 | * state. | 821 | * state. |
822 | * | 822 | * |
823 | * int (*ndo_stop)(struct net_device *dev); | 823 | * int (*ndo_stop)(struct net_device *dev); |
824 | * This function is called when network device transistions to the down | 824 | * This function is called when a network device transitions to the down |
825 | * state. | 825 | * state. |
826 | * | 826 | * |
827 | * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, | 827 | * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, |
@@ -832,7 +832,7 @@ struct tc_to_netdev { | |||
832 | * corner cases, but the stack really does a non-trivial amount | 832 | * corner cases, but the stack really does a non-trivial amount |
833 | * of useless work if you return NETDEV_TX_BUSY. | 833 | * of useless work if you return NETDEV_TX_BUSY. |
834 | * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX) | 834 | * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX) |
835 | * Required can not be NULL. | 835 | * Required; cannot be NULL. |
836 | * | 836 | * |
837 | * netdev_features_t (*ndo_fix_features)(struct net_device *dev, | 837 | * netdev_features_t (*ndo_fix_features)(struct net_device *dev, |
838 | * netdev_features_t features); | 838 | * netdev_features_t features); |
@@ -842,34 +842,34 @@ struct tc_to_netdev { | |||
842 | * | 842 | * |
843 | * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, | 843 | * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, |
844 | * void *accel_priv, select_queue_fallback_t fallback); | 844 | * void *accel_priv, select_queue_fallback_t fallback); |
845 | * Called to decide which queue to when device supports multiple | 845 | * Called to decide which queue to use when device supports multiple |
846 | * transmit queues. | 846 | * transmit queues. |
847 | * | 847 | * |
848 | * void (*ndo_change_rx_flags)(struct net_device *dev, int flags); | 848 | * void (*ndo_change_rx_flags)(struct net_device *dev, int flags); |
849 | * This function is called to allow device receiver to make | 849 | * This function is called to allow device receiver to make |
850 | * changes to configuration when multicast or promiscious is enabled. | 850 | * changes to configuration when multicast or promiscuous is enabled. |
851 | * | 851 | * |
852 | * void (*ndo_set_rx_mode)(struct net_device *dev); | 852 | * void (*ndo_set_rx_mode)(struct net_device *dev); |
853 | * This function is called device changes address list filtering. | 853 | * This function is called when the device changes address list filtering. |
854 | * If driver handles unicast address filtering, it should set | 854 | * If driver handles unicast address filtering, it should set |
855 | * IFF_UNICAST_FLT to its priv_flags. | 855 | * IFF_UNICAST_FLT in its priv_flags. |
856 | * | 856 | * |
857 | * int (*ndo_set_mac_address)(struct net_device *dev, void *addr); | 857 | * int (*ndo_set_mac_address)(struct net_device *dev, void *addr); |
858 | * This function is called when the Media Access Control address | 858 | * This function is called when the Media Access Control address |
859 | * needs to be changed. If this interface is not defined, the | 859 | * needs to be changed. If this interface is not defined, the |
860 | * mac address can not be changed. | 860 | * MAC address can not be changed. |
861 | * | 861 | * |
862 | * int (*ndo_validate_addr)(struct net_device *dev); | 862 | * int (*ndo_validate_addr)(struct net_device *dev); |
863 | * Test if Media Access Control address is valid for the device. | 863 | * Test if Media Access Control address is valid for the device. |
864 | * | 864 | * |
865 | * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); | 865 | * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); |
866 | * Called when a user request an ioctl which can't be handled by | 866 | * Called when a user requests an ioctl which can't be handled by |
867 | * the generic interface code. If not defined ioctl's return | 867 | * the generic interface code. If not defined ioctls return |
868 | * not supported error code. | 868 | * not supported error code. |
869 | * | 869 | * |
870 | * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map); | 870 | * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map); |
871 | * Used to set network devices bus interface parameters. This interface | 871 | * Used to set network devices bus interface parameters. This interface |
872 | * is retained for legacy reason, new devices should use the bus | 872 | * is retained for legacy reasons; new devices should use the bus |
873 | * interface (PCI) for low level management. | 873 | * interface (PCI) for low level management. |
874 | * | 874 | * |
875 | * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu); | 875 | * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu); |
@@ -878,7 +878,7 @@ struct tc_to_netdev { | |||
878 | * will return an error. | 878 | * will return an error. |
879 | * | 879 | * |
880 | * void (*ndo_tx_timeout)(struct net_device *dev); | 880 | * void (*ndo_tx_timeout)(struct net_device *dev); |
881 | * Callback uses when the transmitter has not made any progress | 881 | * Callback used when the transmitter has not made any progress |
882 | * for dev->watchdog ticks. | 882 | * for dev->watchdog ticks. |
883 | * | 883 | * |
884 | * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, | 884 | * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, |
@@ -896,11 +896,11 @@ struct tc_to_netdev { | |||
896 | * neither operation. | 896 | * neither operation. |
897 | * | 897 | * |
898 | * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid); | 898 | * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid); |
899 | * If device support VLAN filtering this function is called when a | 899 | * If device supports VLAN filtering this function is called when a |
900 | * VLAN id is registered. | 900 | * VLAN id is registered. |
901 | * | 901 | * |
902 | * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid); | 902 | * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid); |
903 | * If device support VLAN filtering this function is called when a | 903 | * If device supports VLAN filtering this function is called when a |
904 | * VLAN id is unregistered. | 904 | * VLAN id is unregistered. |
905 | * | 905 | * |
906 | * void (*ndo_poll_controller)(struct net_device *dev); | 906 | * void (*ndo_poll_controller)(struct net_device *dev); |
@@ -920,7 +920,7 @@ struct tc_to_netdev { | |||
920 | * | 920 | * |
921 | * Enable or disable the VF ability to query its RSS Redirection Table and | 921 | * Enable or disable the VF ability to query its RSS Redirection Table and |
922 | * Hash Key. This is needed since on some devices VF share this information | 922 | * Hash Key. This is needed since on some devices VF share this information |
923 | * with PF and querying it may adduce a theoretical security risk. | 923 | * with PF and querying it may introduce a theoretical security risk. |
924 | * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting); | 924 | * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting); |
925 | * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); | 925 | * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); |
926 | * int (*ndo_setup_tc)(struct net_device *dev, u8 tc) | 926 | * int (*ndo_setup_tc)(struct net_device *dev, u8 tc) |
@@ -1030,20 +1030,20 @@ struct tc_to_netdev { | |||
1030 | * | 1030 | * |
1031 | * void (*ndo_add_vxlan_port)(struct net_device *dev, | 1031 | * void (*ndo_add_vxlan_port)(struct net_device *dev, |
1032 | * sa_family_t sa_family, __be16 port); | 1032 | * sa_family_t sa_family, __be16 port); |
1033 | * Called by vxlan to notiy a driver about the UDP port and socket | 1033 | * Called by vxlan to notify a driver about the UDP port and socket |
1034 | * address family that vxlan is listnening to. It is called only when | 1034 | * address family that vxlan is listening to. It is called only when |
1035 | * a new port starts listening. The operation is protected by the | 1035 | * a new port starts listening. The operation is protected by the |
1036 | * vxlan_net->sock_lock. | 1036 | * vxlan_net->sock_lock. |
1037 | * | 1037 | * |
1038 | * void (*ndo_add_geneve_port)(struct net_device *dev, | 1038 | * void (*ndo_add_geneve_port)(struct net_device *dev, |
1039 | * sa_family_t sa_family, __be16 port); | 1039 | * sa_family_t sa_family, __be16 port); |
1040 | * Called by geneve to notify a driver about the UDP port and socket | 1040 | * Called by geneve to notify a driver about the UDP port and socket |
1041 | * address family that geneve is listnening to. It is called only when | 1041 | * address family that geneve is listening to. It is called only when |
1042 | * a new port starts listening. The operation is protected by the | 1042 | * a new port starts listening. The operation is protected by the |
1043 | * geneve_net->sock_lock. | 1043 | * geneve_net->sock_lock. |
1044 | * | 1044 | * |
1045 | * void (*ndo_del_geneve_port)(struct net_device *dev, | 1045 | * void (*ndo_del_geneve_port)(struct net_device *dev, |
1046 | * sa_family_t sa_family, __be16 port); | 1046 | * sa_family_t sa_family, __be16 port); |
1047 | * Called by geneve to notify the driver about a UDP port and socket | 1047 | * Called by geneve to notify the driver about a UDP port and socket |
1048 | * address family that geneve is not listening to anymore. The operation | 1048 | * address family that geneve is not listening to anymore. The operation |
1049 | * is protected by the geneve_net->sock_lock. | 1049 | * is protected by the geneve_net->sock_lock. |
@@ -1072,9 +1072,9 @@ struct tc_to_netdev { | |||
1072 | * Callback to use for xmit over the accelerated station. This | 1072 | * Callback to use for xmit over the accelerated station. This |
1073 | * is used in place of ndo_start_xmit on accelerated net | 1073 | * is used in place of ndo_start_xmit on accelerated net |
1074 | * devices. | 1074 | * devices. |
1075 | * netdev_features_t (*ndo_features_check) (struct sk_buff *skb, | 1075 | * netdev_features_t (*ndo_features_check)(struct sk_buff *skb, |
1076 | * struct net_device *dev | 1076 | * struct net_device *dev |
1077 | * netdev_features_t features); | 1077 | * netdev_features_t features); |
1078 | * Called by core transmit path to determine if device is capable of | 1078 | * Called by core transmit path to determine if device is capable of |
1079 | * performing offload operations on a given packet. This is to give | 1079 | * performing offload operations on a given packet. This is to give |
1080 | * the device an opportunity to implement any restrictions that cannot | 1080 | * the device an opportunity to implement any restrictions that cannot |
@@ -1088,7 +1088,7 @@ struct tc_to_netdev { | |||
1088 | * int (*ndo_get_iflink)(const struct net_device *dev); | 1088 | * int (*ndo_get_iflink)(const struct net_device *dev); |
1089 | * Called to get the iflink value of this device. | 1089 | * Called to get the iflink value of this device. |
1090 | * void (*ndo_change_proto_down)(struct net_device *dev, | 1090 | * void (*ndo_change_proto_down)(struct net_device *dev, |
1091 | * bool proto_down); | 1091 | * bool proto_down); |
1092 | * This function is used to pass protocol port error state information | 1092 | * This function is used to pass protocol port error state information |
1093 | * to the switch driver. The switch driver can react to the proto_down | 1093 | * to the switch driver. The switch driver can react to the proto_down |
1094 | * by doing a phys down on the associated switch port. | 1094 | * by doing a phys down on the associated switch port. |
@@ -1100,7 +1100,7 @@ struct tc_to_netdev { | |||
1100 | * This function is used to specify the headroom that the skb must | 1100 | * This function is used to specify the headroom that the skb must |
1101 | * consider when allocation skb during packet reception. Setting | 1101 | * consider when allocating an skb during packet reception. Setting |
1102 | * appropriate rx headroom value allows avoiding skb head copy on | 1102 | * appropriate rx headroom value allows avoiding skb head copy on |
1103 | * forward. Setting a negative value reset the rx headroom to the | 1103 | * forward. Setting a negative value resets the rx headroom to the |
1104 | * default value. | 1104 | * default value. |
1105 | * | 1105 | * |
1106 | */ | 1106 | */ |
@@ -1296,7 +1296,7 @@ struct net_device_ops { | |||
1296 | * | 1296 | * |
1297 | * These are the &struct net_device, they are only set internally | 1297 | * These are the &struct net_device, they are only set internally |
1298 | * by drivers and used in the kernel. These flags are invisible to | 1298 | * by drivers and used in the kernel. These flags are invisible to |
1299 | * userspace, this means that the order of these flags can change | 1299 | * userspace; this means that the order of these flags can change |
1300 | * during any kernel release. | 1300 | * during any kernel release. |
1301 | * | 1301 | * |
1302 | * You should have a pretty good reason to be extending these flags. | 1302 | * You should have a pretty good reason to be extending these flags. |
@@ -1414,10 +1414,10 @@ enum netdev_priv_flags { | |||
1414 | * | 1414 | * |
1415 | * @state: Generic network queuing layer state, see netdev_state_t | 1415 | * @state: Generic network queuing layer state, see netdev_state_t |
1416 | * @dev_list: The global list of network devices | 1416 | * @dev_list: The global list of network devices |
1417 | * @napi_list: List entry, that is used for polling napi devices | 1417 | * @napi_list: List entry used for polling NAPI devices |
1418 | * @unreg_list: List entry, that is used, when we are unregistering the | 1418 | * @unreg_list: List entry when we are unregistering the |
1419 | * device, see the function unregister_netdev | 1419 | * device; see the function unregister_netdev |
1420 | * @close_list: List entry, that is used, when we are closing the device | 1420 | * @close_list: List entry used when we are closing the device |
1421 | * @ptype_all: Device-specific packet handlers for all protocols | 1421 | * @ptype_all: Device-specific packet handlers for all protocols |
1422 | * @ptype_specific: Device-specific, protocol-specific packet handlers | 1422 | * @ptype_specific: Device-specific, protocol-specific packet handlers |
1423 | * | 1423 | * |
@@ -1437,7 +1437,7 @@ enum netdev_priv_flags { | |||
1437 | * @mpls_features: Mask of features inheritable by MPLS | 1437 | * @mpls_features: Mask of features inheritable by MPLS |
1438 | * | 1438 | * |
1439 | * @ifindex: interface index | 1439 | * @ifindex: interface index |
1440 | * @group: The group, that the device belongs to | 1440 | * @group: The group the device belongs to |
1441 | * | 1441 | * |
1442 | * @stats: Statistics struct, which was left as a legacy, use | 1442 | * @stats: Statistics struct, which was left as a legacy, use |
1443 | * rtnl_link_stats64 instead | 1443 | * rtnl_link_stats64 instead |
@@ -1491,7 +1491,7 @@ enum netdev_priv_flags { | |||
1491 | * @dev_port: Used to differentiate devices that share | 1491 | * @dev_port: Used to differentiate devices that share |
1492 | * the same function | 1492 | * the same function |
1493 | * @addr_list_lock: XXX: need comments on this one | 1493 | * @addr_list_lock: XXX: need comments on this one |
1494 | * @uc_promisc: Counter, that indicates, that promiscuous mode | 1494 | * @uc_promisc: Counter that indicates promiscuous mode |
1495 | * has been enabled due to the need to listen to | 1495 | * has been enabled due to the need to listen to |
1496 | * additional unicast addresses in a device that | 1496 | * additional unicast addresses in a device that |
1497 | * does not implement ndo_set_rx_mode() | 1497 | * does not implement ndo_set_rx_mode() |
@@ -1499,9 +1499,9 @@ enum netdev_priv_flags { | |||
1499 | * @mc: multicast mac addresses | 1499 | * @mc: multicast mac addresses |
1500 | * @dev_addrs: list of device hw addresses | 1500 | * @dev_addrs: list of device hw addresses |
1501 | * @queues_kset: Group of all Kobjects in the Tx and RX queues | 1501 | * @queues_kset: Group of all Kobjects in the Tx and RX queues |
1502 | * @promiscuity: Number of times, the NIC is told to work in | 1502 | * @promiscuity: Number of times the NIC is told to work in |
1503 | * Promiscuous mode, if it becomes 0 the NIC will | 1503 | * promiscuous mode; if it becomes 0 the NIC will |
1504 | * exit from working in Promiscuous mode | 1504 | * exit promiscuous mode |
1505 | * @allmulti: Counter, enables or disables allmulticast mode | 1505 | * @allmulti: Counter, enables or disables allmulticast mode |
1506 | * | 1506 | * |
1507 | * @vlan_info: VLAN info | 1507 | * @vlan_info: VLAN info |
@@ -1547,7 +1547,7 @@ enum netdev_priv_flags { | |||
1547 | * | 1547 | * |
1548 | * @trans_start: Time (in jiffies) of last Tx | 1548 | * @trans_start: Time (in jiffies) of last Tx |
1549 | * @watchdog_timeo: Represents the timeout that is used by | 1549 | * @watchdog_timeo: Represents the timeout that is used by |
1550 | * the watchdog ( see dev_watchdog() ) | 1550 | * the watchdog (see dev_watchdog()) |
1551 | * @watchdog_timer: List of timers | 1551 | * @watchdog_timer: List of timers |
1552 | * | 1552 | * |
1553 | * @pcpu_refcnt: Number of references to this device | 1553 | * @pcpu_refcnt: Number of references to this device |
@@ -1664,8 +1664,8 @@ struct net_device { | |||
1664 | atomic_long_t rx_nohandler; | 1664 | atomic_long_t rx_nohandler; |
1665 | 1665 | ||
1666 | #ifdef CONFIG_WIRELESS_EXT | 1666 | #ifdef CONFIG_WIRELESS_EXT |
1667 | const struct iw_handler_def * wireless_handlers; | 1667 | const struct iw_handler_def *wireless_handlers; |
1668 | struct iw_public_data * wireless_data; | 1668 | struct iw_public_data *wireless_data; |
1669 | #endif | 1669 | #endif |
1670 | const struct net_device_ops *netdev_ops; | 1670 | const struct net_device_ops *netdev_ops; |
1671 | const struct ethtool_ops *ethtool_ops; | 1671 | const struct ethtool_ops *ethtool_ops; |
@@ -1718,7 +1718,7 @@ struct net_device { | |||
1718 | unsigned int allmulti; | 1718 | unsigned int allmulti; |
1719 | 1719 | ||
1720 | 1720 | ||
1721 | /* Protocol specific pointers */ | 1721 | /* Protocol-specific pointers */ |
1722 | 1722 | ||
1723 | #if IS_ENABLED(CONFIG_VLAN_8021Q) | 1723 | #if IS_ENABLED(CONFIG_VLAN_8021Q) |
1724 | struct vlan_info __rcu *vlan_info; | 1724 | struct vlan_info __rcu *vlan_info; |
@@ -1748,13 +1748,11 @@ struct net_device { | |||
1748 | /* Interface address info used in eth_type_trans() */ | 1748 | /* Interface address info used in eth_type_trans() */ |
1749 | unsigned char *dev_addr; | 1749 | unsigned char *dev_addr; |
1750 | 1750 | ||
1751 | |||
1752 | #ifdef CONFIG_SYSFS | 1751 | #ifdef CONFIG_SYSFS |
1753 | struct netdev_rx_queue *_rx; | 1752 | struct netdev_rx_queue *_rx; |
1754 | 1753 | ||
1755 | unsigned int num_rx_queues; | 1754 | unsigned int num_rx_queues; |
1756 | unsigned int real_num_rx_queues; | 1755 | unsigned int real_num_rx_queues; |
1757 | |||
1758 | #endif | 1756 | #endif |
1759 | 1757 | ||
1760 | unsigned long gro_flush_timeout; | 1758 | unsigned long gro_flush_timeout; |
@@ -1846,7 +1844,7 @@ struct net_device { | |||
1846 | struct garp_port __rcu *garp_port; | 1844 | struct garp_port __rcu *garp_port; |
1847 | struct mrp_port __rcu *mrp_port; | 1845 | struct mrp_port __rcu *mrp_port; |
1848 | 1846 | ||
1849 | struct device dev; | 1847 | struct device dev; |
1850 | const struct attribute_group *sysfs_groups[4]; | 1848 | const struct attribute_group *sysfs_groups[4]; |
1851 | const struct attribute_group *sysfs_rx_queue_group; | 1849 | const struct attribute_group *sysfs_rx_queue_group; |
1852 | 1850 | ||
@@ -1861,9 +1859,9 @@ struct net_device { | |||
1861 | #ifdef CONFIG_DCB | 1859 | #ifdef CONFIG_DCB |
1862 | const struct dcbnl_rtnl_ops *dcbnl_ops; | 1860 | const struct dcbnl_rtnl_ops *dcbnl_ops; |
1863 | #endif | 1861 | #endif |
1864 | u8 num_tc; | 1862 | u8 num_tc; |
1865 | struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; | 1863 | struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; |
1866 | u8 prio_tc_map[TC_BITMASK + 1]; | 1864 | u8 prio_tc_map[TC_BITMASK + 1]; |
1867 | 1865 | ||
1868 | #if IS_ENABLED(CONFIG_FCOE) | 1866 | #if IS_ENABLED(CONFIG_FCOE) |
1869 | unsigned int fcoe_ddp_xid; | 1867 | unsigned int fcoe_ddp_xid; |
@@ -1871,9 +1869,9 @@ struct net_device { | |||
1871 | #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) | 1869 | #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) |
1872 | struct netprio_map __rcu *priomap; | 1870 | struct netprio_map __rcu *priomap; |
1873 | #endif | 1871 | #endif |
1874 | struct phy_device *phydev; | 1872 | struct phy_device *phydev; |
1875 | struct lock_class_key *qdisc_tx_busylock; | 1873 | struct lock_class_key *qdisc_tx_busylock; |
1876 | bool proto_down; | 1874 | bool proto_down; |
1877 | }; | 1875 | }; |
1878 | #define to_net_dev(d) container_of(d, struct net_device, dev) | 1876 | #define to_net_dev(d) container_of(d, struct net_device, dev) |
1879 | 1877 | ||
@@ -2021,7 +2019,7 @@ static inline void *netdev_priv(const struct net_device *dev) | |||
2021 | 2019 | ||
2022 | /* Set the sysfs device type for the network logical device to allow | 2020 | /* Set the sysfs device type for the network logical device to allow |
2023 | * fine-grained identification of different network device types. For | 2021 | * fine-grained identification of different network device types. For |
2024 | * example Ethernet, Wirelss LAN, Bluetooth, WiMAX etc. | 2022 | * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc. |
2025 | */ | 2023 | */ |
2026 | #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype)) | 2024 | #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype)) |
2027 | 2025 | ||
@@ -2031,22 +2029,22 @@ static inline void *netdev_priv(const struct net_device *dev) | |||
2031 | #define NAPI_POLL_WEIGHT 64 | 2029 | #define NAPI_POLL_WEIGHT 64 |
2032 | 2030 | ||
2033 | /** | 2031 | /** |
2034 | * netif_napi_add - initialize a napi context | 2032 | * netif_napi_add - initialize a NAPI context |
2035 | * @dev: network device | 2033 | * @dev: network device |
2036 | * @napi: napi context | 2034 | * @napi: NAPI context |
2037 | * @poll: polling function | 2035 | * @poll: polling function |
2038 | * @weight: default weight | 2036 | * @weight: default weight |
2039 | * | 2037 | * |
2040 | * netif_napi_add() must be used to initialize a napi context prior to calling | 2038 | * netif_napi_add() must be used to initialize a NAPI context prior to calling |
2041 | * *any* of the other napi related functions. | 2039 | * *any* of the other NAPI-related functions. |
2042 | */ | 2040 | */ |
2043 | void netif_napi_add(struct net_device *dev, struct napi_struct *napi, | 2041 | void netif_napi_add(struct net_device *dev, struct napi_struct *napi, |
2044 | int (*poll)(struct napi_struct *, int), int weight); | 2042 | int (*poll)(struct napi_struct *, int), int weight); |
2045 | 2043 | ||
2046 | /** | 2044 | /** |
2047 | * netif_tx_napi_add - initialize a napi context | 2045 | * netif_tx_napi_add - initialize a NAPI context |
2048 | * @dev: network device | 2046 | * @dev: network device |
2049 | * @napi: napi context | 2047 | * @napi: NAPI context |
2050 | * @poll: polling function | 2048 | * @poll: polling function |
2051 | * @weight: default weight | 2049 | * @weight: default weight |
2052 | * | 2050 | * |
@@ -2064,22 +2062,22 @@ static inline void netif_tx_napi_add(struct net_device *dev, | |||
2064 | } | 2062 | } |
2065 | 2063 | ||
2066 | /** | 2064 | /** |
2067 | * netif_napi_del - remove a napi context | 2065 | * netif_napi_del - remove a NAPI context |
2068 | * @napi: napi context | 2066 | * @napi: NAPI context |
2069 | * | 2067 | * |
2070 | * netif_napi_del() removes a napi context from the network device napi list | 2068 | * netif_napi_del() removes a NAPI context from the network device NAPI list |
2071 | */ | 2069 | */ |
2072 | void netif_napi_del(struct napi_struct *napi); | 2070 | void netif_napi_del(struct napi_struct *napi); |
2073 | 2071 | ||
2074 | struct napi_gro_cb { | 2072 | struct napi_gro_cb { |
2075 | /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */ | 2073 | /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */ |
2076 | void *frag0; | 2074 | void *frag0; |
2077 | 2075 | ||
2078 | /* Length of frag0. */ | 2076 | /* Length of frag0. */ |
2079 | unsigned int frag0_len; | 2077 | unsigned int frag0_len; |
2080 | 2078 | ||
2081 | /* This indicates where we are processing relative to skb->data. */ | 2079 | /* This indicates where we are processing relative to skb->data. */ |
2082 | int data_offset; | 2080 | int data_offset; |
2083 | 2081 | ||
2084 | /* This is non-zero if the packet cannot be merged with the new skb. */ | 2082 | /* This is non-zero if the packet cannot be merged with the new skb. */ |
2085 | u16 flush; | 2083 | u16 flush; |
@@ -2175,7 +2173,7 @@ struct udp_offload { | |||
2175 | struct udp_offload_callbacks callbacks; | 2173 | struct udp_offload_callbacks callbacks; |
2176 | }; | 2174 | }; |
2177 | 2175 | ||
2178 | /* often modified stats are per cpu, other are shared (netdev->stats) */ | 2176 | /* often modified stats are per-CPU, other are shared (netdev->stats) */ |
2179 | struct pcpu_sw_netstats { | 2177 | struct pcpu_sw_netstats { |
2180 | u64 rx_packets; | 2178 | u64 rx_packets; |
2181 | u64 rx_bytes; | 2179 | u64 rx_bytes; |
@@ -2272,7 +2270,7 @@ struct netdev_notifier_changeupper_info { | |||
2272 | struct netdev_notifier_info info; /* must be first */ | 2270 | struct netdev_notifier_info info; /* must be first */ |
2273 | struct net_device *upper_dev; /* new upper dev */ | 2271 | struct net_device *upper_dev; /* new upper dev */ |
2274 | bool master; /* is upper dev master */ | 2272 | bool master; /* is upper dev master */ |
2275 | bool linking; /* is the nofication for link or unlink */ | 2273 | bool linking; /* is the notification for link or unlink */ |
2276 | void *upper_info; /* upper dev info */ | 2274 | void *upper_info; /* upper dev info */ |
2277 | }; | 2275 | }; |
2278 | 2276 | ||
@@ -2737,7 +2735,7 @@ extern int netdev_flow_limit_table_len; | |||
2737 | #endif /* CONFIG_NET_FLOW_LIMIT */ | 2735 | #endif /* CONFIG_NET_FLOW_LIMIT */ |
2738 | 2736 | ||
2739 | /* | 2737 | /* |
2740 | * Incoming packets are placed on per-cpu queues | 2738 | * Incoming packets are placed on per-CPU queues |
2741 | */ | 2739 | */ |
2742 | struct softnet_data { | 2740 | struct softnet_data { |
2743 | struct list_head poll_list; | 2741 | struct list_head poll_list; |
@@ -2907,7 +2905,7 @@ netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue) | |||
2907 | * @dev_queue: pointer to transmit queue | 2905 | * @dev_queue: pointer to transmit queue |
2908 | * | 2906 | * |
2909 | * BQL enabled drivers might use this helper in their ndo_start_xmit(), | 2907 | * BQL enabled drivers might use this helper in their ndo_start_xmit(), |
2910 | * to give appropriate hint to the cpu. | 2908 | * to give appropriate hint to the CPU. |
2911 | */ | 2909 | */ |
2912 | static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue) | 2910 | static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue) |
2913 | { | 2911 | { |
@@ -2921,7 +2919,7 @@ static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_que | |||
2921 | * @dev_queue: pointer to transmit queue | 2919 | * @dev_queue: pointer to transmit queue |
2922 | * | 2920 | * |
2923 | * BQL enabled drivers might use this helper in their TX completion path, | 2921 | * BQL enabled drivers might use this helper in their TX completion path, |
2924 | * to give appropriate hint to the cpu. | 2922 | * to give appropriate hint to the CPU. |
2925 | */ | 2923 | */ |
2926 | static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue) | 2924 | static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue) |
2927 | { | 2925 | { |
@@ -3060,7 +3058,7 @@ static inline bool netif_running(const struct net_device *dev) | |||
3060 | } | 3058 | } |
3061 | 3059 | ||
3062 | /* | 3060 | /* |
3063 | * Routines to manage the subqueues on a device. We only need start | 3061 | * Routines to manage the subqueues on a device. We only need start, |
3064 | * stop, and a check if it's stopped. All other device management is | 3062 | * stop, and a check if it's stopped. All other device management is |
3065 | * done at the overall netdevice level. | 3063 | * done at the overall netdevice level. |
3066 | * Also test the device if we're multiqueue. | 3064 | * Also test the device if we're multiqueue. |
@@ -3344,7 +3342,6 @@ void netif_carrier_off(struct net_device *dev); | |||
3344 | * in a "pending" state, waiting for some external event. For "on- | 3342 | * in a "pending" state, waiting for some external event. For "on- |
3345 | * demand" interfaces, this new state identifies the situation where the | 3343 | * demand" interfaces, this new state identifies the situation where the |
3346 | * interface is waiting for events to place it in the up state. | 3344 | * interface is waiting for events to place it in the up state. |
3347 | * | ||
3348 | */ | 3345 | */ |
3349 | static inline void netif_dormant_on(struct net_device *dev) | 3346 | static inline void netif_dormant_on(struct net_device *dev) |
3350 | { | 3347 | { |
@@ -3679,7 +3676,7 @@ void dev_uc_init(struct net_device *dev); | |||
3679 | * | 3676 | * |
3680 | * Add newly added addresses to the interface, and release | 3677 | * Add newly added addresses to the interface, and release |
3681 | * addresses that have been deleted. | 3678 | * addresses that have been deleted. |
3682 | **/ | 3679 | */ |
3683 | static inline int __dev_uc_sync(struct net_device *dev, | 3680 | static inline int __dev_uc_sync(struct net_device *dev, |
3684 | int (*sync)(struct net_device *, | 3681 | int (*sync)(struct net_device *, |
3685 | const unsigned char *), | 3682 | const unsigned char *), |
@@ -3695,7 +3692,7 @@ static inline int __dev_uc_sync(struct net_device *dev, | |||
3695 | * @unsync: function to call if address should be removed | 3692 | * @unsync: function to call if address should be removed |
3696 | * | 3693 | * |
3697 | * Remove all addresses that were added to the device by dev_uc_sync(). | 3694 | * Remove all addresses that were added to the device by dev_uc_sync(). |
3698 | **/ | 3695 | */ |
3699 | static inline void __dev_uc_unsync(struct net_device *dev, | 3696 | static inline void __dev_uc_unsync(struct net_device *dev, |
3700 | int (*unsync)(struct net_device *, | 3697 | int (*unsync)(struct net_device *, |
3701 | const unsigned char *)) | 3698 | const unsigned char *)) |
@@ -3723,7 +3720,7 @@ void dev_mc_init(struct net_device *dev); | |||
3723 | * | 3720 | * |
3724 | * Add newly added addresses to the interface, and release | 3721 | * Add newly added addresses to the interface, and release |
3725 | * addresses that have been deleted. | 3722 | * addresses that have been deleted. |
3726 | **/ | 3723 | */ |
3727 | static inline int __dev_mc_sync(struct net_device *dev, | 3724 | static inline int __dev_mc_sync(struct net_device *dev, |
3728 | int (*sync)(struct net_device *, | 3725 | int (*sync)(struct net_device *, |
3729 | const unsigned char *), | 3726 | const unsigned char *), |
@@ -3739,7 +3736,7 @@ static inline int __dev_mc_sync(struct net_device *dev, | |||
3739 | * @unsync: function to call if address should be removed | 3736 | * @unsync: function to call if address should be removed |
3740 | * | 3737 | * |
3741 | * Remove all addresses that were added to the device by dev_mc_sync(). | 3738 | * Remove all addresses that were added to the device by dev_mc_sync(). |
3742 | **/ | 3739 | */ |
3743 | static inline void __dev_mc_unsync(struct net_device *dev, | 3740 | static inline void __dev_mc_unsync(struct net_device *dev, |
3744 | int (*unsync)(struct net_device *, | 3741 | int (*unsync)(struct net_device *, |
3745 | const unsigned char *)) | 3742 | const unsigned char *)) |