-rw-r--r--  include/linux/skbuff.h    | 17
-rw-r--r--  include/net/sch_generic.h | 24
2 files changed, 21 insertions, 20 deletions
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 1ebf1ea29d60..3411f22e7d16 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -853,9 +853,9 @@ static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
  * The reference count is not incremented and the reference is therefore
  * volatile. Use with caution.
  */
-static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
+static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
 {
-	struct sk_buff *list = ((struct sk_buff *)list_)->next;
+	struct sk_buff *list = ((const struct sk_buff *)list_)->next;
 	if (list == (struct sk_buff *)list_)
 		list = NULL;
 	return list;
@@ -874,9 +874,9 @@ static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
  * The reference count is not incremented and the reference is therefore
  * volatile. Use with caution.
  */
-static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
+static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
 {
-	struct sk_buff *list = ((struct sk_buff *)list_)->prev;
+	struct sk_buff *list = ((const struct sk_buff *)list_)->prev;
 	if (list == (struct sk_buff *)list_)
 		list = NULL;
 	return list;
@@ -1830,7 +1830,7 @@ static inline dma_addr_t skb_frag_dma_map(struct device *dev,
  * Returns true if modifying the header part of the cloned buffer
  * does not requires the data to be copied.
  */
-static inline int skb_clone_writable(struct sk_buff *skb, unsigned int len)
+static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
 {
 	return !skb_header_cloned(skb) &&
 	       skb_headroom(skb) + len <= skb->hdr_len;
@@ -2451,7 +2451,8 @@ static inline bool skb_warn_if_lro(const struct sk_buff *skb)
 {
 	/* LRO sets gso_size but not gso_type, whereas if GSO is really
 	 * wanted then gso_type will be set. */
-	struct skb_shared_info *shinfo = skb_shinfo(skb);
+	const struct skb_shared_info *shinfo = skb_shinfo(skb);
+
 	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
 	    unlikely(shinfo->gso_type == 0)) {
 		__skb_warn_lro_forwarding(skb);
@@ -2475,7 +2476,7 @@ static inline void skb_forward_csum(struct sk_buff *skb)
  * Instead of forcing ip_summed to CHECKSUM_NONE, we can
  * use this helper, to document places where we make this assertion.
  */
-static inline void skb_checksum_none_assert(struct sk_buff *skb)
+static inline void skb_checksum_none_assert(const struct sk_buff *skb)
 {
 #ifdef DEBUG
 	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
@@ -2484,7 +2485,7 @@ static inline void skb_checksum_none_assert(struct sk_buff *skb)
 
 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
 
-static inline bool skb_is_recycleable(struct sk_buff *skb, int skb_size)
+static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
 {
 	if (irqs_disabled())
 		return false;
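
Note on the skbuff.h hunks above: constifying skb_peek(), skb_peek_tail(), skb_clone_writable(), skb_warn_if_lro(), skb_checksum_none_assert() and skb_is_recycleable() lets read-only callers keep const-qualified pointers without casting. A minimal sketch of such a caller (hypothetical helper, not part of this patch):

/* Hypothetical read-only helper; assumes the constified signatures above
 * and that <linux/skbuff.h> is included. */
static inline bool queue_head_clone_writable(const struct sk_buff_head *list,
					     unsigned int len)
{
	const struct sk_buff *skb = skb_peek(list);	/* now accepts const */

	return skb && skb_clone_writable(skb, len);	/* now accepts const */
}
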
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 4fc88f3ccd5f..2eb207ea4eaf 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -46,14 +46,14 @@ struct qdisc_size_table {
 struct Qdisc {
 	int			(*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
 	struct sk_buff *	(*dequeue)(struct Qdisc *dev);
-	unsigned		flags;
+	unsigned int		flags;
 #define TCQ_F_BUILTIN		1
 #define TCQ_F_INGRESS		2
 #define TCQ_F_CAN_BYPASS	4
 #define TCQ_F_MQROOT		8
 #define TCQ_F_WARN_NONWC	(1 << 16)
 	int			padded;
-	struct Qdisc_ops	*ops;
+	const struct Qdisc_ops	*ops;
 	struct qdisc_size_table	__rcu *stab;
 	struct list_head	list;
 	u32			handle;
@@ -224,7 +224,7 @@ struct qdisc_skb_cb {
 	long			data[];
 };
 
-static inline int qdisc_qlen(struct Qdisc *q)
+static inline int qdisc_qlen(const struct Qdisc *q)
 {
 	return q->q.qlen;
 }
@@ -239,12 +239,12 @@ static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
 	return &qdisc->q.lock;
 }
 
-static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
+static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
 {
 	return qdisc->dev_queue->qdisc;
 }
 
-static inline struct Qdisc *qdisc_root_sleeping(struct Qdisc *qdisc)
+static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
 {
 	return qdisc->dev_queue->qdisc_sleeping;
 }
@@ -260,7 +260,7 @@ static inline struct Qdisc *qdisc_root_sleeping(struct Qdisc *qdisc)
  * root. This is enforced by holding the RTNL semaphore, which
  * all users of this lock accessor must do.
  */
-static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
+static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
 {
 	struct Qdisc *root = qdisc_root(qdisc);
 
@@ -268,7 +268,7 @@ static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
 	return qdisc_lock(root);
 }
 
-static inline spinlock_t *qdisc_root_sleeping_lock(struct Qdisc *qdisc)
+static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
 {
 	struct Qdisc *root = qdisc_root_sleeping(qdisc);
 
@@ -276,17 +276,17 @@ static inline spinlock_t *qdisc_root_sleeping_lock(struct Qdisc *qdisc)
 	return qdisc_lock(root);
 }
 
-static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
+static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
 {
 	return qdisc->dev_queue->dev;
 }
 
-static inline void sch_tree_lock(struct Qdisc *q)
+static inline void sch_tree_lock(const struct Qdisc *q)
 {
 	spin_lock_bh(qdisc_root_sleeping_lock(q));
 }
 
-static inline void sch_tree_unlock(struct Qdisc *q)
+static inline void sch_tree_unlock(const struct Qdisc *q)
 {
 	spin_unlock_bh(qdisc_root_sleeping_lock(q));
 }
@@ -319,7 +319,7 @@ static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
 }
 
 static inline struct Qdisc_class_common *
-qdisc_class_find(struct Qdisc_class_hash *hash, u32 id)
+qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
 {
 	struct Qdisc_class_common *cl;
 	struct hlist_node *n;
@@ -393,7 +393,7 @@ static inline bool qdisc_all_tx_empty(const struct net_device *dev)
 }
 
 /* Are any of the TX qdiscs changing?  */
-static inline bool qdisc_tx_changing(struct net_device *dev)
+static inline bool qdisc_tx_changing(const struct net_device *dev)
 {
 	unsigned int i;
 	for (i = 0; i < dev->num_tx_queues; i++) {
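
Similarly for the sch_generic.h hunks: accessors such as qdisc_qlen(), qdisc_dev() and the root/lock helpers now take const struct Qdisc pointers, so inspection-only paths can stay const-clean. A minimal sketch (hypothetical helper, not part of this patch):

/* Hypothetical reporting sketch; assumes the constified qdisc helpers above
 * and that <net/sch_generic.h> is included. */
static inline void qdisc_report(const struct Qdisc *q)
{
	pr_info("%s: qlen=%d\n", qdisc_dev(q)->name, qdisc_qlen(q));
}
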