Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/can/dev.h          1
-rw-r--r--  include/linux/can/rx-offload.h   7
-rw-r--r--  include/linux/dma-direct.h       2
-rw-r--r--  include/linux/hid.h             28
-rw-r--r--  include/linux/net_dim.h          2
-rw-r--r--  include/linux/skbuff.h          18
-rw-r--r--  include/linux/tcp.h              1
-rw-r--r--  include/linux/usb/quirks.h       3
-rw-r--r--  include/linux/xarray.h         267
9 files changed, 234 insertions, 95 deletions
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index a83e1f632eb7..f01623aef2f7 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -169,6 +169,7 @@ void can_change_state(struct net_device *dev, struct can_frame *cf,
 
 void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
                       unsigned int idx);
+struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr);
 unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
 void can_free_echo_skb(struct net_device *dev, unsigned int idx);
 
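The new __can_get_echo_skb() helper splits the echo-skb lookup out of can_get_echo_skb(), so callers such as the rx-offload code below can fetch the buffer without immediately handing it to the stack. As an illustration only (not necessarily the exact in-tree body), the existing wrapper can be layered on the helper like this:

    unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
    {
            struct sk_buff *skb;
            u8 len;

            /* Fetch the echoed frame and its payload length for TX stats. */
            skb = __can_get_echo_skb(dev, idx, &len);
            if (!skb)
                    return 0;

            /* Hand the echo frame to the networking stack as before. */
            netif_rx(skb);

            return len;
    }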
diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h
index cb31683bbe15..8268811a697e 100644
--- a/include/linux/can/rx-offload.h
+++ b/include/linux/can/rx-offload.h
@@ -41,7 +41,12 @@ int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *
 int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight);
 int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 reg);
 int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload);
-int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb);
+int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
+                                struct sk_buff *skb, u32 timestamp);
+unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
+                                         unsigned int idx, u32 timestamp);
+int can_rx_offload_queue_tail(struct can_rx_offload *offload,
+                              struct sk_buff *skb);
 void can_rx_offload_reset(struct can_rx_offload *offload);
 void can_rx_offload_del(struct can_rx_offload *offload);
 void can_rx_offload_enable(struct can_rx_offload *offload);
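can_rx_offload_queue_sorted() and can_rx_offload_get_echo_skb() replace the old can_rx_offload_irq_queue_err_skb() entry point and let a driver feed TX-complete and error frames into the timestamp-sorted NAPI queue. A hypothetical TX-completion handler might use them as sketched below; foo_priv, the mailbox index and the timestamp source are placeholders, not part of this header:

    static void foo_can_tx_done(struct net_device *ndev, struct foo_priv *priv,
                                unsigned int mb, u32 timestamp)
    {
            /* Returns the echoed frame's length and queues it in timestamp order. */
            ndev->stats.tx_bytes += can_rx_offload_get_echo_skb(&priv->offload,
                                                                mb, timestamp);
            ndev->stats.tx_packets++;
    }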
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index bd73e7a91410..9e66bfe369aa 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -5,7 +5,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/mem_encrypt.h>
 
-#define DIRECT_MAPPING_ERROR		0
+#define DIRECT_MAPPING_ERROR		(~(dma_addr_t)0)
 
 #ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
 #include <asm/dma-direct.h>
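Switching DIRECT_MAPPING_ERROR from 0 to all-ones matters because bus address 0 can be a valid mapping on some platforms, while an all-ones dma_addr_t cannot. The error test in a dma-direct backend stays a plain comparison against the sentinel, roughly (illustrative helper name, not an in-tree symbol):

    static inline bool dma_addr_is_direct_mapping_error(dma_addr_t dma_addr)
    {
            /* All-ones is never a valid direct-mapped bus address. */
            return dma_addr == DIRECT_MAPPING_ERROR;
    }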
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 387c70df6f29..a355d61940f2 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -1139,34 +1139,6 @@ static inline u32 hid_report_len(struct hid_report *report)
 int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
 		int interrupt);
 
-
-/**
- * struct hid_scroll_counter - Utility class for processing high-resolution
- *                             scroll events.
- * @dev: the input device for which events should be reported.
- * @microns_per_hi_res_unit: the amount moved by the user's finger for each
- *                           high-resolution unit reported by the mouse, in
- *                           microns.
- * @resolution_multiplier: the wheel's resolution in high-resolution mode as a
- *                         multiple of its lower resolution. For example, if
- *                         moving the wheel by one "notch" would result in a
- *                         value of 1 in low-resolution mode but 8 in
- *                         high-resolution, the multiplier is 8.
- * @remainder: counts the number of high-resolution units moved since the last
- *             low-resolution event (REL_WHEEL or REL_HWHEEL) was sent. Should
- *             only be used by class methods.
- */
-struct hid_scroll_counter {
-	struct input_dev *dev;
-	int microns_per_hi_res_unit;
-	int resolution_multiplier;
-
-	int remainder;
-};
-
-void hid_scroll_counter_handle_scroll(struct hid_scroll_counter *counter,
-				      int hi_res_value);
-
 /* HID quirks API */
 unsigned long hid_lookup_quirk(const struct hid_device *hdev);
 int hid_quirks_init(char **quirks_param, __u16 bus, int count);
diff --git a/include/linux/net_dim.h b/include/linux/net_dim.h
index c79e859408e6..fd458389f7d1 100644
--- a/include/linux/net_dim.h
+++ b/include/linux/net_dim.h
@@ -406,6 +406,8 @@ static inline void net_dim(struct net_dim *dim,
 		}
 		/* fall through */
 	case NET_DIM_START_MEASURE:
+		net_dim_sample(end_sample.event_ctr, end_sample.pkt_ctr, end_sample.byte_ctr,
+			       &dim->start_sample);
 		dim->state = NET_DIM_MEASURE_IN_PROGRESS;
 		break;
 	case NET_DIM_APPLY_NEW_PROFILE:
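The added net_dim_sample() call re-primes dim->start_sample when a new measurement window begins, so the next profile decision compares against current counters rather than stale ones. For context, a driver typically drives this state machine from its poll path along these lines (the ring fields are placeholders, a sketch only):

    struct net_dim_sample sample;

    /* Snapshot the ring counters; net_dim() decides whether to start measuring,
     * keep measuring, or apply a new moderation profile. */
    net_dim_sample(ring->intr_events, ring->rx_packets, ring->rx_bytes, &sample);
    net_dim(&ring->dim, sample);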
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0ba687454267..0d1b2c3f127b 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1326,6 +1326,22 @@ static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg)
 	}
 }
 
+static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val)
+{
+	skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL);
+	skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
+}
+
+static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb)
+{
+	return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL;
+}
+
+static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb)
+{
+	return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL);
+}
+
 /* Release a reference on a zerocopy structure */
 static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
 {
@@ -1335,7 +1351,7 @@ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
 	if (uarg->callback == sock_zerocopy_callback) {
 		uarg->zerocopy = uarg->zerocopy && zerocopy;
 		sock_zerocopy_put(uarg);
-	} else {
+	} else if (!skb_zcopy_is_nouarg(skb)) {
 		uarg->callback(uarg, zerocopy);
 	}
 
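The nouarg helpers above tag bit 0 of shinfo->destructor_arg so a caller can stash an arbitrary pointer-sized cookie in the zerocopy slot without a real ubuf_info; skb_zcopy_clear() then skips the callback for such skbs. The underlying trick is ordinary low-bit pointer tagging, shown standalone here (plain userspace C, not kernel code):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            void *val = (void *)0x1000;     /* caller cookie; bit 0 must be clear */
            /* what skb_zcopy_set_nouarg() stores in destructor_arg */
            void *slot = (void *)((uintptr_t)val | 0x1UL);

            assert(((uintptr_t)slot & 0x1UL) != 0);            /* is_nouarg test */
            assert((void *)((uintptr_t)slot & ~0x1UL) == val); /* get_nouarg result */
            return 0;
    }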
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 8ed77bb4ed86..a9b0280687d5 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -196,6 +196,7 @@ struct tcp_sock {
 	u32	rcv_tstamp;	/* timestamp of last received ACK (for keepalives) */
 	u32	lsndtime;	/* timestamp of last sent data packet (for restart window) */
 	u32	last_oow_ack_time;  /* timestamp of last out-of-window ACK */
+	u32	compressed_ack_rcv_nxt;
 
 	u32	tsoffset;	/* timestamp offset */
 
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index b7a99ce56bc9..a1be64c9940f 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -66,4 +66,7 @@
 /* Device needs a pause after every control message. */
 #define USB_QUIRK_DELAY_CTRL_MSG		BIT(13)
 
+/* Hub needs extra delay after resetting its port. */
+#define USB_QUIRK_HUB_SLOW_RESET		BIT(14)
+
 #endif /* __LINUX_USB_QUIRKS_H */
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index d9514928ddac..564892e19f8c 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -289,9 +289,7 @@ struct xarray {
 void xa_init_flags(struct xarray *, gfp_t flags);
 void *xa_load(struct xarray *, unsigned long index);
 void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
-void *xa_cmpxchg(struct xarray *, unsigned long index,
-			void *old, void *entry, gfp_t);
-int xa_reserve(struct xarray *, unsigned long index, gfp_t);
+void *xa_erase(struct xarray *, unsigned long index);
 void *xa_store_range(struct xarray *, unsigned long first, unsigned long last,
 			void *entry, gfp_t);
 bool xa_get_mark(struct xarray *, unsigned long index, xa_mark_t);
@@ -344,65 +342,6 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
 }
 
 /**
- * xa_erase() - Erase this entry from the XArray.
- * @xa: XArray.
- * @index: Index of entry.
- *
- * This function is the equivalent of calling xa_store() with %NULL as
- * the third argument. The XArray does not need to allocate memory, so
- * the user does not need to provide GFP flags.
- *
- * Context: Process context. Takes and releases the xa_lock.
- * Return: The entry which used to be at this index.
- */
-static inline void *xa_erase(struct xarray *xa, unsigned long index)
-{
-	return xa_store(xa, index, NULL, 0);
-}
-
-/**
- * xa_insert() - Store this entry in the XArray unless another entry is
- * already present.
- * @xa: XArray.
- * @index: Index into array.
- * @entry: New entry.
- * @gfp: Memory allocation flags.
- *
- * If you would rather see the existing entry in the array, use xa_cmpxchg().
- * This function is for users who don't care what the entry is, only that
- * one is present.
- *
- * Context: Process context. Takes and releases the xa_lock.
- *	    May sleep if the @gfp flags permit.
- * Return: 0 if the store succeeded. -EEXIST if another entry was present.
- * -ENOMEM if memory could not be allocated.
- */
-static inline int xa_insert(struct xarray *xa, unsigned long index,
-		void *entry, gfp_t gfp)
-{
-	void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp);
-	if (!curr)
-		return 0;
-	if (xa_is_err(curr))
-		return xa_err(curr);
-	return -EEXIST;
-}
-
-/**
- * xa_release() - Release a reserved entry.
- * @xa: XArray.
- * @index: Index of entry.
- *
- * After calling xa_reserve(), you can call this function to release the
- * reservation. If the entry at @index has been stored to, this function
- * will do nothing.
- */
-static inline void xa_release(struct xarray *xa, unsigned long index)
-{
-	xa_cmpxchg(xa, index, NULL, NULL, 0);
-}
-
-/**
  * xa_for_each() - Iterate over a portion of an XArray.
  * @xa: XArray.
  * @entry: Entry retrieved from array.
@@ -455,6 +394,7 @@ void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
 void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
 		void *entry, gfp_t);
 int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t);
+int __xa_reserve(struct xarray *, unsigned long index, gfp_t);
 void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
 void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
 
@@ -487,6 +427,58 @@ static inline int __xa_insert(struct xarray *xa, unsigned long index,
 }
 
 /**
+ * xa_store_bh() - Store this entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * This function is like calling xa_store() except it disables softirqs
+ * while holding the array lock.
+ *
+ * Context: Any context. Takes and releases the xa_lock while
+ * disabling softirqs.
+ * Return: The entry which used to be at this index.
+ */
+static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
+		void *entry, gfp_t gfp)
+{
+	void *curr;
+
+	xa_lock_bh(xa);
+	curr = __xa_store(xa, index, entry, gfp);
+	xa_unlock_bh(xa);
+
+	return curr;
+}
+
+/**
+ * xa_store_irq() - Store this entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * This function is like calling xa_store() except it disables interrupts
+ * while holding the array lock.
+ *
+ * Context: Process context. Takes and releases the xa_lock while
+ * disabling interrupts.
+ * Return: The entry which used to be at this index.
+ */
+static inline void *xa_store_irq(struct xarray *xa, unsigned long index,
+		void *entry, gfp_t gfp)
+{
+	void *curr;
+
+	xa_lock_irq(xa);
+	curr = __xa_store(xa, index, entry, gfp);
+	xa_unlock_irq(xa);
+
+	return curr;
+}
+
+/**
  * xa_erase_bh() - Erase this entry from the XArray.
  * @xa: XArray.
  * @index: Index of entry.
@@ -495,7 +487,7 @@ static inline int __xa_insert(struct xarray *xa, unsigned long index,
  * the third argument. The XArray does not need to allocate memory, so
  * the user does not need to provide GFP flags.
  *
- * Context: Process context. Takes and releases the xa_lock while
+ * Context: Any context. Takes and releases the xa_lock while
  * disabling softirqs.
  * Return: The entry which used to be at this index.
  */
@@ -535,6 +527,61 @@ static inline void *xa_erase_irq(struct xarray *xa, unsigned long index)
 }
 
 /**
+ * xa_cmpxchg() - Conditionally replace an entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @old: Old value to test against.
+ * @entry: New value to place in array.
+ * @gfp: Memory allocation flags.
+ *
+ * If the entry at @index is the same as @old, replace it with @entry.
+ * If the return value is equal to @old, then the exchange was successful.
+ *
+ * Context: Any context. Takes and releases the xa_lock. May sleep
+ * if the @gfp flags permit.
+ * Return: The old value at this index or xa_err() if an error happened.
+ */
+static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index,
+			void *old, void *entry, gfp_t gfp)
+{
+	void *curr;
+
+	xa_lock(xa);
+	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
+	xa_unlock(xa);
+
+	return curr;
+}
+
+/**
+ * xa_insert() - Store this entry in the XArray unless another entry is
+ * already present.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * If you would rather see the existing entry in the array, use xa_cmpxchg().
+ * This function is for users who don't care what the entry is, only that
+ * one is present.
+ *
+ * Context: Process context. Takes and releases the xa_lock.
+ *	    May sleep if the @gfp flags permit.
+ * Return: 0 if the store succeeded. -EEXIST if another entry was present.
+ * -ENOMEM if memory could not be allocated.
+ */
+static inline int xa_insert(struct xarray *xa, unsigned long index,
+		void *entry, gfp_t gfp)
+{
+	void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp);
+	if (!curr)
+		return 0;
+	if (xa_is_err(curr))
+		return xa_err(curr);
+	return -EEXIST;
+}
+
+/**
  * xa_alloc() - Find somewhere to store this entry in the XArray.
  * @xa: XArray.
  * @id: Pointer to ID.
@@ -575,7 +622,7 @@ static inline int xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry,
  * Updates the @id pointer with the index, then stores the entry at that
  * index. A concurrent lookup will not see an uninitialised @id.
  *
- * Context: Process context. Takes and releases the xa_lock while
+ * Context: Any context. Takes and releases the xa_lock while
  * disabling softirqs. May sleep if the @gfp flags permit.
  * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if
  * there is no more space in the XArray.
@@ -621,6 +668,98 @@ static inline int xa_alloc_irq(struct xarray *xa, u32 *id, u32 max, void *entry,
 	return err;
 }
 
+/**
+ * xa_reserve() - Reserve this index in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @gfp: Memory allocation flags.
+ *
+ * Ensures there is somewhere to store an entry at @index in the array.
+ * If there is already something stored at @index, this function does
+ * nothing. If there was nothing there, the entry is marked as reserved.
+ * Loading from a reserved entry returns a %NULL pointer.
+ *
+ * If you do not use the entry that you have reserved, call xa_release()
+ * or xa_erase() to free any unnecessary memory.
+ *
+ * Context: Any context. Takes and releases the xa_lock.
+ * May sleep if the @gfp flags permit.
+ * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
+ */
+static inline
+int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
+{
+	int ret;
+
+	xa_lock(xa);
+	ret = __xa_reserve(xa, index, gfp);
+	xa_unlock(xa);
+
+	return ret;
+}
+
+/**
+ * xa_reserve_bh() - Reserve this index in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @gfp: Memory allocation flags.
+ *
+ * A softirq-disabling version of xa_reserve().
+ *
+ * Context: Any context. Takes and releases the xa_lock while
+ * disabling softirqs.
+ * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
+ */
+static inline
+int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp)
+{
+	int ret;
+
+	xa_lock_bh(xa);
+	ret = __xa_reserve(xa, index, gfp);
+	xa_unlock_bh(xa);
+
+	return ret;
+}
+
+/**
+ * xa_reserve_irq() - Reserve this index in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @gfp: Memory allocation flags.
+ *
+ * An interrupt-disabling version of xa_reserve().
+ *
+ * Context: Process context. Takes and releases the xa_lock while
+ * disabling interrupts.
+ * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
+ */
+static inline
+int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp)
+{
+	int ret;
+
+	xa_lock_irq(xa);
+	ret = __xa_reserve(xa, index, gfp);
+	xa_unlock_irq(xa);
+
+	return ret;
+}
+
+/**
+ * xa_release() - Release a reserved entry.
+ * @xa: XArray.
+ * @index: Index of entry.
+ *
+ * After calling xa_reserve(), you can call this function to release the
+ * reservation. If the entry at @index has been stored to, this function
+ * will do nothing.
+ */
+static inline void xa_release(struct xarray *xa, unsigned long index)
+{
+	xa_cmpxchg(xa, index, NULL, NULL, 0);
+}
+
 /* Everything below here is the Advanced API. Proceed with caution. */
 
 /*
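Taken together, the xarray.h changes make xa_erase() an out-of-line function, turn xa_cmpxchg() and xa_insert() into inline wrappers around __xa_cmpxchg(), and rebuild the reservation API on __xa_reserve() with _bh/_irq variants. A typical reserve-then-publish sequence with the relocated API, sketched for a hypothetical struct foo in process context:

    static DEFINE_XARRAY(foo_array);

    static int foo_publish(unsigned long index, struct foo *new)
    {
            int err;

            /* Pre-allocate the slot so the later store cannot fail on memory. */
            err = xa_reserve(&foo_array, index, GFP_KERNEL);
            if (err)
                    return err;

            /* Publish unless someone beat us to it; drop the reservation if so. */
            err = xa_insert(&foo_array, index, new, GFP_KERNEL);
            if (err == -EEXIST)
                    xa_release(&foo_array, index);

            return err;
    }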