Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/acpi.h            1
-rw-r--r--  include/linux/clk-provider.h    2
-rw-r--r--  include/linux/hyperv.h          7
-rw-r--r--  include/linux/io.h             22
-rw-r--r--  include/linux/iomap.h          17
-rw-r--r--  include/linux/ipv6.h           17
-rw-r--r--  include/linux/kconfig.h         5
-rw-r--r--  include/linux/mlx4/device.h     3
-rw-r--r--  include/linux/mlx5/driver.h    16
-rw-r--r--  include/linux/mm.h              4
-rw-r--r--  include/linux/mmzone.h         30
-rw-r--r--  include/linux/mtd/nand.h        2
-rw-r--r--  include/linux/netdevice.h      41
-rw-r--r--  include/linux/perf_event.h      1
-rw-r--r--  include/linux/qed/qed_if.h      1
-rw-r--r--  include/linux/qed/qede_roce.h   2
-rw-r--r--  include/linux/regmap.h         11
-rw-r--r--  include/linux/skbuff.h          1
18 files changed, 110 insertions(+), 73 deletions(-)
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index ddbeda6dbdc8..689a8b9b9c8f 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -326,6 +326,7 @@ struct pci_dev;
 int acpi_pci_irq_enable (struct pci_dev *dev);
 void acpi_penalize_isa_irq(int irq, int active);
 bool acpi_isa_irq_available(int irq);
+void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
 void acpi_pci_irq_disable (struct pci_dev *dev);
 
 extern int ec_read(u8 addr, u8 *val);
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index af596381fa0f..a428aec36ace 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -785,7 +785,7 @@ extern struct of_device_id __clk_of_table;
  * routines, one at of_clk_init(), and one at platform device probe
  */
 #define CLK_OF_DECLARE_DRIVER(name, compat, fn) \
-	static void name##_of_clk_init_driver(struct device_node *np) \
+	static void __init name##_of_clk_init_driver(struct device_node *np) \
 	{ \
 		of_node_clear_flag(np, OF_POPULATED); \
 		fn(np); \
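
The __init annotation matters because the generated function only runs during early boot, so it can be discarded afterwards. As a rough sketch of how a provider uses the macro (the foo names and "vendor,foo-clock" compatible string are hypothetical):

    /* Registers early clocks, then clears OF_POPULATED via the macro
     * body so the platform device can still be probed later. */
    static void __init foo_clk_init(struct device_node *np)
    {
    	/* register fixed/critical clocks needed before device probe */
    }
    CLK_OF_DECLARE_DRIVER(foo_clk, "vendor,foo-clock", foo_clk_init);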
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 6824556d37ed..cd184bdca58f 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1169,13 +1169,6 @@ int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
 				      const char *mod_name);
 void vmbus_driver_unregister(struct hv_driver *hv_driver);
 
-static inline const char *vmbus_dev_name(const struct hv_device *device_obj)
-{
-	const struct kobject *kobj = &device_obj->device.kobj;
-
-	return kobj->name;
-}
-
 void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);
 
 int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
diff --git a/include/linux/io.h b/include/linux/io.h
index e2c8419278c1..82ef36eac8a1 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -141,4 +141,26 @@ enum {
 void *memremap(resource_size_t offset, size_t size, unsigned long flags);
 void memunmap(void *addr);
 
+/*
+ * On x86 PAT systems we have memory tracking that keeps track of
+ * the allowed mappings on memory ranges. This tracking works for
+ * all the in-kernel mapping APIs (ioremap*), but where the user
+ * wishes to map a range from a physical device into user memory
+ * the tracking won't be updated. This API is to be used by
+ * drivers which remap physical device pages into userspace,
+ * and wants to make sure they are mapped WC and not UC.
+ */
+#ifndef arch_io_reserve_memtype_wc
+static inline int arch_io_reserve_memtype_wc(resource_size_t base,
+					     resource_size_t size)
+{
+	return 0;
+}
+
+static inline void arch_io_free_memtype_wc(resource_size_t base,
+					   resource_size_t size)
+{
+}
+#endif
+
 #endif /* _LINUX_IO_H */
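
For context, a minimal sketch of how a driver that remaps device memory (say, a VRAM BAR) into userspace might use the new hooks; the foo_* names are hypothetical and error handling is trimmed. On architectures without PAT-style tracking the reserve call is a no-op returning 0:

    static int foo_map_vram(resource_size_t base, resource_size_t size)
    {
    	int ret;
    
    	/* tell the memtype tracker these pages will be mapped WC */
    	ret = arch_io_reserve_memtype_wc(base, size);
    	if (ret)
    		return ret;
    	/* ... set up the WC userspace mapping here ... */
    	return 0;
    }
    
    static void foo_unmap_vram(resource_size_t base, resource_size_t size)
    {
    	arch_io_free_memtype_wc(base, size);	/* release the reservation */
    }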
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index e63e288dee83..7892f55a1866 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -19,11 +19,15 @@ struct vm_fault;
 #define IOMAP_UNWRITTEN	0x04	/* blocks allocated @blkno in unwritten state */
 
 /*
- * Flags for iomap mappings:
+ * Flags for all iomap mappings:
  */
-#define IOMAP_F_MERGED	0x01	/* contains multiple blocks/extents */
-#define IOMAP_F_SHARED	0x02	/* block shared with another file */
-#define IOMAP_F_NEW	0x04	/* blocks have been newly allocated */
+#define IOMAP_F_NEW	0x01	/* blocks have been newly allocated */
+
+/*
+ * Flags that only need to be reported for IOMAP_REPORT requests:
+ */
+#define IOMAP_F_MERGED	0x10	/* contains multiple blocks/extents */
+#define IOMAP_F_SHARED	0x20	/* block shared with another file */
 
 /*
  * Magic value for blkno:
@@ -42,8 +46,9 @@ struct iomap {
 /*
  * Flags for iomap_begin / iomap_end. No flag implies a read.
  */
-#define IOMAP_WRITE		(1 << 0)
-#define IOMAP_ZERO		(1 << 1)
+#define IOMAP_WRITE		(1 << 0) /* writing, must allocate blocks */
+#define IOMAP_ZERO		(1 << 1) /* zeroing operation, may skip holes */
+#define IOMAP_REPORT		(1 << 2) /* report extent status, e.g. FIEMAP */
 
 struct iomap_ops {
 	/*
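
A hedged sketch of how a filesystem's ->iomap_begin might act on the split flag namespaces; the foo_* helpers are hypothetical:

    static int foo_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
    			   unsigned flags, struct iomap *iomap)
    {
    	if (flags & IOMAP_REPORT) {
    		/* FIEMAP-style query: report-only flags are meaningful */
    		if (foo_extent_is_shared(inode, pos))
    			iomap->flags |= IOMAP_F_SHARED;
    		return 0;
    	}
    	if (flags & IOMAP_WRITE) {
    		/* allocate blocks and flag them as freshly allocated */
    		iomap->flags |= IOMAP_F_NEW;
    	}
    	return 0;
    }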
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 7e9a789be5e0..ca1ad9ebbc92 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -123,12 +123,12 @@ struct inet6_skb_parm {
 };
 
 #if defined(CONFIG_NET_L3_MASTER_DEV)
-static inline bool skb_l3mdev_slave(__u16 flags)
+static inline bool ipv6_l3mdev_skb(__u16 flags)
 {
 	return flags & IP6SKB_L3SLAVE;
 }
 #else
-static inline bool skb_l3mdev_slave(__u16 flags)
+static inline bool ipv6_l3mdev_skb(__u16 flags)
 {
 	return false;
 }
@@ -139,11 +139,22 @@ static inline bool skb_l3mdev_slave(__u16 flags)
 
 static inline int inet6_iif(const struct sk_buff *skb)
 {
-	bool l3_slave = skb_l3mdev_slave(IP6CB(skb)->flags);
+	bool l3_slave = ipv6_l3mdev_skb(IP6CB(skb)->flags);
 
 	return l3_slave ? skb->skb_iif : IP6CB(skb)->iif;
 }
 
+/* can not be used in TCP layer after tcp_v6_fill_cb */
+static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb)
+{
+#if defined(CONFIG_NET_L3_MASTER_DEV)
+	if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
+	    ipv6_l3mdev_skb(IP6CB(skb)->flags))
+		return true;
+#endif
+	return false;
+}
+
 struct tcp6_request_sock {
 	struct tcp_request_sock	tcp6rsk_tcp;
 };
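
A hedged sketch of the intended use in socket lookup: when the packet arrived via an L3 master device (e.g. a VRF) and tcp_l3mdev_accept is off, only a socket bound to the exact incoming device should match. The foo_dev_match name is hypothetical:

    static bool foo_dev_match(struct sock *sk, struct net *net,
    			  struct sk_buff *skb)
    {
    	bool exact_dif = inet6_exact_dif_match(net, skb);
    
    	if (sk->sk_bound_dev_if || exact_dif)
    		return sk->sk_bound_dev_if == inet6_iif(skb);
    	return true;	/* unbound socket, no exact match required */
    }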
diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h
index 15ec117ec537..8f2e059e4d45 100644
--- a/include/linux/kconfig.h
+++ b/include/linux/kconfig.h
@@ -31,7 +31,6 @@
  * When CONFIG_BOOGER is not defined, we generate a (... 1, 0) pair, and when
  * the last step cherry picks the 2nd arg, we get a zero.
  */
-#define config_enabled(cfg)		___is_defined(cfg)
 #define __is_defined(x)			___is_defined(x)
 #define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
 #define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)
@@ -41,13 +40,13 @@
  * otherwise. For boolean options, this is equivalent to
  * IS_ENABLED(CONFIG_FOO).
  */
-#define IS_BUILTIN(option) config_enabled(option)
+#define IS_BUILTIN(option) __is_defined(option)
 
 /*
  * IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0
  * otherwise.
  */
-#define IS_MODULE(option) config_enabled(option##_MODULE)
+#define IS_MODULE(option) __is_defined(option##_MODULE)
 
 /*
  * IS_REACHABLE(CONFIG_FOO) evaluates to 1 if the currently compiled
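
The behavior is unchanged by the rename; for a hypothetical CONFIG_FOO the macros evaluate as follows:

    /*   CONFIG_FOO=y  -> IS_BUILTIN() == 1, IS_MODULE() == 0, IS_ENABLED() == 1
     *   CONFIG_FOO=m  -> IS_BUILTIN() == 0, IS_MODULE() == 1, IS_ENABLED() == 1
     *   not set       -> all three evaluate to 0
     */
    if (IS_ENABLED(CONFIG_FOO))
    	foo_setup();	/* constant-folded away when CONFIG_FOO is off */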
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index f6a164297358..3be7abd6e722 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -1399,7 +1399,8 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
 		    u32 *lkey, u32 *rkey);
 int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
 int mlx4_SYNC_TPT(struct mlx4_dev *dev);
-int mlx4_test_interrupts(struct mlx4_dev *dev);
+int mlx4_test_interrupt(struct mlx4_dev *dev, int vector);
+int mlx4_test_async(struct mlx4_dev *dev);
 int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier,
 			     const u32 offset[], u32 value[],
 			     size_t array_len, u8 port);
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 85c4786427e4..ecc451d89ccd 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -418,8 +418,12 @@ struct mlx5_core_health {
 	u32			prev;
 	int			miss_counter;
 	bool			sick;
+	/* wq spinlock to synchronize draining */
+	spinlock_t		wq_lock;
 	struct workqueue_struct *wq;
+	unsigned long		flags;
 	struct work_struct	work;
+	struct delayed_work	recover_work;
 };
 
 struct mlx5_cq_table {
@@ -626,10 +630,6 @@ struct mlx5_db {
 };
 
 enum {
-	MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES,
-};
-
-enum {
 	MLX5_COMP_EQ_SIZE = 1024,
 };
 
635 635
@@ -638,13 +638,6 @@ enum {
 	MLX5_PTYS_EN = 1 << 2,
 };
 
-struct mlx5_db_pgdir {
-	struct list_head	list;
-	DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
-	__be32		       *db_page;
-	dma_addr_t		db_dma;
-};
-
 typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
 
 struct mlx5_cmd_work_ent {
@@ -789,6 +782,7 @@ void mlx5_health_cleanup(struct mlx5_core_dev *dev);
 int mlx5_health_init(struct mlx5_core_dev *dev);
 void mlx5_start_health_poll(struct mlx5_core_dev *dev);
 void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
+void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
 int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
 			struct mlx5_buf *buf, int node);
 int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
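
A hedged sketch of the teardown ordering the new drain helper enables; foo_remove is hypothetical. The point is that pending health/recovery work must be flushed before the device state it touches is torn down:

    static void foo_remove(struct mlx5_core_dev *dev)
    {
    	mlx5_drain_health_wq(dev);	/* no recovery work can fire past here */
    	/* ... unload the device and free resources ... */
    }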
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 3a191853faaa..a92c8d73aeaf 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1271,10 +1271,6 @@ extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *
 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
 		void *buf, int len, unsigned int gup_flags);
 
-long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		      unsigned long start, unsigned long nr_pages,
-		      unsigned int foll_flags, struct page **pages,
-		      struct vm_area_struct **vmas, int *nonblocking);
 long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
 			    unsigned long start, unsigned long nr_pages,
 			    unsigned int gup_flags, struct page **pages,
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 7f2ae99e5daf..0f088f3a2fed 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -440,33 +440,7 @@ struct zone {
 	seqlock_t		span_seqlock;
 #endif
 
-	/*
-	 * wait_table		-- the array holding the hash table
-	 * wait_table_hash_nr_entries	-- the size of the hash table array
-	 * wait_table_bits	-- wait_table_size == (1 << wait_table_bits)
-	 *
-	 * The purpose of all these is to keep track of the people
-	 * waiting for a page to become available and make them
-	 * runnable again when possible. The trouble is that this
-	 * consumes a lot of space, especially when so few things
-	 * wait on pages at a given time. So instead of using
-	 * per-page waitqueues, we use a waitqueue hash table.
-	 *
-	 * The bucket discipline is to sleep on the same queue when
-	 * colliding and wake all in that wait queue when removing.
-	 * When something wakes, it must check to be sure its page is
-	 * truly available, a la thundering herd. The cost of a
-	 * collision is great, but given the expected load of the
-	 * table, they should be so rare as to be outweighed by the
-	 * benefits from the saved space.
-	 *
-	 * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
-	 * primary users of these fields, and in mm/page_alloc.c
-	 * free_area_init_core() performs the initialization of them.
-	 */
-	wait_queue_head_t	*wait_table;
-	unsigned long		wait_table_hash_nr_entries;
-	unsigned long		wait_table_bits;
+	int initialized;
 
 	/* Write-intensive fields used from the page allocator */
 	ZONE_PADDING(_pad1_)
@@ -546,7 +520,7 @@ static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
 
 static inline bool zone_is_initialized(struct zone *zone)
 {
-	return !!zone->wait_table;
+	return zone->initialized;
 }
 
 static inline bool zone_is_empty(struct zone *zone)
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index c5d3d5024fc8..d8905a229f34 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -1184,7 +1184,7 @@ int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
 			    int page);
 
 /* Reset and initialize a NAND device */
-int nand_reset(struct nand_chip *chip);
+int nand_reset(struct nand_chip *chip, int chipnr);
 
 /* Free resources held by the NAND device */
 void nand_cleanup(struct nand_chip *chip);
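
The new chipnr argument selects which die of a multi-die chip to reset; a hedged, hypothetical caller for a single-die setup:

    static int foo_reinit(struct nand_chip *chip)
    {
    	return nand_reset(chip, 0);	/* reset die 0 */
    }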
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 136ae6bbe81e..91ee3643ccc8 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2169,7 +2169,10 @@ struct napi_gro_cb {
 	/* Used to determine if flush_id can be ignored */
 	u8	is_atomic:1;
 
-	/* 5 bit hole */
+	/* Number of gro_receive callbacks this packet already went through */
+	u8	recursion_counter:4;
+
+	/* 1 bit hole */
 
 	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
 	__wsum	csum;
@@ -2180,6 +2183,40 @@ struct napi_gro_cb {
 
 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
 
+#define GRO_RECURSION_LIMIT 15
+static inline int gro_recursion_inc_test(struct sk_buff *skb)
+{
+	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
+}
+
+typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
+static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
+						struct sk_buff **head,
+						struct sk_buff *skb)
+{
+	if (unlikely(gro_recursion_inc_test(skb))) {
+		NAPI_GRO_CB(skb)->flush |= 1;
+		return NULL;
+	}
+
+	return cb(head, skb);
+}
+
+typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **,
+					     struct sk_buff *);
+static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb,
+						   struct sock *sk,
+						   struct sk_buff **head,
+						   struct sk_buff *skb)
+{
+	if (unlikely(gro_recursion_inc_test(skb))) {
+		NAPI_GRO_CB(skb)->flush |= 1;
+		return NULL;
+	}
+
+	return cb(sk, head, skb);
+}
+
 struct packet_type {
 	__be16			type;	/* This is really htons(ether_type). */
 	struct net_device	*dev;	/* NULL is wildcarded here */
@@ -3877,7 +3914,7 @@ struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
 	     ldev = netdev_all_lower_get_next(dev, &(iter)))
 
 #define netdev_for_each_all_lower_dev_rcu(dev, ldev, iter) \
-	for (iter = (dev)->all_adj_list.lower.next, \
+	for (iter = &(dev)->all_adj_list.lower, \
 	     ldev = netdev_all_lower_get_next_rcu(dev, &(iter)); \
 	     ldev; \
 	     ldev = netdev_all_lower_get_next_rcu(dev, &(iter)))
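
The call_gro_receive helpers bound how many nested gro_receive callbacks one packet can traverse, so maliciously nested encapsulations cannot recurse without limit. A hedged sketch of a tunnel driver converted to the helper; the foo_* names are hypothetical:

    static struct sk_buff **foo_inner_gro_receive(struct sk_buff **head,
    					      struct sk_buff *skb);
    
    static struct sk_buff **foo_gro_receive(struct sk_buff **head,
    					struct sk_buff *skb)
    {
    	/* ... parse the tunnel header, locate the inner protocol ... */
    
    	/* bounded dispatch: sets flush and bails at GRO_RECURSION_LIMIT */
    	return call_gro_receive(foo_inner_gro_receive, head, skb);
    }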
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 060d0ede88df..4741ecdb9817 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1257,6 +1257,7 @@ extern u64 perf_swevent_set_period(struct perf_event *event);
 extern void perf_event_enable(struct perf_event *event);
 extern void perf_event_disable(struct perf_event *event);
 extern void perf_event_disable_local(struct perf_event *event);
+extern void perf_event_disable_inatomic(struct perf_event *event);
 extern void perf_event_task_tick(void);
 #else /* !CONFIG_PERF_EVENTS: */
 static inline void *
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index f9ae903bbb84..8978a60371f4 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -146,6 +146,7 @@ enum qed_led_mode {
 #define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))
 
 #define QED_COALESCE_MAX 0xFF
+#define QED_DEFAULT_RX_USECS 12
 
 /* forward */
 struct qed_dev;
diff --git a/include/linux/qed/qede_roce.h b/include/linux/qed/qede_roce.h
index 99fbe6d55acb..f48d64b0e2fb 100644
--- a/include/linux/qed/qede_roce.h
+++ b/include/linux/qed/qede_roce.h
@@ -68,7 +68,7 @@ void qede_roce_unregister_driver(struct qedr_driver *drv);
 
 bool qede_roce_supported(struct qede_dev *dev);
 
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+#if IS_ENABLED(CONFIG_QED_RDMA)
 int qede_roce_dev_add(struct qede_dev *dev);
 void qede_roce_dev_event_open(struct qede_dev *dev);
 void qede_roce_dev_event_close(struct qede_dev *dev);
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 9adc7b21903d..f6673132431d 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -15,6 +15,7 @@
 
 #include <linux/list.h>
 #include <linux/rbtree.h>
+#include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/bug.h>
 #include <linux/lockdep.h>
@@ -116,22 +117,22 @@ struct reg_sequence {
 #define regmap_read_poll_timeout(map, addr, val, cond, sleep_us, timeout_us) \
 ({ \
 	ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
-	int ret; \
+	int pollret; \
 	might_sleep_if(sleep_us); \
 	for (;;) { \
-		ret = regmap_read((map), (addr), &(val)); \
-		if (ret) \
+		pollret = regmap_read((map), (addr), &(val)); \
+		if (pollret) \
 			break; \
 		if (cond) \
 			break; \
 		if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
-			ret = regmap_read((map), (addr), &(val)); \
+			pollret = regmap_read((map), (addr), &(val)); \
 			break; \
 		} \
 		if (sleep_us) \
 			usleep_range((sleep_us >> 2) + 1, sleep_us); \
 	} \
-	ret ?: ((cond) ? 0 : -ETIMEDOUT); \
+	pollret ?: ((cond) ? 0 : -ETIMEDOUT); \
})
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 601258f6e621..32810f279f8e 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -936,6 +936,7 @@ struct sk_buff_fclones {
 
 /**
  *	skb_fclone_busy - check if fclone is busy
+ *	@sk: socket
  *	@skb: buffer
  *
  *	Returns true if skb is a fast clone, and its clone is not freed.
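
The kernel-doc gains the @sk parameter that the function now takes. A hedged illustration of the typical caller-side check:

    /* skip the rework if the fast clone is still in flight */
    if (skb_fclone_busy(sk, skb))
    	return;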