author    Ingo Molnar <mingo@kernel.org>  2014-11-16 04:50:25 -0500
committer Ingo Molnar <mingo@kernel.org>  2014-11-16 04:50:25 -0500
commit    e9ac5f0fa8549dffe2a15870217a9c2e7cd557ec (patch)
tree      863e0e108f7b7ba2dffc7575bbdfc2d454fc2017 /include/linux
parent    44dba3d5d6a10685fb15bd1954e62016334825e0 (diff)
parent    6e998916dfe327e785e7c2447959b2c1a3ea4930 (diff)
Merge branch 'sched/urgent' into sched/core, to pick up fixes before applying more changes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/blkdev.h              |  7
-rw-r--r--  include/linux/cma.h                 |  8
-rw-r--r--  include/linux/compiler-gcc4.h       |  1
-rw-r--r--  include/linux/compiler-gcc5.h       |  1
-rw-r--r--  include/linux/fs.h                  | 10
-rw-r--r--  include/linux/kernel_stat.h         |  5
-rw-r--r--  include/linux/khugepaged.h          | 17
-rw-r--r--  include/linux/memcontrol.h          | 58
-rw-r--r--  include/linux/mm.h                  |  1
-rw-r--r--  include/linux/mtd/spi-nor.h         | 21
-rw-r--r--  include/linux/of_reserved_mem.h     |  9
-rw-r--r--  include/linux/rcupdate.h            | 15
-rw-r--r--  include/linux/regulator/consumer.h  |  2
-rw-r--r--  include/linux/skbuff.h              | 12
-rw-r--r--  include/linux/usb/usbnet.h          |  4
15 files changed, 81 insertions, 90 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0207a78a8d82..aac0f9ea952a 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1136,8 +1136,7 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
 /*
  * tag stuff
  */
-#define blk_rq_tagged(rq) \
-	((rq)->mq_ctx || ((rq)->cmd_flags & REQ_QUEUED))
+#define blk_rq_tagged(rq)	((rq)->cmd_flags & REQ_QUEUED)
 extern int blk_queue_start_tag(struct request_queue *, struct request *);
 extern struct request *blk_queue_find_tag(struct request_queue *, int);
 extern void blk_queue_end_tag(struct request_queue *, struct request *);
@@ -1583,13 +1582,13 @@ static inline bool blk_integrity_merge_rq(struct request_queue *rq,
 					    struct request *r1,
 					    struct request *r2)
 {
-	return 0;
+	return true;
 }
 static inline bool blk_integrity_merge_bio(struct request_queue *rq,
 					    struct request *r,
 					    struct bio *b)
 {
-	return 0;
+	return true;
 }
 static inline bool blk_integrity_is_initialized(struct gendisk *g)
 {
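
With CONFIG_BLK_DEV_INTEGRITY disabled, the inline stubs now return true ("nothing blocks the merge") instead of 0, matching their bool return type. As a hedged illustration of the caller pattern this serves (the queue/request names are hypothetical, not taken from this patch):

	/* a merge path must not be vetoed when integrity support is compiled out */
	if (!blk_integrity_merge_rq(q, req, next))
		return 0;	/* refuse to merge */
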
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 0430ed05d3b9..a93438beb33c 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -18,12 +18,12 @@ struct cma;
 extern phys_addr_t cma_get_base(struct cma *cma);
 extern unsigned long cma_get_size(struct cma *cma);
 
-extern int __init cma_declare_contiguous(phys_addr_t size,
-			phys_addr_t base, phys_addr_t limit,
+extern int __init cma_declare_contiguous(phys_addr_t base,
+			phys_addr_t size, phys_addr_t limit,
 			phys_addr_t alignment, unsigned int order_per_bit,
 			bool fixed, struct cma **res_cma);
-extern int cma_init_reserved_mem(phys_addr_t size,
-					phys_addr_t base, int order_per_bit,
+extern int cma_init_reserved_mem(phys_addr_t base,
+					phys_addr_t size, int order_per_bit,
 					struct cma **res_cma);
 extern struct page *cma_alloc(struct cma *cma, int count, unsigned int align);
 extern bool cma_release(struct cma *cma, struct page *pages, int count);
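
The prototypes now list base before size, matching the definitions in mm/cma.c, so positional callers read correctly. A hedged example call with the corrected ordering; the size and the zero base/limit/alignment are illustrative only:

	struct cma *cma;
	int ret;

	/* base/limit/alignment of 0 let the allocator choose; SZ_64M is an example size */
	ret = cma_declare_contiguous(0, SZ_64M, 0, 0, 0, false, &cma);
	if (ret)
		pr_err("CMA reservation failed: %d\n", ret);
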
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index 2507fd2a1eb4..d1a558239b1a 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -71,7 +71,6 @@
  * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
  *
  * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
- * Fixed in GCC 4.8.2 and later versions.
  *
  * (asm goto is automatically volatile - the naming reflects this.)
  */
diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
index cdd1cc202d51..c8c565952548 100644
--- a/include/linux/compiler-gcc5.h
+++ b/include/linux/compiler-gcc5.h
@@ -53,7 +53,6 @@
  * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
  *
  * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
- * Fixed in GCC 4.8.2 and later versions.
  *
  * (asm goto is automatically volatile - the naming reflects this.)
  */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 4e41a4a331bb..9ab779e8a63c 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -639,11 +639,13 @@ static inline int inode_unhashed(struct inode *inode)
  *  2: child/target
  *  3: xattr
  *  4: second non-directory
- * The last is for certain operations (such as rename) which lock two
+ *  5: second parent (when locking independent directories in rename)
+ *
+ * I_MUTEX_NONDIR2 is for certain operations (such as rename) which lock two
  * non-directories at once.
  *
  * The locking order between these classes is
- * parent -> child -> normal -> xattr -> second non-directory
+ * parent[2] -> child -> grandchild -> normal -> xattr -> second non-directory
  */
 enum inode_i_mutex_lock_class
 {
@@ -651,7 +653,8 @@ enum inode_i_mutex_lock_class
 	I_MUTEX_PARENT,
 	I_MUTEX_CHILD,
 	I_MUTEX_XATTR,
-	I_MUTEX_NONDIR2
+	I_MUTEX_NONDIR2,
+	I_MUTEX_PARENT2,
 };
 
 void lock_two_nondirectories(struct inode *, struct inode*);
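
The new I_MUTEX_PARENT2 subclass exists so that code which must hold two unrelated parent directories at once (cross-directory rename) can tell lockdep the two parent locks apart. A hedged sketch in the style of fs/namei.c's lock_rename(); p1 and p2 are hypothetical dentries here:

	mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_PARENT2);
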
@@ -2466,6 +2469,7 @@ extern ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, lo
 extern ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
 
 /* fs/block_dev.c */
+extern ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to);
 extern ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from);
 extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
 			int datasync);
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 8422b4ed6882..b9376cd5a187 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -77,11 +77,6 @@ static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
 	return kstat_cpu(cpu).irqs_sum;
 }
 
-/*
- * Lock/unlock the current runqueue - to extract task statistics:
- */
-extern unsigned long long task_delta_exec(struct task_struct *);
-
 extern void account_user_time(struct task_struct *, cputime_t, cputime_t);
 extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t);
 extern void account_steal_time(cputime_t);
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index 6b394f0b5148..eeb307985715 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -6,7 +6,8 @@
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 extern int __khugepaged_enter(struct mm_struct *mm);
 extern void __khugepaged_exit(struct mm_struct *mm);
-extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma);
+extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
+				      unsigned long vm_flags);
 
 #define khugepaged_enabled()					       \
 	(transparent_hugepage_flags &				       \
@@ -35,13 +36,13 @@ static inline void khugepaged_exit(struct mm_struct *mm)
 		__khugepaged_exit(mm);
 }
 
-static inline int khugepaged_enter(struct vm_area_struct *vma)
+static inline int khugepaged_enter(struct vm_area_struct *vma,
+				   unsigned long vm_flags)
 {
 	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
 		if ((khugepaged_always() ||
-		     (khugepaged_req_madv() &&
-		      vma->vm_flags & VM_HUGEPAGE)) &&
-		    !(vma->vm_flags & VM_NOHUGEPAGE))
+		     (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
+		    !(vm_flags & VM_NOHUGEPAGE))
 			if (__khugepaged_enter(vma->vm_mm))
 				return -ENOMEM;
 	return 0;
@@ -54,11 +55,13 @@ static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
 static inline void khugepaged_exit(struct mm_struct *mm)
 {
 }
-static inline int khugepaged_enter(struct vm_area_struct *vma)
+static inline int khugepaged_enter(struct vm_area_struct *vma,
+				   unsigned long vm_flags)
 {
 	return 0;
 }
-static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
+static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
+					     unsigned long vm_flags)
 {
 	return 0;
 }
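
Callers now pass the vm_flags value they are about to install rather than letting khugepaged read vma->vm_flags, which may still hold the old flags at the call site. A hedged sketch of an adjusted caller; newflags is a hypothetical local holding the prospective flags:

	/* decide on the flags the VMA is about to get, not the stale ones */
	khugepaged_enter_vma_merge(vma, newflags);
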
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 19df5d857411..6b75640ef5ab 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -139,48 +139,23 @@ static inline bool mem_cgroup_disabled(void)
 	return false;
 }
 
-void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
-					 unsigned long *flags);
-
-extern atomic_t memcg_moving;
-
-static inline void mem_cgroup_begin_update_page_stat(struct page *page,
-					bool *locked, unsigned long *flags)
-{
-	if (mem_cgroup_disabled())
-		return;
-	rcu_read_lock();
-	*locked = false;
-	if (atomic_read(&memcg_moving))
-		__mem_cgroup_begin_update_page_stat(page, locked, flags);
-}
-
-void __mem_cgroup_end_update_page_stat(struct page *page,
-				unsigned long *flags);
-static inline void mem_cgroup_end_update_page_stat(struct page *page,
-					bool *locked, unsigned long *flags)
-{
-	if (mem_cgroup_disabled())
-		return;
-	if (*locked)
-		__mem_cgroup_end_update_page_stat(page, flags);
-	rcu_read_unlock();
-}
-
-void mem_cgroup_update_page_stat(struct page *page,
-				 enum mem_cgroup_stat_index idx,
-				 int val);
-
-static inline void mem_cgroup_inc_page_stat(struct page *page,
+struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, bool *locked,
+					      unsigned long *flags);
+void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool locked,
+			      unsigned long flags);
+void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
+				 enum mem_cgroup_stat_index idx, int val);
+
+static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
 					    enum mem_cgroup_stat_index idx)
 {
-	mem_cgroup_update_page_stat(page, idx, 1);
+	mem_cgroup_update_page_stat(memcg, idx, 1);
 }
 
-static inline void mem_cgroup_dec_page_stat(struct page *page,
+static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
 					    enum mem_cgroup_stat_index idx)
 {
-	mem_cgroup_update_page_stat(page, idx, -1);
+	mem_cgroup_update_page_stat(memcg, idx, -1);
 }
 
 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
@@ -315,13 +290,14 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 {
 }
 
-static inline void mem_cgroup_begin_update_page_stat(struct page *page,
+static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
 					bool *locked, unsigned long *flags)
 {
+	return NULL;
 }
 
-static inline void mem_cgroup_end_update_page_stat(struct page *page,
-					bool *locked, unsigned long *flags)
+static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg,
+					bool locked, unsigned long flags)
 {
 }
 
@@ -343,12 +319,12 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
 	return false;
 }
 
-static inline void mem_cgroup_inc_page_stat(struct page *page,
+static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
 					    enum mem_cgroup_stat_index idx)
 {
 }
 
-static inline void mem_cgroup_dec_page_stat(struct page *page,
+static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
 					    enum mem_cgroup_stat_index idx)
 {
 }
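
The page-stat interface now returns the memcg from the begin call and takes it back for the update and end calls, instead of re-deriving it from the page each time. A hedged sketch of the resulting call pattern; the statistic index is only an example:

	struct mem_cgroup *memcg;
	unsigned long flags;
	bool locked;

	memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
	/* ... change the page state the statistic tracks ... */
	mem_cgroup_update_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED, -1);
	mem_cgroup_end_page_stat(memcg, locked, flags);
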
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 27eb1bfbe704..b46461116cd2 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1235,7 +1235,6 @@ int __set_page_dirty_no_writeback(struct page *page);
 int redirty_page_for_writepage(struct writeback_control *wbc,
 				struct page *page);
 void account_page_dirtied(struct page *page, struct address_space *mapping);
-void account_page_writeback(struct page *page);
 int set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 9e6294f32ba8..046a0a2e4c4e 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -187,32 +187,17 @@ struct spi_nor {
 /**
  * spi_nor_scan() - scan the SPI NOR
  * @nor:	the spi_nor structure
- * @id:		the spi_device_id provided by the driver
+ * @name:	the chip type name
  * @mode:	the read mode supported by the driver
  *
  * The drivers can use this fuction to scan the SPI NOR.
  * In the scanning, it will try to get all the necessary information to
  * fill the mtd_info{} and the spi_nor{}.
  *
- * The board may assigns a spi_device_id with @id which be used to compared with
- * the spi_device_id detected by the scanning.
+ * The chip type name can be provided through the @name parameter.
  *
  * Return: 0 for success, others for failure.
  */
-int spi_nor_scan(struct spi_nor *nor, const struct spi_device_id *id,
-			enum read_mode mode);
-extern const struct spi_device_id spi_nor_ids[];
-
-/**
- * spi_nor_match_id() - find the spi_device_id by the name
- * @name:	the name of the spi_device_id
- *
- * The drivers use this function to find the spi_device_id
- * specified by the @name.
- *
- * Return: returns the right spi_device_id pointer on success,
- * and returns NULL on failure.
- */
-const struct spi_device_id *spi_nor_match_id(char *name);
+int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode);
 
 #endif
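
Drivers now hand spi_nor_scan() a plain chip name (or NULL to rely on ID detection alone) instead of resolving a spi_device_id themselves. A hedged example call; nor, name and the chosen read mode are driver-specific placeholders:

	ret = spi_nor_scan(nor, name, SPI_NOR_QUAD);
	if (ret)
		return ret;
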
diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h
index 5b5efae09135..ad2f67054372 100644
--- a/include/linux/of_reserved_mem.h
+++ b/include/linux/of_reserved_mem.h
@@ -16,7 +16,7 @@ struct reserved_mem {
 };
 
 struct reserved_mem_ops {
-	void	(*device_init)(struct reserved_mem *rmem,
+	int	(*device_init)(struct reserved_mem *rmem,
 				struct device *dev);
 	void	(*device_release)(struct reserved_mem *rmem,
 				struct device *dev);
@@ -28,14 +28,17 @@ typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem);
 	_OF_DECLARE(reservedmem, name, compat, init, reservedmem_of_init_fn)
 
 #ifdef CONFIG_OF_RESERVED_MEM
-void of_reserved_mem_device_init(struct device *dev);
+int of_reserved_mem_device_init(struct device *dev);
 void of_reserved_mem_device_release(struct device *dev);
 
 void fdt_init_reserved_mem(void);
 void fdt_reserved_mem_save_node(unsigned long node, const char *uname,
 			       phys_addr_t base, phys_addr_t size);
 #else
-static inline void of_reserved_mem_device_init(struct device *dev) { }
+static inline int of_reserved_mem_device_init(struct device *dev)
+{
+	return -ENOSYS;
+}
 static inline void of_reserved_mem_device_release(struct device *pdev) { }
 
 static inline void fdt_init_reserved_mem(void) { }
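
Because of_reserved_mem_device_init() now returns a result, a driver can distinguish a real attach failure from the "no support / no region" case. A hedged probe-time sketch; how the error is handled is the driver's choice, not dictated by this change:

	ret = of_reserved_mem_device_init(dev);
	if (ret && ret != -ENOSYS)
		return ret;	/* a region exists but could not be attached */
	/* otherwise fall back to ordinary allocations */
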
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index a4a819ffb2d1..53ff1a752d7e 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -617,6 +617,21 @@ static inline void rcu_preempt_sleep_check(void)
 #define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
 
 /**
+ * lockless_dereference() - safely load a pointer for later dereference
+ * @p: The pointer to load
+ *
+ * Similar to rcu_dereference(), but for situations where the pointed-to
+ * object's lifetime is managed by something other than RCU.  That
+ * "something other" might be reference counting or simple immortality.
+ */
+#define lockless_dereference(p) \
+({ \
+	typeof(p) _________p1 = ACCESS_ONCE(p); \
+	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
+	(_________p1); \
+})
+
+/**
  * rcu_assign_pointer() - assign to RCU-protected pointer
  * @p: pointer to assign to
  * @v: value to assign (publish)
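
For reference, a hedged usage sketch of the new macro with a hypothetical global pointer: the data dependency orders the pointer load before later dereferences, with no rcu_read_lock() required because the object's lifetime is guaranteed by other means.

	struct foo *f;

	f = lockless_dereference(global_foo);	/* global_foo is hypothetical */
	if (f)
		do_something_with(f->field);	/* ordered after the pointer load */
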
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index d347c805f923..f540b1496e2f 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -35,6 +35,8 @@
 #ifndef __LINUX_REGULATOR_CONSUMER_H_
 #define __LINUX_REGULATOR_CONSUMER_H_
 
+#include <linux/err.h>
+
 struct device;
 struct notifier_block;
 struct regmap;
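
The explicit include keeps the header self-contained: its !CONFIG_REGULATOR stubs hand back ERR_PTR() values that consumers test with the linux/err.h helpers. A hedged consumer-side sketch; "vdd" is an example supply name:

	struct regulator *reg;

	reg = regulator_get(dev, "vdd");
	if (IS_ERR(reg))
		return PTR_ERR(reg);
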
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index a59d9343c25b..6c8b6f604e76 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -557,7 +557,9 @@ struct sk_buff {
 	/* fields enclosed in headers_start/headers_end are copied
 	 * using a single memcpy() in __copy_skb_header()
 	 */
+	/* private: */
 	__u32			headers_start[0];
+	/* public: */
 
 /* if you move pkt_type around you also must adapt those constants */
 #ifdef __BIG_ENDIAN_BITFIELD
@@ -642,7 +644,9 @@ struct sk_buff {
 	__u16			network_header;
 	__u16			mac_header;
 
+	/* private: */
 	__u32			headers_end[0];
+	/* public: */
 
 	/* These elements must be at the end, see alloc_skb() for details. */
 	sk_buff_data_t		tail;
@@ -795,15 +799,19 @@ struct sk_buff_fclones {
  * @skb: buffer
  *
  * Returns true is skb is a fast clone, and its clone is not freed.
+ * Some drivers call skb_orphan() in their ndo_start_xmit(),
+ * so we also check that this didnt happen.
  */
-static inline bool skb_fclone_busy(const struct sk_buff *skb)
+static inline bool skb_fclone_busy(const struct sock *sk,
+				   const struct sk_buff *skb)
 {
 	const struct sk_buff_fclones *fclones;
 
 	fclones = container_of(skb, struct sk_buff_fclones, skb1);
 
 	return skb->fclone == SKB_FCLONE_ORIG &&
-	       fclones->skb2.fclone == SKB_FCLONE_CLONE;
+	       fclones->skb2.fclone == SKB_FCLONE_CLONE &&
+	       fclones->skb2.sk == sk;
 }
 
 static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
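
Callers now pass the owning socket so the busy check also catches clones that a driver has already orphaned. A hedged sketch of an adjusted call site, in the style of the TCP retransmit path; the -EBUSY policy is illustrative only:

	if (skb_fclone_busy(sk, skb))
		return -EBUSY;	/* the clone is still in flight on this socket */
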
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 26088feb6608..d9a4905e01d0 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -78,6 +78,7 @@ struct usbnet {
 #		define EVENT_NO_RUNTIME_PM	9
 #		define EVENT_RX_KILL	10
 #		define EVENT_LINK_CHANGE	11
+#		define EVENT_SET_RX_MODE	12
 };
 
 static inline struct usb_driver *driver_of(struct usb_interface *intf)
@@ -159,6 +160,9 @@ struct driver_info {
 	/* called by minidriver when receiving indication */
 	void	(*indication)(struct usbnet *dev, void *ind, int indlen);
 
+	/* rx mode change (device changes address list filtering) */
+	void	(*set_rx_mode)(struct usbnet *dev);
+
 	/* for new devices, use the descriptor-reading code instead */
 	int		in;	/* rx endpoint */
 	int		out;	/* tx endpoint */
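
A minidriver that wants device-side RX filtering would implement the new hook and list it in its driver_info; the core defers the work via the EVENT_SET_RX_MODE flag. A hedged sketch with hypothetical names (example_set_rx_mode, example_write_mc_filter, example_info):

static void example_set_rx_mode(struct usbnet *dev)
{
	/* push the current multicast/promiscuous configuration to the device */
	example_write_mc_filter(dev);
}

static const struct driver_info example_info = {
	.description	= "example usbnet device",
	.set_rx_mode	= example_set_rx_mode,
};
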