Diffstat (limited to 'include/linux')
92 files changed, 2925 insertions, 670 deletions
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 619b5657af77..c94e71781b79 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -185,6 +185,7 @@ header-y += if_pppol2tp.h
 header-y += if_pppox.h
 header-y += if_slip.h
 header-y += if_strip.h
+header-y += if_team.h
 header-y += if_tr.h
 header-y += if_tun.h
 header-y += if_tunnel.h
@@ -194,7 +195,9 @@ header-y += igmp.h
 header-y += in.h
 header-y += in6.h
 header-y += in_route.h
+header-y += sock_diag.h
 header-y += inet_diag.h
+header-y += unix_diag.h
 header-y += inotify.h
 header-y += input.h
 header-y += ioctl.h
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index fcbbe71a3cc1..724c69c40bb8 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -16,6 +16,7 @@
 
 #include <linux/clk.h>
 #include <linux/device.h>
+#include <linux/mod_devicetable.h>
 #include <linux/err.h>
 #include <linux/resource.h>
 #include <linux/regulator/consumer.h>
@@ -35,12 +36,6 @@ struct amba_device {
 	unsigned int irq[AMBA_NR_IRQS];
 };
 
-struct amba_id {
-	unsigned int id;
-	unsigned int mask;
-	void *data;
-};
-
 struct amba_driver {
 	struct device_driver drv;
 	int (*probe)(struct amba_device *, const struct amba_id *);
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
index 49a83ca900ba..f4ff882cb2da 100644
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -445,16 +445,6 @@ void vcc_insert_socket(struct sock *sk);
 
 void atm_dev_release_vccs(struct atm_dev *dev);
 
-/*
- * This is approximately the algorithm used by alloc_skb.
- *
- */
-
-static inline int atm_guess_pdu2truesize(int size)
-{
-	return SKB_DATA_ALIGN(size) + sizeof(struct skb_shared_info);
-}
-
 
 static inline void atm_force_charge(struct atm_vcc *vcc,int truesize)
 {
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 4d4b59de9467..f4b8346b1a33 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -205,61 +205,82 @@ struct bcma_bus {
 	struct ssb_sprom sprom;
 };
 
-extern inline u32 bcma_read8(struct bcma_device *core, u16 offset)
+static inline u32 bcma_read8(struct bcma_device *core, u16 offset)
 {
 	return core->bus->ops->read8(core, offset);
 }
-extern inline u32 bcma_read16(struct bcma_device *core, u16 offset)
+static inline u32 bcma_read16(struct bcma_device *core, u16 offset)
 {
 	return core->bus->ops->read16(core, offset);
 }
-extern inline u32 bcma_read32(struct bcma_device *core, u16 offset)
+static inline u32 bcma_read32(struct bcma_device *core, u16 offset)
 {
 	return core->bus->ops->read32(core, offset);
 }
-extern inline
+static inline
 void bcma_write8(struct bcma_device *core, u16 offset, u32 value)
 {
 	core->bus->ops->write8(core, offset, value);
 }
-extern inline
+static inline
 void bcma_write16(struct bcma_device *core, u16 offset, u32 value)
 {
 	core->bus->ops->write16(core, offset, value);
 }
-extern inline
+static inline
 void bcma_write32(struct bcma_device *core, u16 offset, u32 value)
 {
 	core->bus->ops->write32(core, offset, value);
 }
 #ifdef CONFIG_BCMA_BLOCKIO
-extern inline void bcma_block_read(struct bcma_device *core, void *buffer,
+static inline void bcma_block_read(struct bcma_device *core, void *buffer,
 				   size_t count, u16 offset, u8 reg_width)
 {
 	core->bus->ops->block_read(core, buffer, count, offset, reg_width);
 }
-extern inline void bcma_block_write(struct bcma_device *core, const void *buffer,
-				    size_t count, u16 offset, u8 reg_width)
+static inline void bcma_block_write(struct bcma_device *core,
+				    const void *buffer, size_t count,
+				    u16 offset, u8 reg_width)
 {
 	core->bus->ops->block_write(core, buffer, count, offset, reg_width);
 }
 #endif
-extern inline u32 bcma_aread32(struct bcma_device *core, u16 offset)
+static inline u32 bcma_aread32(struct bcma_device *core, u16 offset)
 {
 	return core->bus->ops->aread32(core, offset);
 }
-extern inline
+static inline
 void bcma_awrite32(struct bcma_device *core, u16 offset, u32 value)
 {
 	core->bus->ops->awrite32(core, offset, value);
 }
 
-#define bcma_mask32(cc, offset, mask) \
-	bcma_write32(cc, offset, bcma_read32(cc, offset) & (mask))
-#define bcma_set32(cc, offset, set) \
-	bcma_write32(cc, offset, bcma_read32(cc, offset) | (set))
-#define bcma_maskset32(cc, offset, mask, set) \
-	bcma_write32(cc, offset, (bcma_read32(cc, offset) & (mask)) | (set))
+static inline void bcma_mask32(struct bcma_device *cc, u16 offset, u32 mask)
+{
+	bcma_write32(cc, offset, bcma_read32(cc, offset) & mask);
+}
+static inline void bcma_set32(struct bcma_device *cc, u16 offset, u32 set)
+{
+	bcma_write32(cc, offset, bcma_read32(cc, offset) | set);
+}
+static inline void bcma_maskset32(struct bcma_device *cc,
+				  u16 offset, u32 mask, u32 set)
+{
+	bcma_write32(cc, offset, (bcma_read32(cc, offset) & mask) | set);
+}
+static inline void bcma_mask16(struct bcma_device *cc, u16 offset, u16 mask)
+{
+	bcma_write16(cc, offset, bcma_read16(cc, offset) & mask);
+}
+static inline void bcma_set16(struct bcma_device *cc, u16 offset, u16 set)
+{
+	bcma_write16(cc, offset, bcma_read16(cc, offset) | set);
+}
+static inline void bcma_maskset16(struct bcma_device *cc,
+				  u16 offset, u16 mask, u16 set)
+{
+	bcma_write16(cc, offset, (bcma_read16(cc, offset) & mask) | set);
+}
 
 extern bool bcma_core_is_enabled(struct bcma_device *core);
 extern void bcma_core_disable(struct bcma_device *core, u32 flags);
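For illustration only (not part of this patch): the new typed helpers above do a plain read-modify-write, where the mask argument selects the bits to keep. A minimal sketch of using bcma_maskset32() to reprogram one register field; the choice of core and register here is an assumption made purely for the example.

/* Sketch: clear the ILP divider field of the PMU control register and
 * write a new divider, using the accessors added above.  bcma_maskset32()
 * keeps the bits selected by the mask and ORs in the set value. */
static void example_set_ilp_div(struct bcma_device *core, u32 div)
{
	bcma_maskset32(core, BCMA_CC_PMU_CTL,
		       ~BCMA_CC_PMU_CTL_ILP_DIV,
		       div << BCMA_CC_PMU_CTL_ILP_DIV_SHIFT);
}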
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index 1526d965ed06..a33086a7530b 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -203,6 +203,7 @@
 #define BCMA_CC_PMU_CTL			0x0600 /* PMU control */
 #define BCMA_CC_PMU_CTL_ILP_DIV		0xFFFF0000 /* ILP div mask */
 #define BCMA_CC_PMU_CTL_ILP_DIV_SHIFT	16
+#define BCMA_CC_PMU_CTL_PLL_UPD		0x00000400
 #define BCMA_CC_PMU_CTL_NOILPONW	0x00000200 /* No ILP on wait */
 #define BCMA_CC_PMU_CTL_HTREQEN		0x00000100 /* HT req enable */
 #define BCMA_CC_PMU_CTL_ALPREQEN	0x00000080 /* ALP req enable */
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index a3ef66a2a083..3c1063acb2ab 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -22,8 +22,14 @@ extern unsigned long __sw_hweight64(__u64 w);
 #include <asm/bitops.h>
 
 #define for_each_set_bit(bit, addr, size) \
 	for ((bit) = find_first_bit((addr), (size));		\
 	     (bit) < (size);					\
+	     (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+/* same as for_each_set_bit() but use bit as value to start with */
+#define for_each_set_bit_cont(bit, addr, size) \
+	for ((bit) = find_next_bit((addr), (size), (bit));	\
+	     (bit) < (size);					\
 	     (bit) = find_next_bit((addr), (size), (bit) + 1))
 
 static __inline__ int get_bitmask_order(unsigned int count)
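For illustration only (not part of this patch): the new for_each_set_bit_cont() differs from for_each_set_bit() only in that it starts searching at the current value of bit rather than at bit 0, so a walk that was interrupted can be resumed. A small sketch; the bitmap, the predicate and the handler are invented for the example.

	unsigned long map[BITS_TO_LONGS(64)];
	unsigned int bit;

	/* First pass: stop early at some interesting bit. */
	for_each_set_bit(bit, map, 64)
		if (should_stop(bit))		/* hypothetical predicate */
			break;

	/* Later: resume the walk.  The bit we stopped at is visited again
	 * because the _cont variant starts the search at 'bit' itself. */
	for_each_set_bit_cont(bit, map, 64)
		handle(bit);			/* hypothetical handler */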
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c7a6d3b5bc7b..94acd8172b5b 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -805,9 +805,6 @@ extern void blk_unprep_request(struct request *);
  */
 extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
 					spinlock_t *lock, int node_id);
-extern struct request_queue *blk_init_allocated_queue_node(struct request_queue *,
-							request_fn_proc *,
-							spinlock_t *, int node_id);
 extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
 extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
 						request_fn_proc *, spinlock_t *);
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index ab344a521105..66d3e954eb6c 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -44,7 +44,7 @@ extern unsigned long init_bootmem_node(pg_data_t *pgdat,
 				       unsigned long endpfn);
 extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
 
-unsigned long free_all_memory_core_early(int nodeid);
+extern unsigned long free_low_memory_core_early(int nodeid);
 extern unsigned long free_all_bootmem_node(pg_data_t *pgdat);
 extern unsigned long free_all_bootmem(void);
 
diff --git a/include/linux/can/platform/cc770.h b/include/linux/can/platform/cc770.h
new file mode 100644
index 000000000000..7702641f87ee
--- /dev/null
+++ b/include/linux/can/platform/cc770.h
@@ -0,0 +1,33 @@
+#ifndef _CAN_PLATFORM_CC770_H_
+#define _CAN_PLATFORM_CC770_H_
+
+/* CPU Interface Register (0x02) */
+#define CPUIF_CEN	0x01	/* Clock Out Enable */
+#define CPUIF_MUX	0x04	/* Multiplex */
+#define CPUIF_SLP	0x08	/* Sleep */
+#define CPUIF_PWD	0x10	/* Power Down Mode */
+#define CPUIF_DMC	0x20	/* Divide Memory Clock */
+#define CPUIF_DSC	0x40	/* Divide System Clock */
+#define CPUIF_RST	0x80	/* Hardware Reset Status */
+
+/* Clock Out Register (0x1f) */
+#define CLKOUT_CD_MASK	0x0f	/* Clock Divider mask */
+#define CLKOUT_SL_MASK	0x30	/* Slew Rate mask */
+#define CLKOUT_SL_SHIFT	4
+
+/* Bus Configuration Register (0x2f) */
+#define BUSCFG_DR0	0x01	/* Disconnect RX0 Input / Select RX input */
+#define BUSCFG_DR1	0x02	/* Disconnect RX1 Input / Silent mode */
+#define BUSCFG_DT1	0x08	/* Disconnect TX1 Output */
+#define BUSCFG_POL	0x20	/* Polarity dominant or recessive */
+#define BUSCFG_CBY	0x40	/* Input Comparator Bypass */
+
+struct cc770_platform_data {
+	u32 osc_freq;	/* CAN bus oscillator frequency in Hz */
+
+	u8 cir;		/* CPU Interface Register */
+	u8 cor;		/* Clock Out Register */
+	u8 bcr;		/* Bus Configuration Register */
+};
+
+#endif /* !_CAN_PLATFORM_CC770_H_ */
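For context only (not part of this patch): platform data like the struct above is normally filled in by board code and attached to the controller's platform device. A hedged sketch; the device name, register values and oscillator frequency are assumptions made for illustration, not taken from the patch.

/* Hypothetical board file fragment.  Needs <linux/platform_device.h>
 * and <linux/can/platform/cc770.h>. */
static struct cc770_platform_data example_cc770_pdata = {
	.osc_freq = 16000000,			/* assumed 16 MHz oscillator */
	.cir	  = CPUIF_CEN,			/* clock out enabled */
	.cor	  = 0x00,
	.bcr	  = BUSCFG_CBY,			/* bypass input comparator */
};

static struct platform_device example_cc770_dev = {
	.name	= "cc770_platform",		/* assumed driver name */
	.id	= 0,
	.dev	= {
		.platform_data = &example_cc770_pdata,
	},
};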
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index ac663c18776c..0bd390ce98b2 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -59,8 +59,16 @@ SUBSYS(net_cls)
 SUBSYS(blkio)
 #endif
 
+/* */
+
 #ifdef CONFIG_CGROUP_PERF
 SUBSYS(perf)
 #endif
 
 /* */
+
+#ifdef CONFIG_NETPRIO_CGROUP
+SUBSYS(net_prio)
+#endif
+
+/* */
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index c86c940d1de3..081147da0564 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -71,7 +71,7 @@ struct timecounter {
 
 /**
  * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
- * @tc:		Pointer to cycle counter.
+ * @cc:		Pointer to cycle counter.
  * @cycles:	Cycles
  *
  * XXX - This could use some mult_lxl_ll() asm optimization. Same code
@@ -114,7 +114,7 @@ extern u64 timecounter_read(struct timecounter *tc);
  *			time base as values returned by
  *			timecounter_read()
  * @tc:		Pointer to time counter.
- * @cycle:	a value returned by tc->cc->read()
+ * @cycle_tstamp:	a value returned by tc->cc->read()
  *
  * Cycle counts that are converted correctly as long as they
  * fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
@@ -156,11 +156,12 @@ extern u64 timecounter_cyc2time(struct timecounter *tc,
  * @mult:		cycle to nanosecond multiplier
  * @shift:		cycle to nanosecond divisor (power of two)
  * @max_idle_ns:	max idle time permitted by the clocksource (nsecs)
- * @maxadj		maximum adjustment value to mult (~11%)
+ * @maxadj:		maximum adjustment value to mult (~11%)
  * @flags:		flags describing special properties
  * @archdata:		arch-specific data
  * @suspend:		suspend function for the clocksource, if necessary
  * @resume:		resume function for the clocksource, if necessary
+ * @cycle_last:		most recent cycle counter value seen by ::read()
  */
 struct clocksource {
 	/*
@@ -187,6 +188,7 @@ struct clocksource {
 	void (*suspend)(struct clocksource *cs);
 	void (*resume)(struct clocksource *cs);
 
+	/* private: */
 #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
 	/* Watchdog related data, used by the framework */
 	struct list_head wd_list;
@@ -261,6 +263,9 @@ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
 
 /**
  * clocksource_cyc2ns - converts clocksource cycles to nanoseconds
+ * @cycles:	cycles
+ * @mult:	cycle to nanosecond multiplier
+ * @shift:	cycle to nanosecond divisor (power of two)
 *
 * Converts cycles to nanoseconds, using the given mult and shift.
 *
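For illustration only (not part of this patch): the parameters documented above feed the usual fixed-point conversion, nanoseconds = (cycles * mult) >> shift. A tiny worked example, assuming a 10 MHz clocksource (100 ns per cycle) with shift = 10:

/* With shift = 10, mult for a 10 MHz counter is roughly 100 << 10 = 102400,
 * so three cycles convert as:
 *
 *	clocksource_cyc2ns(3, 102400, 10) == (3 * 102400) >> 10 == 300 ns
 */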
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 6cb60fd2ea84..305c263021e7 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -27,6 +27,7 @@ struct cpu {
 
 extern int register_cpu(struct cpu *cpu, int num);
 extern struct sys_device *get_cpu_sysdev(unsigned cpu);
+extern bool cpu_is_hotpluggable(unsigned cpu);
 
 extern int cpu_add_sysdev_attr(struct sysdev_attribute *attr);
 extern void cpu_remove_sysdev_attr(struct sysdev_attribute *attr);
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 7408af843b8a..23f81de51829 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -130,7 +130,6 @@ struct cpuidle_driver {
 #ifdef CONFIG_CPU_IDLE
 extern void disable_cpuidle(void);
 extern int cpuidle_idle_call(void);
-
 extern int cpuidle_register_driver(struct cpuidle_driver *drv);
 struct cpuidle_driver *cpuidle_get_driver(void);
 extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
@@ -145,7 +144,6 @@ extern void cpuidle_disable_device(struct cpuidle_device *dev);
 #else
 static inline void disable_cpuidle(void) { }
 static inline int cpuidle_idle_call(void) { return -ENODEV; }
-
 static inline int cpuidle_register_driver(struct cpuidle_driver *drv)
 {return -ENODEV; }
 static inline struct cpuidle_driver *cpuidle_get_driver(void) {return NULL; }
diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h
index 65970b811e22..0e5f5785d9f2 100644
--- a/include/linux/debugobjects.h
+++ b/include/linux/debugobjects.h
@@ -46,6 +46,8 @@ struct debug_obj {
  *		fails
  * @fixup_free:	fixup function, which is called when the free check
  *		fails
+ * @fixup_assert_init: fixup function, which is called when the assert_init
+ *		check fails
  */
 struct debug_obj_descr {
 	const char		*name;
@@ -54,6 +56,7 @@ struct debug_obj_descr {
 	int (*fixup_activate)	(void *addr, enum debug_obj_state state);
 	int (*fixup_destroy)	(void *addr, enum debug_obj_state state);
 	int (*fixup_free)	(void *addr, enum debug_obj_state state);
+	int (*fixup_assert_init)(void *addr, enum debug_obj_state state);
 };
 
 #ifdef CONFIG_DEBUG_OBJECTS
@@ -64,6 +67,7 @@ extern void debug_object_activate  (void *addr, struct debug_obj_descr *descr);
 extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr);
 extern void debug_object_destroy   (void *addr, struct debug_obj_descr *descr);
 extern void debug_object_free      (void *addr, struct debug_obj_descr *descr);
+extern void debug_object_assert_init(void *addr, struct debug_obj_descr *descr);
 
 /*
  * Active state:
@@ -89,6 +93,8 @@ static inline void
 debug_object_destroy   (void *addr, struct debug_obj_descr *descr) { }
 static inline void
 debug_object_free      (void *addr, struct debug_obj_descr *descr) { }
+static inline void
+debug_object_assert_init(void *addr, struct debug_obj_descr *descr) { }
 
 static inline void debug_objects_early_init(void) { }
 static inline void debug_objects_mem_init(void) { }
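For illustration only (not part of this patch): debug_object_assert_init() lets core code assert that an object has been initialized before it is used; when no tracking entry exists, the new fixup_assert_init hook can repair the situation, typically by treating the object as statically allocated and initializing its debug state on the fly. A hedged sketch with an invented object type:

struct example_obj {
	int state;
};

static struct debug_obj_descr example_debug_descr;

/* Called when debug_object_assert_init() finds no tracking entry:
 * assume a static object and set up its debug state (sketch only). */
static int example_fixup_assert_init(void *addr, enum debug_obj_state state)
{
	struct example_obj *obj = addr;

	if (state == ODEBUG_STATE_NOTAVAILABLE) {
		debug_object_init(obj, &example_debug_descr);
		return 1;	/* a fixup was performed */
	}
	return 0;
}

static struct debug_obj_descr example_debug_descr = {
	.name			= "example_obj",
	.fixup_assert_init	= example_fixup_assert_init,
};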
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
index ef90cbd8e173..57c9a8ae4f2d 100644
--- a/include/linux/dma_remapping.h
+++ b/include/linux/dma_remapping.h
@@ -31,6 +31,7 @@ extern void free_dmar_iommu(struct intel_iommu *iommu);
 extern int iommu_calculate_agaw(struct intel_iommu *iommu);
 extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
 extern int dmar_disabled;
+extern int intel_iommu_enabled;
 #else
 static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
 {
@@ -44,6 +45,7 @@ static inline void free_dmar_iommu(struct intel_iommu *iommu)
 {
 }
 #define dmar_disabled (1)
+#define intel_iommu_enabled (0)
 #endif
 
 
diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h
new file mode 100644
index 000000000000..5621547d631b
--- /dev/null
+++ b/include/linux/dynamic_queue_limits.h
@@ -0,0 +1,97 @@
+/*
+ * Dynamic queue limits (dql) - Definitions
+ *
+ * Copyright (c) 2011, Tom Herbert <therbert@google.com>
+ *
+ * This header file contains the definitions for dynamic queue limits (dql).
+ * dql would be used in conjunction with a producer/consumer type queue
+ * (possibly a HW queue).  Such a queue would have these general properties:
+ *
+ *   1) Objects are queued up to some limit specified as number of objects.
+ *   2) Periodically a completion process executes which retires consumed
+ *      objects.
+ *   3) Starvation occurs when limit has been reached, all queued data has
+ *      actually been consumed, but completion processing has not yet run
+ *      so queuing new data is blocked.
+ *   4) Minimizing the amount of queued data is desirable.
+ *
+ * The goal of dql is to calculate the limit as the minimum number of objects
+ * needed to prevent starvation.
+ *
+ * The primary functions of dql are:
+ *    dql_queued - called when objects are enqueued to record number of objects
+ *    dql_avail - returns how many objects are available to be queued based
+ *      on the object limit and how many objects are already enqueued
+ *    dql_completed - called at completion time to indicate how many objects
+ *      were retired from the queue
+ *
+ * The dql implementation does not implement any locking for the dql data
+ * structures, the higher layer should provide this.  dql_queued should
+ * be serialized to prevent concurrent execution of the function; this
+ * is also true for dql_completed.  However, dql_queued and dlq_completed can
+ * be executed concurrently (i.e. they can be protected by different locks).
+ */
+
+#ifndef _LINUX_DQL_H
+#define _LINUX_DQL_H
+
+#ifdef __KERNEL__
+
+struct dql {
+	/* Fields accessed in enqueue path (dql_queued) */
+	unsigned int	num_queued;		/* Total ever queued */
+	unsigned int	adj_limit;		/* limit + num_completed */
+	unsigned int	last_obj_cnt;		/* Count at last queuing */
+
+	/* Fields accessed only by completion path (dql_completed) */
+
+	unsigned int	limit ____cacheline_aligned_in_smp; /* Current limit */
+	unsigned int	num_completed;		/* Total ever completed */
+
+	unsigned int	prev_ovlimit;		/* Previous over limit */
+	unsigned int	prev_num_queued;	/* Previous queue total */
+	unsigned int	prev_last_obj_cnt;	/* Previous queuing cnt */
+
+	unsigned int	lowest_slack;		/* Lowest slack found */
+	unsigned long	slack_start_time;	/* Time slacks seen */
+
+	/* Configuration */
+	unsigned int	max_limit;		/* Max limit */
+	unsigned int	min_limit;		/* Minimum limit */
+	unsigned int	slack_hold_time;	/* Time to measure slack */
+};
+
+/* Set some static maximums */
+#define DQL_MAX_OBJECT (UINT_MAX / 16)
+#define DQL_MAX_LIMIT ((UINT_MAX / 2) - DQL_MAX_OBJECT)
+
+/*
+ * Record number of objects queued. Assumes that caller has already checked
+ * availability in the queue with dql_avail.
+ */
+static inline void dql_queued(struct dql *dql, unsigned int count)
+{
+	BUG_ON(count > DQL_MAX_OBJECT);
+
+	dql->num_queued += count;
+	dql->last_obj_cnt = count;
+}
+
+/* Returns how many objects can be queued, < 0 indicates over limit. */
+static inline int dql_avail(const struct dql *dql)
+{
+	return dql->adj_limit - dql->num_queued;
+}
+
+/* Record number of completed objects and recalculate the limit. */
+void dql_completed(struct dql *dql, unsigned int count);
+
+/* Reset dql state */
+void dql_reset(struct dql *dql);
+
+/* Initialize dql state */
+int dql_init(struct dql *dql, unsigned hold_time);
+
+#endif /* _KERNEL_ */
+
+#endif /* _LINUX_DQL_H */
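For illustration only (not part of this patch): the header comment above describes the intended calling pattern. A condensed sketch of how a transmit path might use dql, following that description; the queue stop/wake calls are placeholders, not part of this header.

/* Enqueue side (e.g. the driver's transmit routine), serialized by
 * the caller as the comment above requires. */
static void example_enqueue(struct dql *dql, unsigned int bytes)
{
	dql_queued(dql, bytes);
	if (dql_avail(dql) < 0)
		example_stop_queue();		/* placeholder */
}

/* Completion side (e.g. TX completion interrupt), also serialized,
 * but possibly under a different lock than the enqueue side. */
static void example_complete(struct dql *dql, unsigned int bytes)
{
	dql_completed(dql, bytes);
	if (dql_avail(dql) >= 0)
		example_wake_queue();		/* placeholder */
}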
diff --git a/include/linux/eeprom_93cx6.h b/include/linux/eeprom_93cx6.h
index c4627cbdb8e0..e50f98b0297a 100644
--- a/include/linux/eeprom_93cx6.h
+++ b/include/linux/eeprom_93cx6.h
@@ -33,6 +33,7 @@
 #define PCI_EEPROM_WIDTH_93C86	8
 #define PCI_EEPROM_WIDTH_OPCODE	3
 #define PCI_EEPROM_WRITE_OPCODE	0x05
+#define PCI_EEPROM_ERASE_OPCODE 0x07
 #define PCI_EEPROM_READ_OPCODE	0x06
 #define PCI_EEPROM_EWDS_OPCODE	0x10
 #define PCI_EEPROM_EWEN_OPCODE	0x13
@@ -46,6 +47,7 @@
 * @register_write(struct eeprom_93cx6 *eeprom): handler to
 *	write to the eeprom register by using all reg_* fields.
 * @width: eeprom width, should be one of the PCI_EEPROM_WIDTH_* defines
+ * @drive_data: Set if we're driving the data line.
 * @reg_data_in: register field to indicate data input
 * @reg_data_out: register field to indicate data output
 * @reg_data_clock: register field to set the data clock
@@ -62,6 +64,7 @@ struct eeprom_93cx6 {
 
 	int width;
 
+	char drive_data;
 	char reg_data_in;
 	char reg_data_out;
 	char reg_data_clock;
@@ -72,3 +75,8 @@ extern void eeprom_93cx6_read(struct eeprom_93cx6 *eeprom,
 	const u8 word, u16 *data);
 extern void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom,
 	const u8 word, __le16 *data, const u16 words);
+
+extern void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable);
+
+extern void eeprom_93cx6_write(struct eeprom_93cx6 *eeprom,
+	u8 addr, u16 data);
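For illustration only (not part of this patch): the new write support pairs an explicit write-enable step with the actual word write. A hedged usage sketch, assuming an already initialized struct eeprom_93cx6; the word address and value are arbitrary.

static void example_update_word(struct eeprom_93cx6 *eeprom)
{
	eeprom_93cx6_wren(eeprom, true);		/* enable writes */
	eeprom_93cx6_write(eeprom, 0x10, 0xabcd);	/* word 0x10 := 0xabcd */
	eeprom_93cx6_wren(eeprom, false);		/* write-protect again */
}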
diff --git a/include/linux/errqueue.h b/include/linux/errqueue.h
index 034072cea853..fd0628be45ce 100644
--- a/include/linux/errqueue.h
+++ b/include/linux/errqueue.h
@@ -17,14 +17,15 @@ struct sock_extended_err {
 #define SO_EE_ORIGIN_LOCAL	1
 #define SO_EE_ORIGIN_ICMP	2
 #define SO_EE_ORIGIN_ICMP6	3
-#define SO_EE_ORIGIN_TIMESTAMPING 4
+#define SO_EE_ORIGIN_TXSTATUS	4
+#define SO_EE_ORIGIN_TIMESTAMPING SO_EE_ORIGIN_TXSTATUS
 
 #define SO_EE_OFFENDER(ee)	((struct sockaddr*)((ee)+1))
 
 #ifdef __KERNEL__
 
 #include <net/ip.h>
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <linux/ipv6.h>
 #endif
 
@@ -33,7 +34,7 @@ struct sock_extended_err {
 struct sock_exterr_skb {
 	union {
 		struct inet_skb_parm	h4;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 		struct inet6_skb_parm	h6;
 #endif
 	} header;
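For illustration only (not part of this patch): the conversion above relies on IS_ENABLED(), which covers both built-in and modular configurations, so the two-way defined() test can be dropped. Roughly:

/* IS_ENABLED(CONFIG_IPV6) evaluates to 1 when CONFIG_IPV6 is set to
 * either 'y' or 'm', i.e. it is a shorter spelling of:
 *
 *	#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 */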
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index de33de1e2052..da5b2de99ae4 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -489,7 +489,10 @@ struct ethtool_rx_flow_spec {
 *	on return.
 *
 * For %ETHTOOL_GRXCLSRLCNT, @rule_cnt is set to the number of defined
- *	rules on return.
+ *	rules on return.  If @data is non-zero on return then it is the
+ *	size of the rule table, plus the flag %RX_CLS_LOC_SPECIAL if the
+ *	driver supports any special location values.  If that flag is not
+ *	set in @data then special location values should not be used.
 *
 * For %ETHTOOL_GRXCLSRULE, @fs.@location specifies the location of an
 *	existing rule on entry and @fs contains the rule on return.
@@ -501,10 +504,23 @@ struct ethtool_rx_flow_spec {
 *	must use the second parameter to get_rxnfc() instead of @rule_locs.
 *
 * For %ETHTOOL_SRXCLSRLINS, @fs specifies the rule to add or update.
- * @fs.@location specifies the location to use and must not be ignored.
+ * @fs.@location either specifies the location to use or is a special
+ *	location value with %RX_CLS_LOC_SPECIAL flag set.  On return,
+ *	@fs.@location is the actual rule location.
 *
 * For %ETHTOOL_SRXCLSRLDEL, @fs.@location specifies the location of an
 *	existing rule on entry.
+ *
+ * A driver supporting the special location values for
+ * %ETHTOOL_SRXCLSRLINS may add the rule at any suitable unused
+ * location, and may remove a rule at a later location (lower
+ * priority) that matches exactly the same set of flows.  The special
+ * values are: %RX_CLS_LOC_ANY, selecting any location;
+ * %RX_CLS_LOC_FIRST, selecting the first suitable location (maximum
+ * priority); and %RX_CLS_LOC_LAST, selecting the last suitable
+ * location (minimum priority).  Additional special values may be
+ * defined in future and drivers must return -%EINVAL for any
+ * unrecognised value.
 */
 struct ethtool_rxnfc {
 	__u32				cmd;
@@ -543,9 +559,15 @@ struct compat_ethtool_rxnfc {
 /**
 * struct ethtool_rxfh_indir - command to get or set RX flow hash indirection
 * @cmd: Specific command number - %ETHTOOL_GRXFHINDIR or %ETHTOOL_SRXFHINDIR
- * @size: On entry, the array size of the user buffer.  On return from
- *	%ETHTOOL_GRXFHINDIR, the array size of the hardware indirection table.
+ * @size: On entry, the array size of the user buffer, which may be zero.
+ *	On return from %ETHTOOL_GRXFHINDIR, the array size of the hardware
+ *	indirection table.
 * @ring_index: RX ring/queue index for each hash value
+ *
+ * For %ETHTOOL_GRXFHINDIR, a @size of zero means that only the size
+ * should be returned.  For %ETHTOOL_SRXFHINDIR, a @size of zero means
+ * the table should be reset to default values.  This last feature
+ * is not supported by the original implementations.
 */
 struct ethtool_rxfh_indir {
 	__u32	cmd;
@@ -724,9 +746,6 @@ enum ethtool_sfeatures_retval_bits {
 
 #include <linux/rculist.h>
 
-/* needed by dev_disable_lro() */
-extern int __ethtool_set_flags(struct net_device *dev, u32 flags);
-
 extern int __ethtool_get_settings(struct net_device *dev,
 				  struct ethtool_cmd *cmd);
 
@@ -750,19 +769,18 @@ struct net_device;
 
 /* Some generic methods drivers may use in their ethtool_ops */
 u32 ethtool_op_get_link(struct net_device *dev);
-u32 ethtool_op_get_tx_csum(struct net_device *dev);
-int ethtool_op_set_tx_csum(struct net_device *dev, u32 data);
-int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data);
-int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data);
-u32 ethtool_op_get_sg(struct net_device *dev);
-int ethtool_op_set_sg(struct net_device *dev, u32 data);
-u32 ethtool_op_get_tso(struct net_device *dev);
-int ethtool_op_set_tso(struct net_device *dev, u32 data);
-u32 ethtool_op_get_ufo(struct net_device *dev);
-int ethtool_op_set_ufo(struct net_device *dev, u32 data);
-u32 ethtool_op_get_flags(struct net_device *dev);
-int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported);
-bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported);
+
+/**
+ * ethtool_rxfh_indir_default - get default value for RX flow hash indirection
+ * @index: Index in RX flow hash indirection table
+ * @n_rx_rings: Number of RX rings to use
+ *
+ * This function provides the default policy for RX flow hash indirection.
+ */
+static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
+{
+	return index % n_rx_rings;
+}
 
 /**
 * struct ethtool_ops - optional netdev operations
@@ -807,22 +825,6 @@ bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported);
 * @get_pauseparam: Report pause parameters
 * @set_pauseparam: Set pause parameters.  Returns a negative error code
 *	or zero.
- * @get_rx_csum: Deprecated in favour of the netdev feature %NETIF_F_RXCSUM.
- *	Report whether receive checksums are turned on or off.
- * @set_rx_csum: Deprecated in favour of generic netdev features.  Turn
- *	receive checksum on or off.  Returns a negative error code or zero.
- * @get_tx_csum: Deprecated as redundant.  Report whether transmit checksums
- *	are turned on or off.
- * @set_tx_csum: Deprecated in favour of generic netdev features.  Turn
- *	transmit checksums on or off.  Returns a negative error code or zero.
- * @get_sg: Deprecated as redundant.  Report whether scatter-gather is
- *	enabled.
- * @set_sg: Deprecated in favour of generic netdev features.  Turn
- *	scatter-gather on or off.  Returns a negative error code or zero.
- * @get_tso: Deprecated as redundant.  Report whether TCP segmentation
- *	offload is enabled.
- * @set_tso: Deprecated in favour of generic netdev features.  Turn TCP
- *	segmentation offload on or off.  Returns a negative error code or zero.
 * @self_test: Run specified self-tests
 * @get_strings: Return a set of strings that describe the requested objects
 * @set_phys_id: Identify the physical devices, e.g. by flashing an LED
@@ -844,15 +846,6 @@ bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported);
 *	negative error code or zero.
 * @complete: Function to be called after any other operation except
 *	@begin.  Will be called even if the other operation failed.
- * @get_ufo: Deprecated as redundant.  Report whether UDP fragmentation
- *	offload is enabled.
- * @set_ufo: Deprecated in favour of generic netdev features.  Turn UDP
- *	fragmentation offload on or off.  Returns a negative error code or zero.
- * @get_flags: Deprecated as redundant.  Report features included in
- *	&enum ethtool_flags that are enabled.
- * @set_flags: Deprecated in favour of generic netdev features.  Turn
- *	features included in &enum ethtool_flags on or off.  Returns a
- *	negative error code or zero.
 * @get_priv_flags: Report driver-specific feature flags.
 * @set_priv_flags: Set driver-specific feature flags.  Returns a negative
 *	error code or zero.
@@ -866,11 +859,13 @@ bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported);
 * @reset: Reset (part of) the device, as specified by a bitmask of
 *	flags from &enum ethtool_reset_flags.  Returns a negative
 *	error code or zero.
- * @set_rx_ntuple: Set an RX n-tuple rule.  Returns a negative error code
- *	or zero.
+ * @get_rxfh_indir_size: Get the size of the RX flow hash indirection table.
+ *	Returns zero if not supported for this specific device.
 * @get_rxfh_indir: Get the contents of the RX flow hash indirection table.
+ *	Will not be called if @get_rxfh_indir_size returns zero.
 *	Returns a negative error code or zero.
 * @set_rxfh_indir: Set the contents of the RX flow hash indirection table.
+ *	Will not be called if @get_rxfh_indir_size returns zero.
 *	Returns a negative error code or zero.
 * @get_channels: Get number of channels.
 * @set_channels: Set number of channels.  Returns a negative error code or
@@ -917,14 +912,6 @@ struct ethtool_ops {
 				  struct ethtool_pauseparam*);
 	int	(*set_pauseparam)(struct net_device *,
 				  struct ethtool_pauseparam*);
-	u32	(*get_rx_csum)(struct net_device *);
-	int	(*set_rx_csum)(struct net_device *, u32);
-	u32	(*get_tx_csum)(struct net_device *);
-	int	(*set_tx_csum)(struct net_device *, u32);
-	u32	(*get_sg)(struct net_device *);
-	int	(*set_sg)(struct net_device *, u32);
-	u32	(*get_tso)(struct net_device *);
-	int	(*set_tso)(struct net_device *, u32);
 	void	(*self_test)(struct net_device *, struct ethtool_test *, u64 *);
 	void	(*get_strings)(struct net_device *, u32 stringset, u8 *);
 	int	(*set_phys_id)(struct net_device *, enum ethtool_phys_id_state);
@@ -932,10 +919,6 @@ struct ethtool_ops {
 				     struct ethtool_stats *, u64 *);
 	int	(*begin)(struct net_device *);
 	void	(*complete)(struct net_device *);
-	u32	(*get_ufo)(struct net_device *);
-	int	(*set_ufo)(struct net_device *, u32);
-	u32	(*get_flags)(struct net_device *);
-	int	(*set_flags)(struct net_device *, u32);
 	u32	(*get_priv_flags)(struct net_device *);
 	int	(*set_priv_flags)(struct net_device *, u32);
 	int	(*get_sset_count)(struct net_device *, int);
@@ -944,12 +927,9 @@ struct ethtool_ops {
 	int	(*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *);
 	int	(*flash_device)(struct net_device *, struct ethtool_flash *);
 	int	(*reset)(struct net_device *, u32 *);
-	int	(*set_rx_ntuple)(struct net_device *,
-				 struct ethtool_rx_ntuple *);
-	int	(*get_rxfh_indir)(struct net_device *,
-				  struct ethtool_rxfh_indir *);
-	int	(*set_rxfh_indir)(struct net_device *,
-				  const struct ethtool_rxfh_indir *);
+	u32	(*get_rxfh_indir_size)(struct net_device *);
+	int	(*get_rxfh_indir)(struct net_device *, u32 *);
+	int	(*set_rxfh_indir)(struct net_device *, const u32 *);
 	void	(*get_channels)(struct net_device *, struct ethtool_channels *);
 	int	(*set_channels)(struct net_device *, struct ethtool_channels *);
 	int	(*get_dump_flag)(struct net_device *, struct ethtool_dump *);
@@ -1173,6 +1153,12 @@ struct ethtool_ops {
 
 #define RX_CLS_FLOW_DISC	0xffffffffffffffffULL
 
+/* Special RX classification rule insert location values */
+#define RX_CLS_LOC_SPECIAL	0x80000000	/* flag */
+#define RX_CLS_LOC_ANY		0xffffffff
+#define RX_CLS_LOC_FIRST	0xfffffffe
+#define RX_CLS_LOC_LAST		0xfffffffd
+
 /* Reset flags */
 /* The reset() operation must clear the flags for the components which
 * were actually reset.  On successful return, the flags indicate the
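For illustration only (not part of this patch): the reworked indirection-table ops split the size query from the table transfer, and ethtool_rxfh_indir_default() provides the spread-across-rings default documented above. A hedged sketch of how a driver might wire up the three hooks; the private struct, its table size and fields are invented.

struct example_priv {
	u32 rss_indir[128];		/* invented table size */
	u32 num_rx_rings;
};

static u32 example_get_rxfh_indir_size(struct net_device *dev)
{
	return 128;			/* matches rss_indir[] above */
}

static int example_get_rxfh_indir(struct net_device *dev, u32 *indir)
{
	struct example_priv *priv = netdev_priv(dev);

	memcpy(indir, priv->rss_indir, sizeof(priv->rss_indir));
	return 0;
}

static int example_set_rxfh_indir(struct net_device *dev, const u32 *indir)
{
	struct example_priv *priv = netdev_priv(dev);

	memcpy(priv->rss_indir, indir, sizeof(priv->rss_indir));
	/* ...program the hardware redirection table from rss_indir... */
	return 0;
}

/* At probe time the table can be seeded with the default spread: */
static void example_init_indir(struct example_priv *priv)
{
	int i;

	for (i = 0; i < 128; i++)
		priv->rss_indir[i] =
			ethtool_rxfh_indir_default(i, priv->num_rx_rings);
}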
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
index 61549b26ad6f..73c28dea10ae 100644
--- a/include/linux/genetlink.h
+++ b/include/linux/genetlink.h
@@ -85,6 +85,30 @@ enum {
 /* All generic netlink requests are serialized by a global lock.  */
 extern void genl_lock(void);
 extern void genl_unlock(void);
+#ifdef CONFIG_PROVE_LOCKING
+extern int lockdep_genl_is_held(void);
+#endif
+
+/**
+ * rcu_dereference_genl - rcu_dereference with debug checking
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Do an rcu_dereference(p), but check caller either holds rcu_read_lock()
+ * or genl mutex. Note : Please prefer genl_dereference() or rcu_dereference()
+ */
+#define rcu_dereference_genl(p)					\
+	rcu_dereference_check(p, lockdep_genl_is_held())
+
+/**
+ * genl_dereference - fetch RCU pointer when updates are prevented by genl mutex
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Return the value of the specified RCU-protected pointer, but omit
+ * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because
+ * caller holds genl mutex.
+ */
+#define genl_dereference(p)					\
+	rcu_dereference_protected(p, lockdep_genl_is_held())
 
 #endif	/* __KERNEL__ */
 
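For illustration only (not part of this patch): the two helpers above follow the documented split between update-side access (genl_dereference(), callers hold the genl mutex) and read-side access (rcu_dereference_genl(), callers hold either the mutex or rcu_read_lock()). A minimal sketch with an invented RCU-protected pointer:

struct example_cfg {
	struct rcu_head rcu;
	int value;
};

static struct example_cfg __rcu *example_cfg;	/* invented pointer */

/* Update side: the genl mutex serializes writers. */
static void example_replace_cfg(struct example_cfg *new_cfg)
{
	struct example_cfg *old;

	genl_lock();
	old = genl_dereference(example_cfg);
	rcu_assign_pointer(example_cfg, new_cfg);
	genl_unlock();
	if (old)
		kfree_rcu(old, rcu);
}

/* Read side: valid under rcu_read_lock() or under the genl mutex. */
static int example_read_value(void)
{
	struct example_cfg *cfg;
	int value = -1;

	rcu_read_lock();
	cfg = rcu_dereference_genl(example_cfg);
	if (cfg)
		value = cfg->value;
	rcu_read_unlock();
	return value;
}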
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index f743883f769e..bb7f30971858 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -139,20 +139,7 @@ static inline void account_system_vtime(struct task_struct *tsk)
 extern void account_system_vtime(struct task_struct *tsk);
 #endif
 
-#if defined(CONFIG_NO_HZ)
 #if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
-extern void rcu_enter_nohz(void);
-extern void rcu_exit_nohz(void);
-
-static inline void rcu_irq_enter(void)
-{
-	rcu_exit_nohz();
-}
-
-static inline void rcu_irq_exit(void)
-{
-	rcu_enter_nohz();
-}
 
 static inline void rcu_nmi_enter(void)
 {
@@ -163,17 +150,9 @@ static inline void rcu_nmi_exit(void)
 }
 
 #else
-extern void rcu_irq_enter(void);
-extern void rcu_irq_exit(void);
 extern void rcu_nmi_enter(void);
 extern void rcu_nmi_exit(void);
 #endif
-#else
-# define rcu_irq_enter() do { } while (0)
-# define rcu_irq_exit() do { } while (0)
-# define rcu_nmi_enter() do { } while (0)
-# define rcu_nmi_exit() do { } while (0)
-#endif /* #if defined(CONFIG_NO_HZ) */
 
 /*
 * It is safe to do non-atomic ops on ->hardirq_context,
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 48363c3c40f8..210e2c325534 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h | |||
| @@ -128,6 +128,7 @@ | |||
| 128 | #define IEEE80211_QOS_CTL_ACK_POLICY_NOACK 0x0020 | 128 | #define IEEE80211_QOS_CTL_ACK_POLICY_NOACK 0x0020 |
| 129 | #define IEEE80211_QOS_CTL_ACK_POLICY_NO_EXPL 0x0040 | 129 | #define IEEE80211_QOS_CTL_ACK_POLICY_NO_EXPL 0x0040 |
| 130 | #define IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK 0x0060 | 130 | #define IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK 0x0060 |
| 131 | #define IEEE80211_QOS_CTL_ACK_POLICY_MASK 0x0060 | ||
| 131 | /* A-MSDU 802.11n */ | 132 | /* A-MSDU 802.11n */ |
| 132 | #define IEEE80211_QOS_CTL_A_MSDU_PRESENT 0x0080 | 133 | #define IEEE80211_QOS_CTL_A_MSDU_PRESENT 0x0080 |
| 133 | /* Mesh Control 802.11s */ | 134 | /* Mesh Control 802.11s */ |
| @@ -543,6 +544,15 @@ static inline int ieee80211_is_qos_nullfunc(__le16 fc) | |||
| 543 | cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC); | 544 | cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC); |
| 544 | } | 545 | } |
| 545 | 546 | ||
| 547 | /** | ||
| 548 | * ieee80211_is_first_frag - check if IEEE80211_SCTL_FRAG is not set | ||
| 549 | * @seq_ctrl: frame sequence control bytes in little-endian byteorder | ||
| 550 | */ | ||
| 551 | static inline int ieee80211_is_first_frag(__le16 seq_ctrl) | ||
| 552 | { | ||
| 553 | return (seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0; | ||
| 554 | } | ||
| 555 | |||
| 546 | struct ieee80211s_hdr { | 556 | struct ieee80211s_hdr { |
| 547 | u8 flags; | 557 | u8 flags; |
| 548 | u8 ttl; | 558 | u8 ttl; |
| @@ -770,6 +780,9 @@ struct ieee80211_mgmt { | |||
| 770 | } u; | 780 | } u; |
| 771 | } __attribute__ ((packed)); | 781 | } __attribute__ ((packed)); |
| 772 | 782 | ||
| 783 | /* Supported Rates value encodings in 802.11n-2009 7.3.2.2 */ | ||
| 784 | #define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127 | ||
| 785 | |||
| 773 | /* mgmt header + 1 byte category code */ | 786 | /* mgmt header + 1 byte category code */ |
| 774 | #define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u) | 787 | #define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u) |
| 775 | 788 | ||
| @@ -1552,6 +1565,8 @@ enum ieee80211_sa_query_action { | |||
| 1552 | #define WLAN_CIPHER_SUITE_WEP104 0x000FAC05 | 1565 | #define WLAN_CIPHER_SUITE_WEP104 0x000FAC05 |
| 1553 | #define WLAN_CIPHER_SUITE_AES_CMAC 0x000FAC06 | 1566 | #define WLAN_CIPHER_SUITE_AES_CMAC 0x000FAC06 |
| 1554 | 1567 | ||
| 1568 | #define WLAN_CIPHER_SUITE_SMS4 0x00147201 | ||
| 1569 | |||
| 1555 | /* AKM suite selectors */ | 1570 | /* AKM suite selectors */ |
| 1556 | #define WLAN_AKM_SUITE_8021X 0x000FAC01 | 1571 | #define WLAN_AKM_SUITE_8021X 0x000FAC01 |
| 1557 | #define WLAN_AKM_SUITE_PSK 0x000FAC02 | 1572 | #define WLAN_AKM_SUITE_PSK 0x000FAC02 |
| @@ -1689,6 +1704,23 @@ static inline bool ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr) | |||
| 1689 | } | 1704 | } |
| 1690 | 1705 | ||
| 1691 | /** | 1706 | /** |
| 1707 | * ieee80211_is_public_action - check if frame is a public action frame | ||
| 1708 | * @hdr: the frame | ||
| 1709 | * @len: length of the frame | ||
| 1710 | */ | ||
| 1711 | static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr, | ||
| 1712 | size_t len) | ||
| 1713 | { | ||
| 1714 | struct ieee80211_mgmt *mgmt = (void *)hdr; | ||
| 1715 | |||
| 1716 | if (len < IEEE80211_MIN_ACTION_SIZE) | ||
| 1717 | return false; | ||
| 1718 | if (!ieee80211_is_action(hdr->frame_control)) | ||
| 1719 | return false; | ||
| 1720 | return mgmt->u.action.category == WLAN_CATEGORY_PUBLIC; | ||
| 1721 | } | ||
| 1722 | |||
| 1723 | /** | ||
| 1692 | * ieee80211_fhss_chan_to_freq - get channel frequency | 1724 | * ieee80211_fhss_chan_to_freq - get channel frequency |
| 1693 | * @channel: the FHSS channel | 1725 | * @channel: the FHSS channel |
| 1694 | * | 1726 | * |
diff --git a/include/linux/if.h b/include/linux/if.h index db20bd4fd16b..06b6ef60c821 100644 --- a/include/linux/if.h +++ b/include/linux/if.h | |||
| @@ -79,6 +79,7 @@ | |||
| 79 | #define IFF_TX_SKB_SHARING 0x10000 /* The interface supports sharing | 79 | #define IFF_TX_SKB_SHARING 0x10000 /* The interface supports sharing |
| 80 | * skbs on transmit */ | 80 | * skbs on transmit */ |
| 81 | #define IFF_UNICAST_FLT 0x20000 /* Supports unicast filtering */ | 81 | #define IFF_UNICAST_FLT 0x20000 /* Supports unicast filtering */ |
| 82 | #define IFF_TEAM_PORT 0x40000 /* device used as team port */ | ||
| 82 | 83 | ||
| 83 | #define IF_GET_IFACE 0x0001 /* for querying only */ | 84 | #define IF_GET_IFACE 0x0001 /* for querying only */ |
| 84 | #define IF_GET_PROTO 0x0002 | 85 | #define IF_GET_PROTO 0x0002 |
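IFF_TEAM_PORT is a device private flag like the neighbouring IFF_* bits, so it is tested against dev->priv_flags. A tiny illustrative helper assuming only that convention; the helper itself is not part of the patch.

#include <linux/netdevice.h>

/* Sketch: true while the device is enslaved as a team port. */
static inline bool example_is_team_port(const struct net_device *dev)
{
	return !!(dev->priv_flags & IFF_TEAM_PORT);
}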
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h index e473003e4bda..56d907a2c804 100644 --- a/include/linux/if_ether.h +++ b/include/linux/if_ether.h | |||
| @@ -79,6 +79,7 @@ | |||
| 79 | #define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */ | 79 | #define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */ |
| 80 | #define ETH_P_AOE 0x88A2 /* ATA over Ethernet */ | 80 | #define ETH_P_AOE 0x88A2 /* ATA over Ethernet */ |
| 81 | #define ETH_P_8021AD 0x88A8 /* 802.1ad Service VLAN */ | 81 | #define ETH_P_8021AD 0x88A8 /* 802.1ad Service VLAN */ |
| 82 | #define ETH_P_802_EX1 0x88B5 /* 802.1 Local Experimental 1. */ | ||
| 82 | #define ETH_P_TIPC 0x88CA /* TIPC */ | 83 | #define ETH_P_TIPC 0x88CA /* TIPC */ |
| 83 | #define ETH_P_8021AH 0x88E7 /* 802.1ah Backbone Service Tag */ | 84 | #define ETH_P_8021AH 0x88E7 /* 802.1ah Backbone Service Tag */ |
| 84 | #define ETH_P_1588 0x88F7 /* IEEE 1588 Timesync */ | 85 | #define ETH_P_1588 0x88F7 /* IEEE 1588 Timesync */ |
diff --git a/include/linux/if_team.h b/include/linux/if_team.h new file mode 100644 index 000000000000..828181fbad5d --- /dev/null +++ b/include/linux/if_team.h | |||
| @@ -0,0 +1,242 @@ | |||
| 1 | /* | ||
| 2 | * include/linux/if_team.h - Network team device driver header | ||
| 3 | * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com> | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify | ||
| 6 | * it under the terms of the GNU General Public License as published by | ||
| 7 | * the Free Software Foundation; either version 2 of the License, or | ||
| 8 | * (at your option) any later version. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #ifndef _LINUX_IF_TEAM_H_ | ||
| 12 | #define _LINUX_IF_TEAM_H_ | ||
| 13 | |||
| 14 | #ifdef __KERNEL__ | ||
| 15 | |||
| 16 | struct team_pcpu_stats { | ||
| 17 | u64 rx_packets; | ||
| 18 | u64 rx_bytes; | ||
| 19 | u64 rx_multicast; | ||
| 20 | u64 tx_packets; | ||
| 21 | u64 tx_bytes; | ||
| 22 | struct u64_stats_sync syncp; | ||
| 23 | u32 rx_dropped; | ||
| 24 | u32 tx_dropped; | ||
| 25 | }; | ||
| 26 | |||
| 27 | struct team; | ||
| 28 | |||
| 29 | struct team_port { | ||
| 30 | struct net_device *dev; | ||
| 31 | struct hlist_node hlist; /* node in hash list */ | ||
| 32 | struct list_head list; /* node in ordinary list */ | ||
| 33 | struct team *team; | ||
| 34 | int index; | ||
| 35 | |||
| 36 | /* | ||
| 37 | * A place for storing original values of the device before it | ||
| 38 | * becomes a port. | ||
| 39 | */ | ||
| 40 | struct { | ||
| 41 | unsigned char dev_addr[MAX_ADDR_LEN]; | ||
| 42 | unsigned int mtu; | ||
| 43 | } orig; | ||
| 44 | |||
| 45 | bool linkup; | ||
| 46 | u32 speed; | ||
| 47 | u8 duplex; | ||
| 48 | |||
| 49 | struct rcu_head rcu; | ||
| 50 | }; | ||
| 51 | |||
| 52 | struct team_mode_ops { | ||
| 53 | int (*init)(struct team *team); | ||
| 54 | void (*exit)(struct team *team); | ||
| 55 | rx_handler_result_t (*receive)(struct team *team, | ||
| 56 | struct team_port *port, | ||
| 57 | struct sk_buff *skb); | ||
| 58 | bool (*transmit)(struct team *team, struct sk_buff *skb); | ||
| 59 | int (*port_enter)(struct team *team, struct team_port *port); | ||
| 60 | void (*port_leave)(struct team *team, struct team_port *port); | ||
| 61 | void (*port_change_mac)(struct team *team, struct team_port *port); | ||
| 62 | }; | ||
| 63 | |||
| 64 | enum team_option_type { | ||
| 65 | TEAM_OPTION_TYPE_U32, | ||
| 66 | TEAM_OPTION_TYPE_STRING, | ||
| 67 | }; | ||
| 68 | |||
| 69 | struct team_option { | ||
| 70 | struct list_head list; | ||
| 71 | const char *name; | ||
| 72 | enum team_option_type type; | ||
| 73 | int (*getter)(struct team *team, void *arg); | ||
| 74 | int (*setter)(struct team *team, void *arg); | ||
| 75 | }; | ||
| 76 | |||
| 77 | struct team_mode { | ||
| 78 | struct list_head list; | ||
| 79 | const char *kind; | ||
| 80 | struct module *owner; | ||
| 81 | size_t priv_size; | ||
| 82 | const struct team_mode_ops *ops; | ||
| 83 | }; | ||
| 84 | |||
| 85 | #define TEAM_PORT_HASHBITS 4 | ||
| 86 | #define TEAM_PORT_HASHENTRIES (1 << TEAM_PORT_HASHBITS) | ||
| 87 | |||
| 88 | #define TEAM_MODE_PRIV_LONGS 4 | ||
| 89 | #define TEAM_MODE_PRIV_SIZE (sizeof(long) * TEAM_MODE_PRIV_LONGS) | ||
| 90 | |||
| 91 | struct team { | ||
| 92 | struct net_device *dev; /* associated netdevice */ | ||
| 93 | struct team_pcpu_stats __percpu *pcpu_stats; | ||
| 94 | |||
| 95 | struct mutex lock; /* used for overall locking, e.g. port lists write */ | ||
| 96 | |||
| 97 | /* | ||
| 98 | * port lists with port count | ||
| 99 | */ | ||
| 100 | int port_count; | ||
| 101 | struct hlist_head port_hlist[TEAM_PORT_HASHENTRIES]; | ||
| 102 | struct list_head port_list; | ||
| 103 | |||
| 104 | struct list_head option_list; | ||
| 105 | |||
| 106 | const struct team_mode *mode; | ||
| 107 | struct team_mode_ops ops; | ||
| 108 | long mode_priv[TEAM_MODE_PRIV_LONGS]; | ||
| 109 | }; | ||
| 110 | |||
| 111 | static inline struct hlist_head *team_port_index_hash(struct team *team, | ||
| 112 | int port_index) | ||
| 113 | { | ||
| 114 | return &team->port_hlist[port_index & (TEAM_PORT_HASHENTRIES - 1)]; | ||
| 115 | } | ||
| 116 | |||
| 117 | static inline struct team_port *team_get_port_by_index(struct team *team, | ||
| 118 | int port_index) | ||
| 119 | { | ||
| 120 | struct hlist_node *p; | ||
| 121 | struct team_port *port; | ||
| 122 | struct hlist_head *head = team_port_index_hash(team, port_index); | ||
| 123 | |||
| 124 | hlist_for_each_entry(port, p, head, hlist) | ||
| 125 | if (port->index == port_index) | ||
| 126 | return port; | ||
| 127 | return NULL; | ||
| 128 | } | ||
| 129 | static inline struct team_port *team_get_port_by_index_rcu(struct team *team, | ||
| 130 | int port_index) | ||
| 131 | { | ||
| 132 | struct hlist_node *p; | ||
| 133 | struct team_port *port; | ||
| 134 | struct hlist_head *head = team_port_index_hash(team, port_index); | ||
| 135 | |||
| 136 | hlist_for_each_entry_rcu(port, p, head, hlist) | ||
| 137 | if (port->index == port_index) | ||
| 138 | return port; | ||
| 139 | return NULL; | ||
| 140 | } | ||
| 141 | |||
| 142 | extern int team_port_set_team_mac(struct team_port *port); | ||
| 143 | extern int team_options_register(struct team *team, | ||
| 144 | const struct team_option *option, | ||
| 145 | size_t option_count); | ||
| 146 | extern void team_options_unregister(struct team *team, | ||
| 147 | const struct team_option *option, | ||
| 148 | size_t option_count); | ||
| 149 | extern int team_mode_register(struct team_mode *mode); | ||
| 150 | extern int team_mode_unregister(struct team_mode *mode); | ||
| 151 | |||
| 152 | #endif /* __KERNEL__ */ | ||
| 153 | |||
| 154 | #define TEAM_STRING_MAX_LEN 32 | ||
| 155 | |||
| 156 | /********************************** | ||
| 157 | * NETLINK_GENERIC netlink family. | ||
| 158 | **********************************/ | ||
| 159 | |||
| 160 | enum { | ||
| 161 | TEAM_CMD_NOOP, | ||
| 162 | TEAM_CMD_OPTIONS_SET, | ||
| 163 | TEAM_CMD_OPTIONS_GET, | ||
| 164 | TEAM_CMD_PORT_LIST_GET, | ||
| 165 | |||
| 166 | __TEAM_CMD_MAX, | ||
| 167 | TEAM_CMD_MAX = (__TEAM_CMD_MAX - 1), | ||
| 168 | }; | ||
| 169 | |||
| 170 | enum { | ||
| 171 | TEAM_ATTR_UNSPEC, | ||
| 172 | TEAM_ATTR_TEAM_IFINDEX, /* u32 */ | ||
| 173 | TEAM_ATTR_LIST_OPTION, /* nest */ | ||
| 174 | TEAM_ATTR_LIST_PORT, /* nest */ | ||
| 175 | |||
| 176 | __TEAM_ATTR_MAX, | ||
| 177 | TEAM_ATTR_MAX = __TEAM_ATTR_MAX - 1, | ||
| 178 | }; | ||
| 179 | |||
| 180 | /* Nested layout of get/set msg: | ||
| 181 | * | ||
| 182 | * [TEAM_ATTR_LIST_OPTION] | ||
| 183 | * [TEAM_ATTR_ITEM_OPTION] | ||
| 184 | * [TEAM_ATTR_OPTION_*], ... | ||
| 185 | * [TEAM_ATTR_ITEM_OPTION] | ||
| 186 | * [TEAM_ATTR_OPTION_*], ... | ||
| 187 | * ... | ||
| 188 | * [TEAM_ATTR_LIST_PORT] | ||
| 189 | * [TEAM_ATTR_ITEM_PORT] | ||
| 190 | * [TEAM_ATTR_PORT_*], ... | ||
| 191 | * [TEAM_ATTR_ITEM_PORT] | ||
| 192 | * [TEAM_ATTR_PORT_*], ... | ||
| 193 | * ... | ||
| 194 | */ | ||
| 195 | |||
| 196 | enum { | ||
| 197 | TEAM_ATTR_ITEM_OPTION_UNSPEC, | ||
| 198 | TEAM_ATTR_ITEM_OPTION, /* nest */ | ||
| 199 | |||
| 200 | __TEAM_ATTR_ITEM_OPTION_MAX, | ||
| 201 | TEAM_ATTR_ITEM_OPTION_MAX = __TEAM_ATTR_ITEM_OPTION_MAX - 1, | ||
| 202 | }; | ||
| 203 | |||
| 204 | enum { | ||
| 205 | TEAM_ATTR_OPTION_UNSPEC, | ||
| 206 | TEAM_ATTR_OPTION_NAME, /* string */ | ||
| 207 | TEAM_ATTR_OPTION_CHANGED, /* flag */ | ||
| 208 | TEAM_ATTR_OPTION_TYPE, /* u8 */ | ||
| 209 | TEAM_ATTR_OPTION_DATA, /* dynamic */ | ||
| 210 | |||
| 211 | __TEAM_ATTR_OPTION_MAX, | ||
| 212 | TEAM_ATTR_OPTION_MAX = __TEAM_ATTR_OPTION_MAX - 1, | ||
| 213 | }; | ||
| 214 | |||
| 215 | enum { | ||
| 216 | TEAM_ATTR_ITEM_PORT_UNSPEC, | ||
| 217 | TEAM_ATTR_ITEM_PORT, /* nest */ | ||
| 218 | |||
| 219 | __TEAM_ATTR_ITEM_PORT_MAX, | ||
| 220 | TEAM_ATTR_ITEM_PORT_MAX = __TEAM_ATTR_ITEM_PORT_MAX - 1, | ||
| 221 | }; | ||
| 222 | |||
| 223 | enum { | ||
| 224 | TEAM_ATTR_PORT_UNSPEC, | ||
| 225 | TEAM_ATTR_PORT_IFINDEX, /* u32 */ | ||
| 226 | TEAM_ATTR_PORT_CHANGED, /* flag */ | ||
| 227 | TEAM_ATTR_PORT_LINKUP, /* flag */ | ||
| 228 | TEAM_ATTR_PORT_SPEED, /* u32 */ | ||
| 229 | TEAM_ATTR_PORT_DUPLEX, /* u8 */ | ||
| 230 | |||
| 231 | __TEAM_ATTR_PORT_MAX, | ||
| 232 | TEAM_ATTR_PORT_MAX = __TEAM_ATTR_PORT_MAX - 1, | ||
| 233 | }; | ||
| 234 | |||
| 235 | /* | ||
| 236 | * NETLINK_GENERIC related info | ||
| 237 | */ | ||
| 238 | #define TEAM_GENL_NAME "team" | ||
| 239 | #define TEAM_GENL_VERSION 0x1 | ||
| 240 | #define TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME "change_event" | ||
| 241 | |||
| 242 | #endif /* _LINUX_IF_TEAM_H_ */ | ||
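The new header defines the whole contract for a teaming mode: a mode supplies struct team_mode_ops, wraps it in struct team_mode, and registers it with team_mode_register(). Below is a minimal, hypothetical pass-through mode sketched only from these declarations; the dummy_* names, the choice of RX_HANDLER_ANOTHER on receive, and the assumption that transmit runs under RCU are illustrative, not guaranteed by the header.

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/if_team.h>

/* Sketch: accept every frame for the team device and transmit on the
 * first port. Error handling is intentionally minimal. */
static rx_handler_result_t dummy_receive(struct team *team,
					 struct team_port *port,
					 struct sk_buff *skb)
{
	/* Hand the frame to the team device (assumed core behaviour). */
	return RX_HANDLER_ANOTHER;
}

static bool dummy_transmit(struct team *team, struct sk_buff *skb)
{
	/* Assumes the core calls transmit under rcu_read_lock(). */
	struct team_port *port = team_get_port_by_index_rcu(team, 0);

	if (!port || !port->linkup) {
		dev_kfree_skb_any(skb);
		return false;
	}
	skb->dev = port->dev;
	return dev_queue_xmit(skb) == 0;
}

static const struct team_mode_ops dummy_mode_ops = {
	.receive	= dummy_receive,
	.transmit	= dummy_transmit,
};

static struct team_mode dummy_mode = {
	.kind	= "dummy",
	.owner	= THIS_MODULE,
	.ops	= &dummy_mode_ops,
};

static int __init dummy_mode_init(void)
{
	return team_mode_register(&dummy_mode);
}

static void __exit dummy_mode_exit(void)
{
	team_mode_unregister(&dummy_mode);
}

module_init(dummy_mode_init);
module_exit(dummy_mode_exit);
MODULE_LICENSE("GPL");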
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 12d5543b14f2..13aff1e2183b 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h | |||
| @@ -74,22 +74,7 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) | |||
| 74 | /* found in socket.c */ | 74 | /* found in socket.c */ |
| 75 | extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *)); | 75 | extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *)); |
| 76 | 76 | ||
| 77 | /* if this changes, algorithm will have to be reworked because this | 77 | struct vlan_info; |
| 78 | * depends on completely exhausting the VLAN identifier space. Thus | ||
| 79 | * it gives constant time look-up, but in many cases it wastes memory. | ||
| 80 | */ | ||
| 81 | #define VLAN_GROUP_ARRAY_SPLIT_PARTS 8 | ||
| 82 | #define VLAN_GROUP_ARRAY_PART_LEN (VLAN_N_VID/VLAN_GROUP_ARRAY_SPLIT_PARTS) | ||
| 83 | |||
| 84 | struct vlan_group { | ||
| 85 | struct net_device *real_dev; /* The ethernet(like) device | ||
| 86 | * the vlan is attached to. | ||
| 87 | */ | ||
| 88 | unsigned int nr_vlans; | ||
| 89 | struct hlist_node hlist; /* linked list */ | ||
| 90 | struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS]; | ||
| 91 | struct rcu_head rcu; | ||
| 92 | }; | ||
| 93 | 78 | ||
| 94 | static inline int is_vlan_dev(struct net_device *dev) | 79 | static inline int is_vlan_dev(struct net_device *dev) |
| 95 | { | 80 | { |
| @@ -109,6 +94,13 @@ extern u16 vlan_dev_vlan_id(const struct net_device *dev); | |||
| 109 | extern bool vlan_do_receive(struct sk_buff **skb, bool last_handler); | 94 | extern bool vlan_do_receive(struct sk_buff **skb, bool last_handler); |
| 110 | extern struct sk_buff *vlan_untag(struct sk_buff *skb); | 95 | extern struct sk_buff *vlan_untag(struct sk_buff *skb); |
| 111 | 96 | ||
| 97 | extern int vlan_vid_add(struct net_device *dev, unsigned short vid); | ||
| 98 | extern void vlan_vid_del(struct net_device *dev, unsigned short vid); | ||
| 99 | |||
| 100 | extern int vlan_vids_add_by_dev(struct net_device *dev, | ||
| 101 | const struct net_device *by_dev); | ||
| 102 | extern void vlan_vids_del_by_dev(struct net_device *dev, | ||
| 103 | const struct net_device *by_dev); | ||
| 112 | #else | 104 | #else |
| 113 | static inline struct net_device * | 105 | static inline struct net_device * |
| 114 | __vlan_find_dev_deep(struct net_device *real_dev, u16 vlan_id) | 106 | __vlan_find_dev_deep(struct net_device *real_dev, u16 vlan_id) |
| @@ -139,6 +131,26 @@ static inline struct sk_buff *vlan_untag(struct sk_buff *skb) | |||
| 139 | { | 131 | { |
| 140 | return skb; | 132 | return skb; |
| 141 | } | 133 | } |
| 134 | |||
| 135 | static inline int vlan_vid_add(struct net_device *dev, unsigned short vid) | ||
| 136 | { | ||
| 137 | return 0; | ||
| 138 | } | ||
| 139 | |||
| 140 | static inline void vlan_vid_del(struct net_device *dev, unsigned short vid) | ||
| 141 | { | ||
| 142 | } | ||
| 143 | |||
| 144 | static inline int vlan_vids_add_by_dev(struct net_device *dev, | ||
| 145 | const struct net_device *by_dev) | ||
| 146 | { | ||
| 147 | return 0; | ||
| 148 | } | ||
| 149 | |||
| 150 | static inline void vlan_vids_del_by_dev(struct net_device *dev, | ||
| 151 | const struct net_device *by_dev) | ||
| 152 | { | ||
| 153 | } | ||
| 142 | #endif | 154 | #endif |
| 143 | 155 | ||
| 144 | /** | 156 | /** |
| @@ -310,6 +322,40 @@ static inline __be16 vlan_get_protocol(const struct sk_buff *skb) | |||
| 310 | 322 | ||
| 311 | return protocol; | 323 | return protocol; |
| 312 | } | 324 | } |
| 325 | |||
| 326 | static inline void vlan_set_encap_proto(struct sk_buff *skb, | ||
| 327 | struct vlan_hdr *vhdr) | ||
| 328 | { | ||
| 329 | __be16 proto; | ||
| 330 | unsigned char *rawp; | ||
| 331 | |||
| 332 | /* | ||
| 333 | * Was a VLAN packet, grab the encapsulated protocol, which the layer | ||
| 334 | * three protocols care about. | ||
| 335 | */ | ||
| 336 | |||
| 337 | proto = vhdr->h_vlan_encapsulated_proto; | ||
| 338 | if (ntohs(proto) >= 1536) { | ||
| 339 | skb->protocol = proto; | ||
| 340 | return; | ||
| 341 | } | ||
| 342 | |||
| 343 | rawp = skb->data; | ||
| 344 | if (*(unsigned short *) rawp == 0xFFFF) | ||
| 345 | /* | ||
| 346 | * This is a magic hack to spot IPX packets. Older Novell | ||
| 347 | * breaks the protocol design and runs IPX over 802.3 without | ||
| 348 | * an 802.2 LLC layer. We look for FFFF which isn't a used | ||
| 349 | * 802.2 SSAP/DSAP. This won't work for fault tolerant netware | ||
| 350 | * but does for the rest. | ||
| 351 | */ | ||
| 352 | skb->protocol = htons(ETH_P_802_3); | ||
| 353 | else | ||
| 354 | /* | ||
| 355 | * Real 802.2 LLC | ||
| 356 | */ | ||
| 357 | skb->protocol = htons(ETH_P_802_2); | ||
| 358 | } | ||
| 313 | #endif /* __KERNEL__ */ | 359 | #endif /* __KERNEL__ */ |
| 314 | 360 | ||
| 315 | /* VLAN IOCTLs are found in sockios.h */ | 361 | /* VLAN IOCTLs are found in sockios.h */ |
| @@ -352,7 +398,7 @@ struct vlan_ioctl_args { | |||
| 352 | unsigned int skb_priority; | 398 | unsigned int skb_priority; |
| 353 | unsigned int name_type; | 399 | unsigned int name_type; |
| 354 | unsigned int bind_type; | 400 | unsigned int bind_type; |
| 355 | unsigned int flag; /* Matches vlan_dev_info flags */ | 401 | unsigned int flag; /* Matches vlan_dev_priv flags */ |
| 356 | } u; | 402 | } u; |
| 357 | 403 | ||
| 358 | short vlan_qos; | 404 | short vlan_qos; |
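The vlan_vid_add/del and vlan_vids_add_by_dev/del_by_dev entry points are aimed at stacked devices such as bonding or team: when a lower device is enslaved, the upper device copies its configured VLAN IDs onto it and removes them again on release. Because the !VLAN configuration provides inline stubs, callers need no ifdefs. A hedged sketch of such an enslave/release pair; example_enslave/example_release and the elided steps are illustrative only.

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

/* Sketch: propagate the upper device's VLAN filter onto a new lower
 * device and undo it again when the lower device is released. */
static int example_enslave(struct net_device *upper, struct net_device *port)
{
	int err;

	err = vlan_vids_add_by_dev(port, upper);
	if (err)
		return err;

	/* ... remaining enslave steps ... */
	return 0;
}

static void example_release(struct net_device *upper, struct net_device *port)
{
	/* ... remaining release steps ... */
	vlan_vids_del_by_dev(port, upper);
}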
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h index abf5028db981..34e8d52c1925 100644 --- a/include/linux/inet_diag.h +++ b/include/linux/inet_diag.h | |||
| @@ -22,7 +22,7 @@ struct inet_diag_sockid { | |||
| 22 | 22 | ||
| 23 | /* Request structure */ | 23 | /* Request structure */ |
| 24 | 24 | ||
| 25 | struct inet_diag_req { | 25 | struct inet_diag_req_compat { |
| 26 | __u8 idiag_family; /* Family of addresses. */ | 26 | __u8 idiag_family; /* Family of addresses. */ |
| 27 | __u8 idiag_src_len; | 27 | __u8 idiag_src_len; |
| 28 | __u8 idiag_dst_len; | 28 | __u8 idiag_dst_len; |
| @@ -34,6 +34,15 @@ struct inet_diag_req { | |||
| 34 | __u32 idiag_dbs; /* Tables to dump (NI) */ | 34 | __u32 idiag_dbs; /* Tables to dump (NI) */ |
| 35 | }; | 35 | }; |
| 36 | 36 | ||
| 37 | struct inet_diag_req { | ||
| 38 | __u8 sdiag_family; | ||
| 39 | __u8 sdiag_protocol; | ||
| 40 | __u8 idiag_ext; | ||
| 41 | __u8 pad; | ||
| 42 | __u32 idiag_states; | ||
| 43 | struct inet_diag_sockid id; | ||
| 44 | }; | ||
| 45 | |||
| 37 | enum { | 46 | enum { |
| 38 | INET_DIAG_REQ_NONE, | 47 | INET_DIAG_REQ_NONE, |
| 39 | INET_DIAG_REQ_BYTECODE, | 48 | INET_DIAG_REQ_BYTECODE, |
| @@ -99,9 +108,10 @@ enum { | |||
| 99 | INET_DIAG_CONG, | 108 | INET_DIAG_CONG, |
| 100 | INET_DIAG_TOS, | 109 | INET_DIAG_TOS, |
| 101 | INET_DIAG_TCLASS, | 110 | INET_DIAG_TCLASS, |
| 111 | INET_DIAG_SKMEMINFO, | ||
| 102 | }; | 112 | }; |
| 103 | 113 | ||
| 104 | #define INET_DIAG_MAX INET_DIAG_TCLASS | 114 | #define INET_DIAG_MAX INET_DIAG_SKMEMINFO |
| 105 | 115 | ||
| 106 | 116 | ||
| 107 | /* INET_DIAG_MEM */ | 117 | /* INET_DIAG_MEM */ |
| @@ -125,16 +135,41 @@ struct tcpvegas_info { | |||
| 125 | #ifdef __KERNEL__ | 135 | #ifdef __KERNEL__ |
| 126 | struct sock; | 136 | struct sock; |
| 127 | struct inet_hashinfo; | 137 | struct inet_hashinfo; |
| 138 | struct nlattr; | ||
| 139 | struct nlmsghdr; | ||
| 140 | struct sk_buff; | ||
| 141 | struct netlink_callback; | ||
| 128 | 142 | ||
| 129 | struct inet_diag_handler { | 143 | struct inet_diag_handler { |
| 130 | struct inet_hashinfo *idiag_hashinfo; | 144 | void (*dump)(struct sk_buff *skb, |
| 145 | struct netlink_callback *cb, | ||
| 146 | struct inet_diag_req *r, | ||
| 147 | struct nlattr *bc); | ||
| 148 | |||
| 149 | int (*dump_one)(struct sk_buff *in_skb, | ||
| 150 | const struct nlmsghdr *nlh, | ||
| 151 | struct inet_diag_req *req); | ||
| 152 | |||
| 131 | void (*idiag_get_info)(struct sock *sk, | 153 | void (*idiag_get_info)(struct sock *sk, |
| 132 | struct inet_diag_msg *r, | 154 | struct inet_diag_msg *r, |
| 133 | void *info); | 155 | void *info); |
| 134 | __u16 idiag_info_size; | ||
| 135 | __u16 idiag_type; | 156 | __u16 idiag_type; |
| 136 | }; | 157 | }; |
| 137 | 158 | ||
| 159 | struct inet_connection_sock; | ||
| 160 | int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, | ||
| 161 | struct sk_buff *skb, struct inet_diag_req *req, | ||
| 162 | u32 pid, u32 seq, u16 nlmsg_flags, | ||
| 163 | const struct nlmsghdr *unlh); | ||
| 164 | void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb, | ||
| 165 | struct netlink_callback *cb, struct inet_diag_req *r, | ||
| 166 | struct nlattr *bc); | ||
| 167 | int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, | ||
| 168 | struct sk_buff *in_skb, const struct nlmsghdr *nlh, | ||
| 169 | struct inet_diag_req *req); | ||
| 170 | |||
| 171 | int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk); | ||
| 172 | |||
| 138 | extern int inet_diag_register(const struct inet_diag_handler *handler); | 173 | extern int inet_diag_register(const struct inet_diag_handler *handler); |
| 139 | extern void inet_diag_unregister(const struct inet_diag_handler *handler); | 174 | extern void inet_diag_unregister(const struct inet_diag_handler *handler); |
| 140 | #endif /* __KERNEL__ */ | 175 | #endif /* __KERNEL__ */ |
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 0c997767429a..6318268dcaf5 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h | |||
| @@ -404,7 +404,7 @@ struct tcp6_sock { | |||
| 404 | 404 | ||
| 405 | extern int inet6_sk_rebuild_header(struct sock *sk); | 405 | extern int inet6_sk_rebuild_header(struct sock *sk); |
| 406 | 406 | ||
| 407 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 407 | #if IS_ENABLED(CONFIG_IPV6) |
| 408 | static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk) | 408 | static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk) |
| 409 | { | 409 | { |
| 410 | return inet_sk(__sk)->pinet6; | 410 | return inet_sk(__sk)->pinet6; |
| @@ -515,7 +515,7 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk) | |||
| 515 | #define inet6_rcv_saddr(__sk) NULL | 515 | #define inet6_rcv_saddr(__sk) NULL |
| 516 | #define tcp_twsk_ipv6only(__sk) 0 | 516 | #define tcp_twsk_ipv6only(__sk) 0 |
| 517 | #define inet_v6_ipv6only(__sk) 0 | 517 | #define inet_v6_ipv6only(__sk) 0 |
| 518 | #endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ | 518 | #endif /* IS_ENABLED(CONFIG_IPV6) */ |
| 519 | 519 | ||
| 520 | #define INET6_MATCH(__sk, __net, __hash, __saddr, __daddr, __ports, __dif)\ | 520 | #define INET6_MATCH(__sk, __net, __hash, __saddr, __daddr, __ports, __dif)\ |
| 521 | (((__sk)->sk_hash == (__hash)) && sock_net((__sk)) == (__net) && \ | 521 | (((__sk)->sk_hash == (__hash)) && sock_net((__sk)) == (__net) && \ |
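IS_ENABLED(CONFIG_FOO) is true for both built-in (=y) and modular (=m) builds, so the single test replaces the old defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE) pair, in #if directives and in C expressions alike. A tiny illustration of the idiom; the variable is hypothetical.

#include <linux/kconfig.h>
#include <linux/types.h>

/* Sketch: one expression covers both =y and =m. */
static const bool example_have_ipv6 = IS_ENABLED(CONFIG_IPV6);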
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 99834e581b9e..bd4272b61a14 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h | |||
| @@ -91,10 +91,11 @@ static inline unsigned int irq_domain_to_irq(struct irq_domain *d, | |||
| 91 | 91 | ||
| 92 | extern void irq_domain_add(struct irq_domain *domain); | 92 | extern void irq_domain_add(struct irq_domain *domain); |
| 93 | extern void irq_domain_del(struct irq_domain *domain); | 93 | extern void irq_domain_del(struct irq_domain *domain); |
| 94 | |||
| 95 | extern struct irq_domain_ops irq_domain_simple_ops; | ||
| 94 | #endif /* CONFIG_IRQ_DOMAIN */ | 96 | #endif /* CONFIG_IRQ_DOMAIN */ |
| 95 | 97 | ||
| 96 | #if defined(CONFIG_IRQ_DOMAIN) && defined(CONFIG_OF_IRQ) | 98 | #if defined(CONFIG_IRQ_DOMAIN) && defined(CONFIG_OF_IRQ) |
| 97 | extern struct irq_domain_ops irq_domain_simple_ops; | ||
| 98 | extern void irq_domain_add_simple(struct device_node *controller, int irq_base); | 99 | extern void irq_domain_add_simple(struct device_node *controller, int irq_base); |
| 99 | extern void irq_domain_generate_simple(const struct of_device_id *match, | 100 | extern void irq_domain_generate_simple(const struct of_device_id *match, |
| 100 | u64 phys_base, unsigned int irq_start); | 101 | u64 phys_base, unsigned int irq_start); |
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 388b0d425b50..5ce8b140428f 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h | |||
| @@ -3,6 +3,7 @@ | |||
| 3 | 3 | ||
| 4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
| 5 | #include <linux/compiler.h> | 5 | #include <linux/compiler.h> |
| 6 | #include <linux/workqueue.h> | ||
| 6 | 7 | ||
| 7 | #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) | 8 | #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) |
| 8 | 9 | ||
| @@ -14,6 +15,12 @@ struct jump_label_key { | |||
| 14 | #endif | 15 | #endif |
| 15 | }; | 16 | }; |
| 16 | 17 | ||
| 18 | struct jump_label_key_deferred { | ||
| 19 | struct jump_label_key key; | ||
| 20 | unsigned long timeout; | ||
| 21 | struct delayed_work work; | ||
| 22 | }; | ||
| 23 | |||
| 17 | # include <asm/jump_label.h> | 24 | # include <asm/jump_label.h> |
| 18 | # define HAVE_JUMP_LABEL | 25 | # define HAVE_JUMP_LABEL |
| 19 | #endif /* CC_HAVE_ASM_GOTO && CONFIG_JUMP_LABEL */ | 26 | #endif /* CC_HAVE_ASM_GOTO && CONFIG_JUMP_LABEL */ |
| @@ -51,8 +58,11 @@ extern void arch_jump_label_transform_static(struct jump_entry *entry, | |||
| 51 | extern int jump_label_text_reserved(void *start, void *end); | 58 | extern int jump_label_text_reserved(void *start, void *end); |
| 52 | extern void jump_label_inc(struct jump_label_key *key); | 59 | extern void jump_label_inc(struct jump_label_key *key); |
| 53 | extern void jump_label_dec(struct jump_label_key *key); | 60 | extern void jump_label_dec(struct jump_label_key *key); |
| 61 | extern void jump_label_dec_deferred(struct jump_label_key_deferred *key); | ||
| 54 | extern bool jump_label_enabled(struct jump_label_key *key); | 62 | extern bool jump_label_enabled(struct jump_label_key *key); |
| 55 | extern void jump_label_apply_nops(struct module *mod); | 63 | extern void jump_label_apply_nops(struct module *mod); |
| 64 | extern void jump_label_rate_limit(struct jump_label_key_deferred *key, | ||
| 65 | unsigned long rl); | ||
| 56 | 66 | ||
| 57 | #else /* !HAVE_JUMP_LABEL */ | 67 | #else /* !HAVE_JUMP_LABEL */ |
| 58 | 68 | ||
| @@ -68,6 +78,10 @@ static __always_inline void jump_label_init(void) | |||
| 68 | { | 78 | { |
| 69 | } | 79 | } |
| 70 | 80 | ||
| 81 | struct jump_label_key_deferred { | ||
| 82 | struct jump_label_key key; | ||
| 83 | }; | ||
| 84 | |||
| 71 | static __always_inline bool static_branch(struct jump_label_key *key) | 85 | static __always_inline bool static_branch(struct jump_label_key *key) |
| 72 | { | 86 | { |
| 73 | if (unlikely(atomic_read(&key->enabled))) | 87 | if (unlikely(atomic_read(&key->enabled))) |
| @@ -85,6 +99,11 @@ static inline void jump_label_dec(struct jump_label_key *key) | |||
| 85 | atomic_dec(&key->enabled); | 99 | atomic_dec(&key->enabled); |
| 86 | } | 100 | } |
| 87 | 101 | ||
| 102 | static inline void jump_label_dec_deferred(struct jump_label_key_deferred *key) | ||
| 103 | { | ||
| 104 | jump_label_dec(&key->key); | ||
| 105 | } | ||
| 106 | |||
| 88 | static inline int jump_label_text_reserved(void *start, void *end) | 107 | static inline int jump_label_text_reserved(void *start, void *end) |
| 89 | { | 108 | { |
| 90 | return 0; | 109 | return 0; |
| @@ -102,6 +121,14 @@ static inline int jump_label_apply_nops(struct module *mod) | |||
| 102 | { | 121 | { |
| 103 | return 0; | 122 | return 0; |
| 104 | } | 123 | } |
| 124 | |||
| 125 | static inline void jump_label_rate_limit(struct jump_label_key_deferred *key, | ||
| 126 | unsigned long rl) | ||
| 127 | { | ||
| 128 | } | ||
| 105 | #endif /* HAVE_JUMP_LABEL */ | 129 | #endif /* HAVE_JUMP_LABEL */ |
| 106 | 130 | ||
| 131 | #define jump_label_key_enabled ((struct jump_label_key){ .enabled = ATOMIC_INIT(1), }) | ||
| 132 | #define jump_label_key_disabled ((struct jump_label_key){ .enabled = ATOMIC_INIT(0), }) | ||
| 133 | |||
| 107 | #endif /* _LINUX_JUMP_LABEL_H */ | 134 | #endif /* _LINUX_JUMP_LABEL_H */ |
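The deferred key variant exists to throttle the expensive disable side of a jump label: jump_label_rate_limit() sets a minimum interval and jump_label_dec_deferred() postpones the actual code patching until it has elapsed, while the enable side still uses jump_label_inc() on the embedded key. A hedged sketch of that pattern; the example_feature_* names and the one-second interval are illustrative.

#include <linux/jump_label.h>
#include <linux/jiffies.h>

/* Sketch: a rarely-enabled feature flag whose disable operations are
 * rate limited so rapid toggling does not patch code on every change. */
static struct jump_label_key_deferred example_feature_key;

static void example_feature_setup(void)
{
	jump_label_rate_limit(&example_feature_key, HZ);
}

static void example_feature_enable(void)
{
	jump_label_inc(&example_feature_key.key);
}

static void example_feature_disable(void)
{
	jump_label_dec_deferred(&example_feature_key);
}

static bool example_feature_active(void)
{
	/* Becomes a patched branch when HAVE_JUMP_LABEL is available. */
	return static_branch(&example_feature_key.key);
}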
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 0cce2db580c3..2fbd9053c2df 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h | |||
| @@ -6,6 +6,7 @@ | |||
| 6 | #include <linux/percpu.h> | 6 | #include <linux/percpu.h> |
| 7 | #include <linux/cpumask.h> | 7 | #include <linux/cpumask.h> |
| 8 | #include <linux/interrupt.h> | 8 | #include <linux/interrupt.h> |
| 9 | #include <linux/sched.h> | ||
| 9 | #include <asm/irq.h> | 10 | #include <asm/irq.h> |
| 10 | #include <asm/cputime.h> | 11 | #include <asm/cputime.h> |
| 11 | 12 | ||
| @@ -15,21 +16,25 @@ | |||
| 15 | * used by rstatd/perfmeter | 16 | * used by rstatd/perfmeter |
| 16 | */ | 17 | */ |
| 17 | 18 | ||
| 18 | struct cpu_usage_stat { | 19 | enum cpu_usage_stat { |
| 19 | cputime64_t user; | 20 | CPUTIME_USER, |
| 20 | cputime64_t nice; | 21 | CPUTIME_NICE, |
| 21 | cputime64_t system; | 22 | CPUTIME_SYSTEM, |
| 22 | cputime64_t softirq; | 23 | CPUTIME_SOFTIRQ, |
| 23 | cputime64_t irq; | 24 | CPUTIME_IRQ, |
| 24 | cputime64_t idle; | 25 | CPUTIME_IDLE, |
| 25 | cputime64_t iowait; | 26 | CPUTIME_IOWAIT, |
| 26 | cputime64_t steal; | 27 | CPUTIME_STEAL, |
| 27 | cputime64_t guest; | 28 | CPUTIME_GUEST, |
| 28 | cputime64_t guest_nice; | 29 | CPUTIME_GUEST_NICE, |
| 30 | NR_STATS, | ||
| 31 | }; | ||
| 32 | |||
| 33 | struct kernel_cpustat { | ||
| 34 | u64 cpustat[NR_STATS]; | ||
| 29 | }; | 35 | }; |
| 30 | 36 | ||
| 31 | struct kernel_stat { | 37 | struct kernel_stat { |
| 32 | struct cpu_usage_stat cpustat; | ||
| 33 | #ifndef CONFIG_GENERIC_HARDIRQS | 38 | #ifndef CONFIG_GENERIC_HARDIRQS |
| 34 | unsigned int irqs[NR_IRQS]; | 39 | unsigned int irqs[NR_IRQS]; |
| 35 | #endif | 40 | #endif |
| @@ -38,10 +43,13 @@ struct kernel_stat { | |||
| 38 | }; | 43 | }; |
| 39 | 44 | ||
| 40 | DECLARE_PER_CPU(struct kernel_stat, kstat); | 45 | DECLARE_PER_CPU(struct kernel_stat, kstat); |
| 46 | DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat); | ||
| 41 | 47 | ||
| 42 | #define kstat_cpu(cpu) per_cpu(kstat, cpu) | ||
| 43 | /* Must have preemption disabled for this to be meaningful. */ | 48 | /* Must have preemption disabled for this to be meaningful. */ |
| 44 | #define kstat_this_cpu __get_cpu_var(kstat) | 49 | #define kstat_this_cpu (&__get_cpu_var(kstat)) |
| 50 | #define kcpustat_this_cpu (&__get_cpu_var(kernel_cpustat)) | ||
| 51 | #define kstat_cpu(cpu) per_cpu(kstat, cpu) | ||
| 52 | #define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu) | ||
| 45 | 53 | ||
| 46 | extern unsigned long long nr_context_switches(void); | 54 | extern unsigned long long nr_context_switches(void); |
| 47 | 55 | ||
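With the per-field struct cpu_usage_stat replaced by an indexed u64 array, readers address the counters through the CPUTIME_* enum and the kcpustat_cpu()/kcpustat_this_cpu accessors. A hedged sketch of /proc/stat-style accounting code under the new layout; the function itself is illustrative.

#include <linux/kernel_stat.h>
#include <linux/kernel.h>

/* Sketch: total idle plus iowait time, in cputime units, over all
 * possible CPUs using the array-based kernel_cpustat. */
static u64 example_total_idle_time(void)
{
	u64 total = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		total += kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
		total += kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
	}
	return total;
}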
diff --git a/include/linux/kvm.h b/include/linux/kvm.h index c3892fc1d538..68e67e50d028 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h | |||
| @@ -557,6 +557,7 @@ struct kvm_ppc_pvinfo { | |||
| 557 | #define KVM_CAP_MAX_VCPUS 66 /* returns max vcpus per vm */ | 557 | #define KVM_CAP_MAX_VCPUS 66 /* returns max vcpus per vm */ |
| 558 | #define KVM_CAP_PPC_PAPR 68 | 558 | #define KVM_CAP_PPC_PAPR 68 |
| 559 | #define KVM_CAP_S390_GMAP 71 | 559 | #define KVM_CAP_S390_GMAP 71 |
| 560 | #define KVM_CAP_TSC_DEADLINE_TIMER 72 | ||
| 560 | 561 | ||
| 561 | #ifdef KVM_CAP_IRQ_ROUTING | 562 | #ifdef KVM_CAP_IRQ_ROUTING |
| 562 | 563 | ||
diff --git a/include/linux/latencytop.h b/include/linux/latencytop.h index b0e99898527c..e23121f9d82a 100644 --- a/include/linux/latencytop.h +++ b/include/linux/latencytop.h | |||
| @@ -10,6 +10,8 @@ | |||
| 10 | #define _INCLUDE_GUARD_LATENCYTOP_H_ | 10 | #define _INCLUDE_GUARD_LATENCYTOP_H_ |
| 11 | 11 | ||
| 12 | #include <linux/compiler.h> | 12 | #include <linux/compiler.h> |
| 13 | struct task_struct; | ||
| 14 | |||
| 13 | #ifdef CONFIG_LATENCYTOP | 15 | #ifdef CONFIG_LATENCYTOP |
| 14 | 16 | ||
| 15 | #define LT_SAVECOUNT 32 | 17 | #define LT_SAVECOUNT 32 |
| @@ -23,7 +25,6 @@ struct latency_record { | |||
| 23 | }; | 25 | }; |
| 24 | 26 | ||
| 25 | 27 | ||
| 26 | struct task_struct; | ||
| 27 | 28 | ||
| 28 | extern int latencytop_enabled; | 29 | extern int latencytop_enabled; |
| 29 | void __account_scheduler_latency(struct task_struct *task, int usecs, int inter); | 30 | void __account_scheduler_latency(struct task_struct *task, int usecs, int inter); |
diff --git a/include/linux/lglock.h b/include/linux/lglock.h index f549056fb20b..87f402ccec55 100644 --- a/include/linux/lglock.h +++ b/include/linux/lglock.h | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | #include <linux/spinlock.h> | 22 | #include <linux/spinlock.h> |
| 23 | #include <linux/lockdep.h> | 23 | #include <linux/lockdep.h> |
| 24 | #include <linux/percpu.h> | 24 | #include <linux/percpu.h> |
| 25 | #include <linux/cpu.h> | ||
| 25 | 26 | ||
| 26 | /* can make br locks by using local lock for read side, global lock for write */ | 27 | /* can make br locks by using local lock for read side, global lock for write */ |
| 27 | #define br_lock_init(name) name##_lock_init() | 28 | #define br_lock_init(name) name##_lock_init() |
| @@ -72,9 +73,31 @@ | |||
| 72 | 73 | ||
| 73 | #define DEFINE_LGLOCK(name) \ | 74 | #define DEFINE_LGLOCK(name) \ |
| 74 | \ | 75 | \ |
| 76 | DEFINE_SPINLOCK(name##_cpu_lock); \ | ||
| 77 | cpumask_t name##_cpus __read_mostly; \ | ||
| 75 | DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \ | 78 | DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \ |
| 76 | DEFINE_LGLOCK_LOCKDEP(name); \ | 79 | DEFINE_LGLOCK_LOCKDEP(name); \ |
| 77 | \ | 80 | \ |
| 81 | static int \ | ||
| 82 | name##_lg_cpu_callback(struct notifier_block *nb, \ | ||
| 83 | unsigned long action, void *hcpu) \ | ||
| 84 | { \ | ||
| 85 | switch (action & ~CPU_TASKS_FROZEN) { \ | ||
| 86 | case CPU_UP_PREPARE: \ | ||
| 87 | spin_lock(&name##_cpu_lock); \ | ||
| 88 | cpu_set((unsigned long)hcpu, name##_cpus); \ | ||
| 89 | spin_unlock(&name##_cpu_lock); \ | ||
| 90 | break; \ | ||
| 91 | case CPU_UP_CANCELED: case CPU_DEAD: \ | ||
| 92 | spin_lock(&name##_cpu_lock); \ | ||
| 93 | cpu_clear((unsigned long)hcpu, name##_cpus); \ | ||
| 94 | spin_unlock(&name##_cpu_lock); \ | ||
| 95 | } \ | ||
| 96 | return NOTIFY_OK; \ | ||
| 97 | } \ | ||
| 98 | static struct notifier_block name##_lg_cpu_notifier = { \ | ||
| 99 | .notifier_call = name##_lg_cpu_callback, \ | ||
| 100 | }; \ | ||
| 78 | void name##_lock_init(void) { \ | 101 | void name##_lock_init(void) { \ |
| 79 | int i; \ | 102 | int i; \ |
| 80 | LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \ | 103 | LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \ |
| @@ -83,6 +106,11 @@ | |||
| 83 | lock = &per_cpu(name##_lock, i); \ | 106 | lock = &per_cpu(name##_lock, i); \ |
| 84 | *lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; \ | 107 | *lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; \ |
| 85 | } \ | 108 | } \ |
| 109 | register_hotcpu_notifier(&name##_lg_cpu_notifier); \ | ||
| 110 | get_online_cpus(); \ | ||
| 111 | for_each_online_cpu(i) \ | ||
| 112 | cpu_set(i, name##_cpus); \ | ||
| 113 | put_online_cpus(); \ | ||
| 86 | } \ | 114 | } \ |
| 87 | EXPORT_SYMBOL(name##_lock_init); \ | 115 | EXPORT_SYMBOL(name##_lock_init); \ |
| 88 | \ | 116 | \ |
| @@ -124,9 +152,9 @@ | |||
| 124 | \ | 152 | \ |
| 125 | void name##_global_lock_online(void) { \ | 153 | void name##_global_lock_online(void) { \ |
| 126 | int i; \ | 154 | int i; \ |
| 127 | preempt_disable(); \ | 155 | spin_lock(&name##_cpu_lock); \ |
| 128 | rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \ | 156 | rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \ |
| 129 | for_each_online_cpu(i) { \ | 157 | for_each_cpu(i, &name##_cpus) { \ |
| 130 | arch_spinlock_t *lock; \ | 158 | arch_spinlock_t *lock; \ |
| 131 | lock = &per_cpu(name##_lock, i); \ | 159 | lock = &per_cpu(name##_lock, i); \ |
| 132 | arch_spin_lock(lock); \ | 160 | arch_spin_lock(lock); \ |
| @@ -137,12 +165,12 @@ | |||
| 137 | void name##_global_unlock_online(void) { \ | 165 | void name##_global_unlock_online(void) { \ |
| 138 | int i; \ | 166 | int i; \ |
| 139 | rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \ | 167 | rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \ |
| 140 | for_each_online_cpu(i) { \ | 168 | for_each_cpu(i, &name##_cpus) { \ |
| 141 | arch_spinlock_t *lock; \ | 169 | arch_spinlock_t *lock; \ |
| 142 | lock = &per_cpu(name##_lock, i); \ | 170 | lock = &per_cpu(name##_lock, i); \ |
| 143 | arch_spin_unlock(lock); \ | 171 | arch_spin_unlock(lock); \ |
| 144 | } \ | 172 | } \ |
| 145 | preempt_enable(); \ | 173 | spin_unlock(&name##_cpu_lock); \ |
| 146 | } \ | 174 | } \ |
| 147 | EXPORT_SYMBOL(name##_global_unlock_online); \ | 175 | EXPORT_SYMBOL(name##_global_unlock_online); \ |
| 148 | \ | 176 | \ |
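The user-visible lglock API is unchanged by this patch; only the global-online path now walks a cpumask maintained under name##_cpu_lock by a hotplug notifier, so the write side can no longer race CPU hotplug between lock and unlock. For context, a hedged sketch of how such a lock is defined and used; example_lglock and the callers are illustrative, and DECLARE_LGLOCK would normally live in a shared header.

#include <linux/lglock.h>
#include <linux/init.h>

DECLARE_LGLOCK(example_lglock);
DEFINE_LGLOCK(example_lglock);

static int __init example_lglock_setup(void)
{
	/* Registers the hotplug notifier and seeds the tracked cpumask. */
	br_lock_init(example_lglock);
	return 0;
}

static void example_fast_path(void)
{
	/* Per-CPU side: touches only this CPU's spinlock. */
	br_read_lock(example_lglock);
	/* ... read data protected per CPU ... */
	br_read_unlock(example_lglock);
}

static void example_slow_path(void)
{
	/* Global side: takes every tracked CPU's lock. */
	br_write_lock(example_lglock);
	/* ... modify data visible to all CPUs ... */
	br_write_unlock(example_lglock);
}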
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index ff9abff55aa0..90b0656a869e 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h | |||
| @@ -301,7 +301,7 @@ static inline int __nlm_privileged_request4(const struct sockaddr *sap) | |||
| 301 | return ipv4_is_loopback(sin->sin_addr.s_addr); | 301 | return ipv4_is_loopback(sin->sin_addr.s_addr); |
| 302 | } | 302 | } |
| 303 | 303 | ||
| 304 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 304 | #if IS_ENABLED(CONFIG_IPV6) |
| 305 | static inline int __nlm_privileged_request6(const struct sockaddr *sap) | 305 | static inline int __nlm_privileged_request6(const struct sockaddr *sap) |
| 306 | { | 306 | { |
| 307 | const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap; | 307 | const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap; |
| @@ -314,12 +314,12 @@ static inline int __nlm_privileged_request6(const struct sockaddr *sap) | |||
| 314 | 314 | ||
| 315 | return ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LOOPBACK; | 315 | return ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LOOPBACK; |
| 316 | } | 316 | } |
| 317 | #else /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ | 317 | #else /* IS_ENABLED(CONFIG_IPV6) */ |
| 318 | static inline int __nlm_privileged_request6(const struct sockaddr *sap) | 318 | static inline int __nlm_privileged_request6(const struct sockaddr *sap) |
| 319 | { | 319 | { |
| 320 | return 0; | 320 | return 0; |
| 321 | } | 321 | } |
| 322 | #endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ | 322 | #endif /* IS_ENABLED(CONFIG_IPV6) */ |
| 323 | 323 | ||
| 324 | /* | 324 | /* |
| 325 | * Ensure incoming requests are from local privileged callers. | 325 | * Ensure incoming requests are from local privileged callers. |
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index b6a56e37284c..d36619ead3ba 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h | |||
| @@ -343,6 +343,8 @@ extern void lockdep_trace_alloc(gfp_t mask); | |||
| 343 | 343 | ||
| 344 | #define lockdep_assert_held(l) WARN_ON(debug_locks && !lockdep_is_held(l)) | 344 | #define lockdep_assert_held(l) WARN_ON(debug_locks && !lockdep_is_held(l)) |
| 345 | 345 | ||
| 346 | #define lockdep_recursing(tsk) ((tsk)->lockdep_recursion) | ||
| 347 | |||
| 346 | #else /* !LOCKDEP */ | 348 | #else /* !LOCKDEP */ |
| 347 | 349 | ||
| 348 | static inline void lockdep_off(void) | 350 | static inline void lockdep_off(void) |
| @@ -392,6 +394,8 @@ struct lock_class_key { }; | |||
| 392 | 394 | ||
| 393 | #define lockdep_assert_held(l) do { } while (0) | 395 | #define lockdep_assert_held(l) do { } while (0) |
| 394 | 396 | ||
| 397 | #define lockdep_recursing(tsk) (0) | ||
| 398 | |||
| 395 | #endif /* !LOCKDEP */ | 399 | #endif /* !LOCKDEP */ |
| 396 | 400 | ||
| 397 | #ifdef CONFIG_LOCK_STAT | 401 | #ifdef CONFIG_LOCK_STAT |
diff --git a/include/linux/log2.h b/include/linux/log2.h index 25b808631cd9..fd7ff3d91e6a 100644 --- a/include/linux/log2.h +++ b/include/linux/log2.h | |||
| @@ -185,7 +185,6 @@ unsigned long __rounddown_pow_of_two(unsigned long n) | |||
| 185 | #define rounddown_pow_of_two(n) \ | 185 | #define rounddown_pow_of_two(n) \ |
| 186 | ( \ | 186 | ( \ |
| 187 | __builtin_constant_p(n) ? ( \ | 187 | __builtin_constant_p(n) ? ( \ |
| 188 | (n == 1) ? 0 : \ | ||
| 189 | (1UL << ilog2(n))) : \ | 188 | (1UL << ilog2(n))) : \ |
| 190 | __rounddown_pow_of_two(n) \ | 189 | __rounddown_pow_of_two(n) \ |
| 191 | ) | 190 | ) |
diff --git a/include/linux/mdio-bitbang.h b/include/linux/mdio-bitbang.h index 0fe00cd4c93c..76f52bbbb2f4 100644 --- a/include/linux/mdio-bitbang.h +++ b/include/linux/mdio-bitbang.h | |||
| @@ -32,6 +32,8 @@ struct mdiobb_ops { | |||
| 32 | 32 | ||
| 33 | struct mdiobb_ctrl { | 33 | struct mdiobb_ctrl { |
| 34 | const struct mdiobb_ops *ops; | 34 | const struct mdiobb_ops *ops; |
| 35 | /* reset callback */ | ||
| 36 | int (*reset)(struct mii_bus *bus); | ||
| 35 | }; | 37 | }; |
| 36 | 38 | ||
| 37 | /* The returned bus is not yet registered with the phy layer. */ | 39 | /* The returned bus is not yet registered with the phy layer. */ |
diff --git a/include/linux/mdio-gpio.h b/include/linux/mdio-gpio.h index e9d3fdfe41d7..7c9fe3c2be73 100644 --- a/include/linux/mdio-gpio.h +++ b/include/linux/mdio-gpio.h | |||
| @@ -20,6 +20,8 @@ struct mdio_gpio_platform_data { | |||
| 20 | 20 | ||
| 21 | unsigned int phy_mask; | 21 | unsigned int phy_mask; |
| 22 | int irqs[PHY_MAX_ADDR]; | 22 | int irqs[PHY_MAX_ADDR]; |
| 23 | /* reset callback */ | ||
| 24 | int (*reset)(struct mii_bus *bus); | ||
| 23 | }; | 25 | }; |
| 24 | 26 | ||
| 25 | #endif /* __LINUX_MDIO_GPIO_H */ | 27 | #endif /* __LINUX_MDIO_GPIO_H */ |
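Both mdiobb_ctrl and mdio_gpio_platform_data gain an optional reset() hook so board code can pulse the PHY reset line when the bus is (re)initialised. A hedged sketch of board code supplying it; the GPIO number, the delay, and the omitted platform-data fields are illustrative.

#include <linux/mdio-gpio.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/phy.h>

/* Sketch: pulse a (made-up) PHY reset GPIO before the bus is used. */
static int example_mdio_reset(struct mii_bus *bus)
{
	gpio_set_value(42, 0);		/* assert reset */
	udelay(100);
	gpio_set_value(42, 1);		/* release reset */
	return 0;
}

static struct mdio_gpio_platform_data example_mdio_pdata = {
	/* mdc/mdio pin numbers and irqs omitted for brevity */
	.reset	= example_mdio_reset,
};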
diff --git a/include/linux/memblock.h b/include/linux/memblock.h index e6b843e16e81..a6bb10235148 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h | |||
| @@ -2,8 +2,6 @@ | |||
| 2 | #define _LINUX_MEMBLOCK_H | 2 | #define _LINUX_MEMBLOCK_H |
| 3 | #ifdef __KERNEL__ | 3 | #ifdef __KERNEL__ |
| 4 | 4 | ||
| 5 | #define MEMBLOCK_ERROR 0 | ||
| 6 | |||
| 7 | #ifdef CONFIG_HAVE_MEMBLOCK | 5 | #ifdef CONFIG_HAVE_MEMBLOCK |
| 8 | /* | 6 | /* |
| 9 | * Logical memory blocks. | 7 | * Logical memory blocks. |
| @@ -19,81 +17,161 @@ | |||
| 19 | #include <linux/init.h> | 17 | #include <linux/init.h> |
| 20 | #include <linux/mm.h> | 18 | #include <linux/mm.h> |
| 21 | 19 | ||
| 22 | #include <asm/memblock.h> | ||
| 23 | |||
| 24 | #define INIT_MEMBLOCK_REGIONS 128 | 20 | #define INIT_MEMBLOCK_REGIONS 128 |
| 25 | 21 | ||
| 26 | struct memblock_region { | 22 | struct memblock_region { |
| 27 | phys_addr_t base; | 23 | phys_addr_t base; |
| 28 | phys_addr_t size; | 24 | phys_addr_t size; |
| 25 | #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP | ||
| 26 | int nid; | ||
| 27 | #endif | ||
| 29 | }; | 28 | }; |
| 30 | 29 | ||
| 31 | struct memblock_type { | 30 | struct memblock_type { |
| 32 | unsigned long cnt; /* number of regions */ | 31 | unsigned long cnt; /* number of regions */ |
| 33 | unsigned long max; /* size of the allocated array */ | 32 | unsigned long max; /* size of the allocated array */ |
| 33 | phys_addr_t total_size; /* size of all regions */ | ||
| 34 | struct memblock_region *regions; | 34 | struct memblock_region *regions; |
| 35 | }; | 35 | }; |
| 36 | 36 | ||
| 37 | struct memblock { | 37 | struct memblock { |
| 38 | phys_addr_t current_limit; | 38 | phys_addr_t current_limit; |
| 39 | phys_addr_t memory_size; /* Updated by memblock_analyze() */ | ||
| 40 | struct memblock_type memory; | 39 | struct memblock_type memory; |
| 41 | struct memblock_type reserved; | 40 | struct memblock_type reserved; |
| 42 | }; | 41 | }; |
| 43 | 42 | ||
| 44 | extern struct memblock memblock; | 43 | extern struct memblock memblock; |
| 45 | extern int memblock_debug; | 44 | extern int memblock_debug; |
| 46 | extern int memblock_can_resize; | ||
| 47 | 45 | ||
| 48 | #define memblock_dbg(fmt, ...) \ | 46 | #define memblock_dbg(fmt, ...) \ |
| 49 | if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) | 47 | if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) |
| 50 | 48 | ||
| 51 | u64 memblock_find_in_range(u64 start, u64 end, u64 size, u64 align); | 49 | phys_addr_t memblock_find_in_range_node(phys_addr_t start, phys_addr_t end, |
| 50 | phys_addr_t size, phys_addr_t align, int nid); | ||
| 51 | phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end, | ||
| 52 | phys_addr_t size, phys_addr_t align); | ||
| 52 | int memblock_free_reserved_regions(void); | 53 | int memblock_free_reserved_regions(void); |
| 53 | int memblock_reserve_reserved_regions(void); | 54 | int memblock_reserve_reserved_regions(void); |
| 54 | 55 | ||
| 55 | extern void memblock_init(void); | 56 | void memblock_allow_resize(void); |
| 56 | extern void memblock_analyze(void); | 57 | int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid); |
| 57 | extern long memblock_add(phys_addr_t base, phys_addr_t size); | 58 | int memblock_add(phys_addr_t base, phys_addr_t size); |
| 58 | extern long memblock_remove(phys_addr_t base, phys_addr_t size); | 59 | int memblock_remove(phys_addr_t base, phys_addr_t size); |
| 59 | extern long memblock_free(phys_addr_t base, phys_addr_t size); | 60 | int memblock_free(phys_addr_t base, phys_addr_t size); |
| 60 | extern long memblock_reserve(phys_addr_t base, phys_addr_t size); | 61 | int memblock_reserve(phys_addr_t base, phys_addr_t size); |
| 62 | |||
| 63 | #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP | ||
| 64 | void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, | ||
| 65 | unsigned long *out_end_pfn, int *out_nid); | ||
| 66 | |||
| 67 | /** | ||
| 68 | * for_each_mem_pfn_range - early memory pfn range iterator | ||
| 69 | * @i: an integer used as loop variable | ||
| 70 | * @nid: node selector, %MAX_NUMNODES for all nodes | ||
| 71 | * @p_start: ptr to ulong for start pfn of the range, can be %NULL | ||
| 72 | * @p_end: ptr to ulong for end pfn of the range, can be %NULL | ||
| 73 | * @p_nid: ptr to int for nid of the range, can be %NULL | ||
| 74 | * | ||
| 75 | * Walks over configured memory ranges. Available after early_node_map is | ||
| 76 | * populated. | ||
| 77 | */ | ||
| 78 | #define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid) \ | ||
| 79 | for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \ | ||
| 80 | i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid)) | ||
| 81 | #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ | ||
| 82 | |||
| 83 | void __next_free_mem_range(u64 *idx, int nid, phys_addr_t *out_start, | ||
| 84 | phys_addr_t *out_end, int *out_nid); | ||
| 85 | |||
| 86 | /** | ||
| 87 | * for_each_free_mem_range - iterate through free memblock areas | ||
| 88 | * @i: u64 used as loop variable | ||
| 89 | * @nid: node selector, %MAX_NUMNODES for all nodes | ||
| 90 | * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL | ||
| 91 | * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL | ||
| 92 | * @p_nid: ptr to int for nid of the range, can be %NULL | ||
| 93 | * | ||
| 94 | * Walks over free (memory && !reserved) areas of memblock. Available as | ||
| 95 | * soon as memblock is initialized. | ||
| 96 | */ | ||
| 97 | #define for_each_free_mem_range(i, nid, p_start, p_end, p_nid) \ | ||
| 98 | for (i = 0, \ | ||
| 99 | __next_free_mem_range(&i, nid, p_start, p_end, p_nid); \ | ||
| 100 | i != (u64)ULLONG_MAX; \ | ||
| 101 | __next_free_mem_range(&i, nid, p_start, p_end, p_nid)) | ||
| 102 | |||
| 103 | void __next_free_mem_range_rev(u64 *idx, int nid, phys_addr_t *out_start, | ||
| 104 | phys_addr_t *out_end, int *out_nid); | ||
| 61 | 105 | ||
| 62 | /* The numa aware allocator is only available if | 106 | /** |
| 63 | * CONFIG_ARCH_POPULATES_NODE_MAP is set | 107 | * for_each_free_mem_range_reverse - rev-iterate through free memblock areas |
| 108 | * @i: u64 used as loop variable | ||
| 109 | * @nid: node selector, %MAX_NUMNODES for all nodes | ||
| 110 | * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL | ||
| 111 | * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL | ||
| 112 | * @p_nid: ptr to int for nid of the range, can be %NULL | ||
| 113 | * | ||
| 114 | * Walks over free (memory && !reserved) areas of memblock in reverse | ||
| 115 | * order. Available as soon as memblock is initialized. | ||
| 64 | */ | 116 | */ |
| 65 | extern phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, | 117 | #define for_each_free_mem_range_reverse(i, nid, p_start, p_end, p_nid) \ |
| 66 | int nid); | 118 | for (i = (u64)ULLONG_MAX, \ |
| 67 | extern phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, | 119 | __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid); \ |
| 68 | int nid); | 120 | i != (u64)ULLONG_MAX; \ |
| 121 | __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid)) | ||
| 69 | 122 | ||
| 70 | extern phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align); | 123 | #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP |
| 124 | int memblock_set_node(phys_addr_t base, phys_addr_t size, int nid); | ||
| 125 | |||
| 126 | static inline void memblock_set_region_node(struct memblock_region *r, int nid) | ||
| 127 | { | ||
| 128 | r->nid = nid; | ||
| 129 | } | ||
| 130 | |||
| 131 | static inline int memblock_get_region_node(const struct memblock_region *r) | ||
| 132 | { | ||
| 133 | return r->nid; | ||
| 134 | } | ||
| 135 | #else | ||
| 136 | static inline void memblock_set_region_node(struct memblock_region *r, int nid) | ||
| 137 | { | ||
| 138 | } | ||
| 139 | |||
| 140 | static inline int memblock_get_region_node(const struct memblock_region *r) | ||
| 141 | { | ||
| 142 | return 0; | ||
| 143 | } | ||
| 144 | #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ | ||
| 145 | |||
| 146 | phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid); | ||
| 147 | phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid); | ||
| 148 | |||
| 149 | phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align); | ||
| 71 | 150 | ||
| 72 | /* Flags for memblock_alloc_base() and __memblock_alloc_base() */ | 151 | /* Flags for memblock_alloc_base() and __memblock_alloc_base() */ |
| 73 | #define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0) | 152 | #define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0) |
| 74 | #define MEMBLOCK_ALLOC_ACCESSIBLE 0 | 153 | #define MEMBLOCK_ALLOC_ACCESSIBLE 0 |
| 75 | 154 | ||
| 76 | extern phys_addr_t memblock_alloc_base(phys_addr_t size, | 155 | phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align, |
| 77 | phys_addr_t align, | 156 | phys_addr_t max_addr); |
| 78 | phys_addr_t max_addr); | 157 | phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align, |
| 79 | extern phys_addr_t __memblock_alloc_base(phys_addr_t size, | 158 | phys_addr_t max_addr); |
| 80 | phys_addr_t align, | 159 | phys_addr_t memblock_phys_mem_size(void); |
| 81 | phys_addr_t max_addr); | 160 | phys_addr_t memblock_start_of_DRAM(void); |
| 82 | extern phys_addr_t memblock_phys_mem_size(void); | 161 | phys_addr_t memblock_end_of_DRAM(void); |
| 83 | extern phys_addr_t memblock_start_of_DRAM(void); | 162 | void memblock_enforce_memory_limit(phys_addr_t memory_limit); |
| 84 | extern phys_addr_t memblock_end_of_DRAM(void); | 163 | int memblock_is_memory(phys_addr_t addr); |
| 85 | extern void memblock_enforce_memory_limit(phys_addr_t memory_limit); | 164 | int memblock_is_region_memory(phys_addr_t base, phys_addr_t size); |
| 86 | extern int memblock_is_memory(phys_addr_t addr); | 165 | int memblock_is_reserved(phys_addr_t addr); |
| 87 | extern int memblock_is_region_memory(phys_addr_t base, phys_addr_t size); | 166 | int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size); |
| 88 | extern int memblock_is_reserved(phys_addr_t addr); | 167 | |
| 89 | extern int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size); | 168 | extern void __memblock_dump_all(void); |
| 90 | 169 | ||
| 91 | extern void memblock_dump_all(void); | 170 | static inline void memblock_dump_all(void) |
| 92 | 171 | { | |
| 93 | /* Provided by the architecture */ | 172 | if (memblock_debug) |
| 94 | extern phys_addr_t memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid); | 173 | __memblock_dump_all(); |
| 95 | extern int memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1, | 174 | } |
| 96 | phys_addr_t addr2, phys_addr_t size2); | ||
| 97 | 175 | ||
| 98 | /** | 176 | /** |
| 99 | * memblock_set_current_limit - Set the current allocation limit to allow | 177 | * memblock_set_current_limit - Set the current allocation limit to allow |
| @@ -101,7 +179,7 @@ extern int memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1, | |||
| 101 | * accessible during boot | 179 | * accessible during boot |
| 102 | * @limit: New limit value (physical address) | 180 | * @limit: New limit value (physical address) |
| 103 | */ | 181 | */ |
| 104 | extern void memblock_set_current_limit(phys_addr_t limit); | 182 | void memblock_set_current_limit(phys_addr_t limit); |
| 105 | 183 | ||
| 106 | 184 | ||
| 107 | /* | 185 | /* |
| @@ -154,9 +232,9 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo | |||
| 154 | region++) | 232 | region++) |
| 155 | 233 | ||
| 156 | 234 | ||
| 157 | #ifdef ARCH_DISCARD_MEMBLOCK | 235 | #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK |
| 158 | #define __init_memblock __init | 236 | #define __init_memblock __meminit |
| 159 | #define __initdata_memblock __initdata | 237 | #define __initdata_memblock __meminitdata |
| 160 | #else | 238 | #else |
| 161 | #define __init_memblock | 239 | #define __init_memblock |
| 162 | #define __initdata_memblock | 240 | #define __initdata_memblock |
| @@ -165,7 +243,7 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo | |||
| 165 | #else | 243 | #else |
| 166 | static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align) | 244 | static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align) |
| 167 | { | 245 | { |
| 168 | return MEMBLOCK_ERROR; | 246 | return 0; |
| 169 | } | 247 | } |
| 170 | 248 | ||
| 171 | #endif /* CONFIG_HAVE_MEMBLOCK */ | 249 | #endif /* CONFIG_HAVE_MEMBLOCK */ |
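The rewritten header drops memblock_init()/memblock_analyze() in favour of self-initialising types, makes the add/remove/reserve calls return int, and adds iterators over free ranges. A hedged sketch of early boot code walking free memory with for_each_free_mem_range(); the function and the printing are illustrative, real callers typically reserve or map the ranges instead.

#include <linux/memblock.h>
#include <linux/numa.h>
#include <linux/kernel.h>

/* Sketch: visit every free (memory && !reserved) range on any node.
 * MAX_NUMNODES acts as the "all nodes" selector, as documented above. */
static void __init example_walk_free_ranges(void)
{
	phys_addr_t start, end;
	u64 i;

	for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
		pr_info("free range: [%#llx-%#llx)\n",
			(unsigned long long)start,
			(unsigned long long)end);
}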
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index b87068a1a09e..9b296ea41bb8 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h | |||
| @@ -85,6 +85,9 @@ extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page); | |||
| 85 | extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); | 85 | extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); |
| 86 | extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm); | 86 | extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm); |
| 87 | 87 | ||
| 88 | extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg); | ||
| 89 | extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont); | ||
| 90 | |||
| 88 | static inline | 91 | static inline |
| 89 | int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup) | 92 | int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup) |
| 90 | { | 93 | { |
| @@ -381,5 +384,25 @@ mem_cgroup_print_bad_page(struct page *page) | |||
| 381 | } | 384 | } |
| 382 | #endif | 385 | #endif |
| 383 | 386 | ||
| 387 | enum { | ||
| 388 | UNDER_LIMIT, | ||
| 389 | SOFT_LIMIT, | ||
| 390 | OVER_LIMIT, | ||
| 391 | }; | ||
| 392 | |||
| 393 | #ifdef CONFIG_INET | ||
| 394 | struct sock; | ||
| 395 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM | ||
| 396 | void sock_update_memcg(struct sock *sk); | ||
| 397 | void sock_release_memcg(struct sock *sk); | ||
| 398 | #else | ||
| 399 | static inline void sock_update_memcg(struct sock *sk) | ||
| 400 | { | ||
| 401 | } | ||
| 402 | static inline void sock_release_memcg(struct sock *sk) | ||
| 403 | { | ||
| 404 | } | ||
| 405 | #endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */ | ||
| 406 | #endif /* CONFIG_INET */ | ||
| 384 | #endif /* _LINUX_MEMCONTROL_H */ | 407 | #endif /* _LINUX_MEMCONTROL_H */ |
| 385 | 408 | ||
diff --git a/include/linux/mii.h b/include/linux/mii.h index 27748230aa69..2783eca629a0 100644 --- a/include/linux/mii.h +++ b/include/linux/mii.h | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | #define __LINUX_MII_H__ | 9 | #define __LINUX_MII_H__ |
| 10 | 10 | ||
| 11 | #include <linux/types.h> | 11 | #include <linux/types.h> |
| 12 | #include <linux/ethtool.h> | ||
| 12 | 13 | ||
| 13 | /* Generic MII registers. */ | 14 | /* Generic MII registers. */ |
| 14 | #define MII_BMCR 0x00 /* Basic mode control register */ | 15 | #define MII_BMCR 0x00 /* Basic mode control register */ |
| @@ -240,6 +241,205 @@ static inline unsigned int mii_duplex (unsigned int duplex_lock, | |||
| 240 | } | 241 | } |
| 241 | 242 | ||
| 242 | /** | 243 | /** |
| 244 | * ethtool_adv_to_mii_adv_t | ||
| 245 | * @ethadv: the ethtool advertisement settings | ||
| 246 | * | ||
| 247 | * A small helper function that translates ethtool advertisement | ||
| 248 | * settings to phy autonegotiation advertisements for the | ||
| 249 | * MII_ADVERTISE register. | ||
| 250 | */ | ||
| 251 | static inline u32 ethtool_adv_to_mii_adv_t(u32 ethadv) | ||
| 252 | { | ||
| 253 | u32 result = 0; | ||
| 254 | |||
| 255 | if (ethadv & ADVERTISED_10baseT_Half) | ||
| 256 | result |= ADVERTISE_10HALF; | ||
| 257 | if (ethadv & ADVERTISED_10baseT_Full) | ||
| 258 | result |= ADVERTISE_10FULL; | ||
| 259 | if (ethadv & ADVERTISED_100baseT_Half) | ||
| 260 | result |= ADVERTISE_100HALF; | ||
| 261 | if (ethadv & ADVERTISED_100baseT_Full) | ||
| 262 | result |= ADVERTISE_100FULL; | ||
| 263 | if (ethadv & ADVERTISED_Pause) | ||
| 264 | result |= ADVERTISE_PAUSE_CAP; | ||
| 265 | if (ethadv & ADVERTISED_Asym_Pause) | ||
| 266 | result |= ADVERTISE_PAUSE_ASYM; | ||
| 267 | |||
| 268 | return result; | ||
| 269 | } | ||
| 270 | |||
| 271 | /** | ||
| 272 | * mii_adv_to_ethtool_adv_t | ||
| 273 | * @adv: value of the MII_ADVERTISE register | ||
| 274 | * | ||
| 275 | * A small helper function that translates MII_ADVERTISE bits | ||
| 276 | * to ethtool advertisement settings. | ||
| 277 | */ | ||
| 278 | static inline u32 mii_adv_to_ethtool_adv_t(u32 adv) | ||
| 279 | { | ||
| 280 | u32 result = 0; | ||
| 281 | |||
| 282 | if (adv & ADVERTISE_10HALF) | ||
| 283 | result |= ADVERTISED_10baseT_Half; | ||
| 284 | if (adv & ADVERTISE_10FULL) | ||
| 285 | result |= ADVERTISED_10baseT_Full; | ||
| 286 | if (adv & ADVERTISE_100HALF) | ||
| 287 | result |= ADVERTISED_100baseT_Half; | ||
| 288 | if (adv & ADVERTISE_100FULL) | ||
| 289 | result |= ADVERTISED_100baseT_Full; | ||
| 290 | if (adv & ADVERTISE_PAUSE_CAP) | ||
| 291 | result |= ADVERTISED_Pause; | ||
| 292 | if (adv & ADVERTISE_PAUSE_ASYM) | ||
| 293 | result |= ADVERTISED_Asym_Pause; | ||
| 294 | |||
| 295 | return result; | ||
| 296 | } | ||
| 297 | |||
| 298 | /** | ||
| 299 | * ethtool_adv_to_mii_ctrl1000_t | ||
| 300 | * @ethadv: the ethtool advertisement settings | ||
| 301 | * | ||
| 302 | * A small helper function that translates ethtool advertisement | ||
| 303 | * settings to phy autonegotiation advertisements for the | ||
| 304 | * MII_CTRL1000 register when in 1000T mode. | ||
| 305 | */ | ||
| 306 | static inline u32 ethtool_adv_to_mii_ctrl1000_t(u32 ethadv) | ||
| 307 | { | ||
| 308 | u32 result = 0; | ||
| 309 | |||
| 310 | if (ethadv & ADVERTISED_1000baseT_Half) | ||
| 311 | result |= ADVERTISE_1000HALF; | ||
| 312 | if (ethadv & ADVERTISED_1000baseT_Full) | ||
| 313 | result |= ADVERTISE_1000FULL; | ||
| 314 | |||
| 315 | return result; | ||
| 316 | } | ||
| 317 | |||
| 318 | /** | ||
| 319 | * mii_ctrl1000_to_ethtool_adv_t | ||
| 320 | * @adv: value of the MII_CTRL1000 register | ||
| 321 | * | ||
| 322 | * A small helper function that translates MII_CTRL1000 | ||
| 323 | * bits, when in 1000Base-T mode, to ethtool | ||
| 324 | * advertisement settings. | ||
| 325 | */ | ||
| 326 | static inline u32 mii_ctrl1000_to_ethtool_adv_t(u32 adv) | ||
| 327 | { | ||
| 328 | u32 result = 0; | ||
| 329 | |||
| 330 | if (adv & ADVERTISE_1000HALF) | ||
| 331 | result |= ADVERTISED_1000baseT_Half; | ||
| 332 | if (adv & ADVERTISE_1000FULL) | ||
| 333 | result |= ADVERTISED_1000baseT_Full; | ||
| 334 | |||
| 335 | return result; | ||
| 336 | } | ||
| 337 | |||
| 338 | /** | ||
| 339 | * mii_lpa_to_ethtool_lpa_t | ||
| 340 | * @lpa: value of the MII_LPA register | ||
| 341 | * | ||
| 342 | * A small helper function that translates MII_LPA | ||
| 343 | * bits, including the link-partner ACK bit, to ethtool | ||
| 344 | * LP advertisement settings. | ||
| 345 | */ | ||
| 346 | static inline u32 mii_lpa_to_ethtool_lpa_t(u32 lpa) | ||
| 347 | { | ||
| 348 | u32 result = 0; | ||
| 349 | |||
| 350 | if (lpa & LPA_LPACK) | ||
| 351 | result |= ADVERTISED_Autoneg; | ||
| 352 | |||
| 353 | return result | mii_adv_to_ethtool_adv_t(lpa); | ||
| 354 | } | ||
| 355 | |||
| 356 | /** | ||
| 357 | * mii_stat1000_to_ethtool_lpa_t | ||
| 358 | * @lpa: value of the MII_STAT1000 register | ||
| 359 | * | ||
| 360 | * A small helper function that translates MII_STAT1000 | ||
| 361 | * bits, when in 1000Base-T mode, to ethtool | ||
| 362 | * advertisement settings. | ||
| 363 | */ | ||
| 364 | static inline u32 mii_stat1000_to_ethtool_lpa_t(u32 lpa) | ||
| 365 | { | ||
| 366 | u32 result = 0; | ||
| 367 | |||
| 368 | if (lpa & LPA_1000HALF) | ||
| 369 | result |= ADVERTISED_1000baseT_Half; | ||
| 370 | if (lpa & LPA_1000FULL) | ||
| 371 | result |= ADVERTISED_1000baseT_Full; | ||
| 372 | |||
| 373 | return result; | ||
| 374 | } | ||
| 375 | |||
| 376 | /** | ||
| 377 | * ethtool_adv_to_mii_adv_x | ||
| 378 | * @ethadv: the ethtool advertisement settings | ||
| 379 | * | ||
| 380 | * A small helper function that translates ethtool advertisement | ||
| 381 | * settings to phy autonegotiation advertisements for the | ||
| 382 | * MII_CTRL1000 register when in 1000Base-X mode. | ||
| 383 | */ | ||
| 384 | static inline u32 ethtool_adv_to_mii_adv_x(u32 ethadv) | ||
| 385 | { | ||
| 386 | u32 result = 0; | ||
| 387 | |||
| 388 | if (ethadv & ADVERTISED_1000baseT_Half) | ||
| 389 | result |= ADVERTISE_1000XHALF; | ||
| 390 | if (ethadv & ADVERTISED_1000baseT_Full) | ||
| 391 | result |= ADVERTISE_1000XFULL; | ||
| 392 | if (ethadv & ADVERTISED_Pause) | ||
| 393 | result |= ADVERTISE_1000XPAUSE; | ||
| 394 | if (ethadv & ADVERTISED_Asym_Pause) | ||
| 395 | result |= ADVERTISE_1000XPSE_ASYM; | ||
| 396 | |||
| 397 | return result; | ||
| 398 | } | ||
| 399 | |||
| 400 | /** | ||
| 401 | * mii_adv_to_ethtool_adv_x | ||
| 402 | * @adv: value of the MII_CTRL1000 register | ||
| 403 | * | ||
| 404 | * A small helper function that translates MII_CTRL1000 | ||
| 405 | * bits, when in 1000Base-X mode, to ethtool | ||
| 406 | * advertisement settings. | ||
| 407 | */ | ||
| 408 | static inline u32 mii_adv_to_ethtool_adv_x(u32 adv) | ||
| 409 | { | ||
| 410 | u32 result = 0; | ||
| 411 | |||
| 412 | if (adv & ADVERTISE_1000XHALF) | ||
| 413 | result |= ADVERTISED_1000baseT_Half; | ||
| 414 | if (adv & ADVERTISE_1000XFULL) | ||
| 415 | result |= ADVERTISED_1000baseT_Full; | ||
| 416 | if (adv & ADVERTISE_1000XPAUSE) | ||
| 417 | result |= ADVERTISED_Pause; | ||
| 418 | if (adv & ADVERTISE_1000XPSE_ASYM) | ||
| 419 | result |= ADVERTISED_Asym_Pause; | ||
| 420 | |||
| 421 | return result; | ||
| 422 | } | ||
| 423 | |||
| 424 | /** | ||
| 425 | * mii_lpa_to_ethtool_lpa_x | ||
| 426 | * @lpa: value of the MII_LPA register | ||
| 427 | * | ||
| 428 | * A small helper function that translates MII_LPA | ||
| 429 | * bits, when in 1000Base-X mode, to ethtool | ||
| 430 | * LP advertisement settings. | ||
| 431 | */ | ||
| 432 | static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa) | ||
| 433 | { | ||
| 434 | u32 result = 0; | ||
| 435 | |||
| 436 | if (lpa & LPA_LPACK) | ||
| 437 | result |= ADVERTISED_Autoneg; | ||
| 438 | |||
| 439 | return result | mii_adv_to_ethtool_adv_x(lpa); | ||
| 440 | } | ||
| 441 | |||
| 442 | /** | ||
| 243 | * mii_advertise_flowctrl - get flow control advertisement flags | 443 | * mii_advertise_flowctrl - get flow control advertisement flags |
| 244 | * @cap: Flow control capabilities (FLOW_CTRL_RX, FLOW_CTRL_TX or both) | 444 | * @cap: Flow control capabilities (FLOW_CTRL_RX, FLOW_CTRL_TX or both) |
| 245 | */ | 445 | */ |
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index b56e4587208d..9958ff2cad3c 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h | |||
| @@ -59,12 +59,15 @@ enum { | |||
| 59 | MLX4_CMD_HW_HEALTH_CHECK = 0x50, | 59 | MLX4_CMD_HW_HEALTH_CHECK = 0x50, |
| 60 | MLX4_CMD_SET_PORT = 0xc, | 60 | MLX4_CMD_SET_PORT = 0xc, |
| 61 | MLX4_CMD_SET_NODE = 0x5a, | 61 | MLX4_CMD_SET_NODE = 0x5a, |
| 62 | MLX4_CMD_QUERY_FUNC = 0x56, | ||
| 62 | MLX4_CMD_ACCESS_DDR = 0x2e, | 63 | MLX4_CMD_ACCESS_DDR = 0x2e, |
| 63 | MLX4_CMD_MAP_ICM = 0xffa, | 64 | MLX4_CMD_MAP_ICM = 0xffa, |
| 64 | MLX4_CMD_UNMAP_ICM = 0xff9, | 65 | MLX4_CMD_UNMAP_ICM = 0xff9, |
| 65 | MLX4_CMD_MAP_ICM_AUX = 0xffc, | 66 | MLX4_CMD_MAP_ICM_AUX = 0xffc, |
| 66 | MLX4_CMD_UNMAP_ICM_AUX = 0xffb, | 67 | MLX4_CMD_UNMAP_ICM_AUX = 0xffb, |
| 67 | MLX4_CMD_SET_ICM_SIZE = 0xffd, | 68 | MLX4_CMD_SET_ICM_SIZE = 0xffd, |
| 69 | /* master notifies FW when a slave's FLR is done */ | ||
| 70 | MLX4_CMD_INFORM_FLR_DONE = 0x5b, | ||
| 68 | 71 | ||
| 69 | /* TPT commands */ | 72 | /* TPT commands */ |
| 70 | MLX4_CMD_SW2HW_MPT = 0xd, | 73 | MLX4_CMD_SW2HW_MPT = 0xd, |
| @@ -119,6 +122,26 @@ enum { | |||
| 119 | /* miscellaneous commands */ | 122 | /* miscellaneous commands */ |
| 120 | MLX4_CMD_DIAG_RPRT = 0x30, | 123 | MLX4_CMD_DIAG_RPRT = 0x30, |
| 121 | MLX4_CMD_NOP = 0x31, | 124 | MLX4_CMD_NOP = 0x31, |
| 125 | MLX4_CMD_ACCESS_MEM = 0x2e, | ||
| 126 | MLX4_CMD_SET_VEP = 0x52, | ||
| 127 | |||
| 128 | /* Ethernet specific commands */ | ||
| 129 | MLX4_CMD_SET_VLAN_FLTR = 0x47, | ||
| 130 | MLX4_CMD_SET_MCAST_FLTR = 0x48, | ||
| 131 | MLX4_CMD_DUMP_ETH_STATS = 0x49, | ||
| 132 | |||
| 133 | /* Communication channel commands */ | ||
| 134 | MLX4_CMD_ARM_COMM_CHANNEL = 0x57, | ||
| 135 | MLX4_CMD_GEN_EQE = 0x58, | ||
| 136 | |||
| 137 | /* virtual commands */ | ||
| 138 | MLX4_CMD_ALLOC_RES = 0xf00, | ||
| 139 | MLX4_CMD_FREE_RES = 0xf01, | ||
| 140 | MLX4_CMD_MCAST_ATTACH = 0xf05, | ||
| 141 | MLX4_CMD_UCAST_ATTACH = 0xf06, | ||
| 142 | MLX4_CMD_PROMISC = 0xf08, | ||
| 143 | MLX4_CMD_QUERY_FUNC_CAP = 0xf0a, | ||
| 144 | MLX4_CMD_QP_ATTACH = 0xf0b, | ||
| 122 | 145 | ||
| 123 | /* debug commands */ | 146 | /* debug commands */ |
| 124 | MLX4_CMD_QUERY_DEBUG_MSG = 0x2a, | 147 | MLX4_CMD_QUERY_DEBUG_MSG = 0x2a, |
| @@ -126,6 +149,7 @@ enum { | |||
| 126 | 149 | ||
| 127 | /* statistics commands */ | 150 | /* statistics commands */ |
| 128 | MLX4_CMD_QUERY_IF_STAT = 0X54, | 151 | MLX4_CMD_QUERY_IF_STAT = 0X54, |
| 152 | MLX4_CMD_SET_IF_STAT = 0X55, | ||
| 129 | }; | 153 | }; |
| 130 | 154 | ||
| 131 | enum { | 155 | enum { |
| @@ -135,7 +159,8 @@ enum { | |||
| 135 | }; | 159 | }; |
| 136 | 160 | ||
| 137 | enum { | 161 | enum { |
| 138 | MLX4_MAILBOX_SIZE = 4096 | 162 | MLX4_MAILBOX_SIZE = 4096, |
| 163 | MLX4_ACCESS_MEM_ALIGN = 256, | ||
| 139 | }; | 164 | }; |
| 140 | 165 | ||
| 141 | enum { | 166 | enum { |
| @@ -148,6 +173,11 @@ enum { | |||
| 148 | MLX4_SET_PORT_GID_TABLE = 0x5, | 173 | MLX4_SET_PORT_GID_TABLE = 0x5, |
| 149 | }; | 174 | }; |
| 150 | 175 | ||
| 176 | enum { | ||
| 177 | MLX4_CMD_WRAPPED, | ||
| 178 | MLX4_CMD_NATIVE | ||
| 179 | }; | ||
| 180 | |||
| 151 | struct mlx4_dev; | 181 | struct mlx4_dev; |
| 152 | 182 | ||
| 153 | struct mlx4_cmd_mailbox { | 183 | struct mlx4_cmd_mailbox { |
| @@ -157,23 +187,24 @@ struct mlx4_cmd_mailbox { | |||
| 157 | 187 | ||
| 158 | int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param, | 188 | int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param, |
| 159 | int out_is_imm, u32 in_modifier, u8 op_modifier, | 189 | int out_is_imm, u32 in_modifier, u8 op_modifier, |
| 160 | u16 op, unsigned long timeout); | 190 | u16 op, unsigned long timeout, int native); |
| 161 | 191 | ||
| 162 | /* Invoke a command with no output parameter */ | 192 | /* Invoke a command with no output parameter */ |
| 163 | static inline int mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u32 in_modifier, | 193 | static inline int mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u32 in_modifier, |
| 164 | u8 op_modifier, u16 op, unsigned long timeout) | 194 | u8 op_modifier, u16 op, unsigned long timeout, |
| 195 | int native) | ||
| 165 | { | 196 | { |
| 166 | return __mlx4_cmd(dev, in_param, NULL, 0, in_modifier, | 197 | return __mlx4_cmd(dev, in_param, NULL, 0, in_modifier, |
| 167 | op_modifier, op, timeout); | 198 | op_modifier, op, timeout, native); |
| 168 | } | 199 | } |
| 169 | 200 | ||
| 170 | /* Invoke a command with an output mailbox */ | 201 | /* Invoke a command with an output mailbox */ |
| 171 | static inline int mlx4_cmd_box(struct mlx4_dev *dev, u64 in_param, u64 out_param, | 202 | static inline int mlx4_cmd_box(struct mlx4_dev *dev, u64 in_param, u64 out_param, |
| 172 | u32 in_modifier, u8 op_modifier, u16 op, | 203 | u32 in_modifier, u8 op_modifier, u16 op, |
| 173 | unsigned long timeout) | 204 | unsigned long timeout, int native) |
| 174 | { | 205 | { |
| 175 | return __mlx4_cmd(dev, in_param, &out_param, 0, in_modifier, | 206 | return __mlx4_cmd(dev, in_param, &out_param, 0, in_modifier, |
| 176 | op_modifier, op, timeout); | 207 | op_modifier, op, timeout, native); |
| 177 | } | 208 | } |
| 178 | 209 | ||
| 179 | /* | 210 | /* |
| @@ -183,13 +214,17 @@ static inline int mlx4_cmd_box(struct mlx4_dev *dev, u64 in_param, u64 out_param | |||
| 183 | */ | 214 | */ |
| 184 | static inline int mlx4_cmd_imm(struct mlx4_dev *dev, u64 in_param, u64 *out_param, | 215 | static inline int mlx4_cmd_imm(struct mlx4_dev *dev, u64 in_param, u64 *out_param, |
| 185 | u32 in_modifier, u8 op_modifier, u16 op, | 216 | u32 in_modifier, u8 op_modifier, u16 op, |
| 186 | unsigned long timeout) | 217 | unsigned long timeout, int native) |
| 187 | { | 218 | { |
| 188 | return __mlx4_cmd(dev, in_param, out_param, 1, in_modifier, | 219 | return __mlx4_cmd(dev, in_param, out_param, 1, in_modifier, |
| 189 | op_modifier, op, timeout); | 220 | op_modifier, op, timeout, native); |
| 190 | } | 221 | } |
| 191 | 222 | ||
| 192 | struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev); | 223 | struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev); |
| 193 | void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox); | 224 | void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox); |
| 194 | 225 | ||
| 226 | u32 mlx4_comm_get_version(void); | ||
| 227 | |||
| 228 | #define MLX4_COMM_GET_IF_REV(cmd_chan_ver) (u8)((cmd_chan_ver) >> 8) | ||
| 229 | |||
| 195 | #endif /* MLX4_CMD_H */ | 230 | #endif /* MLX4_CMD_H */ |
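The new MLX4_COMM_GET_IF_REV() macro simply picks the interface revision out of the high byte of the communication-channel version word returned by mlx4_comm_get_version(). A hedged, standalone illustration of that extraction (the sample version value is made up):

#include <stdint.h>
#include <stdio.h>

/* Mirrors MLX4_COMM_GET_IF_REV(cmd_chan_ver) from the hunk above:
 * the interface revision lives in the high byte of the version word. */
#define COMM_GET_IF_REV(v)	((uint8_t)((v) >> 8))

int main(void)
{
	uint16_t cmd_chan_ver = 0x0103;	/* hypothetical: rev 1, minor 3 */
	unsigned int rev = COMM_GET_IF_REV(cmd_chan_ver);

	printf("interface revision = %u\n", rev);
	return 0;
}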
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 84b0b1848f17..5c4fe8e5bfe5 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
| @@ -47,6 +47,9 @@ | |||
| 47 | enum { | 47 | enum { |
| 48 | MLX4_FLAG_MSI_X = 1 << 0, | 48 | MLX4_FLAG_MSI_X = 1 << 0, |
| 49 | MLX4_FLAG_OLD_PORT_CMDS = 1 << 1, | 49 | MLX4_FLAG_OLD_PORT_CMDS = 1 << 1, |
| 50 | MLX4_FLAG_MASTER = 1 << 2, | ||
| 51 | MLX4_FLAG_SLAVE = 1 << 3, | ||
| 52 | MLX4_FLAG_SRIOV = 1 << 4, | ||
| 50 | }; | 53 | }; |
| 51 | 54 | ||
| 52 | enum { | 55 | enum { |
| @@ -58,6 +61,15 @@ enum { | |||
| 58 | }; | 61 | }; |
| 59 | 62 | ||
| 60 | enum { | 63 | enum { |
| 64 | MLX4_MAX_NUM_PF = 16, | ||
| 65 | MLX4_MAX_NUM_VF = 64, | ||
| 66 | MLX4_MFUNC_MAX = 80, | ||
| 67 | MLX4_MFUNC_EQ_NUM = 4, | ||
| 68 | MLX4_MFUNC_MAX_EQES = 8, | ||
| 69 | MLX4_MFUNC_EQE_MASK = (MLX4_MFUNC_MAX_EQES - 1) | ||
| 70 | }; | ||
| 71 | |||
| 72 | enum { | ||
| 61 | MLX4_DEV_CAP_FLAG_RC = 1LL << 0, | 73 | MLX4_DEV_CAP_FLAG_RC = 1LL << 0, |
| 62 | MLX4_DEV_CAP_FLAG_UC = 1LL << 1, | 74 | MLX4_DEV_CAP_FLAG_UC = 1LL << 1, |
| 63 | MLX4_DEV_CAP_FLAG_UD = 1LL << 2, | 75 | MLX4_DEV_CAP_FLAG_UD = 1LL << 2, |
| @@ -77,11 +89,13 @@ enum { | |||
| 77 | MLX4_DEV_CAP_FLAG_IBOE = 1LL << 30, | 89 | MLX4_DEV_CAP_FLAG_IBOE = 1LL << 30, |
| 78 | MLX4_DEV_CAP_FLAG_UC_LOOPBACK = 1LL << 32, | 90 | MLX4_DEV_CAP_FLAG_UC_LOOPBACK = 1LL << 32, |
| 79 | MLX4_DEV_CAP_FLAG_FCS_KEEP = 1LL << 34, | 91 | MLX4_DEV_CAP_FLAG_FCS_KEEP = 1LL << 34, |
| 80 | MLX4_DEV_CAP_FLAG_WOL = 1LL << 38, | 92 | MLX4_DEV_CAP_FLAG_WOL_PORT1 = 1LL << 37, |
| 93 | MLX4_DEV_CAP_FLAG_WOL_PORT2 = 1LL << 38, | ||
| 81 | MLX4_DEV_CAP_FLAG_UDP_RSS = 1LL << 40, | 94 | MLX4_DEV_CAP_FLAG_UDP_RSS = 1LL << 40, |
| 82 | MLX4_DEV_CAP_FLAG_VEP_UC_STEER = 1LL << 41, | 95 | MLX4_DEV_CAP_FLAG_VEP_UC_STEER = 1LL << 41, |
| 83 | MLX4_DEV_CAP_FLAG_VEP_MC_STEER = 1LL << 42, | 96 | MLX4_DEV_CAP_FLAG_VEP_MC_STEER = 1LL << 42, |
| 84 | MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48 | 97 | MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48, |
| 98 | MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55 | ||
| 85 | }; | 99 | }; |
| 86 | 100 | ||
| 87 | #define MLX4_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90) | 101 | #define MLX4_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90) |
| @@ -116,7 +130,11 @@ enum mlx4_event { | |||
| 116 | MLX4_EVENT_TYPE_PORT_CHANGE = 0x09, | 130 | MLX4_EVENT_TYPE_PORT_CHANGE = 0x09, |
| 117 | MLX4_EVENT_TYPE_EQ_OVERFLOW = 0x0f, | 131 | MLX4_EVENT_TYPE_EQ_OVERFLOW = 0x0f, |
| 118 | MLX4_EVENT_TYPE_ECC_DETECT = 0x0e, | 132 | MLX4_EVENT_TYPE_ECC_DETECT = 0x0e, |
| 119 | MLX4_EVENT_TYPE_CMD = 0x0a | 133 | MLX4_EVENT_TYPE_CMD = 0x0a, |
| 134 | MLX4_EVENT_TYPE_VEP_UPDATE = 0x19, | ||
| 135 | MLX4_EVENT_TYPE_COMM_CHANNEL = 0x18, | ||
| 136 | MLX4_EVENT_TYPE_FLR_EVENT = 0x1c, | ||
| 137 | MLX4_EVENT_TYPE_NONE = 0xff, | ||
| 120 | }; | 138 | }; |
| 121 | 139 | ||
| 122 | enum { | 140 | enum { |
| @@ -183,6 +201,7 @@ enum mlx4_qp_region { | |||
| 183 | }; | 201 | }; |
| 184 | 202 | ||
| 185 | enum mlx4_port_type { | 203 | enum mlx4_port_type { |
| 204 | MLX4_PORT_TYPE_NONE = 0, | ||
| 186 | MLX4_PORT_TYPE_IB = 1, | 205 | MLX4_PORT_TYPE_IB = 1, |
| 187 | MLX4_PORT_TYPE_ETH = 2, | 206 | MLX4_PORT_TYPE_ETH = 2, |
| 188 | MLX4_PORT_TYPE_AUTO = 3 | 207 | MLX4_PORT_TYPE_AUTO = 3 |
| @@ -215,6 +234,7 @@ static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) | |||
| 215 | 234 | ||
| 216 | struct mlx4_caps { | 235 | struct mlx4_caps { |
| 217 | u64 fw_ver; | 236 | u64 fw_ver; |
| 237 | u32 function; | ||
| 218 | int num_ports; | 238 | int num_ports; |
| 219 | int vl_cap[MLX4_MAX_PORTS + 1]; | 239 | int vl_cap[MLX4_MAX_PORTS + 1]; |
| 220 | int ib_mtu_cap[MLX4_MAX_PORTS + 1]; | 240 | int ib_mtu_cap[MLX4_MAX_PORTS + 1]; |
| @@ -229,6 +249,7 @@ struct mlx4_caps { | |||
| 229 | u64 trans_code[MLX4_MAX_PORTS + 1]; | 249 | u64 trans_code[MLX4_MAX_PORTS + 1]; |
| 230 | int local_ca_ack_delay; | 250 | int local_ca_ack_delay; |
| 231 | int num_uars; | 251 | int num_uars; |
| 252 | u32 uar_page_size; | ||
| 232 | int bf_reg_size; | 253 | int bf_reg_size; |
| 233 | int bf_regs_per_page; | 254 | int bf_regs_per_page; |
| 234 | int max_sq_sg; | 255 | int max_sq_sg; |
| @@ -252,8 +273,7 @@ struct mlx4_caps { | |||
| 252 | int num_comp_vectors; | 273 | int num_comp_vectors; |
| 253 | int comp_pool; | 274 | int comp_pool; |
| 254 | int num_mpts; | 275 | int num_mpts; |
| 255 | int num_mtt_segs; | 276 | int num_mtts; |
| 256 | int mtts_per_seg; | ||
| 257 | int fmr_reserved_mtts; | 277 | int fmr_reserved_mtts; |
| 258 | int reserved_mtts; | 278 | int reserved_mtts; |
| 259 | int reserved_mrws; | 279 | int reserved_mrws; |
| @@ -283,7 +303,9 @@ struct mlx4_caps { | |||
| 283 | int log_num_prios; | 303 | int log_num_prios; |
| 284 | enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1]; | 304 | enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1]; |
| 285 | u8 supported_type[MLX4_MAX_PORTS + 1]; | 305 | u8 supported_type[MLX4_MAX_PORTS + 1]; |
| 286 | u32 port_mask; | 306 | u8 suggested_type[MLX4_MAX_PORTS + 1]; |
| 307 | u8 default_sense[MLX4_MAX_PORTS + 1]; | ||
| 308 | u32 port_mask[MLX4_MAX_PORTS + 1]; | ||
| 287 | enum mlx4_port_type possible_type[MLX4_MAX_PORTS + 1]; | 309 | enum mlx4_port_type possible_type[MLX4_MAX_PORTS + 1]; |
| 288 | u32 max_counters; | 310 | u32 max_counters; |
| 289 | u8 ext_port_cap[MLX4_MAX_PORTS + 1]; | 311 | u8 ext_port_cap[MLX4_MAX_PORTS + 1]; |
| @@ -303,7 +325,7 @@ struct mlx4_buf { | |||
| 303 | }; | 325 | }; |
| 304 | 326 | ||
| 305 | struct mlx4_mtt { | 327 | struct mlx4_mtt { |
| 306 | u32 first_seg; | 328 | u32 offset; |
| 307 | int order; | 329 | int order; |
| 308 | int page_shift; | 330 | int page_shift; |
| 309 | }; | 331 | }; |
| @@ -465,10 +487,12 @@ struct mlx4_counter { | |||
| 465 | struct mlx4_dev { | 487 | struct mlx4_dev { |
| 466 | struct pci_dev *pdev; | 488 | struct pci_dev *pdev; |
| 467 | unsigned long flags; | 489 | unsigned long flags; |
| 490 | unsigned long num_slaves; | ||
| 468 | struct mlx4_caps caps; | 491 | struct mlx4_caps caps; |
| 469 | struct radix_tree_root qp_table_tree; | 492 | struct radix_tree_root qp_table_tree; |
| 470 | u8 rev_id; | 493 | u8 rev_id; |
| 471 | char board_id[MLX4_BOARD_ID_LEN]; | 494 | char board_id[MLX4_BOARD_ID_LEN]; |
| 495 | int num_vfs; | ||
| 472 | }; | 496 | }; |
| 473 | 497 | ||
| 474 | struct mlx4_init_port_param { | 498 | struct mlx4_init_port_param { |
| @@ -487,14 +511,32 @@ struct mlx4_init_port_param { | |||
| 487 | 511 | ||
| 488 | #define mlx4_foreach_port(port, dev, type) \ | 512 | #define mlx4_foreach_port(port, dev, type) \ |
| 489 | for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ | 513 | for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ |
| 490 | if (((type) == MLX4_PORT_TYPE_IB ? (dev)->caps.port_mask : \ | 514 | if ((type) == (dev)->caps.port_mask[(port)]) |
| 491 | ~(dev)->caps.port_mask) & 1 << ((port) - 1)) | ||
| 492 | 515 | ||
| 493 | #define mlx4_foreach_ib_transport_port(port, dev) \ | 516 | #define mlx4_foreach_ib_transport_port(port, dev) \ |
| 494 | for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ | 517 | for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ |
| 495 | if (((dev)->caps.port_mask & 1 << ((port) - 1)) || \ | 518 | if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \ |
| 496 | ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) | 519 | ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) |
| 497 | 520 | ||
| 521 | static inline int mlx4_is_master(struct mlx4_dev *dev) | ||
| 522 | { | ||
| 523 | return dev->flags & MLX4_FLAG_MASTER; | ||
| 524 | } | ||
| 525 | |||
| 526 | static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn) | ||
| 527 | { | ||
| 528 | return (qpn < dev->caps.sqp_start + 8); | ||
| 529 | } | ||
| 530 | |||
| 531 | static inline int mlx4_is_mfunc(struct mlx4_dev *dev) | ||
| 532 | { | ||
| 533 | return dev->flags & (MLX4_FLAG_SLAVE | MLX4_FLAG_MASTER); | ||
| 534 | } | ||
| 535 | |||
| 536 | static inline int mlx4_is_slave(struct mlx4_dev *dev) | ||
| 537 | { | ||
| 538 | return dev->flags & MLX4_FLAG_SLAVE; | ||
| 539 | } | ||
| 498 | 540 | ||
| 499 | int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, | 541 | int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, |
| 500 | struct mlx4_buf *buf); | 542 | struct mlx4_buf *buf); |
| @@ -560,6 +602,10 @@ int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_waterm | |||
| 560 | int mlx4_INIT_PORT(struct mlx4_dev *dev, int port); | 602 | int mlx4_INIT_PORT(struct mlx4_dev *dev, int port); |
| 561 | int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port); | 603 | int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port); |
| 562 | 604 | ||
| 605 | int mlx4_unicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | ||
| 606 | int block_mcast_loopback, enum mlx4_protocol prot); | ||
| 607 | int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | ||
| 608 | enum mlx4_protocol prot); | ||
| 563 | int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | 609 | int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], |
| 564 | int block_mcast_loopback, enum mlx4_protocol protocol); | 610 | int block_mcast_loopback, enum mlx4_protocol protocol); |
| 565 | int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | 611 | int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], |
| @@ -570,9 +616,11 @@ int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port); | |||
| 570 | int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port); | 616 | int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port); |
| 571 | int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode); | 617 | int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode); |
| 572 | 618 | ||
| 573 | int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap); | 619 | int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac); |
| 574 | void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn); | 620 | void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac); |
| 575 | int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap); | 621 | int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac); |
| 622 | int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn); | ||
| 623 | void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn); | ||
| 576 | 624 | ||
| 577 | int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx); | 625 | int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx); |
| 578 | int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); | 626 | int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); |
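With port_mask now a per-port array of enum mlx4_port_type, mlx4_foreach_port() compares the requested type directly against the cached type of each port instead of testing a bitmask. The sketch below mimics that iteration with local stand-in types; MLX4_MAX_PORTS and the sample capabilities are assumptions for illustration only.

#include <stdio.h>

/* Minimal stand-ins for the types used by mlx4_foreach_port() above;
 * field names follow the patch, everything else is illustrative. */
enum mlx4_port_type {
	MLX4_PORT_TYPE_NONE = 0,
	MLX4_PORT_TYPE_IB   = 1,
	MLX4_PORT_TYPE_ETH  = 2,
	MLX4_PORT_TYPE_AUTO = 3
};

#define MLX4_MAX_PORTS	2	/* assumed value */

struct mlx4_caps {
	int num_ports;
	enum mlx4_port_type port_mask[MLX4_MAX_PORTS + 1];
};

struct mlx4_dev {
	struct mlx4_caps caps;
};

/* Same shape as the reworked mlx4_foreach_port(): visit only the ports
 * whose cached type matches the requested one. */
#define foreach_port(port, dev, type)					\
	for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)	\
		if ((type) == (dev)->caps.port_mask[(port)])

int main(void)
{
	struct mlx4_dev dev = {
		.caps = {
			.num_ports = 2,
			.port_mask = { MLX4_PORT_TYPE_NONE,
				       MLX4_PORT_TYPE_ETH,
				       MLX4_PORT_TYPE_IB },
		},
	};
	int port;

	foreach_port(port, &dev, MLX4_PORT_TYPE_ETH)
		printf("port %d is Ethernet\n", port);
	return 0;
}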
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 48cc4cb97858..bee8fa231276 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h | |||
| @@ -97,6 +97,33 @@ enum { | |||
| 97 | MLX4_QP_BIT_RIC = 1 << 4, | 97 | MLX4_QP_BIT_RIC = 1 << 4, |
| 98 | }; | 98 | }; |
| 99 | 99 | ||
| 100 | enum { | ||
| 101 | MLX4_RSS_HASH_XOR = 0, | ||
| 102 | MLX4_RSS_HASH_TOP = 1, | ||
| 103 | |||
| 104 | MLX4_RSS_UDP_IPV6 = 1 << 0, | ||
| 105 | MLX4_RSS_UDP_IPV4 = 1 << 1, | ||
| 106 | MLX4_RSS_TCP_IPV6 = 1 << 2, | ||
| 107 | MLX4_RSS_IPV6 = 1 << 3, | ||
| 108 | MLX4_RSS_TCP_IPV4 = 1 << 4, | ||
| 109 | MLX4_RSS_IPV4 = 1 << 5, | ||
| 110 | |||
| 111 | /* offset of mlx4_rss_context within mlx4_qp_context.pri_path */ | ||
| 112 | MLX4_RSS_OFFSET_IN_QPC_PRI_PATH = 0x24, | ||
| 113 | /* offset of the RSS-indirection-QP flag within mlx4_qp_context.flags */ | ||
| 114 | MLX4_RSS_QPC_FLAG_OFFSET = 13, | ||
| 115 | }; | ||
| 116 | |||
| 117 | struct mlx4_rss_context { | ||
| 118 | __be32 base_qpn; | ||
| 119 | __be32 default_qpn; | ||
| 120 | u16 reserved; | ||
| 121 | u8 hash_fn; | ||
| 122 | u8 flags; | ||
| 123 | __be32 rss_key[10]; | ||
| 124 | __be32 base_qpn_udp; | ||
| 125 | }; | ||
| 126 | |||
| 100 | struct mlx4_qp_path { | 127 | struct mlx4_qp_path { |
| 101 | u8 fl; | 128 | u8 fl; |
| 102 | u8 reserved1[2]; | 129 | u8 reserved1[2]; |
| @@ -183,6 +210,7 @@ struct mlx4_wqe_ctrl_seg { | |||
| 183 | * [4] IP checksum | 210 | * [4] IP checksum |
| 184 | * [3:2] C (generate completion queue entry) | 211 | * [3:2] C (generate completion queue entry) |
| 185 | * [1] SE (solicited event) | 212 | * [1] SE (solicited event) |
| 213 | * [0] FL (force loopback) | ||
| 186 | */ | 214 | */ |
| 187 | __be32 srcrb_flags; | 215 | __be32 srcrb_flags; |
| 188 | /* | 216 | /* |
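The new RSS definitions above describe both the hash-type bits and where the RSS context lives inside the QP context (byte 0x24 of pri_path, with bit 13 of the flags word marking an RSS indirection QP). The following standalone sketch only demonstrates that layout arithmetic, with a plain byte buffer standing in for struct mlx4_qp_context; the chosen hash selection is illustrative, not taken from this patch.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Constants copied from the hunk above. */
#define MLX4_RSS_OFFSET_IN_QPC_PRI_PATH	0x24
#define MLX4_RSS_QPC_FLAG_OFFSET	13
#define MLX4_RSS_TCP_IPV4		(1 << 4)
#define MLX4_RSS_IPV4			(1 << 5)

/* Same field layout as struct mlx4_rss_context in the patch (big-endian
 * fields shown as plain integers, since this is only a layout sketch). */
struct rss_context {
	uint32_t base_qpn;
	uint32_t default_qpn;
	uint16_t reserved;
	uint8_t  hash_fn;
	uint8_t  flags;
	uint32_t rss_key[10];
	uint32_t base_qpn_udp;
};

int main(void)
{
	uint8_t qpc[256] = { 0 };	/* stand-in for mlx4_qp_context */
	uint32_t qpc_flags = 0;
	struct rss_context rss = { .flags = MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 };

	/* Mark the QP as an RSS indirection QP and drop the context into
	 * pri_path at the documented offset. */
	qpc_flags |= 1u << MLX4_RSS_QPC_FLAG_OFFSET;
	memcpy(qpc + MLX4_RSS_OFFSET_IN_QPC_PRI_PATH, &rss, sizeof(rss));

	printf("rss context: %zu bytes at offset 0x%x, qpc flags=0x%08x\n",
	       sizeof(rss), MLX4_RSS_OFFSET_IN_QPC_PRI_PATH,
	       (unsigned)qpc_flags);
	return 0;
}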
diff --git a/include/linux/mm.h b/include/linux/mm.h index 4baadd18f4ad..5d9b4c9813bd 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
| @@ -1253,41 +1253,34 @@ static inline void pgtable_page_dtor(struct page *page) | |||
| 1253 | extern void free_area_init(unsigned long * zones_size); | 1253 | extern void free_area_init(unsigned long * zones_size); |
| 1254 | extern void free_area_init_node(int nid, unsigned long * zones_size, | 1254 | extern void free_area_init_node(int nid, unsigned long * zones_size, |
| 1255 | unsigned long zone_start_pfn, unsigned long *zholes_size); | 1255 | unsigned long zone_start_pfn, unsigned long *zholes_size); |
| 1256 | #ifdef CONFIG_ARCH_POPULATES_NODE_MAP | 1256 | #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP |
| 1257 | /* | 1257 | /* |
| 1258 | * With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise its | 1258 | * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its |
| 1259 | * zones, allocate the backing mem_map and account for memory holes in a more | 1259 | * zones, allocate the backing mem_map and account for memory holes in a more |
| 1260 | * architecture independent manner. This is a substitute for creating the | 1260 | * architecture independent manner. This is a substitute for creating the |
| 1261 | * zone_sizes[] and zholes_size[] arrays and passing them to | 1261 | * zone_sizes[] and zholes_size[] arrays and passing them to |
| 1262 | * free_area_init_node() | 1262 | * free_area_init_node() |
| 1263 | * | 1263 | * |
| 1264 | * An architecture is expected to register range of page frames backed by | 1264 | * An architecture is expected to register range of page frames backed by |
| 1265 | * physical memory with add_active_range() before calling | 1265 | * physical memory with memblock_add[_node]() before calling |
| 1266 | * free_area_init_nodes() passing in the PFN each zone ends at. At a basic | 1266 | * free_area_init_nodes() passing in the PFN each zone ends at. At a basic |
| 1267 | * usage, an architecture is expected to do something like | 1267 | * usage, an architecture is expected to do something like |
| 1268 | * | 1268 | * |
| 1269 | * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn, | 1269 | * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn, |
| 1270 | * max_highmem_pfn}; | 1270 | * max_highmem_pfn}; |
| 1271 | * for_each_valid_physical_page_range() | 1271 | * for_each_valid_physical_page_range() |
| 1272 | * add_active_range(node_id, start_pfn, end_pfn) | 1272 | * memblock_add_node(base, size, nid) |
| 1273 | * free_area_init_nodes(max_zone_pfns); | 1273 | * free_area_init_nodes(max_zone_pfns); |
| 1274 | * | 1274 | * |
| 1275 | * If the architecture guarantees that there are no holes in the ranges | 1275 | * free_bootmem_with_active_regions() calls free_bootmem_node() for each |
| 1276 | * registered with add_active_range(), free_bootmem_active_regions() | 1276 | * registered physical page range. Similarly |
| 1277 | * will call free_bootmem_node() for each registered physical page range. | 1277 | * sparse_memory_present_with_active_regions() calls memory_present() for |
| 1278 | * Similarly sparse_memory_present_with_active_regions() calls | 1278 | * each range when SPARSEMEM is enabled. |
| 1279 | * memory_present() for each range when SPARSEMEM is enabled. | ||
| 1280 | * | 1279 | * |
| 1281 | * See mm/page_alloc.c for more information on each function exposed by | 1280 | * See mm/page_alloc.c for more information on each function exposed by |
| 1282 | * CONFIG_ARCH_POPULATES_NODE_MAP | 1281 | * CONFIG_HAVE_MEMBLOCK_NODE_MAP. |
| 1283 | */ | 1282 | */ |
| 1284 | extern void free_area_init_nodes(unsigned long *max_zone_pfn); | 1283 | extern void free_area_init_nodes(unsigned long *max_zone_pfn); |
| 1285 | extern void add_active_range(unsigned int nid, unsigned long start_pfn, | ||
| 1286 | unsigned long end_pfn); | ||
| 1287 | extern void remove_active_range(unsigned int nid, unsigned long start_pfn, | ||
| 1288 | unsigned long end_pfn); | ||
| 1289 | extern void remove_all_active_ranges(void); | ||
| 1290 | void sort_node_map(void); | ||
| 1291 | unsigned long node_map_pfn_alignment(void); | 1284 | unsigned long node_map_pfn_alignment(void); |
| 1292 | unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn, | 1285 | unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn, |
| 1293 | unsigned long end_pfn); | 1286 | unsigned long end_pfn); |
| @@ -1300,14 +1293,11 @@ extern void free_bootmem_with_active_regions(int nid, | |||
| 1300 | unsigned long max_low_pfn); | 1293 | unsigned long max_low_pfn); |
| 1301 | int add_from_early_node_map(struct range *range, int az, | 1294 | int add_from_early_node_map(struct range *range, int az, |
| 1302 | int nr_range, int nid); | 1295 | int nr_range, int nid); |
| 1303 | u64 __init find_memory_core_early(int nid, u64 size, u64 align, | ||
| 1304 | u64 goal, u64 limit); | ||
| 1305 | typedef int (*work_fn_t)(unsigned long, unsigned long, void *); | ||
| 1306 | extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data); | ||
| 1307 | extern void sparse_memory_present_with_active_regions(int nid); | 1296 | extern void sparse_memory_present_with_active_regions(int nid); |
| 1308 | #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ | ||
| 1309 | 1297 | ||
| 1310 | #if !defined(CONFIG_ARCH_POPULATES_NODE_MAP) && \ | 1298 | #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ |
| 1299 | |||
| 1300 | #if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \ | ||
| 1311 | !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) | 1301 | !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) |
| 1312 | static inline int __early_pfn_to_nid(unsigned long pfn) | 1302 | static inline int __early_pfn_to_nid(unsigned long pfn) |
| 1313 | { | 1303 | { |
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 415f2db414e1..c8ef9bc54d50 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h | |||
| @@ -218,6 +218,7 @@ struct mmc_card { | |||
| 218 | #define MMC_QUIRK_INAND_CMD38 (1<<6) /* iNAND devices have broken CMD38 */ | 218 | #define MMC_QUIRK_INAND_CMD38 (1<<6) /* iNAND devices have broken CMD38 */ |
| 219 | #define MMC_QUIRK_BLK_NO_CMD23 (1<<7) /* Avoid CMD23 for regular multiblock */ | 219 | #define MMC_QUIRK_BLK_NO_CMD23 (1<<7) /* Avoid CMD23 for regular multiblock */ |
| 220 | #define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8) /* Avoid sending 512 bytes in */ | 220 | #define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8) /* Avoid sending 512 bytes in */ |
| 221 | #define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */ | ||
| 221 | /* byte mode */ | 222 | /* byte mode */ |
| 222 | unsigned int poweroff_notify_state; /* eMMC4.5 notify feature */ | 223 | unsigned int poweroff_notify_state; /* eMMC4.5 notify feature */ |
| 223 | #define MMC_NO_POWER_NOTIFICATION 0 | 224 | #define MMC_NO_POWER_NOTIFICATION 0 |
| @@ -433,6 +434,11 @@ static inline int mmc_card_broken_byte_mode_512(const struct mmc_card *c) | |||
| 433 | return c->quirks & MMC_QUIRK_BROKEN_BYTE_MODE_512; | 434 | return c->quirks & MMC_QUIRK_BROKEN_BYTE_MODE_512; |
| 434 | } | 435 | } |
| 435 | 436 | ||
| 437 | static inline int mmc_card_long_read_time(const struct mmc_card *c) | ||
| 438 | { | ||
| 439 | return c->quirks & MMC_QUIRK_LONG_READ_TIME; | ||
| 440 | } | ||
| 441 | |||
| 436 | #define mmc_card_name(c) ((c)->cid.prod_name) | 442 | #define mmc_card_name(c) ((c)->cid.prod_name) |
| 437 | #define mmc_card_id(c) (dev_name(&(c)->dev)) | 443 | #define mmc_card_id(c) (dev_name(&(c)->dev)) |
| 438 | 444 | ||
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 188cb2ffe8db..3ac040f19369 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
| @@ -598,13 +598,13 @@ struct zonelist { | |||
| 598 | #endif | 598 | #endif |
| 599 | }; | 599 | }; |
| 600 | 600 | ||
| 601 | #ifdef CONFIG_ARCH_POPULATES_NODE_MAP | 601 | #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP |
| 602 | struct node_active_region { | 602 | struct node_active_region { |
| 603 | unsigned long start_pfn; | 603 | unsigned long start_pfn; |
| 604 | unsigned long end_pfn; | 604 | unsigned long end_pfn; |
| 605 | int nid; | 605 | int nid; |
| 606 | }; | 606 | }; |
| 607 | #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ | 607 | #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ |
| 608 | 608 | ||
| 609 | #ifndef CONFIG_DISCONTIGMEM | 609 | #ifndef CONFIG_DISCONTIGMEM |
| 610 | /* The array of struct pages - for discontigmem use pgdat->lmem_map */ | 610 | /* The array of struct pages - for discontigmem use pgdat->lmem_map */ |
| @@ -720,7 +720,7 @@ extern int movable_zone; | |||
| 720 | 720 | ||
| 721 | static inline int zone_movable_is_highmem(void) | 721 | static inline int zone_movable_is_highmem(void) |
| 722 | { | 722 | { |
| 723 | #if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP) | 723 | #if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE) |
| 724 | return movable_zone == ZONE_HIGHMEM; | 724 | return movable_zone == ZONE_HIGHMEM; |
| 725 | #else | 725 | #else |
| 726 | return 0; | 726 | return 0; |
| @@ -938,7 +938,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, | |||
| 938 | #endif | 938 | #endif |
| 939 | 939 | ||
| 940 | #if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \ | 940 | #if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \ |
| 941 | !defined(CONFIG_ARCH_POPULATES_NODE_MAP) | 941 | !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) |
| 942 | static inline unsigned long early_pfn_to_nid(unsigned long pfn) | 942 | static inline unsigned long early_pfn_to_nid(unsigned long pfn) |
| 943 | { | 943 | { |
| 944 | return 0; | 944 | return 0; |
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 468819cdde87..83ac0713ed0a 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h | |||
| @@ -542,4 +542,22 @@ struct isapnp_device_id { | |||
| 542 | kernel_ulong_t driver_data; /* data private to the driver */ | 542 | kernel_ulong_t driver_data; /* data private to the driver */ |
| 543 | }; | 543 | }; |
| 544 | 544 | ||
| 545 | /** | ||
| 546 | * struct amba_id - identifies a device on an AMBA bus | ||
| 547 | * @id: The significant bits of the hardware device ID | ||
| 548 | * @mask: Bitmask specifying which bits of the id field are significant when | ||
| 549 | * matching. A driver binds to a device when ((hardware device ID) & mask) | ||
| 550 | * == id. | ||
| 551 | * @data: Private data used by the driver. | ||
| 552 | */ | ||
| 553 | struct amba_id { | ||
| 554 | unsigned int id; | ||
| 555 | unsigned int mask; | ||
| 556 | #ifndef __KERNEL__ | ||
| 557 | kernel_ulong_t data; | ||
| 558 | #else | ||
| 559 | void *data; | ||
| 560 | #endif | ||
| 561 | }; | ||
| 562 | |||
| 545 | #endif /* LINUX_MOD_DEVICETABLE_H */ | 563 | #endif /* LINUX_MOD_DEVICETABLE_H */ |
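The kernel-doc added for struct amba_id spells out the match rule: a driver binds when ((hardware device ID) & mask) == id. A minimal sketch of that rule, using a hypothetical lookup helper and ID table (neither is part of the patch):

#include <stdio.h>

/* Mirrors the kernel-side layout of struct amba_id from the hunk above. */
struct amba_id {
	unsigned int id;
	unsigned int mask;
	void *data;
};

/* Hypothetical helper applying the documented match rule to a
 * zero-terminated table; not a kernel API. */
static const struct amba_id *amba_lookup(const struct amba_id *table,
					 unsigned int hwid)
{
	for (; table->id || table->mask; table++)
		if ((hwid & table->mask) == table->id)
			return table;
	return NULL;
}

int main(void)
{
	/* Hypothetical table: match any revision of one part number. */
	static const struct amba_id ids[] = {
		{ .id = 0x00041041, .mask = 0x000fffff },
		{ 0, 0, NULL },
	};

	printf("match: %s\n", amba_lookup(ids, 0x10041041) ? "yes" : "no");
	return 0;
}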
diff --git a/include/linux/neighbour.h b/include/linux/neighbour.h index a7003b7a695d..b188f68a08c9 100644 --- a/include/linux/neighbour.h +++ b/include/linux/neighbour.h | |||
| @@ -116,6 +116,7 @@ enum { | |||
| 116 | NDTPA_PROXY_DELAY, /* u64, msecs */ | 116 | NDTPA_PROXY_DELAY, /* u64, msecs */ |
| 117 | NDTPA_PROXY_QLEN, /* u32 */ | 117 | NDTPA_PROXY_QLEN, /* u32 */ |
| 118 | NDTPA_LOCKTIME, /* u64, msecs */ | 118 | NDTPA_LOCKTIME, /* u64, msecs */ |
| 119 | NDTPA_QUEUE_LENBYTES, /* u32 */ | ||
| 119 | __NDTPA_MAX | 120 | __NDTPA_MAX |
| 120 | }; | 121 | }; |
| 121 | #define NDTPA_MAX (__NDTPA_MAX - 1) | 122 | #define NDTPA_MAX (__NDTPA_MAX - 1) |
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h new file mode 100644 index 000000000000..77f5202977ce --- /dev/null +++ b/include/linux/netdev_features.h | |||
| @@ -0,0 +1,146 @@ | |||
| 1 | /* | ||
| 2 | * Network device features. | ||
| 3 | * | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or | ||
| 6 | * modify it under the terms of the GNU General Public License | ||
| 7 | * as published by the Free Software Foundation; either version | ||
| 8 | * 2 of the License, or (at your option) any later version. | ||
| 9 | */ | ||
| 10 | #ifndef _LINUX_NETDEV_FEATURES_H | ||
| 11 | #define _LINUX_NETDEV_FEATURES_H | ||
| 12 | |||
| 13 | #include <linux/types.h> | ||
| 14 | |||
| 15 | typedef u64 netdev_features_t; | ||
| 16 | |||
| 17 | enum { | ||
| 18 | NETIF_F_SG_BIT, /* Scatter/gather IO. */ | ||
| 19 | NETIF_F_IP_CSUM_BIT, /* Can checksum TCP/UDP over IPv4. */ | ||
| 20 | __UNUSED_NETIF_F_1, | ||
| 21 | NETIF_F_HW_CSUM_BIT, /* Can checksum all the packets. */ | ||
| 22 | NETIF_F_IPV6_CSUM_BIT, /* Can checksum TCP/UDP over IPV6 */ | ||
| 23 | NETIF_F_HIGHDMA_BIT, /* Can DMA to high memory. */ | ||
| 24 | NETIF_F_FRAGLIST_BIT, /* Scatter/gather IO. */ | ||
| 25 | NETIF_F_HW_VLAN_TX_BIT, /* Transmit VLAN hw acceleration */ | ||
| 26 | NETIF_F_HW_VLAN_RX_BIT, /* Receive VLAN hw acceleration */ | ||
| 27 | NETIF_F_HW_VLAN_FILTER_BIT, /* Receive filtering on VLAN */ | ||
| 28 | NETIF_F_VLAN_CHALLENGED_BIT, /* Device cannot handle VLAN packets */ | ||
| 29 | NETIF_F_GSO_BIT, /* Enable software GSO. */ | ||
| 30 | NETIF_F_LLTX_BIT, /* LockLess TX - deprecated. Please */ | ||
| 31 | /* do not use LLTX in new drivers */ | ||
| 32 | NETIF_F_NETNS_LOCAL_BIT, /* Does not change network namespaces */ | ||
| 33 | NETIF_F_GRO_BIT, /* Generic receive offload */ | ||
| 34 | NETIF_F_LRO_BIT, /* large receive offload */ | ||
| 35 | |||
| 36 | /**/NETIF_F_GSO_SHIFT, /* keep the order of SKB_GSO_* bits */ | ||
| 37 | NETIF_F_TSO_BIT /* ... TCPv4 segmentation */ | ||
| 38 | = NETIF_F_GSO_SHIFT, | ||
| 39 | NETIF_F_UFO_BIT, /* ... UDPv4 fragmentation */ | ||
| 40 | NETIF_F_GSO_ROBUST_BIT, /* ... ->SKB_GSO_DODGY */ | ||
| 41 | NETIF_F_TSO_ECN_BIT, /* ... TCP ECN support */ | ||
| 42 | NETIF_F_TSO6_BIT, /* ... TCPv6 segmentation */ | ||
| 43 | NETIF_F_FSO_BIT, /* ... FCoE segmentation */ | ||
| 44 | NETIF_F_GSO_RESERVED1, /* ... free (fill GSO_MASK to 8 bits) */ | ||
| 45 | /**/NETIF_F_GSO_LAST, /* [can't be last bit, see GSO_MASK] */ | ||
| 46 | NETIF_F_GSO_RESERVED2 /* ... free (fill GSO_MASK to 8 bits) */ | ||
| 47 | = NETIF_F_GSO_LAST, | ||
| 48 | |||
| 49 | NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */ | ||
| 50 | NETIF_F_SCTP_CSUM_BIT, /* SCTP checksum offload */ | ||
| 51 | NETIF_F_FCOE_MTU_BIT, /* Supports max FCoE MTU, 2158 bytes */ | ||
| 52 | NETIF_F_NTUPLE_BIT, /* N-tuple filters supported */ | ||
| 53 | NETIF_F_RXHASH_BIT, /* Receive hashing offload */ | ||
| 54 | NETIF_F_RXCSUM_BIT, /* Receive checksumming offload */ | ||
| 55 | NETIF_F_NOCACHE_COPY_BIT, /* Use no-cache copyfromuser */ | ||
| 56 | NETIF_F_LOOPBACK_BIT, /* Enable loopback */ | ||
| 57 | |||
| 58 | /* | ||
| 59 | * Add your fresh new feature above and remember to update | ||
| 60 | * netdev_features_strings[] in net/core/ethtool.c and maybe | ||
| 61 | * some feature mask #defines below. Please also describe it | ||
| 62 | * in Documentation/networking/netdev-features.txt. | ||
| 63 | */ | ||
| 64 | |||
| 65 | /**/NETDEV_FEATURE_COUNT | ||
| 66 | }; | ||
| 67 | |||
| 68 | /* copy'n'paste compression ;) */ | ||
| 69 | #define __NETIF_F_BIT(bit) ((netdev_features_t)1 << (bit)) | ||
| 70 | #define __NETIF_F(name) __NETIF_F_BIT(NETIF_F_##name##_BIT) | ||
| 71 | |||
| 72 | #define NETIF_F_FCOE_CRC __NETIF_F(FCOE_CRC) | ||
| 73 | #define NETIF_F_FCOE_MTU __NETIF_F(FCOE_MTU) | ||
| 74 | #define NETIF_F_FRAGLIST __NETIF_F(FRAGLIST) | ||
| 75 | #define NETIF_F_FSO __NETIF_F(FSO) | ||
| 76 | #define NETIF_F_GRO __NETIF_F(GRO) | ||
| 77 | #define NETIF_F_GSO __NETIF_F(GSO) | ||
| 78 | #define NETIF_F_GSO_ROBUST __NETIF_F(GSO_ROBUST) | ||
| 79 | #define NETIF_F_HIGHDMA __NETIF_F(HIGHDMA) | ||
| 80 | #define NETIF_F_HW_CSUM __NETIF_F(HW_CSUM) | ||
| 81 | #define NETIF_F_HW_VLAN_FILTER __NETIF_F(HW_VLAN_FILTER) | ||
| 82 | #define NETIF_F_HW_VLAN_RX __NETIF_F(HW_VLAN_RX) | ||
| 83 | #define NETIF_F_HW_VLAN_TX __NETIF_F(HW_VLAN_TX) | ||
| 84 | #define NETIF_F_IP_CSUM __NETIF_F(IP_CSUM) | ||
| 85 | #define NETIF_F_IPV6_CSUM __NETIF_F(IPV6_CSUM) | ||
| 86 | #define NETIF_F_LLTX __NETIF_F(LLTX) | ||
| 87 | #define NETIF_F_LOOPBACK __NETIF_F(LOOPBACK) | ||
| 88 | #define NETIF_F_LRO __NETIF_F(LRO) | ||
| 89 | #define NETIF_F_NETNS_LOCAL __NETIF_F(NETNS_LOCAL) | ||
| 90 | #define NETIF_F_NOCACHE_COPY __NETIF_F(NOCACHE_COPY) | ||
| 91 | #define NETIF_F_NTUPLE __NETIF_F(NTUPLE) | ||
| 92 | #define NETIF_F_RXCSUM __NETIF_F(RXCSUM) | ||
| 93 | #define NETIF_F_RXHASH __NETIF_F(RXHASH) | ||
| 94 | #define NETIF_F_SCTP_CSUM __NETIF_F(SCTP_CSUM) | ||
| 95 | #define NETIF_F_SG __NETIF_F(SG) | ||
| 96 | #define NETIF_F_TSO6 __NETIF_F(TSO6) | ||
| 97 | #define NETIF_F_TSO_ECN __NETIF_F(TSO_ECN) | ||
| 98 | #define NETIF_F_TSO __NETIF_F(TSO) | ||
| 99 | #define NETIF_F_UFO __NETIF_F(UFO) | ||
| 100 | #define NETIF_F_VLAN_CHALLENGED __NETIF_F(VLAN_CHALLENGED) | ||
| 101 | |||
| 102 | /* Features valid for ethtool to change */ | ||
| 103 | /* = all defined minus driver/device-class-related */ | ||
| 104 | #define NETIF_F_NEVER_CHANGE (NETIF_F_VLAN_CHALLENGED | \ | ||
| 105 | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL) | ||
| 106 | |||
| 107 | /* remember that ((t)1 << t_BITS) is undefined in C99 */ | ||
| 108 | #define NETIF_F_ETHTOOL_BITS ((__NETIF_F_BIT(NETDEV_FEATURE_COUNT - 1) | \ | ||
| 109 | (__NETIF_F_BIT(NETDEV_FEATURE_COUNT - 1) - 1)) & \ | ||
| 110 | ~NETIF_F_NEVER_CHANGE) | ||
| 111 | |||
| 112 | /* Segmentation offload feature mask */ | ||
| 113 | #define NETIF_F_GSO_MASK (__NETIF_F_BIT(NETIF_F_GSO_LAST + 1) - \ | ||
| 114 | __NETIF_F_BIT(NETIF_F_GSO_SHIFT)) | ||
| 115 | |||
| 116 | /* List of features with software fallbacks. */ | ||
| 117 | #define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \ | ||
| 118 | NETIF_F_TSO6 | NETIF_F_UFO) | ||
| 119 | |||
| 120 | #define NETIF_F_GEN_CSUM NETIF_F_HW_CSUM | ||
| 121 | #define NETIF_F_V4_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM) | ||
| 122 | #define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM) | ||
| 123 | #define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM) | ||
| 124 | |||
| 125 | #define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) | ||
| 126 | |||
| 127 | #define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \ | ||
| 128 | NETIF_F_FSO) | ||
| 129 | |||
| 130 | /* | ||
| 131 | * If one device supports one of these features, then enable them | ||
| 132 | * for all in netdev_increment_features. | ||
| 133 | */ | ||
| 134 | #define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \ | ||
| 135 | NETIF_F_SG | NETIF_F_HIGHDMA | \ | ||
| 136 | NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED) | ||
| 137 | /* | ||
| 138 | * If one device doesn't support one of these features, then disable it | ||
| 139 | * for all in netdev_increment_features. | ||
| 140 | */ | ||
| 141 | #define NETIF_F_ALL_FOR_ALL (NETIF_F_NOCACHE_COPY | NETIF_F_FSO) | ||
| 142 | |||
| 143 | /* changeable features with no special hardware requirements */ | ||
| 144 | #define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO) | ||
| 145 | |||
| 146 | #endif /* _LINUX_NETDEV_FEATURES_H */ | ||
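NETIF_F_ETHTOOL_BITS builds "all feature bits below NETDEV_FEATURE_COUNT" from the highest valid bit because shifting a 64-bit 1 by the full type width is undefined, as the header's own comment notes. A small standalone sketch of that construction; the feature count used here is an arbitrary example, not the real enum value.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t netdev_features_t;

/* Same helper as __NETIF_F_BIT() in the new header above. */
#define FEATURE_BIT(bit)	((netdev_features_t)1 << (bit))

int main(void)
{
	/* Pretend the enum currently defines this many feature bits
	 * (assumed value, for illustration only). */
	const unsigned int feature_count = 33;

	/*
	 * ((t)1 << t_BITS) is undefined in C99, so "all bits below
	 * feature_count" is built from the highest valid bit plus all
	 * bits beneath it, exactly like NETIF_F_ETHTOOL_BITS does.
	 */
	netdev_features_t all = FEATURE_BIT(feature_count - 1) |
				(FEATURE_BIT(feature_count - 1) - 1);

	printf("mask for %u features = 0x%016llx\n",
	       feature_count, (unsigned long long)all);
	return 0;
}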
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index a82ad4dd306a..a1d109590da4 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
| @@ -43,6 +43,7 @@ | |||
| 43 | #include <linux/rculist.h> | 43 | #include <linux/rculist.h> |
| 44 | #include <linux/dmaengine.h> | 44 | #include <linux/dmaengine.h> |
| 45 | #include <linux/workqueue.h> | 45 | #include <linux/workqueue.h> |
| 46 | #include <linux/dynamic_queue_limits.h> | ||
| 46 | 47 | ||
| 47 | #include <linux/ethtool.h> | 48 | #include <linux/ethtool.h> |
| 48 | #include <net/net_namespace.h> | 49 | #include <net/net_namespace.h> |
| @@ -50,8 +51,10 @@ | |||
| 50 | #ifdef CONFIG_DCB | 51 | #ifdef CONFIG_DCB |
| 51 | #include <net/dcbnl.h> | 52 | #include <net/dcbnl.h> |
| 52 | #endif | 53 | #endif |
| 54 | #include <net/netprio_cgroup.h> | ||
| 55 | |||
| 56 | #include <linux/netdev_features.h> | ||
| 53 | 57 | ||
| 54 | struct vlan_group; | ||
| 55 | struct netpoll_info; | 58 | struct netpoll_info; |
| 56 | struct phy_device; | 59 | struct phy_device; |
| 57 | /* 802.11 specific */ | 60 | /* 802.11 specific */ |
| @@ -141,22 +144,20 @@ static inline bool dev_xmit_complete(int rc) | |||
| 141 | * used. | 144 | * used. |
| 142 | */ | 145 | */ |
| 143 | 146 | ||
| 144 | #if defined(CONFIG_WLAN) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) | 147 | #if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25) |
| 145 | # if defined(CONFIG_MAC80211_MESH) | 148 | # if defined(CONFIG_MAC80211_MESH) |
| 146 | # define LL_MAX_HEADER 128 | 149 | # define LL_MAX_HEADER 128 |
| 147 | # else | 150 | # else |
| 148 | # define LL_MAX_HEADER 96 | 151 | # define LL_MAX_HEADER 96 |
| 149 | # endif | 152 | # endif |
| 150 | #elif defined(CONFIG_TR) || defined(CONFIG_TR_MODULE) | 153 | #elif IS_ENABLED(CONFIG_TR) |
| 151 | # define LL_MAX_HEADER 48 | 154 | # define LL_MAX_HEADER 48 |
| 152 | #else | 155 | #else |
| 153 | # define LL_MAX_HEADER 32 | 156 | # define LL_MAX_HEADER 32 |
| 154 | #endif | 157 | #endif |
| 155 | 158 | ||
| 156 | #if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \ | 159 | #if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \ |
| 157 | !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \ | 160 | !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL) |
| 158 | !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \ | ||
| 159 | !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE) | ||
| 160 | #define MAX_HEADER LL_MAX_HEADER | 161 | #define MAX_HEADER LL_MAX_HEADER |
| 161 | #else | 162 | #else |
| 162 | #define MAX_HEADER (LL_MAX_HEADER + 48) | 163 | #define MAX_HEADER (LL_MAX_HEADER + 48) |
| @@ -212,6 +213,11 @@ enum { | |||
| 212 | #include <linux/cache.h> | 213 | #include <linux/cache.h> |
| 213 | #include <linux/skbuff.h> | 214 | #include <linux/skbuff.h> |
| 214 | 215 | ||
| 216 | #ifdef CONFIG_RPS | ||
| 217 | #include <linux/jump_label.h> | ||
| 218 | extern struct jump_label_key rps_needed; | ||
| 219 | #endif | ||
| 220 | |||
| 215 | struct neighbour; | 221 | struct neighbour; |
| 216 | struct neigh_parms; | 222 | struct neigh_parms; |
| 217 | struct sk_buff; | 223 | struct sk_buff; |
| @@ -272,16 +278,11 @@ struct hh_cache { | |||
| 272 | * | 278 | * |
| 273 | * We could use other alignment values, but we must maintain the | 279 | * We could use other alignment values, but we must maintain the |
| 274 | * relationship HH alignment <= LL alignment. | 280 | * relationship HH alignment <= LL alignment. |
| 275 | * | ||
| 276 | * LL_ALLOCATED_SPACE also takes into account the tailroom the device | ||
| 277 | * may need. | ||
| 278 | */ | 281 | */ |
| 279 | #define LL_RESERVED_SPACE(dev) \ | 282 | #define LL_RESERVED_SPACE(dev) \ |
| 280 | ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) | 283 | ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) |
| 281 | #define LL_RESERVED_SPACE_EXTRA(dev,extra) \ | 284 | #define LL_RESERVED_SPACE_EXTRA(dev,extra) \ |
| 282 | ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) | 285 | ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) |
| 283 | #define LL_ALLOCATED_SPACE(dev) \ | ||
| 284 | ((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) | ||
| 285 | 286 | ||
| 286 | struct header_ops { | 287 | struct header_ops { |
| 287 | int (*create) (struct sk_buff *skb, struct net_device *dev, | 288 | int (*create) (struct sk_buff *skb, struct net_device *dev, |
| @@ -516,11 +517,23 @@ static inline void napi_synchronize(const struct napi_struct *n) | |||
| 516 | #endif | 517 | #endif |
| 517 | 518 | ||
| 518 | enum netdev_queue_state_t { | 519 | enum netdev_queue_state_t { |
| 519 | __QUEUE_STATE_XOFF, | 520 | __QUEUE_STATE_DRV_XOFF, |
| 521 | __QUEUE_STATE_STACK_XOFF, | ||
| 520 | __QUEUE_STATE_FROZEN, | 522 | __QUEUE_STATE_FROZEN, |
| 521 | #define QUEUE_STATE_XOFF_OR_FROZEN ((1 << __QUEUE_STATE_XOFF) | \ | 523 | #define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF) | \ |
| 522 | (1 << __QUEUE_STATE_FROZEN)) | 524 | (1 << __QUEUE_STATE_STACK_XOFF)) |
| 525 | #define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \ | ||
| 526 | (1 << __QUEUE_STATE_FROZEN)) | ||
| 523 | }; | 527 | }; |
| 528 | /* | ||
| 529 | * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The | ||
| 530 | * netif_tx_* functions below are used to manipulate this flag. The | ||
| 531 | * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit | ||
| 532 | * queue independently. The netif_xmit_*stopped functions below are called | ||
| 533 | * to check if the queue has been stopped by the driver or stack (either | ||
| 534 | * of the XOFF bits are set in the state). Drivers should not need to call | ||
| 535 | * netif_xmit*stopped functions, they should only be using netif_tx_*. | ||
| 536 | */ | ||
| 524 | 537 | ||
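The comment above splits the old XOFF bit into a driver-owned and a stack-owned flag: drivers keep using netif_tx_* (which touch only __QUEUE_STATE_DRV_XOFF), while the netif_xmit_*stopped() checks look at either XOFF bit. A rough standalone sketch of those checks, with shortened local names standing in for the kernel's __QUEUE_STATE_* identifiers:

#include <stdio.h>

/* Bit layout copied from enum netdev_queue_state_t above. */
enum {
	QUEUE_STATE_DRV_XOFF_BIT,
	QUEUE_STATE_STACK_XOFF_BIT,
	QUEUE_STATE_FROZEN_BIT,
};

#define ANY_XOFF		((1 << QUEUE_STATE_DRV_XOFF_BIT) | \
				 (1 << QUEUE_STATE_STACK_XOFF_BIT))
#define ANY_XOFF_OR_FROZEN	(ANY_XOFF | (1 << QUEUE_STATE_FROZEN_BIT))

/* Rough shape of the netif_xmit_stopped() check described above:
 * the stack considers either XOFF bit, not just the driver's. */
static int xmit_stopped(unsigned long state)
{
	return state & ANY_XOFF;
}

int main(void)
{
	unsigned long state = 0;

	state |= 1 << QUEUE_STATE_STACK_XOFF_BIT;	/* stack-side pause */
	printf("driver bit clear, queue still stopped: %d\n",
	       xmit_stopped(state));
	return 0;
}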
| 525 | struct netdev_queue { | 538 | struct netdev_queue { |
| 526 | /* | 539 | /* |
| @@ -528,9 +541,8 @@ struct netdev_queue { | |||
| 528 | */ | 541 | */ |
| 529 | struct net_device *dev; | 542 | struct net_device *dev; |
| 530 | struct Qdisc *qdisc; | 543 | struct Qdisc *qdisc; |
| 531 | unsigned long state; | ||
| 532 | struct Qdisc *qdisc_sleeping; | 544 | struct Qdisc *qdisc_sleeping; |
| 533 | #if defined(CONFIG_RPS) || defined(CONFIG_XPS) | 545 | #ifdef CONFIG_SYSFS |
| 534 | struct kobject kobj; | 546 | struct kobject kobj; |
| 535 | #endif | 547 | #endif |
| 536 | #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) | 548 | #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) |
| @@ -545,6 +557,18 @@ struct netdev_queue { | |||
| 545 | * please use this field instead of dev->trans_start | 557 | * please use this field instead of dev->trans_start |
| 546 | */ | 558 | */ |
| 547 | unsigned long trans_start; | 559 | unsigned long trans_start; |
| 560 | |||
| 561 | /* | ||
| 562 | * Number of TX timeouts for this queue | ||
| 563 | * (/sys/class/net/DEV/Q/trans_timeout) | ||
| 564 | */ | ||
| 565 | unsigned long trans_timeout; | ||
| 566 | |||
| 567 | unsigned long state; | ||
| 568 | |||
| 569 | #ifdef CONFIG_BQL | ||
| 570 | struct dql dql; | ||
| 571 | #endif | ||
| 548 | } ____cacheline_aligned_in_smp; | 572 | } ____cacheline_aligned_in_smp; |
| 549 | 573 | ||
| 550 | static inline int netdev_queue_numa_node_read(const struct netdev_queue *q) | 574 | static inline int netdev_queue_numa_node_read(const struct netdev_queue *q) |
| @@ -573,7 +597,7 @@ struct rps_map { | |||
| 573 | struct rcu_head rcu; | 597 | struct rcu_head rcu; |
| 574 | u16 cpus[0]; | 598 | u16 cpus[0]; |
| 575 | }; | 599 | }; |
| 576 | #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + (_num * sizeof(u16))) | 600 | #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16))) |
| 577 | 601 | ||
| 578 | /* | 602 | /* |
| 579 | * The rps_dev_flow structure contains the mapping of a flow to a CPU, the | 603 | * The rps_dev_flow structure contains the mapping of a flow to a CPU, the |
| @@ -597,7 +621,7 @@ struct rps_dev_flow_table { | |||
| 597 | struct rps_dev_flow flows[0]; | 621 | struct rps_dev_flow flows[0]; |
| 598 | }; | 622 | }; |
| 599 | #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \ | 623 | #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \ |
| 600 | (_num * sizeof(struct rps_dev_flow))) | 624 | ((_num) * sizeof(struct rps_dev_flow))) |
| 601 | 625 | ||
| 602 | /* | 626 | /* |
| 603 | * The rps_sock_flow_table contains mappings of flows to the last CPU | 627 | * The rps_sock_flow_table contains mappings of flows to the last CPU |
| @@ -608,7 +632,7 @@ struct rps_sock_flow_table { | |||
| 608 | u16 ents[0]; | 632 | u16 ents[0]; |
| 609 | }; | 633 | }; |
| 610 | #define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \ | 634 | #define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \ |
| 611 | (_num * sizeof(u16))) | 635 | ((_num) * sizeof(u16))) |
| 612 | 636 | ||
| 613 | #define RPS_NO_CPU 0xffff | 637 | #define RPS_NO_CPU 0xffff |
| 614 | 638 | ||
| @@ -660,7 +684,7 @@ struct xps_map { | |||
| 660 | struct rcu_head rcu; | 684 | struct rcu_head rcu; |
| 661 | u16 queues[0]; | 685 | u16 queues[0]; |
| 662 | }; | 686 | }; |
| 663 | #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + (_num * sizeof(u16))) | 687 | #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16))) |
| 664 | #define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \ | 688 | #define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \ |
| 665 | / sizeof(u16)) | 689 | / sizeof(u16)) |
| 666 | 690 | ||
| @@ -683,6 +707,23 @@ struct netdev_tc_txq { | |||
| 683 | u16 offset; | 707 | u16 offset; |
| 684 | }; | 708 | }; |
| 685 | 709 | ||
| 710 | #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) | ||
| 711 | /* | ||
| 712 | * This structure holds information about the device | ||
| 713 | * configured to run the FCoE protocol stack. | ||
| 714 | */ | ||
| 715 | struct netdev_fcoe_hbainfo { | ||
| 716 | char manufacturer[64]; | ||
| 717 | char serial_number[64]; | ||
| 718 | char hardware_version[64]; | ||
| 719 | char driver_version[64]; | ||
| 720 | char optionrom_version[64]; | ||
| 721 | char firmware_version[64]; | ||
| 722 | char model[256]; | ||
| 723 | char model_description[256]; | ||
| 724 | }; | ||
| 725 | #endif | ||
| 726 | |||
| 686 | /* | 727 | /* |
| 687 | * This structure defines the management hooks for network devices. | 728 | * This structure defines the management hooks for network devices. |
| 688 | * The following hooks can be defined; unless noted otherwise, they are | 729 | * The following hooks can be defined; unless noted otherwise, they are |
| @@ -767,11 +808,11 @@ struct netdev_tc_txq { | |||
| 767 | * 3. Update dev->stats asynchronously and atomically, and define | 808 | * 3. Update dev->stats asynchronously and atomically, and define |
| 768 | * neither operation. | 809 | * neither operation. |
| 769 | * | 810 | * |
| 770 | * void (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid); | 811 | * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid); |
| 771 | * If device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER) | 812 | * If device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER) |
| 772 | * this function is called when a VLAN id is registered. | 813 | * this function is called when a VLAN id is registered. |
| 773 | * | 814 | * |
| 774 | * void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid); | 815 | * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid); |
| 775 | * If device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER) | 816 | * If device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER) |
| 776 | * this function is called when a VLAN id is unregistered. | 817 | * this function is called when a VLAN id is unregistered. |
| 777 | * | 818 | * |
| @@ -823,6 +864,13 @@ struct netdev_tc_txq { | |||
| 823 | * perform necessary setup and returns 1 to indicate the device is set up | 864 | * perform necessary setup and returns 1 to indicate the device is set up |
| 824 | * successfully to perform DDP on this I/O, otherwise this returns 0. | 865 | * successfully to perform DDP on this I/O, otherwise this returns 0. |
| 825 | * | 866 | * |
| 867 | * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev, | ||
| 868 | * struct netdev_fcoe_hbainfo *hbainfo); | ||
| 869 | * Called when the FCoE Protocol stack wants information on the underlying | ||
| 870 | * device. This information is utilized by the FCoE protocol stack to | ||
| 871 | * register attributes with the Fibre Channel management service as per the | ||
| 872 | * FC-GS Fabric Device Management Information (FDMI) specification. | ||
| 873 | * | ||
| 826 | * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type); | 874 | * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type); |
| 827 | * Called when the underlying device wants to override default World Wide | 875 | * Called when the underlying device wants to override default World Wide |
| 828 | * Name (WWN) generation mechanism in FCoE protocol stack to pass its own | 876 | * Name (WWN) generation mechanism in FCoE protocol stack to pass its own |
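A minimal sketch of how a driver could satisfy the new ndo_fcoe_get_hbainfo hook, assuming a hypothetical "foo" driver with firmware and serial strings cached in its private data; only struct netdev_fcoe_hbainfo and the hook prototype come from this header, and the member is only present when CONFIG_FCOE is enabled.

```c
#include <linux/netdevice.h>
#include <linux/string.h>

/* Hypothetical driver-private data; not part of this header. */
struct foo_priv {
	char fw_ver[64];
	char serial[64];
};

static int foo_fcoe_get_hbainfo(struct net_device *dev,
				struct netdev_fcoe_hbainfo *info)
{
	struct foo_priv *priv = netdev_priv(dev);

	memset(info, 0, sizeof(*info));
	/* Fixed-size strings consumed by the FCoE stack for FDMI registration. */
	strlcpy(info->manufacturer, "Foo Networks", sizeof(info->manufacturer));
	strlcpy(info->serial_number, priv->serial, sizeof(info->serial_number));
	strlcpy(info->firmware_version, priv->fw_ver,
		sizeof(info->firmware_version));
	strlcpy(info->driver_version, "1.0.0", sizeof(info->driver_version));
	strlcpy(info->model, "Foo CNA", sizeof(info->model));
	strlcpy(info->model_description, "Foo converged network adapter",
		sizeof(info->model_description));
	return 0;
}

/* Wired up alongside the other FCoE hooks (requires CONFIG_FCOE). */
static const struct net_device_ops foo_netdev_ops = {
	.ndo_fcoe_get_hbainfo = foo_fcoe_get_hbainfo,
};
```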
| @@ -845,12 +893,13 @@ struct netdev_tc_txq { | |||
| 845 | * Called to release previously enslaved netdev. | 893 | * Called to release previously enslaved netdev. |
| 846 | * | 894 | * |
| 847 | * Feature/offload setting functions. | 895 | * Feature/offload setting functions. |
| 848 | * u32 (*ndo_fix_features)(struct net_device *dev, u32 features); | 896 | * netdev_features_t (*ndo_fix_features)(struct net_device *dev, |
| 897 | * netdev_features_t features); | ||
| 849 | * Adjusts the requested feature flags according to device-specific | 898 | * Adjusts the requested feature flags according to device-specific |
| 850 | * constraints, and returns the resulting flags. Must not modify | 899 | * constraints, and returns the resulting flags. Must not modify |
| 851 | * the device state. | 900 | * the device state. |
| 852 | * | 901 | * |
| 853 | * int (*ndo_set_features)(struct net_device *dev, u32 features); | 902 | * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features); |
| 854 | * Called to update device configuration to new features. Passed | 903 | * Called to update device configuration to new features. Passed |
| 855 | * feature set might be less than what was returned by ndo_fix_features(). | 904 | * feature set might be less than what was returned by ndo_fix_features(). |
| 856 | * Must return >0 or -errno if it changed dev->features itself. | 905 | * Must return >0 or -errno if it changed dev->features itself. |
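To make the retyped signatures concrete, here is a hedged sketch of the driver pair using the new netdev_features_t; the TSO-requires-SG constraint and all foo_* names are purely illustrative, not any particular driver's policy.

```c
#include <linux/netdevice.h>

/* Illustrative feature hooks using the netdev_features_t typedef. */
static netdev_features_t foo_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	/* Scatter/gather is a prerequisite for TSO on this hypothetical HW. */
	if (!(features & NETIF_F_SG))
		features &= ~NETIF_F_ALL_TSO;
	return features;
}

static int foo_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if (changed & NETIF_F_RXCSUM) {
		/* Reprogram RX checksum offload on the (hypothetical) hardware. */
	}
	return 0;	/* 0: core updates dev->features for us */
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_fix_features = foo_fix_features,
	.ndo_set_features = foo_set_features,
};
```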
| @@ -885,9 +934,9 @@ struct net_device_ops { | |||
| 885 | struct rtnl_link_stats64 *storage); | 934 | struct rtnl_link_stats64 *storage); |
| 886 | struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); | 935 | struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); |
| 887 | 936 | ||
| 888 | void (*ndo_vlan_rx_add_vid)(struct net_device *dev, | 937 | int (*ndo_vlan_rx_add_vid)(struct net_device *dev, |
| 889 | unsigned short vid); | 938 | unsigned short vid); |
| 890 | void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, | 939 | int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, |
| 891 | unsigned short vid); | 940 | unsigned short vid); |
| 892 | #ifdef CONFIG_NET_POLL_CONTROLLER | 941 | #ifdef CONFIG_NET_POLL_CONTROLLER |
| 893 | void (*ndo_poll_controller)(struct net_device *dev); | 942 | void (*ndo_poll_controller)(struct net_device *dev); |
| @@ -912,7 +961,7 @@ struct net_device_ops { | |||
| 912 | int (*ndo_get_vf_port)(struct net_device *dev, | 961 | int (*ndo_get_vf_port)(struct net_device *dev, |
| 913 | int vf, struct sk_buff *skb); | 962 | int vf, struct sk_buff *skb); |
| 914 | int (*ndo_setup_tc)(struct net_device *dev, u8 tc); | 963 | int (*ndo_setup_tc)(struct net_device *dev, u8 tc); |
| 915 | #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) | 964 | #if IS_ENABLED(CONFIG_FCOE) |
| 916 | int (*ndo_fcoe_enable)(struct net_device *dev); | 965 | int (*ndo_fcoe_enable)(struct net_device *dev); |
| 917 | int (*ndo_fcoe_disable)(struct net_device *dev); | 966 | int (*ndo_fcoe_disable)(struct net_device *dev); |
| 918 | int (*ndo_fcoe_ddp_setup)(struct net_device *dev, | 967 | int (*ndo_fcoe_ddp_setup)(struct net_device *dev, |
| @@ -925,9 +974,11 @@ struct net_device_ops { | |||
| 925 | u16 xid, | 974 | u16 xid, |
| 926 | struct scatterlist *sgl, | 975 | struct scatterlist *sgl, |
| 927 | unsigned int sgc); | 976 | unsigned int sgc); |
| 977 | int (*ndo_fcoe_get_hbainfo)(struct net_device *dev, | ||
| 978 | struct netdev_fcoe_hbainfo *hbainfo); | ||
| 928 | #endif | 979 | #endif |
| 929 | 980 | ||
| 930 | #if defined(CONFIG_LIBFCOE) || defined(CONFIG_LIBFCOE_MODULE) | 981 | #if IS_ENABLED(CONFIG_LIBFCOE) |
| 931 | #define NETDEV_FCOE_WWNN 0 | 982 | #define NETDEV_FCOE_WWNN 0 |
| 932 | #define NETDEV_FCOE_WWPN 1 | 983 | #define NETDEV_FCOE_WWPN 1 |
| 933 | int (*ndo_fcoe_get_wwn)(struct net_device *dev, | 984 | int (*ndo_fcoe_get_wwn)(struct net_device *dev, |
| @@ -944,10 +995,12 @@ struct net_device_ops { | |||
| 944 | struct net_device *slave_dev); | 995 | struct net_device *slave_dev); |
| 945 | int (*ndo_del_slave)(struct net_device *dev, | 996 | int (*ndo_del_slave)(struct net_device *dev, |
| 946 | struct net_device *slave_dev); | 997 | struct net_device *slave_dev); |
| 947 | u32 (*ndo_fix_features)(struct net_device *dev, | 998 | netdev_features_t (*ndo_fix_features)(struct net_device *dev, |
| 948 | u32 features); | 999 | netdev_features_t features); |
| 949 | int (*ndo_set_features)(struct net_device *dev, | 1000 | int (*ndo_set_features)(struct net_device *dev, |
| 950 | u32 features); | 1001 | netdev_features_t features); |
| 1002 | int (*ndo_neigh_construct)(struct neighbour *n); | ||
| 1003 | void (*ndo_neigh_destroy)(struct neighbour *n); | ||
| 951 | }; | 1004 | }; |
| 952 | 1005 | ||
| 953 | /* | 1006 | /* |
| @@ -997,91 +1050,13 @@ struct net_device { | |||
| 997 | struct list_head unreg_list; | 1050 | struct list_head unreg_list; |
| 998 | 1051 | ||
| 999 | /* currently active device features */ | 1052 | /* currently active device features */ |
| 1000 | u32 features; | 1053 | netdev_features_t features; |
| 1001 | /* user-changeable features */ | 1054 | /* user-changeable features */ |
| 1002 | u32 hw_features; | 1055 | netdev_features_t hw_features; |
| 1003 | /* user-requested features */ | 1056 | /* user-requested features */ |
| 1004 | u32 wanted_features; | 1057 | netdev_features_t wanted_features; |
| 1005 | /* mask of features inheritable by VLAN devices */ | 1058 | /* mask of features inheritable by VLAN devices */ |
| 1006 | u32 vlan_features; | 1059 | netdev_features_t vlan_features; |
| 1007 | |||
| 1008 | /* Net device feature bits; if you change something, | ||
| 1009 | * also update netdev_features_strings[] in ethtool.c */ | ||
| 1010 | |||
| 1011 | #define NETIF_F_SG 1 /* Scatter/gather IO. */ | ||
| 1012 | #define NETIF_F_IP_CSUM 2 /* Can checksum TCP/UDP over IPv4. */ | ||
| 1013 | #define NETIF_F_NO_CSUM 4 /* Does not require checksum. F.e. loopack. */ | ||
| 1014 | #define NETIF_F_HW_CSUM 8 /* Can checksum all the packets. */ | ||
| 1015 | #define NETIF_F_IPV6_CSUM 16 /* Can checksum TCP/UDP over IPV6 */ | ||
| 1016 | #define NETIF_F_HIGHDMA 32 /* Can DMA to high memory. */ | ||
| 1017 | #define NETIF_F_FRAGLIST 64 /* Scatter/gather IO. */ | ||
| 1018 | #define NETIF_F_HW_VLAN_TX 128 /* Transmit VLAN hw acceleration */ | ||
| 1019 | #define NETIF_F_HW_VLAN_RX 256 /* Receive VLAN hw acceleration */ | ||
| 1020 | #define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */ | ||
| 1021 | #define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */ | ||
| 1022 | #define NETIF_F_GSO 2048 /* Enable software GSO. */ | ||
| 1023 | #define NETIF_F_LLTX 4096 /* LockLess TX - deprecated. Please */ | ||
| 1024 | /* do not use LLTX in new drivers */ | ||
| 1025 | #define NETIF_F_NETNS_LOCAL 8192 /* Does not change network namespaces */ | ||
| 1026 | #define NETIF_F_GRO 16384 /* Generic receive offload */ | ||
| 1027 | #define NETIF_F_LRO 32768 /* large receive offload */ | ||
| 1028 | |||
| 1029 | /* the GSO_MASK reserves bits 16 through 23 */ | ||
| 1030 | #define NETIF_F_FCOE_CRC (1 << 24) /* FCoE CRC32 */ | ||
| 1031 | #define NETIF_F_SCTP_CSUM (1 << 25) /* SCTP checksum offload */ | ||
| 1032 | #define NETIF_F_FCOE_MTU (1 << 26) /* Supports max FCoE MTU, 2158 bytes*/ | ||
| 1033 | #define NETIF_F_NTUPLE (1 << 27) /* N-tuple filters supported */ | ||
| 1034 | #define NETIF_F_RXHASH (1 << 28) /* Receive hashing offload */ | ||
| 1035 | #define NETIF_F_RXCSUM (1 << 29) /* Receive checksumming offload */ | ||
| 1036 | #define NETIF_F_NOCACHE_COPY (1 << 30) /* Use no-cache copyfromuser */ | ||
| 1037 | #define NETIF_F_LOOPBACK (1 << 31) /* Enable loopback */ | ||
| 1038 | |||
| 1039 | /* Segmentation offload features */ | ||
| 1040 | #define NETIF_F_GSO_SHIFT 16 | ||
| 1041 | #define NETIF_F_GSO_MASK 0x00ff0000 | ||
| 1042 | #define NETIF_F_TSO (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT) | ||
| 1043 | #define NETIF_F_UFO (SKB_GSO_UDP << NETIF_F_GSO_SHIFT) | ||
| 1044 | #define NETIF_F_GSO_ROBUST (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT) | ||
| 1045 | #define NETIF_F_TSO_ECN (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT) | ||
| 1046 | #define NETIF_F_TSO6 (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT) | ||
| 1047 | #define NETIF_F_FSO (SKB_GSO_FCOE << NETIF_F_GSO_SHIFT) | ||
| 1048 | |||
| 1049 | /* Features valid for ethtool to change */ | ||
| 1050 | /* = all defined minus driver/device-class-related */ | ||
| 1051 | #define NETIF_F_NEVER_CHANGE (NETIF_F_VLAN_CHALLENGED | \ | ||
| 1052 | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL) | ||
| 1053 | #define NETIF_F_ETHTOOL_BITS (0xff3fffff & ~NETIF_F_NEVER_CHANGE) | ||
| 1054 | |||
| 1055 | /* List of features with software fallbacks. */ | ||
| 1056 | #define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \ | ||
| 1057 | NETIF_F_TSO6 | NETIF_F_UFO) | ||
| 1058 | |||
| 1059 | |||
| 1060 | #define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM) | ||
| 1061 | #define NETIF_F_V4_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM) | ||
| 1062 | #define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM) | ||
| 1063 | #define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM) | ||
| 1064 | |||
| 1065 | #define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) | ||
| 1066 | |||
| 1067 | #define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \ | ||
| 1068 | NETIF_F_FSO) | ||
| 1069 | |||
| 1070 | /* | ||
| 1071 | * If one device supports one of these features, then enable them | ||
| 1072 | * for all in netdev_increment_features. | ||
| 1073 | */ | ||
| 1074 | #define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \ | ||
| 1075 | NETIF_F_SG | NETIF_F_HIGHDMA | \ | ||
| 1076 | NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED) | ||
| 1077 | /* | ||
| 1078 | * If one device doesn't support one of these features, then disable it | ||
| 1079 | * for all in netdev_increment_features. | ||
| 1080 | */ | ||
| 1081 | #define NETIF_F_ALL_FOR_ALL (NETIF_F_NOCACHE_COPY | NETIF_F_FSO) | ||
| 1082 | |||
| 1083 | /* changeable features with no special hardware requirements */ | ||
| 1084 | #define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO) | ||
| 1085 | 1060 | ||
| 1086 | /* Interface index. Unique device identifier */ | 1061 | /* Interface index. Unique device identifier */ |
| 1087 | int ifindex; | 1062 | int ifindex; |
| @@ -1132,6 +1107,7 @@ struct net_device { | |||
| 1132 | unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */ | 1107 | unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */ |
| 1133 | unsigned char addr_assign_type; /* hw address assignment type */ | 1108 | unsigned char addr_assign_type; /* hw address assignment type */ |
| 1134 | unsigned char addr_len; /* hardware address length */ | 1109 | unsigned char addr_len; /* hardware address length */ |
| 1110 | unsigned char neigh_priv_len; | ||
| 1135 | unsigned short dev_id; /* for shared network cards */ | 1111 | unsigned short dev_id; /* for shared network cards */ |
| 1136 | 1112 | ||
| 1137 | spinlock_t addr_list_lock; | 1113 | spinlock_t addr_list_lock; |
| @@ -1144,11 +1120,11 @@ struct net_device { | |||
| 1144 | 1120 | ||
| 1145 | /* Protocol specific pointers */ | 1121 | /* Protocol specific pointers */ |
| 1146 | 1122 | ||
| 1147 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | 1123 | #if IS_ENABLED(CONFIG_VLAN_8021Q) |
| 1148 | struct vlan_group __rcu *vlgrp; /* VLAN group */ | 1124 | struct vlan_info __rcu *vlan_info; /* VLAN info */ |
| 1149 | #endif | 1125 | #endif |
| 1150 | #ifdef CONFIG_NET_DSA | 1126 | #if IS_ENABLED(CONFIG_NET_DSA) |
| 1151 | void *dsa_ptr; /* dsa specific data */ | 1127 | struct dsa_switch_tree *dsa_ptr; /* dsa specific data */ |
| 1152 | #endif | 1128 | #endif |
| 1153 | void *atalk_ptr; /* AppleTalk link */ | 1129 | void *atalk_ptr; /* AppleTalk link */ |
| 1154 | struct in_device __rcu *ip_ptr; /* IPv4 specific data */ | 1130 | struct in_device __rcu *ip_ptr; /* IPv4 specific data */ |
| @@ -1184,9 +1160,11 @@ struct net_device { | |||
| 1184 | 1160 | ||
| 1185 | unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ | 1161 | unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ |
| 1186 | 1162 | ||
| 1187 | #if defined(CONFIG_RPS) || defined(CONFIG_XPS) | 1163 | #ifdef CONFIG_SYSFS |
| 1188 | struct kset *queues_kset; | 1164 | struct kset *queues_kset; |
| 1165 | #endif | ||
| 1189 | 1166 | ||
| 1167 | #ifdef CONFIG_RPS | ||
| 1190 | struct netdev_rx_queue *_rx; | 1168 | struct netdev_rx_queue *_rx; |
| 1191 | 1169 | ||
| 1192 | /* Number of RX queues allocated at register_netdev() time */ | 1170 | /* Number of RX queues allocated at register_netdev() time */ |
| @@ -1308,10 +1286,13 @@ struct net_device { | |||
| 1308 | struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; | 1286 | struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; |
| 1309 | u8 prio_tc_map[TC_BITMASK + 1]; | 1287 | u8 prio_tc_map[TC_BITMASK + 1]; |
| 1310 | 1288 | ||
| 1311 | #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) | 1289 | #if IS_ENABLED(CONFIG_FCOE) |
| 1312 | /* max exchange id for FCoE LRO by ddp */ | 1290 | /* max exchange id for FCoE LRO by ddp */ |
| 1313 | unsigned int fcoe_ddp_xid; | 1291 | unsigned int fcoe_ddp_xid; |
| 1314 | #endif | 1292 | #endif |
| 1293 | #if IS_ENABLED(CONFIG_NETPRIO_CGROUP) | ||
| 1294 | struct netprio_map __rcu *priomap; | ||
| 1295 | #endif | ||
| 1315 | /* phy device may attach itself for hardware timestamping */ | 1296 | /* phy device may attach itself for hardware timestamping */ |
| 1316 | struct phy_device *phydev; | 1297 | struct phy_device *phydev; |
| 1317 | 1298 | ||
| @@ -1515,7 +1496,7 @@ struct packet_type { | |||
| 1515 | struct packet_type *, | 1496 | struct packet_type *, |
| 1516 | struct net_device *); | 1497 | struct net_device *); |
| 1517 | struct sk_buff *(*gso_segment)(struct sk_buff *skb, | 1498 | struct sk_buff *(*gso_segment)(struct sk_buff *skb, |
| 1518 | u32 features); | 1499 | netdev_features_t features); |
| 1519 | int (*gso_send_check)(struct sk_buff *skb); | 1500 | int (*gso_send_check)(struct sk_buff *skb); |
| 1520 | struct sk_buff **(*gro_receive)(struct sk_buff **head, | 1501 | struct sk_buff **(*gro_receive)(struct sk_buff **head, |
| 1521 | struct sk_buff *skb); | 1502 | struct sk_buff *skb); |
| @@ -1783,7 +1764,7 @@ extern void __netif_schedule(struct Qdisc *q); | |||
| 1783 | 1764 | ||
| 1784 | static inline void netif_schedule_queue(struct netdev_queue *txq) | 1765 | static inline void netif_schedule_queue(struct netdev_queue *txq) |
| 1785 | { | 1766 | { |
| 1786 | if (!test_bit(__QUEUE_STATE_XOFF, &txq->state)) | 1767 | if (!(txq->state & QUEUE_STATE_ANY_XOFF)) |
| 1787 | __netif_schedule(txq->qdisc); | 1768 | __netif_schedule(txq->qdisc); |
| 1788 | } | 1769 | } |
| 1789 | 1770 | ||
| @@ -1797,7 +1778,7 @@ static inline void netif_tx_schedule_all(struct net_device *dev) | |||
| 1797 | 1778 | ||
| 1798 | static inline void netif_tx_start_queue(struct netdev_queue *dev_queue) | 1779 | static inline void netif_tx_start_queue(struct netdev_queue *dev_queue) |
| 1799 | { | 1780 | { |
| 1800 | clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state); | 1781 | clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); |
| 1801 | } | 1782 | } |
| 1802 | 1783 | ||
| 1803 | /** | 1784 | /** |
| @@ -1829,7 +1810,7 @@ static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue) | |||
| 1829 | return; | 1810 | return; |
| 1830 | } | 1811 | } |
| 1831 | #endif | 1812 | #endif |
| 1832 | if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state)) | 1813 | if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) |
| 1833 | __netif_schedule(dev_queue->qdisc); | 1814 | __netif_schedule(dev_queue->qdisc); |
| 1834 | } | 1815 | } |
| 1835 | 1816 | ||
| @@ -1861,7 +1842,7 @@ static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) | |||
| 1861 | pr_info("netif_stop_queue() cannot be called before register_netdev()\n"); | 1842 | pr_info("netif_stop_queue() cannot be called before register_netdev()\n"); |
| 1862 | return; | 1843 | return; |
| 1863 | } | 1844 | } |
| 1864 | set_bit(__QUEUE_STATE_XOFF, &dev_queue->state); | 1845 | set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); |
| 1865 | } | 1846 | } |
| 1866 | 1847 | ||
| 1867 | /** | 1848 | /** |
| @@ -1888,7 +1869,7 @@ static inline void netif_tx_stop_all_queues(struct net_device *dev) | |||
| 1888 | 1869 | ||
| 1889 | static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue) | 1870 | static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue) |
| 1890 | { | 1871 | { |
| 1891 | return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state); | 1872 | return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); |
| 1892 | } | 1873 | } |
| 1893 | 1874 | ||
| 1894 | /** | 1875 | /** |
| @@ -1902,9 +1883,68 @@ static inline int netif_queue_stopped(const struct net_device *dev) | |||
| 1902 | return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0)); | 1883 | return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0)); |
| 1903 | } | 1884 | } |
| 1904 | 1885 | ||
| 1905 | static inline int netif_tx_queue_frozen_or_stopped(const struct netdev_queue *dev_queue) | 1886 | static inline int netif_xmit_stopped(const struct netdev_queue *dev_queue) |
| 1887 | { | ||
| 1888 | return dev_queue->state & QUEUE_STATE_ANY_XOFF; | ||
| 1889 | } | ||
| 1890 | |||
| 1891 | static inline int netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue) | ||
| 1892 | { | ||
| 1893 | return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN; | ||
| 1894 | } | ||
| 1895 | |||
| 1896 | static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue, | ||
| 1897 | unsigned int bytes) | ||
| 1898 | { | ||
| 1899 | #ifdef CONFIG_BQL | ||
| 1900 | dql_queued(&dev_queue->dql, bytes); | ||
| 1901 | if (unlikely(dql_avail(&dev_queue->dql) < 0)) { | ||
| 1902 | set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); | ||
| 1903 | if (unlikely(dql_avail(&dev_queue->dql) >= 0)) | ||
| 1904 | clear_bit(__QUEUE_STATE_STACK_XOFF, | ||
| 1905 | &dev_queue->state); | ||
| 1906 | } | ||
| 1907 | #endif | ||
| 1908 | } | ||
| 1909 | |||
| 1910 | static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes) | ||
| 1906 | { | 1911 | { |
| 1907 | return dev_queue->state & QUEUE_STATE_XOFF_OR_FROZEN; | 1912 | netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes); |
| 1913 | } | ||
| 1914 | |||
| 1915 | static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, | ||
| 1916 | unsigned pkts, unsigned bytes) | ||
| 1917 | { | ||
| 1918 | #ifdef CONFIG_BQL | ||
| 1919 | if (likely(bytes)) { | ||
| 1920 | dql_completed(&dev_queue->dql, bytes); | ||
| 1921 | if (unlikely(test_bit(__QUEUE_STATE_STACK_XOFF, | ||
| 1922 | &dev_queue->state) && | ||
| 1923 | dql_avail(&dev_queue->dql) >= 0)) { | ||
| 1924 | if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, | ||
| 1925 | &dev_queue->state)) | ||
| 1926 | netif_schedule_queue(dev_queue); | ||
| 1927 | } | ||
| 1928 | } | ||
| 1929 | #endif | ||
| 1930 | } | ||
| 1931 | |||
| 1932 | static inline void netdev_completed_queue(struct net_device *dev, | ||
| 1933 | unsigned pkts, unsigned bytes) | ||
| 1934 | { | ||
| 1935 | netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes); | ||
| 1936 | } | ||
| 1937 | |||
| 1938 | static inline void netdev_tx_reset_queue(struct netdev_queue *q) | ||
| 1939 | { | ||
| 1940 | #ifdef CONFIG_BQL | ||
| 1941 | dql_reset(&q->dql); | ||
| 1942 | #endif | ||
| 1943 | } | ||
| 1944 | |||
| 1945 | static inline void netdev_reset_queue(struct net_device *dev_queue) | ||
| 1946 | { | ||
| 1947 | netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0)); | ||
| 1908 | } | 1948 | } |
| 1909 | 1949 | ||
| 1910 | /** | 1950 | /** |
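The new byte-queue-limit helpers above are meant to bracket a driver's TX path. The following rough sketch shows where the calls land, assuming hypothetical foo_* functions and a driver-maintained per-queue packet/byte tally in its completion handler.

```c
#include <linux/netdevice.h>

/* In the transmit path, after a packet has been posted to the HW ring: */
static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq =
		netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	unsigned int len = skb->len;	/* capture before HW may free the skb */

	/* ... post skb to the hardware descriptor ring ... */

	netdev_tx_sent_queue(txq, len);	/* account bytes handed to the HW */
	return NETDEV_TX_OK;
}

/* In the TX completion handler (e.g. NAPI poll): */
static void foo_tx_clean(struct net_device *dev, unsigned int queue,
			 unsigned int pkts, unsigned int bytes)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);

	/* Report completed work so BQL can adjust the in-flight limit and
	 * reschedule the queue once __QUEUE_STATE_STACK_XOFF clears.
	 */
	netdev_tx_completed_queue(txq, pkts, bytes);
}

/* On ring reset (e.g. ifdown/ifup), drop the accounted state: */
static void foo_tx_ring_reset(struct net_device *dev, unsigned int queue)
{
	netdev_tx_reset_queue(netdev_get_tx_queue(dev, queue));
}
```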
| @@ -1991,7 +2031,7 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) | |||
| 1991 | if (netpoll_trap()) | 2031 | if (netpoll_trap()) |
| 1992 | return; | 2032 | return; |
| 1993 | #endif | 2033 | #endif |
| 1994 | if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state)) | 2034 | if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) |
| 1995 | __netif_schedule(txq->qdisc); | 2035 | __netif_schedule(txq->qdisc); |
| 1996 | } | 2036 | } |
| 1997 | 2037 | ||
| @@ -2520,7 +2560,8 @@ extern int netdev_set_master(struct net_device *dev, struct net_device *master) | |||
| 2520 | extern int netdev_set_bond_master(struct net_device *dev, | 2560 | extern int netdev_set_bond_master(struct net_device *dev, |
| 2521 | struct net_device *master); | 2561 | struct net_device *master); |
| 2522 | extern int skb_checksum_help(struct sk_buff *skb); | 2562 | extern int skb_checksum_help(struct sk_buff *skb); |
| 2523 | extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features); | 2563 | extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, |
| 2564 | netdev_features_t features); | ||
| 2524 | #ifdef CONFIG_BUG | 2565 | #ifdef CONFIG_BUG |
| 2525 | extern void netdev_rx_csum_fault(struct net_device *dev); | 2566 | extern void netdev_rx_csum_fault(struct net_device *dev); |
| 2526 | #else | 2567 | #else |
| @@ -2549,11 +2590,13 @@ extern const char *netdev_drivername(const struct net_device *dev); | |||
| 2549 | 2590 | ||
| 2550 | extern void linkwatch_run_queue(void); | 2591 | extern void linkwatch_run_queue(void); |
| 2551 | 2592 | ||
| 2552 | static inline u32 netdev_get_wanted_features(struct net_device *dev) | 2593 | static inline netdev_features_t netdev_get_wanted_features( |
| 2594 | struct net_device *dev) | ||
| 2553 | { | 2595 | { |
| 2554 | return (dev->features & ~dev->hw_features) | dev->wanted_features; | 2596 | return (dev->features & ~dev->hw_features) | dev->wanted_features; |
| 2555 | } | 2597 | } |
| 2556 | u32 netdev_increment_features(u32 all, u32 one, u32 mask); | 2598 | netdev_features_t netdev_increment_features(netdev_features_t all, |
| 2599 | netdev_features_t one, netdev_features_t mask); | ||
| 2557 | int __netdev_update_features(struct net_device *dev); | 2600 | int __netdev_update_features(struct net_device *dev); |
| 2558 | void netdev_update_features(struct net_device *dev); | 2601 | void netdev_update_features(struct net_device *dev); |
| 2559 | void netdev_change_features(struct net_device *dev); | 2602 | void netdev_change_features(struct net_device *dev); |
| @@ -2561,21 +2604,31 @@ void netdev_change_features(struct net_device *dev); | |||
| 2561 | void netif_stacked_transfer_operstate(const struct net_device *rootdev, | 2604 | void netif_stacked_transfer_operstate(const struct net_device *rootdev, |
| 2562 | struct net_device *dev); | 2605 | struct net_device *dev); |
| 2563 | 2606 | ||
| 2564 | u32 netif_skb_features(struct sk_buff *skb); | 2607 | netdev_features_t netif_skb_features(struct sk_buff *skb); |
| 2565 | 2608 | ||
| 2566 | static inline int net_gso_ok(u32 features, int gso_type) | 2609 | static inline int net_gso_ok(netdev_features_t features, int gso_type) |
| 2567 | { | 2610 | { |
| 2568 | int feature = gso_type << NETIF_F_GSO_SHIFT; | 2611 | netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT; |
| 2612 | |||
| 2613 | /* check flags correspondence */ | ||
| 2614 | BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT)); | ||
| 2615 | BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT)); | ||
| 2616 | BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT)); | ||
| 2617 | BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT)); | ||
| 2618 | BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT)); | ||
| 2619 | BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT)); | ||
| 2620 | |||
| 2569 | return (features & feature) == feature; | 2621 | return (features & feature) == feature; |
| 2570 | } | 2622 | } |
| 2571 | 2623 | ||
| 2572 | static inline int skb_gso_ok(struct sk_buff *skb, u32 features) | 2624 | static inline int skb_gso_ok(struct sk_buff *skb, netdev_features_t features) |
| 2573 | { | 2625 | { |
| 2574 | return net_gso_ok(features, skb_shinfo(skb)->gso_type) && | 2626 | return net_gso_ok(features, skb_shinfo(skb)->gso_type) && |
| 2575 | (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); | 2627 | (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); |
| 2576 | } | 2628 | } |
| 2577 | 2629 | ||
| 2578 | static inline int netif_needs_gso(struct sk_buff *skb, int features) | 2630 | static inline int netif_needs_gso(struct sk_buff *skb, |
| 2631 | netdev_features_t features) | ||
| 2579 | { | 2632 | { |
| 2580 | return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || | 2633 | return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || |
| 2581 | unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); | 2634 | unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); |
| @@ -2594,22 +2647,6 @@ static inline int netif_is_bond_slave(struct net_device *dev) | |||
| 2594 | 2647 | ||
| 2595 | extern struct pernet_operations __net_initdata loopback_net_ops; | 2648 | extern struct pernet_operations __net_initdata loopback_net_ops; |
| 2596 | 2649 | ||
| 2597 | static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev) | ||
| 2598 | { | ||
| 2599 | if (dev->features & NETIF_F_RXCSUM) | ||
| 2600 | return 1; | ||
| 2601 | if (!dev->ethtool_ops || !dev->ethtool_ops->get_rx_csum) | ||
| 2602 | return 0; | ||
| 2603 | return dev->ethtool_ops->get_rx_csum(dev); | ||
| 2604 | } | ||
| 2605 | |||
| 2606 | static inline u32 dev_ethtool_get_flags(struct net_device *dev) | ||
| 2607 | { | ||
| 2608 | if (!dev->ethtool_ops || !dev->ethtool_ops->get_flags) | ||
| 2609 | return 0; | ||
| 2610 | return dev->ethtool_ops->get_flags(dev); | ||
| 2611 | } | ||
| 2612 | |||
| 2613 | /* Logging, debugging and troubleshooting/diagnostic helpers. */ | 2650 | /* Logging, debugging and troubleshooting/diagnostic helpers. */ |
| 2614 | 2651 | ||
| 2615 | /* netdev_printk helpers, similar to dev_printk */ | 2652 | /* netdev_printk helpers, similar to dev_printk */ |
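Before the netfilter changes, a simplified sketch (not the actual core transmit code) of how the typed helpers declared in this header compose: compute the effective features for one skb, then software-segment only when the device cannot handle it. Error handling and the real segment walk are omitted.

```c
#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int foo_xmit_one(struct sk_buff *skb, struct net_device *dev)
{
	netdev_features_t features = netif_skb_features(skb);

	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs = skb_gso_segment(skb, features);

		if (IS_ERR(segs))
			return PTR_ERR(segs);
		/* ... transmit each segment in the segs list ... */
		return 0;
	}

	/* ... hand the skb to ndo_start_xmit() as-is ... */
	return 0;
}
```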
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 857f5026ced6..b809265607d0 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h | |||
| @@ -162,6 +162,24 @@ extern struct ctl_path nf_net_ipv4_netfilter_sysctl_path[]; | |||
| 162 | 162 | ||
| 163 | extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; | 163 | extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; |
| 164 | 164 | ||
| 165 | #if defined(CONFIG_JUMP_LABEL) | ||
| 166 | #include <linux/jump_label.h> | ||
| 167 | extern struct jump_label_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; | ||
| 168 | static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook) | ||
| 169 | { | ||
| 170 | if (__builtin_constant_p(pf) && | ||
| 171 | __builtin_constant_p(hook)) | ||
| 172 | return static_branch(&nf_hooks_needed[pf][hook]); | ||
| 173 | |||
| 174 | return !list_empty(&nf_hooks[pf][hook]); | ||
| 175 | } | ||
| 176 | #else | ||
| 177 | static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook) | ||
| 178 | { | ||
| 179 | return !list_empty(&nf_hooks[pf][hook]); | ||
| 180 | } | ||
| 181 | #endif | ||
| 182 | |||
| 165 | int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb, | 183 | int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb, |
| 166 | struct net_device *indev, struct net_device *outdev, | 184 | struct net_device *indev, struct net_device *outdev, |
| 167 | int (*okfn)(struct sk_buff *), int thresh); | 185 | int (*okfn)(struct sk_buff *), int thresh); |
| @@ -179,11 +197,9 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook, | |||
| 179 | struct net_device *outdev, | 197 | struct net_device *outdev, |
| 180 | int (*okfn)(struct sk_buff *), int thresh) | 198 | int (*okfn)(struct sk_buff *), int thresh) |
| 181 | { | 199 | { |
| 182 | #ifndef CONFIG_NETFILTER_DEBUG | 200 | if (nf_hooks_active(pf, hook)) |
| 183 | if (list_empty(&nf_hooks[pf][hook])) | 201 | return nf_hook_slow(pf, hook, skb, indev, outdev, okfn, thresh); |
| 184 | return 1; | 202 | return 1; |
| 185 | #endif | ||
| 186 | return nf_hook_slow(pf, hook, skb, indev, outdev, okfn, thresh); | ||
| 187 | } | 203 | } |
| 188 | 204 | ||
| 189 | static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb, | 205 | static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb, |
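For context, nf_hooks_active() above returns true once something is on the hook list, and with CONFIG_JUMP_LABEL the registration path is what flips the static branch. A minimal sketch of a module that would populate the list, assuming the classic nf_register_hook() API and illustrative foo_* names:

```c
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/skbuff.h>

/* Minimal IPv4 pre-routing hook; registering it fills
 * nf_hooks[NFPROTO_IPV4][NF_INET_PRE_ROUTING], which is what
 * nf_hooks_active() keys off on the fast path.
 */
static unsigned int foo_hook(unsigned int hooknum, struct sk_buff *skb,
			     const struct net_device *in,
			     const struct net_device *out,
			     int (*okfn)(struct sk_buff *))
{
	return NF_ACCEPT;	/* pass every packet through unchanged */
}

static struct nf_hook_ops foo_ops = {
	.hook     = foo_hook,
	.owner    = THIS_MODULE,
	.pf       = NFPROTO_IPV4,
	.hooknum  = NF_INET_PRE_ROUTING,
	.priority = NF_IP_PRI_FIRST,
};

static int __init foo_init(void)
{
	return nf_register_hook(&foo_ops);
}

static void __exit foo_exit(void)
{
	nf_unregister_hook(&foo_ops);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");
```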
diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild index a1b410c76fc3..e144f54185c0 100644 --- a/include/linux/netfilter/Kbuild +++ b/include/linux/netfilter/Kbuild | |||
| @@ -5,7 +5,9 @@ header-y += nf_conntrack_ftp.h | |||
| 5 | header-y += nf_conntrack_sctp.h | 5 | header-y += nf_conntrack_sctp.h |
| 6 | header-y += nf_conntrack_tcp.h | 6 | header-y += nf_conntrack_tcp.h |
| 7 | header-y += nf_conntrack_tuple_common.h | 7 | header-y += nf_conntrack_tuple_common.h |
| 8 | header-y += nf_nat.h | ||
| 8 | header-y += nfnetlink.h | 9 | header-y += nfnetlink.h |
| 10 | header-y += nfnetlink_acct.h | ||
| 9 | header-y += nfnetlink_compat.h | 11 | header-y += nfnetlink_compat.h |
| 10 | header-y += nfnetlink_conntrack.h | 12 | header-y += nfnetlink_conntrack.h |
| 11 | header-y += nfnetlink_log.h | 13 | header-y += nfnetlink_log.h |
| @@ -21,6 +23,7 @@ header-y += xt_DSCP.h | |||
| 21 | header-y += xt_IDLETIMER.h | 23 | header-y += xt_IDLETIMER.h |
| 22 | header-y += xt_LED.h | 24 | header-y += xt_LED.h |
| 23 | header-y += xt_MARK.h | 25 | header-y += xt_MARK.h |
| 26 | header-y += xt_nfacct.h | ||
| 24 | header-y += xt_NFLOG.h | 27 | header-y += xt_NFLOG.h |
| 25 | header-y += xt_NFQUEUE.h | 28 | header-y += xt_NFQUEUE.h |
| 26 | header-y += xt_RATEEST.h | 29 | header-y += xt_RATEEST.h |
| @@ -40,6 +43,7 @@ header-y += xt_cpu.h | |||
| 40 | header-y += xt_dccp.h | 43 | header-y += xt_dccp.h |
| 41 | header-y += xt_devgroup.h | 44 | header-y += xt_devgroup.h |
| 42 | header-y += xt_dscp.h | 45 | header-y += xt_dscp.h |
| 46 | header-y += xt_ecn.h | ||
| 43 | header-y += xt_esp.h | 47 | header-y += xt_esp.h |
| 44 | header-y += xt_hashlimit.h | 48 | header-y += xt_hashlimit.h |
| 45 | header-y += xt_helper.h | 49 | header-y += xt_helper.h |
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h index 0d3dd66322ec..9e3a2838291b 100644 --- a/include/linux/netfilter/nf_conntrack_common.h +++ b/include/linux/netfilter/nf_conntrack_common.h | |||
| @@ -83,6 +83,10 @@ enum ip_conntrack_status { | |||
| 83 | /* Conntrack is a fake untracked entry */ | 83 | /* Conntrack is a fake untracked entry */ |
| 84 | IPS_UNTRACKED_BIT = 12, | 84 | IPS_UNTRACKED_BIT = 12, |
| 85 | IPS_UNTRACKED = (1 << IPS_UNTRACKED_BIT), | 85 | IPS_UNTRACKED = (1 << IPS_UNTRACKED_BIT), |
| 86 | |||
| 87 | /* Conntrack has a userspace helper. */ | ||
| 88 | IPS_USERSPACE_HELPER_BIT = 13, | ||
| 89 | IPS_USERSPACE_HELPER = (1 << IPS_USERSPACE_HELPER_BIT), | ||
| 86 | }; | 90 | }; |
| 87 | 91 | ||
| 88 | /* Connection tracking event types */ | 92 | /* Connection tracking event types */ |
diff --git a/include/linux/netfilter/nf_conntrack_tuple_common.h b/include/linux/netfilter/nf_conntrack_tuple_common.h index 2ea22b018a87..2f6bbc5b8125 100644 --- a/include/linux/netfilter/nf_conntrack_tuple_common.h +++ b/include/linux/netfilter/nf_conntrack_tuple_common.h | |||
| @@ -7,6 +7,33 @@ enum ip_conntrack_dir { | |||
| 7 | IP_CT_DIR_MAX | 7 | IP_CT_DIR_MAX |
| 8 | }; | 8 | }; |
| 9 | 9 | ||
| 10 | /* The protocol-specific manipulable parts of the tuple: always in | ||
| 11 | * network order | ||
| 12 | */ | ||
| 13 | union nf_conntrack_man_proto { | ||
| 14 | /* Add other protocols here. */ | ||
| 15 | __be16 all; | ||
| 16 | |||
| 17 | struct { | ||
| 18 | __be16 port; | ||
| 19 | } tcp; | ||
| 20 | struct { | ||
| 21 | __be16 port; | ||
| 22 | } udp; | ||
| 23 | struct { | ||
| 24 | __be16 id; | ||
| 25 | } icmp; | ||
| 26 | struct { | ||
| 27 | __be16 port; | ||
| 28 | } dccp; | ||
| 29 | struct { | ||
| 30 | __be16 port; | ||
| 31 | } sctp; | ||
| 32 | struct { | ||
| 33 | __be16 key; /* GRE key is 32bit, PPtP only uses 16bit */ | ||
| 34 | } gre; | ||
| 35 | }; | ||
| 36 | |||
| 10 | #define CTINFO2DIR(ctinfo) ((ctinfo) >= IP_CT_IS_REPLY ? IP_CT_DIR_REPLY : IP_CT_DIR_ORIGINAL) | 37 | #define CTINFO2DIR(ctinfo) ((ctinfo) >= IP_CT_IS_REPLY ? IP_CT_DIR_REPLY : IP_CT_DIR_ORIGINAL) |
| 11 | 38 | ||
| 12 | #endif /* _NF_CONNTRACK_TUPLE_COMMON_H */ | 39 | #endif /* _NF_CONNTRACK_TUPLE_COMMON_H */ |
diff --git a/include/linux/netfilter/nf_nat.h b/include/linux/netfilter/nf_nat.h new file mode 100644 index 000000000000..8df2d13730b2 --- /dev/null +++ b/include/linux/netfilter/nf_nat.h | |||
| @@ -0,0 +1,25 @@ | |||
| 1 | #ifndef _NETFILTER_NF_NAT_H | ||
| 2 | #define _NETFILTER_NF_NAT_H | ||
| 3 | |||
| 4 | #include <linux/netfilter.h> | ||
| 5 | #include <linux/netfilter/nf_conntrack_tuple_common.h> | ||
| 6 | |||
| 7 | #define NF_NAT_RANGE_MAP_IPS 1 | ||
| 8 | #define NF_NAT_RANGE_PROTO_SPECIFIED 2 | ||
| 9 | #define NF_NAT_RANGE_PROTO_RANDOM 4 | ||
| 10 | #define NF_NAT_RANGE_PERSISTENT 8 | ||
| 11 | |||
| 12 | struct nf_nat_ipv4_range { | ||
| 13 | unsigned int flags; | ||
| 14 | __be32 min_ip; | ||
| 15 | __be32 max_ip; | ||
| 16 | union nf_conntrack_man_proto min; | ||
| 17 | union nf_conntrack_man_proto max; | ||
| 18 | }; | ||
| 19 | |||
| 20 | struct nf_nat_ipv4_multi_range_compat { | ||
| 21 | unsigned int rangesize; | ||
| 22 | struct nf_nat_ipv4_range range[1]; | ||
| 23 | }; | ||
| 24 | |||
| 25 | #endif /* _NETFILTER_NF_NAT_H */ | ||
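For illustration only, here is how the renamed structures might describe a single SNAT pool with a pinned TCP source-port window; the addresses and ports are placeholders and the helper name is hypothetical.

```c
#include <arpa/inet.h>
#include <linux/netfilter/nf_nat.h>

/* Map to 192.0.2.10-192.0.2.20 with source ports confined to
 * 10000-20000; all values are in network byte order.
 */
static void fill_example_range(struct nf_nat_ipv4_multi_range_compat *mr)
{
	struct nf_nat_ipv4_range *r = &mr->range[0];

	mr->rangesize = 1;	/* the compat layout carries exactly one range */
	r->flags  = NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED;
	r->min_ip = inet_addr("192.0.2.10");
	r->max_ip = inet_addr("192.0.2.20");
	r->min.tcp.port = htons(10000);
	r->max.tcp.port = htons(20000);
}
```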
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 74d33861473c..b64454c2f79f 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h | |||
| @@ -48,7 +48,8 @@ struct nfgenmsg { | |||
| 48 | #define NFNL_SUBSYS_ULOG 4 | 48 | #define NFNL_SUBSYS_ULOG 4 |
| 49 | #define NFNL_SUBSYS_OSF 5 | 49 | #define NFNL_SUBSYS_OSF 5 |
| 50 | #define NFNL_SUBSYS_IPSET 6 | 50 | #define NFNL_SUBSYS_IPSET 6 |
| 51 | #define NFNL_SUBSYS_COUNT 7 | 51 | #define NFNL_SUBSYS_ACCT 7 |
| 52 | #define NFNL_SUBSYS_COUNT 8 | ||
| 52 | 53 | ||
| 53 | #ifdef __KERNEL__ | 54 | #ifdef __KERNEL__ |
| 54 | 55 | ||
diff --git a/include/linux/netfilter/nfnetlink_acct.h b/include/linux/netfilter/nfnetlink_acct.h new file mode 100644 index 000000000000..7c4279b4ae7a --- /dev/null +++ b/include/linux/netfilter/nfnetlink_acct.h | |||
| @@ -0,0 +1,36 @@ | |||
| 1 | #ifndef _NFNL_ACCT_H_ | ||
| 2 | #define _NFNL_ACCT_H_ | ||
| 3 | |||
| 4 | #ifndef NFACCT_NAME_MAX | ||
| 5 | #define NFACCT_NAME_MAX 32 | ||
| 6 | #endif | ||
| 7 | |||
| 8 | enum nfnl_acct_msg_types { | ||
| 9 | NFNL_MSG_ACCT_NEW, | ||
| 10 | NFNL_MSG_ACCT_GET, | ||
| 11 | NFNL_MSG_ACCT_GET_CTRZERO, | ||
| 12 | NFNL_MSG_ACCT_DEL, | ||
| 13 | NFNL_MSG_ACCT_MAX | ||
| 14 | }; | ||
| 15 | |||
| 16 | enum nfnl_acct_type { | ||
| 17 | NFACCT_UNSPEC, | ||
| 18 | NFACCT_NAME, | ||
| 19 | NFACCT_PKTS, | ||
| 20 | NFACCT_BYTES, | ||
| 21 | NFACCT_USE, | ||
| 22 | __NFACCT_MAX | ||
| 23 | }; | ||
| 24 | #define NFACCT_MAX (__NFACCT_MAX - 1) | ||
| 25 | |||
| 26 | #ifdef __KERNEL__ | ||
| 27 | |||
| 28 | struct nf_acct; | ||
| 29 | |||
| 30 | extern struct nf_acct *nfnl_acct_find_get(const char *filter_name); | ||
| 31 | extern void nfnl_acct_put(struct nf_acct *acct); | ||
| 32 | extern void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct); | ||
| 33 | |||
| 34 | #endif /* __KERNEL__ */ | ||
| 35 | |||
| 36 | #endif /* _NFNL_ACCT_H */ | ||
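A rough sketch of the kernel-side calling pattern for the three helpers declared above, approximating what an xt_nfacct-style user would do; the foo_* wrappers and the reference-counting assumption ("find_get" takes a reference, "put" drops it) are inferred from the names, not spelled out in this header.

```c
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/netfilter/nfnetlink_acct.h>

struct foo_match_priv {
	struct nf_acct *counter;
};

static int foo_match_init(struct foo_match_priv *priv, const char *name)
{
	priv->counter = nfnl_acct_find_get(name);	/* look up by name */
	return priv->counter ? 0 : -ENOENT;
}

static bool foo_match_packet(const struct sk_buff *skb,
			     struct foo_match_priv *priv)
{
	nfnl_acct_update(skb, priv->counter);	/* bump packet/byte counters */
	return true;
}

static void foo_match_destroy(struct foo_match_priv *priv)
{
	nfnl_acct_put(priv->counter);		/* release the object */
}
```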
diff --git a/include/linux/netfilter/xt_CT.h b/include/linux/netfilter/xt_CT.h index b56e76811c04..6390f0992f36 100644 --- a/include/linux/netfilter/xt_CT.h +++ b/include/linux/netfilter/xt_CT.h | |||
| @@ -3,7 +3,8 @@ | |||
| 3 | 3 | ||
| 4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
| 5 | 5 | ||
| 6 | #define XT_CT_NOTRACK 0x1 | 6 | #define XT_CT_NOTRACK 0x1 |
| 7 | #define XT_CT_USERSPACE_HELPER 0x2 | ||
| 7 | 8 | ||
| 8 | struct xt_ct_target_info { | 9 | struct xt_ct_target_info { |
| 9 | __u16 flags; | 10 | __u16 flags; |
diff --git a/include/linux/netfilter/xt_ecn.h b/include/linux/netfilter/xt_ecn.h new file mode 100644 index 000000000000..7158fca364f2 --- /dev/null +++ b/include/linux/netfilter/xt_ecn.h | |||
| @@ -0,0 +1,35 @@ | |||
| 1 | /* iptables module for matching the ECN header in IPv4 and TCP header | ||
| 2 | * | ||
| 3 | * (C) 2002 Harald Welte <laforge@gnumonks.org> | ||
| 4 | * | ||
| 5 | * This software is distributed under GNU GPL v2, 1991 | ||
| 6 | * | ||
| 7 | * ipt_ecn.h,v 1.4 2002/08/05 19:39:00 laforge Exp | ||
| 8 | */ | ||
| 9 | #ifndef _XT_ECN_H | ||
| 10 | #define _XT_ECN_H | ||
| 11 | |||
| 12 | #include <linux/types.h> | ||
| 13 | #include <linux/netfilter/xt_dscp.h> | ||
| 14 | |||
| 15 | #define XT_ECN_IP_MASK (~XT_DSCP_MASK) | ||
| 16 | |||
| 17 | #define XT_ECN_OP_MATCH_IP 0x01 | ||
| 18 | #define XT_ECN_OP_MATCH_ECE 0x10 | ||
| 19 | #define XT_ECN_OP_MATCH_CWR 0x20 | ||
| 20 | |||
| 21 | #define XT_ECN_OP_MATCH_MASK 0xce | ||
| 22 | |||
| 23 | /* match info */ | ||
| 24 | struct xt_ecn_info { | ||
| 25 | __u8 operation; | ||
| 26 | __u8 invert; | ||
| 27 | __u8 ip_ect; | ||
| 28 | union { | ||
| 29 | struct { | ||
| 30 | __u8 ect; | ||
| 31 | } tcp; | ||
| 32 | } proto; | ||
| 33 | }; | ||
| 34 | |||
| 35 | #endif /* _XT_ECN_H */ | ||
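A tiny illustrative initializer for the relocated match structure: match TCP packets with both ECE and CWR set, ignoring the IP ECT codepoint.

```c
#include <linux/netfilter/xt_ecn.h>

static const struct xt_ecn_info ecn_ece_cwr = {
	.operation = XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR,
	.invert    = 0,
	.ip_ect    = 0,	/* IP-level ECT not checked here */
};
```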
diff --git a/include/linux/netfilter/xt_nfacct.h b/include/linux/netfilter/xt_nfacct.h new file mode 100644 index 000000000000..3e19c8a86576 --- /dev/null +++ b/include/linux/netfilter/xt_nfacct.h | |||
| @@ -0,0 +1,13 @@ | |||
| 1 | #ifndef _XT_NFACCT_MATCH_H | ||
| 2 | #define _XT_NFACCT_MATCH_H | ||
| 3 | |||
| 4 | #include <linux/netfilter/nfnetlink_acct.h> | ||
| 5 | |||
| 6 | struct nf_acct; | ||
| 7 | |||
| 8 | struct xt_nfacct_match_info { | ||
| 9 | char name[NFACCT_NAME_MAX]; | ||
| 10 | struct nf_acct *nfacct; | ||
| 11 | }; | ||
| 12 | |||
| 13 | #endif /* _XT_NFACCT_MATCH_H */ | ||
diff --git a/include/linux/netfilter/xt_rpfilter.h b/include/linux/netfilter/xt_rpfilter.h new file mode 100644 index 000000000000..8358d4f71952 --- /dev/null +++ b/include/linux/netfilter/xt_rpfilter.h | |||
| @@ -0,0 +1,23 @@ | |||
| 1 | #ifndef _XT_RPATH_H | ||
| 2 | #define _XT_RPATH_H | ||
| 3 | |||
| 4 | #include <linux/types.h> | ||
| 5 | |||
| 6 | enum { | ||
| 7 | XT_RPFILTER_LOOSE = 1 << 0, | ||
| 8 | XT_RPFILTER_VALID_MARK = 1 << 1, | ||
| 9 | XT_RPFILTER_ACCEPT_LOCAL = 1 << 2, | ||
| 10 | XT_RPFILTER_INVERT = 1 << 3, | ||
| 11 | #ifdef __KERNEL__ | ||
| 12 | XT_RPFILTER_OPTION_MASK = XT_RPFILTER_LOOSE | | ||
| 13 | XT_RPFILTER_VALID_MARK | | ||
| 14 | XT_RPFILTER_ACCEPT_LOCAL | | ||
| 15 | XT_RPFILTER_INVERT, | ||
| 16 | #endif | ||
| 17 | }; | ||
| 18 | |||
| 19 | struct xt_rpfilter_info { | ||
| 20 | __u8 flags; | ||
| 21 | }; | ||
| 22 | |||
| 23 | #endif | ||
diff --git a/include/linux/netfilter_ipv4/Kbuild b/include/linux/netfilter_ipv4/Kbuild index c3b45480ecf7..f9930c87fff3 100644 --- a/include/linux/netfilter_ipv4/Kbuild +++ b/include/linux/netfilter_ipv4/Kbuild | |||
| @@ -12,4 +12,3 @@ header-y += ipt_ah.h | |||
| 12 | header-y += ipt_ecn.h | 12 | header-y += ipt_ecn.h |
| 13 | header-y += ipt_realm.h | 13 | header-y += ipt_realm.h |
| 14 | header-y += ipt_ttl.h | 14 | header-y += ipt_ttl.h |
| 15 | header-y += nf_nat.h | ||
diff --git a/include/linux/netfilter_ipv4/ipt_ecn.h b/include/linux/netfilter_ipv4/ipt_ecn.h index eabf95fb7d3e..0e0c063dbf60 100644 --- a/include/linux/netfilter_ipv4/ipt_ecn.h +++ b/include/linux/netfilter_ipv4/ipt_ecn.h | |||
| @@ -1,35 +1,15 @@ | |||
| 1 | /* iptables module for matching the ECN header in IPv4 and TCP header | ||
| 2 | * | ||
| 3 | * (C) 2002 Harald Welte <laforge@gnumonks.org> | ||
| 4 | * | ||
| 5 | * This software is distributed under GNU GPL v2, 1991 | ||
| 6 | * | ||
| 7 | * ipt_ecn.h,v 1.4 2002/08/05 19:39:00 laforge Exp | ||
| 8 | */ | ||
| 9 | #ifndef _IPT_ECN_H | 1 | #ifndef _IPT_ECN_H |
| 10 | #define _IPT_ECN_H | 2 | #define _IPT_ECN_H |
| 11 | 3 | ||
| 12 | #include <linux/types.h> | 4 | #include <linux/netfilter/xt_ecn.h> |
| 13 | #include <linux/netfilter/xt_dscp.h> | 5 | #define ipt_ecn_info xt_ecn_info |
| 14 | 6 | ||
| 15 | #define IPT_ECN_IP_MASK (~XT_DSCP_MASK) | 7 | enum { |
| 16 | 8 | IPT_ECN_IP_MASK = XT_ECN_IP_MASK, | |
| 17 | #define IPT_ECN_OP_MATCH_IP 0x01 | 9 | IPT_ECN_OP_MATCH_IP = XT_ECN_OP_MATCH_IP, |
| 18 | #define IPT_ECN_OP_MATCH_ECE 0x10 | 10 | IPT_ECN_OP_MATCH_ECE = XT_ECN_OP_MATCH_ECE, |
| 19 | #define IPT_ECN_OP_MATCH_CWR 0x20 | 11 | IPT_ECN_OP_MATCH_CWR = XT_ECN_OP_MATCH_CWR, |
| 20 | 12 | IPT_ECN_OP_MATCH_MASK = XT_ECN_OP_MATCH_MASK, | |
| 21 | #define IPT_ECN_OP_MATCH_MASK 0xce | ||
| 22 | |||
| 23 | /* match info */ | ||
| 24 | struct ipt_ecn_info { | ||
| 25 | __u8 operation; | ||
| 26 | __u8 invert; | ||
| 27 | __u8 ip_ect; | ||
| 28 | union { | ||
| 29 | struct { | ||
| 30 | __u8 ect; | ||
| 31 | } tcp; | ||
| 32 | } proto; | ||
| 33 | }; | 13 | }; |
| 34 | 14 | ||
| 35 | #endif /* _IPT_ECN_H */ | 15 | #endif /* _IPT_ECN_H */ |
diff --git a/include/linux/netfilter_ipv4/nf_nat.h b/include/linux/netfilter_ipv4/nf_nat.h deleted file mode 100644 index 7a861d09fc86..000000000000 --- a/include/linux/netfilter_ipv4/nf_nat.h +++ /dev/null | |||
| @@ -1,58 +0,0 @@ | |||
| 1 | #ifndef _LINUX_NF_NAT_H | ||
| 2 | #define _LINUX_NF_NAT_H | ||
| 3 | |||
| 4 | #include <linux/types.h> | ||
| 5 | |||
| 6 | #define IP_NAT_RANGE_MAP_IPS 1 | ||
| 7 | #define IP_NAT_RANGE_PROTO_SPECIFIED 2 | ||
| 8 | #define IP_NAT_RANGE_PROTO_RANDOM 4 | ||
| 9 | #define IP_NAT_RANGE_PERSISTENT 8 | ||
| 10 | |||
| 11 | /* The protocol-specific manipulable parts of the tuple. */ | ||
| 12 | union nf_conntrack_man_proto { | ||
| 13 | /* Add other protocols here. */ | ||
| 14 | __be16 all; | ||
| 15 | |||
| 16 | struct { | ||
| 17 | __be16 port; | ||
| 18 | } tcp; | ||
| 19 | struct { | ||
| 20 | __be16 port; | ||
| 21 | } udp; | ||
| 22 | struct { | ||
| 23 | __be16 id; | ||
| 24 | } icmp; | ||
| 25 | struct { | ||
| 26 | __be16 port; | ||
| 27 | } dccp; | ||
| 28 | struct { | ||
| 29 | __be16 port; | ||
| 30 | } sctp; | ||
| 31 | struct { | ||
| 32 | __be16 key; /* GRE key is 32bit, PPtP only uses 16bit */ | ||
| 33 | } gre; | ||
| 34 | }; | ||
| 35 | |||
| 36 | /* Single range specification. */ | ||
| 37 | struct nf_nat_range { | ||
| 38 | /* Set to OR of flags above. */ | ||
| 39 | unsigned int flags; | ||
| 40 | |||
| 41 | /* Inclusive: network order. */ | ||
| 42 | __be32 min_ip, max_ip; | ||
| 43 | |||
| 44 | /* Inclusive: network order */ | ||
| 45 | union nf_conntrack_man_proto min, max; | ||
| 46 | }; | ||
| 47 | |||
| 48 | /* For backwards compat: don't use in modern code. */ | ||
| 49 | struct nf_nat_multi_range_compat { | ||
| 50 | unsigned int rangesize; /* Must be 1. */ | ||
| 51 | |||
| 52 | /* hangs off end. */ | ||
| 53 | struct nf_nat_range range[1]; | ||
| 54 | }; | ||
| 55 | |||
| 56 | #define nf_nat_multi_range nf_nat_multi_range_compat | ||
| 57 | |||
| 58 | #endif | ||
diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 8374d2967362..52e48959cfa1 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h | |||
| @@ -8,7 +8,7 @@ | |||
| 8 | #define NETLINK_UNUSED 1 /* Unused number */ | 8 | #define NETLINK_UNUSED 1 /* Unused number */ |
| 9 | #define NETLINK_USERSOCK 2 /* Reserved for user mode socket protocols */ | 9 | #define NETLINK_USERSOCK 2 /* Reserved for user mode socket protocols */ |
| 10 | #define NETLINK_FIREWALL 3 /* Firewalling hook */ | 10 | #define NETLINK_FIREWALL 3 /* Firewalling hook */ |
| 11 | #define NETLINK_INET_DIAG 4 /* INET socket monitoring */ | 11 | #define NETLINK_SOCK_DIAG 4 /* socket monitoring */ |
| 12 | #define NETLINK_NFLOG 5 /* netfilter/iptables ULOG */ | 12 | #define NETLINK_NFLOG 5 /* netfilter/iptables ULOG */ |
| 13 | #define NETLINK_XFRM 6 /* ipsec */ | 13 | #define NETLINK_XFRM 6 /* ipsec */ |
| 14 | #define NETLINK_SELINUX 7 /* SELinux event notifications */ | 14 | #define NETLINK_SELINUX 7 /* SELinux event notifications */ |
| @@ -27,6 +27,8 @@ | |||
| 27 | #define NETLINK_RDMA 20 | 27 | #define NETLINK_RDMA 20 |
| 28 | #define NETLINK_CRYPTO 21 /* Crypto layer */ | 28 | #define NETLINK_CRYPTO 21 /* Crypto layer */ |
| 29 | 29 | ||
| 30 | #define NETLINK_INET_DIAG NETLINK_SOCK_DIAG | ||
| 31 | |||
| 30 | #define MAX_LINKS 32 | 32 | #define MAX_LINKS 32 |
| 31 | 33 | ||
| 32 | struct sockaddr_nl { | 34 | struct sockaddr_nl { |
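A minimal userspace sketch, assuming nothing beyond ordinary AF_NETLINK socket usage: the renamed constant opens the same protocol number as before, and the NETLINK_INET_DIAG alias keeps existing sources building. Request construction and parsing are omitted.

```c
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>

int open_sock_diag(void)
{
	struct sockaddr_nl addr;
	int fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_SOCK_DIAG);

	if (fd < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.nl_family = AF_NETLINK;
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* ready for sock_diag requests */
}
```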
diff --git a/include/linux/nfc.h b/include/linux/nfc.h index 36cb955b05cc..01d4e5d60325 100644 --- a/include/linux/nfc.h +++ b/include/linux/nfc.h | |||
| @@ -62,6 +62,8 @@ enum nfc_commands { | |||
| 62 | NFC_CMD_GET_DEVICE, | 62 | NFC_CMD_GET_DEVICE, |
| 63 | NFC_CMD_DEV_UP, | 63 | NFC_CMD_DEV_UP, |
| 64 | NFC_CMD_DEV_DOWN, | 64 | NFC_CMD_DEV_DOWN, |
| 65 | NFC_CMD_DEP_LINK_UP, | ||
| 66 | NFC_CMD_DEP_LINK_DOWN, | ||
| 65 | NFC_CMD_START_POLL, | 67 | NFC_CMD_START_POLL, |
| 66 | NFC_CMD_STOP_POLL, | 68 | NFC_CMD_STOP_POLL, |
| 67 | NFC_CMD_GET_TARGET, | 69 | NFC_CMD_GET_TARGET, |
| @@ -86,6 +88,9 @@ enum nfc_commands { | |||
| 86 | * @NFC_ATTR_TARGET_SENS_RES: NFC-A targets extra information such as NFCID | 88 | * @NFC_ATTR_TARGET_SENS_RES: NFC-A targets extra information such as NFCID |
| 87 | * @NFC_ATTR_TARGET_SEL_RES: NFC-A targets extra information (useful if the | 89 | * @NFC_ATTR_TARGET_SEL_RES: NFC-A targets extra information (useful if the |
| 88 | * target is not NFC-Forum compliant) | 90 | * target is not NFC-Forum compliant) |
| 91 | * @NFC_ATTR_TARGET_NFCID1: NFC-A targets identifier, max 10 bytes | ||
| 92 | * @NFC_ATTR_COMM_MODE: Passive or active mode | ||
| 93 | * @NFC_ATTR_RF_MODE: Initiator or target | ||
| 89 | */ | 94 | */ |
| 90 | enum nfc_attrs { | 95 | enum nfc_attrs { |
| 91 | NFC_ATTR_UNSPEC, | 96 | NFC_ATTR_UNSPEC, |
| @@ -95,6 +100,9 @@ enum nfc_attrs { | |||
| 95 | NFC_ATTR_TARGET_INDEX, | 100 | NFC_ATTR_TARGET_INDEX, |
| 96 | NFC_ATTR_TARGET_SENS_RES, | 101 | NFC_ATTR_TARGET_SENS_RES, |
| 97 | NFC_ATTR_TARGET_SEL_RES, | 102 | NFC_ATTR_TARGET_SEL_RES, |
| 103 | NFC_ATTR_TARGET_NFCID1, | ||
| 104 | NFC_ATTR_COMM_MODE, | ||
| 105 | NFC_ATTR_RF_MODE, | ||
| 98 | /* private: internal use only */ | 106 | /* private: internal use only */ |
| 99 | __NFC_ATTR_AFTER_LAST | 107 | __NFC_ATTR_AFTER_LAST |
| 100 | }; | 108 | }; |
| @@ -111,6 +119,14 @@ enum nfc_attrs { | |||
| 111 | 119 | ||
| 112 | #define NFC_PROTO_MAX 6 | 120 | #define NFC_PROTO_MAX 6 |
| 113 | 121 | ||
| 122 | /* NFC communication modes */ | ||
| 123 | #define NFC_COMM_ACTIVE 0 | ||
| 124 | #define NFC_COMM_PASSIVE 1 | ||
| 125 | |||
| 126 | /* NFC RF modes */ | ||
| 127 | #define NFC_RF_INITIATOR 0 | ||
| 128 | #define NFC_RF_TARGET 1 | ||
| 129 | |||
| 114 | /* NFC protocols masks used in bitsets */ | 130 | /* NFC protocols masks used in bitsets */ |
| 115 | #define NFC_PROTO_JEWEL_MASK (1 << NFC_PROTO_JEWEL) | 131 | #define NFC_PROTO_JEWEL_MASK (1 << NFC_PROTO_JEWEL) |
| 116 | #define NFC_PROTO_MIFARE_MASK (1 << NFC_PROTO_MIFARE) | 132 | #define NFC_PROTO_MIFARE_MASK (1 << NFC_PROTO_MIFARE) |
| @@ -125,9 +141,22 @@ struct sockaddr_nfc { | |||
| 125 | __u32 nfc_protocol; | 141 | __u32 nfc_protocol; |
| 126 | }; | 142 | }; |
| 127 | 143 | ||
| 144 | #define NFC_LLCP_MAX_SERVICE_NAME 63 | ||
| 145 | struct sockaddr_nfc_llcp { | ||
| 146 | sa_family_t sa_family; | ||
| 147 | __u32 dev_idx; | ||
| 148 | __u32 target_idx; | ||
| 149 | __u32 nfc_protocol; | ||
| 150 | __u8 dsap; /* Destination SAP, if known */ | ||
| 151 | __u8 ssap; /* Source SAP to be bound to */ | ||
| 152 | char service_name[NFC_LLCP_MAX_SERVICE_NAME]; /* Service name URI */ | ||
| 153 | size_t service_name_len; | ||
| 154 | }; | ||
| 155 | |||
| 128 | /* NFC socket protocols */ | 156 | /* NFC socket protocols */ |
| 129 | #define NFC_SOCKPROTO_RAW 0 | 157 | #define NFC_SOCKPROTO_RAW 0 |
| 130 | #define NFC_SOCKPROTO_MAX 1 | 158 | #define NFC_SOCKPROTO_LLCP 1 |
| 159 | #define NFC_SOCKPROTO_MAX 2 | ||
| 131 | 160 | ||
| 132 | #define NFC_HEADER_SIZE 1 | 161 | #define NFC_HEADER_SIZE 1 |
| 133 | 162 | ||
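A heavily hedged userspace sketch of the new LLCP address in use: it assumes an AF_NFC socket family (the value 39 below is an assumption), that connect() accepts sockaddr_nfc_llcp on a SOCK_STREAM LLCP socket, and that the device/target indices and protocol mask come from the NFC netlink commands (NFC_CMD_GET_DEVICE, NFC_CMD_GET_TARGET). The service name is only an example.

```c
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/nfc.h>

#ifndef AF_NFC
#define AF_NFC 39	/* assumption: PF_NFC/AF_NFC value on this kernel */
#endif

int llcp_connect_example(unsigned int dev_idx, unsigned int target_idx,
			 unsigned int proto_mask)
{
	struct sockaddr_nfc_llcp addr;
	int fd = socket(AF_NFC, SOCK_STREAM, NFC_SOCKPROTO_LLCP);

	if (fd < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.sa_family = AF_NFC;
	addr.dev_idx = dev_idx;		/* from NFC netlink */
	addr.target_idx = target_idx;	/* from NFC netlink */
	addr.nfc_protocol = proto_mask;
	strcpy(addr.service_name, "urn:nfc:sn:snep");	/* example URI */
	addr.service_name_len = strlen(addr.service_name);

	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
```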
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h index 8049bf77d799..0f5ff3739820 100644 --- a/include/linux/nl80211.h +++ b/include/linux/nl80211.h | |||
| @@ -509,6 +509,38 @@ | |||
| 509 | * @NL80211_CMD_TDLS_OPER: Perform a high-level TDLS command (e.g. link setup). | 509 | * @NL80211_CMD_TDLS_OPER: Perform a high-level TDLS command (e.g. link setup). |
| 510 | * @NL80211_CMD_TDLS_MGMT: Send a TDLS management frame. | 510 | * @NL80211_CMD_TDLS_MGMT: Send a TDLS management frame. |
| 511 | * | 511 | * |
| 512 | * @NL80211_CMD_UNEXPECTED_FRAME: Used by an application controlling an AP | ||
| 513 | * (or GO) interface (i.e. hostapd) to ask for unexpected frames to | ||
| 514 | * implement sending deauth to stations that send unexpected class 3 | ||
| 515 | * frames. Also used as the event sent by the kernel when such a frame | ||
| 516 | * is received. | ||
| 517 | * For the event, the %NL80211_ATTR_MAC attribute carries the TA and | ||
| 518 | * other attributes like the interface index are present. | ||
| 519 | * If used as the command it must have an interface index and you can | ||
| 520 | * only unsubscribe from the event by closing the socket. Subscription | ||
| 521 | * is also for %NL80211_CMD_UNEXPECTED_4ADDR_FRAME events. | ||
| 522 | * | ||
| 523 | * @NL80211_CMD_UNEXPECTED_4ADDR_FRAME: Sent as an event indicating that the | ||
| 524 | * associated station identified by %NL80211_ATTR_MAC sent a 4addr frame | ||
| 525 | * and wasn't already in a 4-addr VLAN. The event will be sent similarly | ||
| 526 | * to the %NL80211_CMD_UNEXPECTED_FRAME event, to the same listener. | ||
| 527 | * | ||
| 528 | * @NL80211_CMD_PROBE_CLIENT: Probe an associated station on an AP interface | ||
| 529 | * by sending a null data frame to it and reporting when the frame is | ||
| 530 | * acknowledged. This is used to allow timing out inactive clients. Uses | ||
| 531 | * %NL80211_ATTR_IFINDEX and %NL80211_ATTR_MAC. The command returns a | ||
| 532 | * direct reply with an %NL80211_ATTR_COOKIE that is later used to match | ||
| 533 | * up the event with the request. The event includes the same data and | ||
| 534 | * has %NL80211_ATTR_ACK set if the frame was ACKed. | ||
| 535 | * | ||
| 536 | * @NL80211_CMD_REGISTER_BEACONS: Register this socket to receive beacons from | ||
| 537 | * other BSSes when any interfaces are in AP mode. This helps implement | ||
| 538 | * OLBC handling in hostapd. Beacons are reported in %NL80211_CMD_FRAME | ||
| 539 | * messages. Note that per PHY only one application may register. | ||
| 540 | * | ||
| 541 | * @NL80211_CMD_SET_NOACK_MAP: sets a bitmap for the individual TIDs whether | ||
| 542 | * No Acknowledgement Policy should be applied. | ||
| 543 | * | ||
| 512 | * @NL80211_CMD_MAX: highest used command number | 544 | * @NL80211_CMD_MAX: highest used command number |
| 513 | * @__NL80211_CMD_AFTER_LAST: internal use | 545 | * @__NL80211_CMD_AFTER_LAST: internal use |
| 514 | */ | 546 | */ |
| @@ -638,6 +670,16 @@ enum nl80211_commands { | |||
| 638 | NL80211_CMD_TDLS_OPER, | 670 | NL80211_CMD_TDLS_OPER, |
| 639 | NL80211_CMD_TDLS_MGMT, | 671 | NL80211_CMD_TDLS_MGMT, |
| 640 | 672 | ||
| 673 | NL80211_CMD_UNEXPECTED_FRAME, | ||
| 674 | |||
| 675 | NL80211_CMD_PROBE_CLIENT, | ||
| 676 | |||
| 677 | NL80211_CMD_REGISTER_BEACONS, | ||
| 678 | |||
| 679 | NL80211_CMD_UNEXPECTED_4ADDR_FRAME, | ||
| 680 | |||
| 681 | NL80211_CMD_SET_NOACK_MAP, | ||
| 682 | |||
| 641 | /* add new commands above here */ | 683 | /* add new commands above here */ |
| 642 | 684 | ||
| 643 | /* used to define NL80211_CMD_MAX below */ | 685 | /* used to define NL80211_CMD_MAX below */ |
| @@ -658,6 +700,8 @@ enum nl80211_commands { | |||
| 658 | #define NL80211_CMD_DISASSOCIATE NL80211_CMD_DISASSOCIATE | 700 | #define NL80211_CMD_DISASSOCIATE NL80211_CMD_DISASSOCIATE |
| 659 | #define NL80211_CMD_REG_BEACON_HINT NL80211_CMD_REG_BEACON_HINT | 701 | #define NL80211_CMD_REG_BEACON_HINT NL80211_CMD_REG_BEACON_HINT |
| 660 | 702 | ||
| 703 | #define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS | ||
| 704 | |||
| 661 | /* source-level API compatibility */ | 705 | /* source-level API compatibility */ |
| 662 | #define NL80211_CMD_GET_MESH_PARAMS NL80211_CMD_GET_MESH_CONFIG | 706 | #define NL80211_CMD_GET_MESH_PARAMS NL80211_CMD_GET_MESH_CONFIG |
| 663 | #define NL80211_CMD_SET_MESH_PARAMS NL80211_CMD_SET_MESH_CONFIG | 707 | #define NL80211_CMD_SET_MESH_PARAMS NL80211_CMD_SET_MESH_CONFIG |
| @@ -1109,6 +1153,46 @@ enum nl80211_commands { | |||
| 1109 | * %NL80211_CMD_TDLS_MGMT. Otherwise %NL80211_CMD_TDLS_OPER should be | 1153 | * %NL80211_CMD_TDLS_MGMT. Otherwise %NL80211_CMD_TDLS_OPER should be |
| 1110 | * used for asking the driver to perform a TDLS operation. | 1154 | * used for asking the driver to perform a TDLS operation. |
| 1111 | * | 1155 | * |
| 1156 | * @NL80211_ATTR_DEVICE_AP_SME: This u32 attribute may be listed for devices | ||
| 1157 | * that have AP support to indicate that they have the AP SME integrated | ||
| 1158 | * with support for the features listed in this attribute, see | ||
| 1159 | * &enum nl80211_ap_sme_features. | ||
| 1160 | * | ||
| 1161 | * @NL80211_ATTR_DONT_WAIT_FOR_ACK: Used with %NL80211_CMD_FRAME, this tells | ||
| 1162 | * the driver to not wait for an acknowledgement. Note that due to this, | ||
| 1163 | * it will also not give a status callback nor return a cookie. This is | ||
| 1164 | * mostly useful for probe responses to save airtime. | ||
| 1165 | * | ||
| 1166 | * @NL80211_ATTR_FEATURE_FLAGS: This u32 attribute contains flags from | ||
| 1167 | * &enum nl80211_feature_flags and is advertised in wiphy information. | ||
| 1168 | * @NL80211_ATTR_PROBE_RESP_OFFLOAD: Indicates that the HW responds to probe | ||
| 1169 | * requests while operating in AP-mode. | ||
| 1171 | * This attribute holds a bitmap of the supported protocols for | ||
| 1172 | * offloading (see &enum nl80211_probe_resp_offload_support_attr). | ||
| 1173 | * | ||
| 1174 | * @NL80211_ATTR_PROBE_RESP: Probe Response template data. Contains the entire | ||
| 1175 | * probe-response frame. The DA field in the 802.11 header is zeroed out, | ||
| 1176 | * to be filled by the FW. | ||
| 1177 | * @NL80211_ATTR_DISABLE_HT: Force HT capable interfaces to disable | ||
| 1178 | * this feature. Currently, only supported in mac80211 drivers. | ||
| 1179 | * @NL80211_ATTR_HT_CAPABILITY_MASK: Specify the bits of | ||
| 1180 | * %NL80211_ATTR_HT_CAPABILITY to which attention should be paid. | ||
| 1181 | * Currently, only mac80211 NICs support this feature. | ||
| 1182 | * The values that may be configured are: | ||
| 1183 | * MCS rates, MAX-AMSDU, HT-20-40, HT_CAP_SGI_40, | ||
| 1184 | * AMPDU density and AMPDU factor. | ||
| 1185 | * All values are treated as suggestions and may be ignored | ||
| 1186 | * by the driver as required. The actual values may be seen in | ||
| 1187 | * the station debugfs ht_caps file. | ||
| 1188 | * | ||
| 1189 | * @NL80211_ATTR_DFS_REGION: region for regulatory rules which this country | ||
| 1190 | * abides by when initiating radiation on DFS channels. A country maps | ||
| 1191 | * to one DFS region. | ||
| 1192 | * | ||
| 1193 | * @NL80211_ATTR_NOACK_MAP: This u16 bitmap contains the No Ack Policy of | ||
| 1194 | * up to 16 TIDs. | ||
| 1195 | * | ||
| 1112 | * @NL80211_ATTR_MAX: highest attribute number currently defined | 1196 | * @NL80211_ATTR_MAX: highest attribute number currently defined |
| 1113 | * @__NL80211_ATTR_AFTER_LAST: internal use | 1197 | * @__NL80211_ATTR_AFTER_LAST: internal use |
| 1114 | */ | 1198 | */ |
| @@ -1337,6 +1421,23 @@ enum nl80211_attrs { | |||
| 1337 | NL80211_ATTR_TDLS_SUPPORT, | 1421 | NL80211_ATTR_TDLS_SUPPORT, |
| 1338 | NL80211_ATTR_TDLS_EXTERNAL_SETUP, | 1422 | NL80211_ATTR_TDLS_EXTERNAL_SETUP, |
| 1339 | 1423 | ||
| 1424 | NL80211_ATTR_DEVICE_AP_SME, | ||
| 1425 | |||
| 1426 | NL80211_ATTR_DONT_WAIT_FOR_ACK, | ||
| 1427 | |||
| 1428 | NL80211_ATTR_FEATURE_FLAGS, | ||
| 1429 | |||
| 1430 | NL80211_ATTR_PROBE_RESP_OFFLOAD, | ||
| 1431 | |||
| 1432 | NL80211_ATTR_PROBE_RESP, | ||
| 1433 | |||
| 1434 | NL80211_ATTR_DFS_REGION, | ||
| 1435 | |||
| 1436 | NL80211_ATTR_DISABLE_HT, | ||
| 1437 | NL80211_ATTR_HT_CAPABILITY_MASK, | ||
| 1438 | |||
| 1439 | NL80211_ATTR_NOACK_MAP, | ||
| 1440 | |||
| 1340 | /* add attributes here, update the policy in nl80211.c */ | 1441 | /* add attributes here, update the policy in nl80211.c */ |
| 1341 | 1442 | ||
| 1342 | __NL80211_ATTR_AFTER_LAST, | 1443 | __NL80211_ATTR_AFTER_LAST, |
| @@ -1371,6 +1472,7 @@ enum nl80211_attrs { | |||
| 1371 | #define NL80211_ATTR_AKM_SUITES NL80211_ATTR_AKM_SUITES | 1472 | #define NL80211_ATTR_AKM_SUITES NL80211_ATTR_AKM_SUITES |
| 1372 | #define NL80211_ATTR_KEY NL80211_ATTR_KEY | 1473 | #define NL80211_ATTR_KEY NL80211_ATTR_KEY |
| 1373 | #define NL80211_ATTR_KEYS NL80211_ATTR_KEYS | 1474 | #define NL80211_ATTR_KEYS NL80211_ATTR_KEYS |
| 1475 | #define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS | ||
| 1374 | 1476 | ||
| 1375 | #define NL80211_MAX_SUPP_RATES 32 | 1477 | #define NL80211_MAX_SUPP_RATES 32 |
| 1376 | #define NL80211_MAX_SUPP_REG_RULES 32 | 1478 | #define NL80211_MAX_SUPP_REG_RULES 32 |
| @@ -1434,7 +1536,11 @@ enum nl80211_iftype { | |||
| 1434 | * @NL80211_STA_FLAG_WME: station is WME/QoS capable | 1536 | * @NL80211_STA_FLAG_WME: station is WME/QoS capable |
| 1435 | * @NL80211_STA_FLAG_MFP: station uses management frame protection | 1537 | * @NL80211_STA_FLAG_MFP: station uses management frame protection |
| 1436 | * @NL80211_STA_FLAG_AUTHENTICATED: station is authenticated | 1538 | * @NL80211_STA_FLAG_AUTHENTICATED: station is authenticated |
| 1437 | * @NL80211_STA_FLAG_TDLS_PEER: station is a TDLS peer | 1539 | * @NL80211_STA_FLAG_TDLS_PEER: station is a TDLS peer -- this flag should |
| 1540 | * only be used in managed mode (even in the flags mask). Note that the | ||
| 1541 | * flag can't be changed; it is only valid while adding a station, and | ||
| 1542 | * attempts to change it will silently be ignored (rather than rejected | ||
| 1543 | * as errors). | ||
| 1438 | * @NL80211_STA_FLAG_MAX: highest station flag number currently defined | 1544 | * @NL80211_STA_FLAG_MAX: highest station flag number currently defined |
| 1439 | * @__NL80211_STA_FLAG_AFTER_LAST: internal use | 1545 | * @__NL80211_STA_FLAG_AFTER_LAST: internal use |
| 1440 | */ | 1546 | */ |
| @@ -1549,6 +1655,7 @@ enum nl80211_sta_bss_param { | |||
| 1549 | * containing info as possible, see &enum nl80211_sta_bss_param | 1655 | * containing info as possible, see &enum nl80211_sta_bss_param |
| 1550 | * @NL80211_STA_INFO_CONNECTED_TIME: time since the station is last connected | 1656 | * @NL80211_STA_INFO_CONNECTED_TIME: time since the station is last connected |
| 1551 | * @NL80211_STA_INFO_STA_FLAGS: Contains a struct nl80211_sta_flag_update. | 1657 | * @NL80211_STA_INFO_STA_FLAGS: Contains a struct nl80211_sta_flag_update. |
| 1658 | * @NL80211_STA_INFO_BEACON_LOSS: count of times beacon loss was detected (u32) | ||
| 1552 | * @__NL80211_STA_INFO_AFTER_LAST: internal | 1659 | * @__NL80211_STA_INFO_AFTER_LAST: internal |
| 1553 | * @NL80211_STA_INFO_MAX: highest possible station info attribute | 1660 | * @NL80211_STA_INFO_MAX: highest possible station info attribute |
| 1554 | */ | 1661 | */ |
| @@ -1571,6 +1678,7 @@ enum nl80211_sta_info { | |||
| 1571 | NL80211_STA_INFO_BSS_PARAM, | 1678 | NL80211_STA_INFO_BSS_PARAM, |
| 1572 | NL80211_STA_INFO_CONNECTED_TIME, | 1679 | NL80211_STA_INFO_CONNECTED_TIME, |
| 1573 | NL80211_STA_INFO_STA_FLAGS, | 1680 | NL80211_STA_INFO_STA_FLAGS, |
| 1681 | NL80211_STA_INFO_BEACON_LOSS, | ||
| 1574 | 1682 | ||
| 1575 | /* keep last */ | 1683 | /* keep last */ |
| 1576 | __NL80211_STA_INFO_AFTER_LAST, | 1684 | __NL80211_STA_INFO_AFTER_LAST, |
| @@ -1845,6 +1953,21 @@ enum nl80211_reg_rule_flags { | |||
| 1845 | }; | 1953 | }; |
| 1846 | 1954 | ||
| 1847 | /** | 1955 | /** |
| 1956 | * enum nl80211_dfs_regions - regulatory DFS regions | ||
| 1957 | * | ||
| 1958 | * @NL80211_DFS_UNSET: Country has no DFS master region specified | ||
| 1959 | * @NL80211_DFS_FCC: Country follows DFS master rules from FCC | ||
| 1960 | * @NL80211_DFS_ETSI: Country follows DFS master rules from ETSI | ||
| 1961 | * @NL80211_DFS_JP: Country follows DFS master rules from JP/MKK/Telec | ||
| 1962 | */ | ||
| 1963 | enum nl80211_dfs_regions { | ||
| 1964 | NL80211_DFS_UNSET = 0, | ||
| 1965 | NL80211_DFS_FCC = 1, | ||
| 1966 | NL80211_DFS_ETSI = 2, | ||
| 1967 | NL80211_DFS_JP = 3, | ||
| 1968 | }; | ||
| 1969 | |||
| 1970 | /** | ||
| 1848 | * enum nl80211_survey_info - survey information | 1971 | * enum nl80211_survey_info - survey information |
| 1849 | * | 1972 | * |
| 1850 | * These attribute types are used with %NL80211_ATTR_SURVEY_INFO | 1973 | * These attribute types are used with %NL80211_ATTR_SURVEY_INFO |
| @@ -1977,6 +2100,10 @@ enum nl80211_mntr_flags { | |||
| 1977 | * access to a broader network beyond the MBSS. This is done via Root | 2100 | * access to a broader network beyond the MBSS. This is done via Root |
| 1978 | * Announcement frames. | 2101 | * Announcement frames. |
| 1979 | * | 2102 | * |
| 2103 | * @NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL: The minimum interval of time (in | ||
| 2104 | * TUs) during which a mesh STA can send only one Action frame containing a | ||
| 2105 | * PERR element. | ||
| 2106 | * | ||
| 1980 | * @NL80211_MESHCONF_ATTR_MAX: highest possible mesh configuration attribute | 2107 | * @NL80211_MESHCONF_ATTR_MAX: highest possible mesh configuration attribute |
| 1981 | * | 2108 | * |
| 1982 | * @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use | 2109 | * @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use |
| @@ -2000,6 +2127,7 @@ enum nl80211_meshconf_params { | |||
| 2000 | NL80211_MESHCONF_ELEMENT_TTL, | 2127 | NL80211_MESHCONF_ELEMENT_TTL, |
| 2001 | NL80211_MESHCONF_HWMP_RANN_INTERVAL, | 2128 | NL80211_MESHCONF_HWMP_RANN_INTERVAL, |
| 2002 | NL80211_MESHCONF_GATE_ANNOUNCEMENTS, | 2129 | NL80211_MESHCONF_GATE_ANNOUNCEMENTS, |
| 2130 | NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL, | ||
| 2003 | 2131 | ||
| 2004 | /* keep last */ | 2132 | /* keep last */ |
| 2005 | __NL80211_MESHCONF_ATTR_AFTER_LAST, | 2133 | __NL80211_MESHCONF_ATTR_AFTER_LAST, |
| @@ -2650,4 +2778,45 @@ enum nl80211_tdls_operation { | |||
| 2650 | NL80211_TDLS_DISABLE_LINK, | 2778 | NL80211_TDLS_DISABLE_LINK, |
| 2651 | }; | 2779 | }; |
| 2652 | 2780 | ||
| 2781 | /* | ||
| 2782 | * enum nl80211_ap_sme_features - device-integrated AP features | ||
| 2783 | * Reserved for future use, no bits are defined in | ||
| 2784 | * NL80211_ATTR_DEVICE_AP_SME yet. | ||
| 2785 | enum nl80211_ap_sme_features { | ||
| 2786 | }; | ||
| 2787 | */ | ||
| 2788 | |||
| 2789 | /** | ||
| 2790 | * enum nl80211_feature_flags - device/driver features | ||
| 2791 | * @NL80211_FEATURE_SK_TX_STATUS: This driver supports reflecting back | ||
| 2792 | * TX status to the socket error queue when requested with the | ||
| 2793 | * socket option. | ||
| 2794 | * @NL80211_FEATURE_HT_IBSS: This driver supports IBSS with HT datarates. | ||
| 2795 | */ | ||
| 2796 | enum nl80211_feature_flags { | ||
| 2797 | NL80211_FEATURE_SK_TX_STATUS = 1 << 0, | ||
| 2798 | NL80211_FEATURE_HT_IBSS = 1 << 1, | ||
| 2799 | }; | ||
| 2800 | |||
| 2801 | /** | ||
| 2802 | * enum nl80211_probe_resp_offload_support_attr - optional supported | ||
| 2803 | * protocols for probe-response offloading by the driver/FW. | ||
| 2804 | * To be used with the %NL80211_ATTR_PROBE_RESP_OFFLOAD attribute. | ||
| 2805 | * Each enum value represents a bit in the bitmap of supported | ||
| 2806 | * protocols. Typically a subset of probe-requests belonging to a | ||
| 2807 | * supported protocol will be excluded from offload and uploaded | ||
| 2808 | * to the host. | ||
| 2809 | * | ||
| 2810 | * @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS: Support for WPS ver. 1 | ||
| 2811 | * @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2: Support for WPS ver. 2 | ||
| 2812 | * @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P: Support for P2P | ||
| 2813 | * @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U: Support for 802.11u | ||
| 2814 | */ | ||
| 2815 | enum nl80211_probe_resp_offload_support_attr { | ||
| 2816 | NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS = 1<<0, | ||
| 2817 | NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 = 1<<1, | ||
| 2818 | NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P = 1<<2, | ||
| 2819 | NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U = 1<<3, | ||
| 2820 | }; | ||
| 2821 | |||
| 2653 | #endif /* __LINUX_NL80211_H */ | 2822 | #endif /* __LINUX_NL80211_H */ |
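
The NL80211_CMD_SET_NOACK_MAP command and NL80211_ATTR_NOACK_MAP attribute added above are driven over generic netlink like any other nl80211 request. The following is a minimal userspace sketch, assuming libnl-3 for the socket and message plumbing; set_noack_map() and its lack of error handling are illustrative only, not part of any kernel or library API.

/* Sketch only: set a no-ack bitmap on one interface, assuming libnl-3.
 * Bit n of the map corresponds to TID n; error handling is omitted. */
#include <stdint.h>
#include <net/if.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>

static int set_noack_map(const char *ifname, uint16_t noack_map)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	int family;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "nl80211");

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_SET_NOACK_MAP, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, if_nametoindex(ifname));
	nla_put_u16(msg, NL80211_ATTR_NOACK_MAP, noack_map);

	nl_send_auto(sk, msg);
	nlmsg_free(msg);
	nl_socket_free(sk);
	return 0;
}

The same skeleton applies to NL80211_CMD_PROBE_CLIENT, with NL80211_ATTR_MAC in place of the no-ack map and a cookie coming back in the reply.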
diff --git a/include/linux/openvswitch.h b/include/linux/openvswitch.h new file mode 100644 index 000000000000..eb1efa54fe84 --- /dev/null +++ b/include/linux/openvswitch.h | |||
| @@ -0,0 +1,452 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2007-2011 Nicira Networks. | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or | ||
| 5 | * modify it under the terms of version 2 of the GNU General Public | ||
| 6 | * License as published by the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful, but | ||
| 9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 11 | * General Public License for more details. | ||
| 12 | * | ||
| 13 | * You should have received a copy of the GNU General Public License | ||
| 14 | * along with this program; if not, write to the Free Software | ||
| 15 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
| 16 | * 02110-1301, USA | ||
| 17 | */ | ||
| 18 | |||
| 19 | #ifndef _LINUX_OPENVSWITCH_H | ||
| 20 | #define _LINUX_OPENVSWITCH_H 1 | ||
| 21 | |||
| 22 | #include <linux/types.h> | ||
| 23 | |||
| 24 | /** | ||
| 25 | * struct ovs_header - header for OVS Generic Netlink messages. | ||
| 26 | * @dp_ifindex: ifindex of local port for datapath (0 to make a request not | ||
| 27 | * specific to a datapath). | ||
| 28 | * | ||
| 29 | * Attributes following the header are specific to a particular OVS Generic | ||
| 30 | * Netlink family, but all of the OVS families use this header. | ||
| 31 | */ | ||
| 32 | |||
| 33 | struct ovs_header { | ||
| 34 | int dp_ifindex; | ||
| 35 | }; | ||
| 36 | |||
| 37 | /* Datapaths. */ | ||
| 38 | |||
| 39 | #define OVS_DATAPATH_FAMILY "ovs_datapath" | ||
| 40 | #define OVS_DATAPATH_MCGROUP "ovs_datapath" | ||
| 41 | #define OVS_DATAPATH_VERSION 0x1 | ||
| 42 | |||
| 43 | enum ovs_datapath_cmd { | ||
| 44 | OVS_DP_CMD_UNSPEC, | ||
| 45 | OVS_DP_CMD_NEW, | ||
| 46 | OVS_DP_CMD_DEL, | ||
| 47 | OVS_DP_CMD_GET, | ||
| 48 | OVS_DP_CMD_SET | ||
| 49 | }; | ||
| 50 | |||
| 51 | /** | ||
| 52 | * enum ovs_datapath_attr - attributes for %OVS_DP_* commands. | ||
| 53 | * @OVS_DP_ATTR_NAME: Name of the network device that serves as the "local | ||
| 54 | * port". This is the name of the network device whose dp_ifindex is given in | ||
| 55 | * the &struct ovs_header. Always present in notifications. Required in | ||
| 56 | * %OVS_DP_CMD_NEW requests. May be used as an alternative to specifying | ||
| 57 | * dp_ifindex in other requests (with a dp_ifindex of 0). | ||
| 58 | * @OVS_DP_ATTR_UPCALL_PID: The Netlink socket in userspace that is initially | ||
| 59 | * set on the datapath port (for %OVS_PACKET_CMD_MISS upcalls). Only valid on | ||
| 60 | * %OVS_DP_CMD_NEW requests. A value of zero indicates that upcalls should | ||
| 61 | * not be sent. | ||
| 62 | * @OVS_DP_ATTR_STATS: Statistics about packets that have passed through the | ||
| 63 | * datapath. Always present in notifications. | ||
| 64 | * | ||
| 65 | * These attributes follow the &struct ovs_header within the Generic Netlink | ||
| 66 | * payload for %OVS_DP_* commands. | ||
| 67 | */ | ||
| 68 | enum ovs_datapath_attr { | ||
| 69 | OVS_DP_ATTR_UNSPEC, | ||
| 70 | OVS_DP_ATTR_NAME, /* name of dp_ifindex netdev */ | ||
| 71 | OVS_DP_ATTR_UPCALL_PID, /* Netlink PID to receive upcalls */ | ||
| 72 | OVS_DP_ATTR_STATS, /* struct ovs_dp_stats */ | ||
| 73 | __OVS_DP_ATTR_MAX | ||
| 74 | }; | ||
| 75 | |||
| 76 | #define OVS_DP_ATTR_MAX (__OVS_DP_ATTR_MAX - 1) | ||
| 77 | |||
| 78 | struct ovs_dp_stats { | ||
| 79 | __u64 n_hit; /* Number of flow table matches. */ | ||
| 80 | __u64 n_missed; /* Number of flow table misses. */ | ||
| 81 | __u64 n_lost; /* Number of misses not sent to userspace. */ | ||
| 82 | __u64 n_flows; /* Number of flows present */ | ||
| 83 | }; | ||
| 84 | |||
| 85 | struct ovs_vport_stats { | ||
| 86 | __u64 rx_packets; /* total packets received */ | ||
| 87 | __u64 tx_packets; /* total packets transmitted */ | ||
| 88 | __u64 rx_bytes; /* total bytes received */ | ||
| 89 | __u64 tx_bytes; /* total bytes transmitted */ | ||
| 90 | __u64 rx_errors; /* bad packets received */ | ||
| 91 | __u64 tx_errors; /* packet transmit problems */ | ||
| 92 | __u64 rx_dropped; /* no space in linux buffers */ | ||
| 93 | __u64 tx_dropped; /* no space available in linux */ | ||
| 94 | }; | ||
| 95 | |||
| 96 | /* Fixed logical ports. */ | ||
| 97 | #define OVSP_LOCAL ((__u16)0) | ||
| 98 | |||
| 99 | /* Packet transfer. */ | ||
| 100 | |||
| 101 | #define OVS_PACKET_FAMILY "ovs_packet" | ||
| 102 | #define OVS_PACKET_VERSION 0x1 | ||
| 103 | |||
| 104 | enum ovs_packet_cmd { | ||
| 105 | OVS_PACKET_CMD_UNSPEC, | ||
| 106 | |||
| 107 | /* Kernel-to-user notifications. */ | ||
| 108 | OVS_PACKET_CMD_MISS, /* Flow table miss. */ | ||
| 109 | OVS_PACKET_CMD_ACTION, /* OVS_ACTION_ATTR_USERSPACE action. */ | ||
| 110 | |||
| 111 | /* Userspace commands. */ | ||
| 112 | OVS_PACKET_CMD_EXECUTE /* Apply actions to a packet. */ | ||
| 113 | }; | ||
| 114 | |||
| 115 | /** | ||
| 116 | * enum ovs_packet_attr - attributes for %OVS_PACKET_* commands. | ||
| 117 | * @OVS_PACKET_ATTR_PACKET: Present for all notifications. Contains the entire | ||
| 118 | * packet as received, from the start of the Ethernet header onward. For | ||
| 119 | * %OVS_PACKET_CMD_ACTION, %OVS_PACKET_ATTR_PACKET reflects changes made by | ||
| 120 | * actions preceding %OVS_ACTION_ATTR_USERSPACE, but %OVS_PACKET_ATTR_KEY is | ||
| 121 | * the flow key extracted from the packet as originally received. | ||
| 122 | * @OVS_PACKET_ATTR_KEY: Present for all notifications. Contains the flow key | ||
| 123 | * extracted from the packet as nested %OVS_KEY_ATTR_* attributes. This allows | ||
| 124 | * userspace to adapt its flow setup strategy by comparing its notion of the | ||
| 125 | * flow key against the kernel's. | ||
| 126 | * @OVS_PACKET_ATTR_ACTIONS: Contains actions for the packet. Used | ||
| 127 | * for %OVS_PACKET_CMD_EXECUTE. It has nested %OVS_ACTION_ATTR_* attributes. | ||
| 128 | * @OVS_PACKET_ATTR_USERDATA: Present for an %OVS_PACKET_CMD_ACTION | ||
| 129 | * notification if the %OVS_ACTION_ATTR_USERSPACE action specified an | ||
| 130 | * %OVS_USERSPACE_ATTR_USERDATA attribute. | ||
| 131 | * | ||
| 132 | * These attributes follow the &struct ovs_header within the Generic Netlink | ||
| 133 | * payload for %OVS_PACKET_* commands. | ||
| 134 | */ | ||
| 135 | enum ovs_packet_attr { | ||
| 136 | OVS_PACKET_ATTR_UNSPEC, | ||
| 137 | OVS_PACKET_ATTR_PACKET, /* Packet data. */ | ||
| 138 | OVS_PACKET_ATTR_KEY, /* Nested OVS_KEY_ATTR_* attributes. */ | ||
| 139 | OVS_PACKET_ATTR_ACTIONS, /* Nested OVS_ACTION_ATTR_* attributes. */ | ||
| 140 | OVS_PACKET_ATTR_USERDATA, /* u64 OVS_ACTION_ATTR_USERSPACE arg. */ | ||
| 141 | __OVS_PACKET_ATTR_MAX | ||
| 142 | }; | ||
| 143 | |||
| 144 | #define OVS_PACKET_ATTR_MAX (__OVS_PACKET_ATTR_MAX - 1) | ||
| 145 | |||
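
As a rough illustration of the upcall path documented above, the sketch below parses an OVS_PACKET_CMD_MISS message back into its packet and flow-key attributes. It assumes libnl-3 message parsing; handle_upcall() is a hypothetical callback name.

/* Sketch only: parse an OVS_PACKET_CMD_MISS upcall, assuming libnl-3. */
#include <netlink/netlink.h>
#include <netlink/handlers.h>
#include <netlink/genl/genl.h>
#include <linux/openvswitch.h>

static int handle_upcall(struct nl_msg *msg, void *arg)
{
	struct nlattr *tb[OVS_PACKET_ATTR_MAX + 1];

	if (genlmsg_parse(nlmsg_hdr(msg), sizeof(struct ovs_header), tb,
			  OVS_PACKET_ATTR_MAX, NULL) < 0)
		return NL_SKIP;

	if (tb[OVS_PACKET_ATTR_PACKET]) {
		const void *frame = nla_data(tb[OVS_PACKET_ATTR_PACKET]);
		int frame_len = nla_len(tb[OVS_PACKET_ATTR_PACKET]);

		/* ... inspect the Ethernet frame (frame, frame_len) ... */
	}
	/* tb[OVS_PACKET_ATTR_KEY] carries the nested OVS_KEY_ATTR_* flow key. */
	return NL_OK;
}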
| 146 | /* Virtual ports. */ | ||
| 147 | |||
| 148 | #define OVS_VPORT_FAMILY "ovs_vport" | ||
| 149 | #define OVS_VPORT_MCGROUP "ovs_vport" | ||
| 150 | #define OVS_VPORT_VERSION 0x1 | ||
| 151 | |||
| 152 | enum ovs_vport_cmd { | ||
| 153 | OVS_VPORT_CMD_UNSPEC, | ||
| 154 | OVS_VPORT_CMD_NEW, | ||
| 155 | OVS_VPORT_CMD_DEL, | ||
| 156 | OVS_VPORT_CMD_GET, | ||
| 157 | OVS_VPORT_CMD_SET | ||
| 158 | }; | ||
| 159 | |||
| 160 | enum ovs_vport_type { | ||
| 161 | OVS_VPORT_TYPE_UNSPEC, | ||
| 162 | OVS_VPORT_TYPE_NETDEV, /* network device */ | ||
| 163 | OVS_VPORT_TYPE_INTERNAL, /* network device implemented by datapath */ | ||
| 164 | __OVS_VPORT_TYPE_MAX | ||
| 165 | }; | ||
| 166 | |||
| 167 | #define OVS_VPORT_TYPE_MAX (__OVS_VPORT_TYPE_MAX - 1) | ||
| 168 | |||
| 169 | /** | ||
| 170 | * enum ovs_vport_attr - attributes for %OVS_VPORT_* commands. | ||
| 171 | * @OVS_VPORT_ATTR_PORT_NO: 32-bit port number within datapath. | ||
| 172 | * @OVS_VPORT_ATTR_TYPE: 32-bit %OVS_VPORT_TYPE_* constant describing the type | ||
| 173 | * of vport. | ||
| 174 | * @OVS_VPORT_ATTR_NAME: Name of vport. For a vport based on a network device | ||
| 175 | * this is the name of the network device. Maximum length %IFNAMSIZ-1 bytes | ||
| 176 | * plus a null terminator. | ||
| 177 | * @OVS_VPORT_ATTR_OPTIONS: Vport-specific configuration information. | ||
| 178 | * @OVS_VPORT_ATTR_UPCALL_PID: The Netlink socket in userspace that | ||
| 179 | * OVS_PACKET_CMD_MISS upcalls will be directed to for packets received on | ||
| 180 | * this port. A value of zero indicates that upcalls should not be sent. | ||
| 181 | * @OVS_VPORT_ATTR_STATS: A &struct ovs_vport_stats giving statistics for | ||
| 182 | * packets sent or received through the vport. | ||
| 183 | * | ||
| 184 | * These attributes follow the &struct ovs_header within the Generic Netlink | ||
| 185 | * payload for %OVS_VPORT_* commands. | ||
| 186 | * | ||
| 187 | * For %OVS_VPORT_CMD_NEW requests, the %OVS_VPORT_ATTR_TYPE and | ||
| 188 | * %OVS_VPORT_ATTR_NAME attributes are required. %OVS_VPORT_ATTR_PORT_NO is | ||
| 189 | * optional; if not specified a free port number is automatically selected. | ||
| 190 | * Whether %OVS_VPORT_ATTR_OPTIONS is required or optional depends on the type | ||
| 191 | * of vport. | ||
| 192 | * All other attributes are ignored. | ||
| 193 | * | ||
| 194 | * For other requests, if %OVS_VPORT_ATTR_NAME is specified then it is used to | ||
| 195 | * look up the vport to operate on; otherwise dp_ifindex from the &struct | ||
| 196 | * ovs_header plus %OVS_VPORT_ATTR_PORT_NO determine the vport. | ||
| 197 | */ | ||
| 198 | enum ovs_vport_attr { | ||
| 199 | OVS_VPORT_ATTR_UNSPEC, | ||
| 200 | OVS_VPORT_ATTR_PORT_NO, /* u32 port number within datapath */ | ||
| 201 | OVS_VPORT_ATTR_TYPE, /* u32 OVS_VPORT_TYPE_* constant. */ | ||
| 202 | OVS_VPORT_ATTR_NAME, /* string name, up to IFNAMSIZ bytes long */ | ||
| 203 | OVS_VPORT_ATTR_OPTIONS, /* nested attributes, varies by vport type */ | ||
| 204 | OVS_VPORT_ATTR_UPCALL_PID, /* u32 Netlink PID to receive upcalls */ | ||
| 205 | OVS_VPORT_ATTR_STATS, /* struct ovs_vport_stats */ | ||
| 206 | __OVS_VPORT_ATTR_MAX | ||
| 207 | }; | ||
| 208 | |||
| 209 | #define OVS_VPORT_ATTR_MAX (__OVS_VPORT_ATTR_MAX - 1) | ||
| 210 | |||
| 211 | /* Flows. */ | ||
| 212 | |||
| 213 | #define OVS_FLOW_FAMILY "ovs_flow" | ||
| 214 | #define OVS_FLOW_MCGROUP "ovs_flow" | ||
| 215 | #define OVS_FLOW_VERSION 0x1 | ||
| 216 | |||
| 217 | enum ovs_flow_cmd { | ||
| 218 | OVS_FLOW_CMD_UNSPEC, | ||
| 219 | OVS_FLOW_CMD_NEW, | ||
| 220 | OVS_FLOW_CMD_DEL, | ||
| 221 | OVS_FLOW_CMD_GET, | ||
| 222 | OVS_FLOW_CMD_SET | ||
| 223 | }; | ||
| 224 | |||
| 225 | struct ovs_flow_stats { | ||
| 226 | __u64 n_packets; /* Number of matched packets. */ | ||
| 227 | __u64 n_bytes; /* Number of matched bytes. */ | ||
| 228 | }; | ||
| 229 | |||
| 230 | enum ovs_key_attr { | ||
| 231 | OVS_KEY_ATTR_UNSPEC, | ||
| 232 | OVS_KEY_ATTR_ENCAP, /* Nested set of encapsulated attributes. */ | ||
| 233 | OVS_KEY_ATTR_PRIORITY, /* u32 skb->priority */ | ||
| 234 | OVS_KEY_ATTR_IN_PORT, /* u32 OVS dp port number */ | ||
| 235 | OVS_KEY_ATTR_ETHERNET, /* struct ovs_key_ethernet */ | ||
| 236 | OVS_KEY_ATTR_VLAN, /* be16 VLAN TCI */ | ||
| 237 | OVS_KEY_ATTR_ETHERTYPE, /* be16 Ethernet type */ | ||
| 238 | OVS_KEY_ATTR_IPV4, /* struct ovs_key_ipv4 */ | ||
| 239 | OVS_KEY_ATTR_IPV6, /* struct ovs_key_ipv6 */ | ||
| 240 | OVS_KEY_ATTR_TCP, /* struct ovs_key_tcp */ | ||
| 241 | OVS_KEY_ATTR_UDP, /* struct ovs_key_udp */ | ||
| 242 | OVS_KEY_ATTR_ICMP, /* struct ovs_key_icmp */ | ||
| 243 | OVS_KEY_ATTR_ICMPV6, /* struct ovs_key_icmpv6 */ | ||
| 244 | OVS_KEY_ATTR_ARP, /* struct ovs_key_arp */ | ||
| 245 | OVS_KEY_ATTR_ND, /* struct ovs_key_nd */ | ||
| 246 | __OVS_KEY_ATTR_MAX | ||
| 247 | }; | ||
| 248 | |||
| 249 | #define OVS_KEY_ATTR_MAX (__OVS_KEY_ATTR_MAX - 1) | ||
| 250 | |||
| 251 | /** | ||
| 252 | * enum ovs_frag_type - IPv4 and IPv6 fragment type | ||
| 253 | * @OVS_FRAG_TYPE_NONE: Packet is not a fragment. | ||
| 254 | * @OVS_FRAG_TYPE_FIRST: Packet is a fragment with offset 0. | ||
| 255 | * @OVS_FRAG_TYPE_LATER: Packet is a fragment with nonzero offset. | ||
| 256 | * | ||
| 257 | * Used as the @ipv4_frag in &struct ovs_key_ipv4 and as @ipv6_frag in &struct | ||
| 258 | * ovs_key_ipv6. | ||
| 259 | */ | ||
| 260 | enum ovs_frag_type { | ||
| 261 | OVS_FRAG_TYPE_NONE, | ||
| 262 | OVS_FRAG_TYPE_FIRST, | ||
| 263 | OVS_FRAG_TYPE_LATER, | ||
| 264 | __OVS_FRAG_TYPE_MAX | ||
| 265 | }; | ||
| 266 | |||
| 267 | #define OVS_FRAG_TYPE_MAX (__OVS_FRAG_TYPE_MAX - 1) | ||
| 268 | |||
| 269 | struct ovs_key_ethernet { | ||
| 270 | __u8 eth_src[6]; | ||
| 271 | __u8 eth_dst[6]; | ||
| 272 | }; | ||
| 273 | |||
| 274 | struct ovs_key_ipv4 { | ||
| 275 | __be32 ipv4_src; | ||
| 276 | __be32 ipv4_dst; | ||
| 277 | __u8 ipv4_proto; | ||
| 278 | __u8 ipv4_tos; | ||
| 279 | __u8 ipv4_ttl; | ||
| 280 | __u8 ipv4_frag; /* One of OVS_FRAG_TYPE_*. */ | ||
| 281 | }; | ||
| 282 | |||
| 283 | struct ovs_key_ipv6 { | ||
| 284 | __be32 ipv6_src[4]; | ||
| 285 | __be32 ipv6_dst[4]; | ||
| 286 | __be32 ipv6_label; /* 20-bits in least-significant bits. */ | ||
| 287 | __u8 ipv6_proto; | ||
| 288 | __u8 ipv6_tclass; | ||
| 289 | __u8 ipv6_hlimit; | ||
| 290 | __u8 ipv6_frag; /* One of OVS_FRAG_TYPE_*. */ | ||
| 291 | }; | ||
| 292 | |||
| 293 | struct ovs_key_tcp { | ||
| 294 | __be16 tcp_src; | ||
| 295 | __be16 tcp_dst; | ||
| 296 | }; | ||
| 297 | |||
| 298 | struct ovs_key_udp { | ||
| 299 | __be16 udp_src; | ||
| 300 | __be16 udp_dst; | ||
| 301 | }; | ||
| 302 | |||
| 303 | struct ovs_key_icmp { | ||
| 304 | __u8 icmp_type; | ||
| 305 | __u8 icmp_code; | ||
| 306 | }; | ||
| 307 | |||
| 308 | struct ovs_key_icmpv6 { | ||
| 309 | __u8 icmpv6_type; | ||
| 310 | __u8 icmpv6_code; | ||
| 311 | }; | ||
| 312 | |||
| 313 | struct ovs_key_arp { | ||
| 314 | __be32 arp_sip; | ||
| 315 | __be32 arp_tip; | ||
| 316 | __be16 arp_op; | ||
| 317 | __u8 arp_sha[6]; | ||
| 318 | __u8 arp_tha[6]; | ||
| 319 | }; | ||
| 320 | |||
| 321 | struct ovs_key_nd { | ||
| 322 | __u32 nd_target[4]; | ||
| 323 | __u8 nd_sll[6]; | ||
| 324 | __u8 nd_tll[6]; | ||
| 325 | }; | ||
| 326 | |||
| 327 | /** | ||
| 328 | * enum ovs_flow_attr - attributes for %OVS_FLOW_* commands. | ||
| 329 | * @OVS_FLOW_ATTR_KEY: Nested %OVS_KEY_ATTR_* attributes specifying the flow | ||
| 330 | * key. Always present in notifications. Required for all requests (except | ||
| 331 | * dumps). | ||
| 332 | * @OVS_FLOW_ATTR_ACTIONS: Nested %OVS_ACTION_ATTR_* attributes specifying | ||
| 333 | * the actions to take for packets that match the key. Always present in | ||
| 334 | * notifications. Required for %OVS_FLOW_CMD_NEW requests, optional for | ||
| 335 | * %OVS_FLOW_CMD_SET requests. | ||
| 336 | * @OVS_FLOW_ATTR_STATS: &struct ovs_flow_stats giving statistics for this | ||
| 337 | * flow. Present in notifications if the stats would be nonzero. Ignored in | ||
| 338 | * requests. | ||
| 339 | * @OVS_FLOW_ATTR_TCP_FLAGS: An 8-bit value giving the OR'd value of all of the | ||
| 340 | * TCP flags seen on packets in this flow. Only present in notifications for | ||
| 341 | * TCP flows, and only if it would be nonzero. Ignored in requests. | ||
| 342 | * @OVS_FLOW_ATTR_USED: A 64-bit integer giving the time, in milliseconds on | ||
| 343 | * the system monotonic clock, at which a packet was last processed for this | ||
| 344 | * flow. Only present in notifications if a packet has been processed for this | ||
| 345 | * flow. Ignored in requests. | ||
| 346 | * @OVS_FLOW_ATTR_CLEAR: If present in a %OVS_FLOW_CMD_SET request, clears the | ||
| 347 | * last-used time, accumulated TCP flags, and statistics for this flow. | ||
| 348 | * Otherwise ignored in requests. Never present in notifications. | ||
| 349 | * | ||
| 350 | * These attributes follow the &struct ovs_header within the Generic Netlink | ||
| 351 | * payload for %OVS_FLOW_* commands. | ||
| 352 | */ | ||
| 353 | enum ovs_flow_attr { | ||
| 354 | OVS_FLOW_ATTR_UNSPEC, | ||
| 355 | OVS_FLOW_ATTR_KEY, /* Sequence of OVS_KEY_ATTR_* attributes. */ | ||
| 356 | OVS_FLOW_ATTR_ACTIONS, /* Nested OVS_ACTION_ATTR_* attributes. */ | ||
| 357 | OVS_FLOW_ATTR_STATS, /* struct ovs_flow_stats. */ | ||
| 358 | OVS_FLOW_ATTR_TCP_FLAGS, /* 8-bit OR'd TCP flags. */ | ||
| 359 | OVS_FLOW_ATTR_USED, /* u64 msecs last used in monotonic time. */ | ||
| 360 | OVS_FLOW_ATTR_CLEAR, /* Flag to clear stats, tcp_flags, used. */ | ||
| 361 | __OVS_FLOW_ATTR_MAX | ||
| 362 | }; | ||
| 363 | |||
| 364 | #define OVS_FLOW_ATTR_MAX (__OVS_FLOW_ATTR_MAX - 1) | ||
| 365 | |||
| 366 | /** | ||
| 367 | * enum ovs_sample_attr - Attributes for %OVS_ACTION_ATTR_SAMPLE action. | ||
| 368 | * @OVS_SAMPLE_ATTR_PROBABILITY: 32-bit fraction of packets to sample with | ||
| 369 | * @OVS_ACTION_ATTR_SAMPLE. A value of 0 samples no packets, a value of | ||
| 370 | * %UINT32_MAX samples all packets and intermediate values sample intermediate | ||
| 371 | * fractions of packets. | ||
| 372 | * @OVS_SAMPLE_ATTR_ACTIONS: Set of actions to execute in sampling event. | ||
| 373 | * Actions are passed as nested attributes. | ||
| 374 | * | ||
| 375 | * Executes the specified actions with the given probability on a per-packet | ||
| 376 | * basis. | ||
| 377 | */ | ||
| 378 | enum ovs_sample_attr { | ||
| 379 | OVS_SAMPLE_ATTR_UNSPEC, | ||
| 380 | OVS_SAMPLE_ATTR_PROBABILITY, /* u32 number */ | ||
| 381 | OVS_SAMPLE_ATTR_ACTIONS, /* Nested OVS_ACTION_ATTR_* attributes. */ | ||
| 382 | __OVS_SAMPLE_ATTR_MAX, | ||
| 383 | }; | ||
| 384 | |||
| 385 | #define OVS_SAMPLE_ATTR_MAX (__OVS_SAMPLE_ATTR_MAX - 1) | ||
| 386 | |||
| 387 | /** | ||
| 388 | * enum ovs_userspace_attr - Attributes for %OVS_ACTION_ATTR_USERSPACE action. | ||
| 389 | * @OVS_USERSPACE_ATTR_PID: u32 Netlink PID to which the %OVS_PACKET_CMD_ACTION | ||
| 390 | * message should be sent. Required. | ||
| 391 | * @OVS_USERSPACE_ATTR_USERDATA: If present, its u64 argument is copied to the | ||
| 392 | * %OVS_PACKET_CMD_ACTION message as %OVS_PACKET_ATTR_USERDATA. | ||
| 393 | */ | ||
| 394 | enum ovs_userspace_attr { | ||
| 395 | OVS_USERSPACE_ATTR_UNSPEC, | ||
| 396 | OVS_USERSPACE_ATTR_PID, /* u32 Netlink PID to receive upcalls. */ | ||
| 397 | OVS_USERSPACE_ATTR_USERDATA, /* u64 optional user-specified cookie. */ | ||
| 398 | __OVS_USERSPACE_ATTR_MAX | ||
| 399 | }; | ||
| 400 | |||
| 401 | #define OVS_USERSPACE_ATTR_MAX (__OVS_USERSPACE_ATTR_MAX - 1) | ||
| 402 | |||
| 403 | /** | ||
| 404 | * struct ovs_action_push_vlan - %OVS_ACTION_ATTR_PUSH_VLAN action argument. | ||
| 405 | * @vlan_tpid: Tag protocol identifier (TPID) to push. | ||
| 406 | * @vlan_tci: Tag control information (TCI) to push. The CFI bit must be set | ||
| 407 | * (but it will not be set in the 802.1Q header that is pushed). | ||
| 408 | * | ||
| 409 | * The @vlan_tpid value is typically %ETH_P_8021Q. The only acceptable TPID | ||
| 410 | * values are those that the kernel module also parses as 802.1Q headers, to | ||
| 411 | * prevent %OVS_ACTION_ATTR_PUSH_VLAN followed by %OVS_ACTION_ATTR_POP_VLAN | ||
| 412 | * from having surprising results. | ||
| 413 | */ | ||
| 414 | struct ovs_action_push_vlan { | ||
| 415 | __be16 vlan_tpid; /* 802.1Q TPID. */ | ||
| 416 | __be16 vlan_tci; /* 802.1Q TCI (VLAN ID and priority). */ | ||
| 417 | }; | ||
| 418 | |||
| 419 | /** | ||
| 420 | * enum ovs_action_attr - Action types. | ||
| 421 | * | ||
| 422 | * @OVS_ACTION_ATTR_OUTPUT: Output packet to port. | ||
| 423 | * @OVS_ACTION_ATTR_USERSPACE: Send packet to userspace according to nested | ||
| 424 | * %OVS_USERSPACE_ATTR_* attributes. | ||
| 425 | * @OVS_ACTION_ATTR_SET: Replaces the contents of an existing header. The | ||
| 426 | * single nested %OVS_KEY_ATTR_* attribute specifies a header to modify and its | ||
| 427 | * value. | ||
| 428 | * @OVS_ACTION_ATTR_PUSH_VLAN: Push a new outermost 802.1Q header onto the | ||
| 429 | * packet. | ||
| 430 | * @OVS_ACTION_ATTR_POP_VLAN: Pop the outermost 802.1Q header off the packet. | ||
| 431 | * @OVS_ACTION_ATTR_SAMPLE: Probabilistically executes actions, as specified in | ||
| 432 | * the nested %OVS_SAMPLE_ATTR_* attributes. | ||
| 433 | * | ||
| 434 | * Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all | ||
| 435 | * fields within a header are modifiable, e.g. the IPv4 protocol and fragment | ||
| 436 | * type may not be changed. | ||
| 437 | */ | ||
| 438 | |||
| 439 | enum ovs_action_attr { | ||
| 440 | OVS_ACTION_ATTR_UNSPEC, | ||
| 441 | OVS_ACTION_ATTR_OUTPUT, /* u32 port number. */ | ||
| 442 | OVS_ACTION_ATTR_USERSPACE, /* Nested OVS_USERSPACE_ATTR_*. */ | ||
| 443 | OVS_ACTION_ATTR_SET, /* One nested OVS_KEY_ATTR_*. */ | ||
| 444 | OVS_ACTION_ATTR_PUSH_VLAN, /* struct ovs_action_push_vlan. */ | ||
| 445 | OVS_ACTION_ATTR_POP_VLAN, /* No argument. */ | ||
| 446 | OVS_ACTION_ATTR_SAMPLE, /* Nested OVS_SAMPLE_ATTR_*. */ | ||
| 447 | __OVS_ACTION_ATTR_MAX | ||
| 448 | }; | ||
| 449 | |||
| 450 | #define OVS_ACTION_ATTR_MAX (__OVS_ACTION_ATTR_MAX - 1) | ||
| 451 | |||
| 452 | #endif /* _LINUX_OPENVSWITCH_H */ | ||
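
All of the families above are plain Generic Netlink, so datapath, vport and flow manipulation follow the same request pattern. Below is a hedged sketch that creates a datapath, assuming libnl-3 and a socket already connected with genl_connect(); create_datapath() is an illustrative name and error handling is omitted.

/* Sketch only: create an OVS datapath named 'name', assuming libnl-3. */
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/openvswitch.h>

static int create_datapath(struct nl_sock *sk, const char *name)
{
	int family = genl_ctrl_resolve(sk, OVS_DATAPATH_FAMILY);
	struct nl_msg *msg = nlmsg_alloc();
	struct ovs_header *hdr;

	hdr = genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family,
			  sizeof(*hdr), 0, OVS_DP_CMD_NEW, OVS_DATAPATH_VERSION);
	hdr->dp_ifindex = 0;				/* request not tied to an existing datapath */
	nla_put_string(msg, OVS_DP_ATTR_NAME, name);	/* becomes the local port name */
	nla_put_u32(msg, OVS_DP_ATTR_UPCALL_PID, 0);	/* 0: no miss upcalls */

	nl_send_auto(sk, msg);
	nlmsg_free(msg);
	return 0;
}

Vports and flows follow the same pattern against OVS_VPORT_FAMILY and OVS_FLOW_FAMILY with their respective attribute sets.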
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index b1f89122bf6a..08855613ceb3 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
| @@ -54,6 +54,7 @@ enum perf_hw_id { | |||
| 54 | PERF_COUNT_HW_BUS_CYCLES = 6, | 54 | PERF_COUNT_HW_BUS_CYCLES = 6, |
| 55 | PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7, | 55 | PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7, |
| 56 | PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8, | 56 | PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8, |
| 57 | PERF_COUNT_HW_REF_CPU_CYCLES = 9, | ||
| 57 | 58 | ||
| 58 | PERF_COUNT_HW_MAX, /* non-ABI */ | 59 | PERF_COUNT_HW_MAX, /* non-ABI */ |
| 59 | }; | 60 | }; |
| @@ -890,6 +891,7 @@ struct perf_event_context { | |||
| 890 | int nr_active; | 891 | int nr_active; |
| 891 | int is_active; | 892 | int is_active; |
| 892 | int nr_stat; | 893 | int nr_stat; |
| 894 | int nr_freq; | ||
| 893 | int rotate_disable; | 895 | int rotate_disable; |
| 894 | atomic_t refcount; | 896 | atomic_t refcount; |
| 895 | struct task_struct *task; | 897 | struct task_struct *task; |
| @@ -1063,12 +1065,12 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) | |||
| 1063 | } | 1065 | } |
| 1064 | } | 1066 | } |
| 1065 | 1067 | ||
| 1066 | extern struct jump_label_key perf_sched_events; | 1068 | extern struct jump_label_key_deferred perf_sched_events; |
| 1067 | 1069 | ||
| 1068 | static inline void perf_event_task_sched_in(struct task_struct *prev, | 1070 | static inline void perf_event_task_sched_in(struct task_struct *prev, |
| 1069 | struct task_struct *task) | 1071 | struct task_struct *task) |
| 1070 | { | 1072 | { |
| 1071 | if (static_branch(&perf_sched_events)) | 1073 | if (static_branch(&perf_sched_events.key)) |
| 1072 | __perf_event_task_sched_in(prev, task); | 1074 | __perf_event_task_sched_in(prev, task); |
| 1073 | } | 1075 | } |
| 1074 | 1076 | ||
| @@ -1077,7 +1079,7 @@ static inline void perf_event_task_sched_out(struct task_struct *prev, | |||
| 1077 | { | 1079 | { |
| 1078 | perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0); | 1080 | perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0); |
| 1079 | 1081 | ||
| 1080 | if (static_branch(&perf_sched_events)) | 1082 | if (static_branch(&perf_sched_events.key)) |
| 1081 | __perf_event_task_sched_out(prev, next); | 1083 | __perf_event_task_sched_out(prev, next); |
| 1082 | } | 1084 | } |
| 1083 | 1085 | ||
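
PERF_COUNT_HW_REF_CPU_CYCLES counts cycles at the constant reference rate, so the result is not distorted by frequency scaling or turbo. A hedged userspace sketch follows; perf_event_open() has no glibc wrapper, hence the raw syscall.

/* Sketch only: count reference cycles for the calling thread. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_REF_CPU_CYCLES;
	attr.disabled = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	read(fd, &count, sizeof(count));
	printf("ref cycles: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}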
diff --git a/include/linux/phonet.h b/include/linux/phonet.h index f53a4167c5f4..f48bfc80cb4b 100644 --- a/include/linux/phonet.h +++ b/include/linux/phonet.h | |||
| @@ -38,6 +38,7 @@ | |||
| 38 | #define PNPIPE_ENCAP 1 | 38 | #define PNPIPE_ENCAP 1 |
| 39 | #define PNPIPE_IFINDEX 2 | 39 | #define PNPIPE_IFINDEX 2 |
| 40 | #define PNPIPE_HANDLE 3 | 40 | #define PNPIPE_HANDLE 3 |
| 41 | #define PNPIPE_INITSTATE 4 | ||
| 41 | 42 | ||
| 42 | #define PNADDR_ANY 0 | 43 | #define PNADDR_ANY 0 |
| 43 | #define PNADDR_BROADCAST 0xFC | 44 | #define PNADDR_BROADCAST 0xFC |
| @@ -49,6 +50,7 @@ | |||
| 49 | 50 | ||
| 50 | /* ioctls */ | 51 | /* ioctls */ |
| 51 | #define SIOCPNGETOBJECT (SIOCPROTOPRIVATE + 0) | 52 | #define SIOCPNGETOBJECT (SIOCPROTOPRIVATE + 0) |
| 53 | #define SIOCPNENABLEPIPE (SIOCPROTOPRIVATE + 13) | ||
| 52 | #define SIOCPNADDRESOURCE (SIOCPROTOPRIVATE + 14) | 54 | #define SIOCPNADDRESOURCE (SIOCPROTOPRIVATE + 14) |
| 53 | #define SIOCPNDELRESOURCE (SIOCPROTOPRIVATE + 15) | 55 | #define SIOCPNDELRESOURCE (SIOCPROTOPRIVATE + 15) |
| 54 | 56 | ||
diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h index 7281d5acf2f9..8f1b928f777c 100644 --- a/include/linux/pkt_sched.h +++ b/include/linux/pkt_sched.h | |||
| @@ -162,25 +162,24 @@ struct tc_sfq_qopt { | |||
| 162 | unsigned flows; /* Maximal number of flows */ | 162 | unsigned flows; /* Maximal number of flows */ |
| 163 | }; | 163 | }; |
| 164 | 164 | ||
| 165 | struct tc_sfq_qopt_v1 { | ||
| 166 | struct tc_sfq_qopt v0; | ||
| 167 | unsigned int depth; /* max number of packets per flow */ | ||
| 168 | unsigned int headdrop; | ||
| 169 | }; | ||
| 170 | |||
| 171 | |||
| 165 | struct tc_sfq_xstats { | 172 | struct tc_sfq_xstats { |
| 166 | __s32 allot; | 173 | __s32 allot; |
| 167 | }; | 174 | }; |
| 168 | 175 | ||
| 169 | /* | ||
| 170 | * NOTE: limit, divisor and flows are hardwired to code at the moment. | ||
| 171 | * | ||
| 172 | * limit=flows=128, divisor=1024; | ||
| 173 | * | ||
| 174 | * The only reason for this is efficiency, it is possible | ||
| 175 | * to change these parameters in compile time. | ||
| 176 | */ | ||
| 177 | |||
| 178 | /* RED section */ | 176 | /* RED section */ |
| 179 | 177 | ||
| 180 | enum { | 178 | enum { |
| 181 | TCA_RED_UNSPEC, | 179 | TCA_RED_UNSPEC, |
| 182 | TCA_RED_PARMS, | 180 | TCA_RED_PARMS, |
| 183 | TCA_RED_STAB, | 181 | TCA_RED_STAB, |
| 182 | TCA_RED_MAX_P, | ||
| 184 | __TCA_RED_MAX, | 183 | __TCA_RED_MAX, |
| 185 | }; | 184 | }; |
| 186 | 185 | ||
| @@ -194,8 +193,9 @@ struct tc_red_qopt { | |||
| 194 | unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ | 193 | unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ |
| 195 | unsigned char Scell_log; /* cell size for idle damping */ | 194 | unsigned char Scell_log; /* cell size for idle damping */ |
| 196 | unsigned char flags; | 195 | unsigned char flags; |
| 197 | #define TC_RED_ECN 1 | 196 | #define TC_RED_ECN 1 |
| 198 | #define TC_RED_HARDDROP 2 | 197 | #define TC_RED_HARDDROP 2 |
| 198 | #define TC_RED_ADAPTATIVE 4 | ||
| 199 | }; | 199 | }; |
| 200 | 200 | ||
| 201 | struct tc_red_xstats { | 201 | struct tc_red_xstats { |
| @@ -214,6 +214,7 @@ enum { | |||
| 214 | TCA_GRED_PARMS, | 214 | TCA_GRED_PARMS, |
| 215 | TCA_GRED_STAB, | 215 | TCA_GRED_STAB, |
| 216 | TCA_GRED_DPS, | 216 | TCA_GRED_DPS, |
| 217 | TCA_GRED_MAX_P, | ||
| 217 | __TCA_GRED_MAX, | 218 | __TCA_GRED_MAX, |
| 218 | }; | 219 | }; |
| 219 | 220 | ||
| @@ -253,6 +254,7 @@ enum { | |||
| 253 | TCA_CHOKE_UNSPEC, | 254 | TCA_CHOKE_UNSPEC, |
| 254 | TCA_CHOKE_PARMS, | 255 | TCA_CHOKE_PARMS, |
| 255 | TCA_CHOKE_STAB, | 256 | TCA_CHOKE_STAB, |
| 257 | TCA_CHOKE_MAX_P, | ||
| 256 | __TCA_CHOKE_MAX, | 258 | __TCA_CHOKE_MAX, |
| 257 | }; | 259 | }; |
| 258 | 260 | ||
| @@ -465,6 +467,7 @@ enum { | |||
| 465 | TCA_NETEM_REORDER, | 467 | TCA_NETEM_REORDER, |
| 466 | TCA_NETEM_CORRUPT, | 468 | TCA_NETEM_CORRUPT, |
| 467 | TCA_NETEM_LOSS, | 469 | TCA_NETEM_LOSS, |
| 470 | TCA_NETEM_RATE, | ||
| 468 | __TCA_NETEM_MAX, | 471 | __TCA_NETEM_MAX, |
| 469 | }; | 472 | }; |
| 470 | 473 | ||
| @@ -495,6 +498,13 @@ struct tc_netem_corrupt { | |||
| 495 | __u32 correlation; | 498 | __u32 correlation; |
| 496 | }; | 499 | }; |
| 497 | 500 | ||
| 501 | struct tc_netem_rate { | ||
| 502 | __u32 rate; /* byte/s */ | ||
| 503 | __s32 packet_overhead; | ||
| 504 | __u32 cell_size; | ||
| 505 | __s32 cell_overhead; | ||
| 506 | }; | ||
| 507 | |||
| 498 | enum { | 508 | enum { |
| 499 | NETEM_LOSS_UNSPEC, | 509 | NETEM_LOSS_UNSPEC, |
| 500 | NETEM_LOSS_GI, /* General Intuitive - 4 state model */ | 510 | NETEM_LOSS_GI, /* General Intuitive - 4 state model */ |
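
struct tc_sfq_qopt_v1 embeds the old tc_sfq_qopt as its first member, so existing SFQ users keep working while new ones can additionally set a per-flow depth and enable head drop. A hedged sketch of filling the structure as it would travel in TCA_OPTIONS; the numbers are purely illustrative, not recommendations.

/* Sketch only: illustrative SFQ v1 parameters for a TCA_OPTIONS payload. */
#include <string.h>
#include <linux/pkt_sched.h>

static void fill_sfq_v1(struct tc_sfq_qopt_v1 *opt)
{
	memset(opt, 0, sizeof(*opt));
	opt->v0.quantum = 1514;		/* bytes served per round, roughly one MTU */
	opt->v0.perturb_period = 10;	/* reshuffle the hash every 10 seconds */
	opt->v0.limit = 127;		/* total packets queued */
	opt->v0.divisor = 1024;		/* hash table size */
	opt->v0.flows = 127;		/* maximal number of flows */
	opt->depth = 64;		/* new: max packets per flow */
	opt->headdrop = 1;		/* new: drop at the head of the flow queue */
}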
diff --git a/include/linux/poison.h b/include/linux/poison.h index 79159de0e341..2110a81c5e2a 100644 --- a/include/linux/poison.h +++ b/include/linux/poison.h | |||
| @@ -40,12 +40,6 @@ | |||
| 40 | #define RED_INACTIVE 0x09F911029D74E35BULL /* when obj is inactive */ | 40 | #define RED_INACTIVE 0x09F911029D74E35BULL /* when obj is inactive */ |
| 41 | #define RED_ACTIVE 0xD84156C5635688C0ULL /* when obj is active */ | 41 | #define RED_ACTIVE 0xD84156C5635688C0ULL /* when obj is active */ |
| 42 | 42 | ||
| 43 | #ifdef CONFIG_PHYS_ADDR_T_64BIT | ||
| 44 | #define MEMBLOCK_INACTIVE 0x3a84fb0144c9e71bULL | ||
| 45 | #else | ||
| 46 | #define MEMBLOCK_INACTIVE 0x44c9e71bUL | ||
| 47 | #endif | ||
| 48 | |||
| 49 | #define SLUB_RED_INACTIVE 0xbb | 43 | #define SLUB_RED_INACTIVE 0xbb |
| 50 | #define SLUB_RED_ACTIVE 0xcc | 44 | #define SLUB_RED_ACTIVE 0xcc |
| 51 | 45 | ||
diff --git a/include/linux/pstore.h b/include/linux/pstore.h index 2ca8cde5459d..e1461e143be2 100644 --- a/include/linux/pstore.h +++ b/include/linux/pstore.h | |||
| @@ -22,6 +22,9 @@ | |||
| 22 | #ifndef _LINUX_PSTORE_H | 22 | #ifndef _LINUX_PSTORE_H |
| 23 | #define _LINUX_PSTORE_H | 23 | #define _LINUX_PSTORE_H |
| 24 | 24 | ||
| 25 | #include <linux/time.h> | ||
| 26 | #include <linux/kmsg_dump.h> | ||
| 27 | |||
| 25 | /* types */ | 28 | /* types */ |
| 26 | enum pstore_type_id { | 29 | enum pstore_type_id { |
| 27 | PSTORE_TYPE_DMESG = 0, | 30 | PSTORE_TYPE_DMESG = 0, |
| @@ -41,7 +44,8 @@ struct pstore_info { | |||
| 41 | ssize_t (*read)(u64 *id, enum pstore_type_id *type, | 44 | ssize_t (*read)(u64 *id, enum pstore_type_id *type, |
| 42 | struct timespec *time, char **buf, | 45 | struct timespec *time, char **buf, |
| 43 | struct pstore_info *psi); | 46 | struct pstore_info *psi); |
| 44 | int (*write)(enum pstore_type_id type, u64 *id, | 47 | int (*write)(enum pstore_type_id type, |
| 48 | enum kmsg_dump_reason reason, u64 *id, | ||
| 45 | unsigned int part, size_t size, struct pstore_info *psi); | 49 | unsigned int part, size_t size, struct pstore_info *psi); |
| 46 | int (*erase)(enum pstore_type_id type, u64 id, | 50 | int (*erase)(enum pstore_type_id type, u64 id, |
| 47 | struct pstore_info *psi); | 51 | struct pstore_info *psi); |
| @@ -50,18 +54,12 @@ struct pstore_info { | |||
| 50 | 54 | ||
| 51 | #ifdef CONFIG_PSTORE | 55 | #ifdef CONFIG_PSTORE |
| 52 | extern int pstore_register(struct pstore_info *); | 56 | extern int pstore_register(struct pstore_info *); |
| 53 | extern int pstore_write(enum pstore_type_id type, char *buf, size_t size); | ||
| 54 | #else | 57 | #else |
| 55 | static inline int | 58 | static inline int |
| 56 | pstore_register(struct pstore_info *psi) | 59 | pstore_register(struct pstore_info *psi) |
| 57 | { | 60 | { |
| 58 | return -ENODEV; | 61 | return -ENODEV; |
| 59 | } | 62 | } |
| 60 | static inline int | ||
| 61 | pstore_write(enum pstore_type_id type, char *buf, size_t size) | ||
| 62 | { | ||
| 63 | return -ENODEV; | ||
| 64 | } | ||
| 65 | #endif | 63 | #endif |
| 66 | 64 | ||
| 67 | #endif /*_LINUX_PSTORE_H*/ | 65 | #endif /*_LINUX_PSTORE_H*/ |
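
Passing the kmsg_dump_reason down to the write callback lets a backend decide per record whether it is worth persisting. The sketch below shows a callback matching the new prototype; the backend and the example_* names are hypothetical.

/* Sketch only: a hypothetical pstore backend write callback that persists
 * dmesg records only for oops and panic dumps. */
#include <linux/errno.h>
#include <linux/kmsg_dump.h>
#include <linux/pstore.h>

static u64 example_next_id;

static int example_pstore_write(enum pstore_type_id type,
				enum kmsg_dump_reason reason, u64 *id,
				unsigned int part, size_t size,
				struct pstore_info *psi)
{
	if (type == PSTORE_TYPE_DMESG &&
	    reason != KMSG_DUMP_OOPS && reason != KMSG_DUMP_PANIC)
		return -EINVAL;

	*id = example_next_id++;
	/* ... copy 'size' bytes from psi->buf into the backing store ... */
	return 0;
}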
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 2cf4226ade7e..81c04f4348ec 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
| @@ -51,6 +51,8 @@ extern int rcutorture_runnable; /* for sysctl */ | |||
| 51 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) | 51 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) |
| 52 | extern void rcutorture_record_test_transition(void); | 52 | extern void rcutorture_record_test_transition(void); |
| 53 | extern void rcutorture_record_progress(unsigned long vernum); | 53 | extern void rcutorture_record_progress(unsigned long vernum); |
| 54 | extern void do_trace_rcu_torture_read(char *rcutorturename, | ||
| 55 | struct rcu_head *rhp); | ||
| 54 | #else | 56 | #else |
| 55 | static inline void rcutorture_record_test_transition(void) | 57 | static inline void rcutorture_record_test_transition(void) |
| 56 | { | 58 | { |
| @@ -58,6 +60,12 @@ static inline void rcutorture_record_test_transition(void) | |||
| 58 | static inline void rcutorture_record_progress(unsigned long vernum) | 60 | static inline void rcutorture_record_progress(unsigned long vernum) |
| 59 | { | 61 | { |
| 60 | } | 62 | } |
| 63 | #ifdef CONFIG_RCU_TRACE | ||
| 64 | extern void do_trace_rcu_torture_read(char *rcutorturename, | ||
| 65 | struct rcu_head *rhp); | ||
| 66 | #else | ||
| 67 | #define do_trace_rcu_torture_read(rcutorturename, rhp) do { } while (0) | ||
| 68 | #endif | ||
| 61 | #endif | 69 | #endif |
| 62 | 70 | ||
| 63 | #define UINT_CMP_GE(a, b) (UINT_MAX / 2 >= (a) - (b)) | 71 | #define UINT_CMP_GE(a, b) (UINT_MAX / 2 >= (a) - (b)) |
| @@ -177,23 +185,10 @@ extern void rcu_sched_qs(int cpu); | |||
| 177 | extern void rcu_bh_qs(int cpu); | 185 | extern void rcu_bh_qs(int cpu); |
| 178 | extern void rcu_check_callbacks(int cpu, int user); | 186 | extern void rcu_check_callbacks(int cpu, int user); |
| 179 | struct notifier_block; | 187 | struct notifier_block; |
| 180 | 188 | extern void rcu_idle_enter(void); | |
| 181 | #ifdef CONFIG_NO_HZ | 189 | extern void rcu_idle_exit(void); |
| 182 | 190 | extern void rcu_irq_enter(void); | |
| 183 | extern void rcu_enter_nohz(void); | 191 | extern void rcu_irq_exit(void); |
| 184 | extern void rcu_exit_nohz(void); | ||
| 185 | |||
| 186 | #else /* #ifdef CONFIG_NO_HZ */ | ||
| 187 | |||
| 188 | static inline void rcu_enter_nohz(void) | ||
| 189 | { | ||
| 190 | } | ||
| 191 | |||
| 192 | static inline void rcu_exit_nohz(void) | ||
| 193 | { | ||
| 194 | } | ||
| 195 | |||
| 196 | #endif /* #else #ifdef CONFIG_NO_HZ */ | ||
| 197 | 192 | ||
| 198 | /* | 193 | /* |
| 199 | * Infrastructure to implement the synchronize_() primitives in | 194 | * Infrastructure to implement the synchronize_() primitives in |
| @@ -233,22 +228,30 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head) | |||
| 233 | 228 | ||
| 234 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 229 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 235 | 230 | ||
| 236 | extern struct lockdep_map rcu_lock_map; | 231 | #ifdef CONFIG_PROVE_RCU |
| 237 | # define rcu_read_acquire() \ | 232 | extern int rcu_is_cpu_idle(void); |
| 238 | lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) | 233 | #else /* !CONFIG_PROVE_RCU */ |
| 239 | # define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_) | 234 | static inline int rcu_is_cpu_idle(void) |
| 235 | { | ||
| 236 | return 0; | ||
| 237 | } | ||
| 238 | #endif /* else !CONFIG_PROVE_RCU */ | ||
| 240 | 239 | ||
| 241 | extern struct lockdep_map rcu_bh_lock_map; | 240 | static inline void rcu_lock_acquire(struct lockdep_map *map) |
| 242 | # define rcu_read_acquire_bh() \ | 241 | { |
| 243 | lock_acquire(&rcu_bh_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) | 242 | WARN_ON_ONCE(rcu_is_cpu_idle()); |
| 244 | # define rcu_read_release_bh() lock_release(&rcu_bh_lock_map, 1, _THIS_IP_) | 243 | lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_); |
| 244 | } | ||
| 245 | 245 | ||
| 246 | extern struct lockdep_map rcu_sched_lock_map; | 246 | static inline void rcu_lock_release(struct lockdep_map *map) |
| 247 | # define rcu_read_acquire_sched() \ | 247 | { |
| 248 | lock_acquire(&rcu_sched_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) | 248 | WARN_ON_ONCE(rcu_is_cpu_idle()); |
| 249 | # define rcu_read_release_sched() \ | 249 | lock_release(map, 1, _THIS_IP_); |
| 250 | lock_release(&rcu_sched_lock_map, 1, _THIS_IP_) | 250 | } |
| 251 | 251 | ||
| 252 | extern struct lockdep_map rcu_lock_map; | ||
| 253 | extern struct lockdep_map rcu_bh_lock_map; | ||
| 254 | extern struct lockdep_map rcu_sched_lock_map; | ||
| 252 | extern int debug_lockdep_rcu_enabled(void); | 255 | extern int debug_lockdep_rcu_enabled(void); |
| 253 | 256 | ||
| 254 | /** | 257 | /** |
| @@ -262,11 +265,18 @@ extern int debug_lockdep_rcu_enabled(void); | |||
| 262 | * | 265 | * |
| 263 | * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot | 266 | * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot |
| 264 | * and while lockdep is disabled. | 267 | * and while lockdep is disabled. |
| 268 | * | ||
| 269 | * Note that rcu_read_lock() and the matching rcu_read_unlock() must | ||
| 270 | * occur in the same context, for example, it is illegal to invoke | ||
| 271 | * rcu_read_unlock() in process context if the matching rcu_read_lock() | ||
| 272 | * was invoked from within an irq handler. | ||
| 265 | */ | 273 | */ |
| 266 | static inline int rcu_read_lock_held(void) | 274 | static inline int rcu_read_lock_held(void) |
| 267 | { | 275 | { |
| 268 | if (!debug_lockdep_rcu_enabled()) | 276 | if (!debug_lockdep_rcu_enabled()) |
| 269 | return 1; | 277 | return 1; |
| 278 | if (rcu_is_cpu_idle()) | ||
| 279 | return 0; | ||
| 270 | return lock_is_held(&rcu_lock_map); | 280 | return lock_is_held(&rcu_lock_map); |
| 271 | } | 281 | } |
| 272 | 282 | ||
| @@ -290,6 +300,19 @@ extern int rcu_read_lock_bh_held(void); | |||
| 290 | * | 300 | * |
| 291 | * Check debug_lockdep_rcu_enabled() to prevent false positives during boot | 301 | * Check debug_lockdep_rcu_enabled() to prevent false positives during boot |
| 292 | * and while lockdep is disabled. | 302 | * and while lockdep is disabled. |
| 303 | * | ||
| 304 | * Note that if the CPU is in the idle loop from an RCU point of | ||
| 305 | * view (i.e., we are in the section between rcu_idle_enter() and | ||
| 306 | * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU | ||
| 307 | * did an rcu_read_lock(). The reason for this is that RCU ignores CPUs | ||
| 308 | * that are in such a section, considering these as in extended quiescent | ||
| 309 | * state, so such a CPU is effectively never in an RCU read-side critical | ||
| 310 | * section regardless of what RCU primitives it invokes. This state of | ||
| 311 | * affairs is required --- we need to keep an RCU-free window in idle | ||
| 312 | * where the CPU may possibly enter a low-power mode. This way we can | ||
| 313 | * report an extended quiescent state to other CPUs that started a grace | ||
| 314 | * period. Otherwise we would delay any grace period as long as we run in | ||
| 315 | * the idle task. | ||
| 293 | */ | 316 | */ |
| 294 | #ifdef CONFIG_PREEMPT_COUNT | 317 | #ifdef CONFIG_PREEMPT_COUNT |
| 295 | static inline int rcu_read_lock_sched_held(void) | 318 | static inline int rcu_read_lock_sched_held(void) |
| @@ -298,6 +321,8 @@ static inline int rcu_read_lock_sched_held(void) | |||
| 298 | 321 | ||
| 299 | if (!debug_lockdep_rcu_enabled()) | 322 | if (!debug_lockdep_rcu_enabled()) |
| 300 | return 1; | 323 | return 1; |
| 324 | if (rcu_is_cpu_idle()) | ||
| 325 | return 0; | ||
| 301 | if (debug_locks) | 326 | if (debug_locks) |
| 302 | lockdep_opinion = lock_is_held(&rcu_sched_lock_map); | 327 | lockdep_opinion = lock_is_held(&rcu_sched_lock_map); |
| 303 | return lockdep_opinion || preempt_count() != 0 || irqs_disabled(); | 328 | return lockdep_opinion || preempt_count() != 0 || irqs_disabled(); |
| @@ -311,12 +336,8 @@ static inline int rcu_read_lock_sched_held(void) | |||
| 311 | 336 | ||
| 312 | #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | 337 | #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
| 313 | 338 | ||
| 314 | # define rcu_read_acquire() do { } while (0) | 339 | # define rcu_lock_acquire(a) do { } while (0) |
| 315 | # define rcu_read_release() do { } while (0) | 340 | # define rcu_lock_release(a) do { } while (0) |
| 316 | # define rcu_read_acquire_bh() do { } while (0) | ||
| 317 | # define rcu_read_release_bh() do { } while (0) | ||
| 318 | # define rcu_read_acquire_sched() do { } while (0) | ||
| 319 | # define rcu_read_release_sched() do { } while (0) | ||
| 320 | 341 | ||
| 321 | static inline int rcu_read_lock_held(void) | 342 | static inline int rcu_read_lock_held(void) |
| 322 | { | 343 | { |
| @@ -637,7 +658,7 @@ static inline void rcu_read_lock(void) | |||
| 637 | { | 658 | { |
| 638 | __rcu_read_lock(); | 659 | __rcu_read_lock(); |
| 639 | __acquire(RCU); | 660 | __acquire(RCU); |
| 640 | rcu_read_acquire(); | 661 | rcu_lock_acquire(&rcu_lock_map); |
| 641 | } | 662 | } |
| 642 | 663 | ||
| 643 | /* | 664 | /* |
| @@ -657,7 +678,7 @@ static inline void rcu_read_lock(void) | |||
| 657 | */ | 678 | */ |
| 658 | static inline void rcu_read_unlock(void) | 679 | static inline void rcu_read_unlock(void) |
| 659 | { | 680 | { |
| 660 | rcu_read_release(); | 681 | rcu_lock_release(&rcu_lock_map); |
| 661 | __release(RCU); | 682 | __release(RCU); |
| 662 | __rcu_read_unlock(); | 683 | __rcu_read_unlock(); |
| 663 | } | 684 | } |
| @@ -673,12 +694,17 @@ static inline void rcu_read_unlock(void) | |||
| 673 | * critical sections in interrupt context can use just rcu_read_lock(), | 694 | * critical sections in interrupt context can use just rcu_read_lock(), |
| 674 | * though this should at least be commented to avoid confusing people | 695 | * though this should at least be commented to avoid confusing people |
| 675 | * reading the code. | 696 | * reading the code. |
| 697 | * | ||
| 698 | * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh() | ||
| 699 | * must occur in the same context, for example, it is illegal to invoke | ||
| 700 | * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh() | ||
| 701 | * was invoked from some other task. | ||
| 676 | */ | 702 | */ |
| 677 | static inline void rcu_read_lock_bh(void) | 703 | static inline void rcu_read_lock_bh(void) |
| 678 | { | 704 | { |
| 679 | local_bh_disable(); | 705 | local_bh_disable(); |
| 680 | __acquire(RCU_BH); | 706 | __acquire(RCU_BH); |
| 681 | rcu_read_acquire_bh(); | 707 | rcu_lock_acquire(&rcu_bh_lock_map); |
| 682 | } | 708 | } |
| 683 | 709 | ||
| 684 | /* | 710 | /* |
| @@ -688,7 +714,7 @@ static inline void rcu_read_lock_bh(void) | |||
| 688 | */ | 714 | */ |
| 689 | static inline void rcu_read_unlock_bh(void) | 715 | static inline void rcu_read_unlock_bh(void) |
| 690 | { | 716 | { |
| 691 | rcu_read_release_bh(); | 717 | rcu_lock_release(&rcu_bh_lock_map); |
| 692 | __release(RCU_BH); | 718 | __release(RCU_BH); |
| 693 | local_bh_enable(); | 719 | local_bh_enable(); |
| 694 | } | 720 | } |
| @@ -700,12 +726,17 @@ static inline void rcu_read_unlock_bh(void) | |||
| 700 | * are being done using call_rcu_sched() or synchronize_rcu_sched(). | 726 | * are being done using call_rcu_sched() or synchronize_rcu_sched(). |
| 701 | * Read-side critical sections can also be introduced by anything that | 727 | * Read-side critical sections can also be introduced by anything that |
| 702 | * disables preemption, including local_irq_disable() and friends. | 728 | * disables preemption, including local_irq_disable() and friends. |
| 729 | * | ||
| 730 | * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched() | ||
| 731 | * must occur in the same context, for example, it is illegal to invoke | ||
| 732 | * rcu_read_unlock_sched() from process context if the matching | ||
| 733 | * rcu_read_lock_sched() was invoked from an NMI handler. | ||
| 703 | */ | 734 | */ |
| 704 | static inline void rcu_read_lock_sched(void) | 735 | static inline void rcu_read_lock_sched(void) |
| 705 | { | 736 | { |
| 706 | preempt_disable(); | 737 | preempt_disable(); |
| 707 | __acquire(RCU_SCHED); | 738 | __acquire(RCU_SCHED); |
| 708 | rcu_read_acquire_sched(); | 739 | rcu_lock_acquire(&rcu_sched_lock_map); |
| 709 | } | 740 | } |
| 710 | 741 | ||
| 711 | /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */ | 742 | /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */ |
| @@ -722,7 +753,7 @@ static inline notrace void rcu_read_lock_sched_notrace(void) | |||
| 722 | */ | 753 | */ |
| 723 | static inline void rcu_read_unlock_sched(void) | 754 | static inline void rcu_read_unlock_sched(void) |
| 724 | { | 755 | { |
| 725 | rcu_read_release_sched(); | 756 | rcu_lock_release(&rcu_sched_lock_map); |
| 726 | __release(RCU_SCHED); | 757 | __release(RCU_SCHED); |
| 727 | preempt_enable(); | 758 | preempt_enable(); |
| 728 | } | 759 | } |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 1c4f3e9b9bc5..cf0eb342bcba 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -273,9 +273,11 @@ extern int runqueue_is_locked(int cpu); | |||
| 273 | 273 | ||
| 274 | #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) | 274 | #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) |
| 275 | extern void select_nohz_load_balancer(int stop_tick); | 275 | extern void select_nohz_load_balancer(int stop_tick); |
| 276 | extern void set_cpu_sd_state_idle(void); | ||
| 276 | extern int get_nohz_timer_target(void); | 277 | extern int get_nohz_timer_target(void); |
| 277 | #else | 278 | #else |
| 278 | static inline void select_nohz_load_balancer(int stop_tick) { } | 279 | static inline void select_nohz_load_balancer(int stop_tick) { } |
| 280 | static inline void set_cpu_sd_state_idle(void) { } | ||
| 279 | #endif | 281 | #endif |
| 280 | 282 | ||
| 281 | /* | 283 | /* |
| @@ -483,8 +485,8 @@ struct task_cputime { | |||
| 483 | 485 | ||
| 484 | #define INIT_CPUTIME \ | 486 | #define INIT_CPUTIME \ |
| 485 | (struct task_cputime) { \ | 487 | (struct task_cputime) { \ |
| 486 | .utime = cputime_zero, \ | 488 | .utime = 0, \ |
| 487 | .stime = cputime_zero, \ | 489 | .stime = 0, \ |
| 488 | .sum_exec_runtime = 0, \ | 490 | .sum_exec_runtime = 0, \ |
| 489 | } | 491 | } |
| 490 | 492 | ||
| @@ -901,6 +903,10 @@ struct sched_group_power { | |||
| 901 | * single CPU. | 903 | * single CPU. |
| 902 | */ | 904 | */ |
| 903 | unsigned int power, power_orig; | 905 | unsigned int power, power_orig; |
| 906 | /* | ||
| 907 | * Number of busy cpus in this group. | ||
| 908 | */ | ||
| 909 | atomic_t nr_busy_cpus; | ||
| 904 | }; | 910 | }; |
| 905 | 911 | ||
| 906 | struct sched_group { | 912 | struct sched_group { |
| @@ -925,6 +931,15 @@ static inline struct cpumask *sched_group_cpus(struct sched_group *sg) | |||
| 925 | return to_cpumask(sg->cpumask); | 931 | return to_cpumask(sg->cpumask); |
| 926 | } | 932 | } |
| 927 | 933 | ||
| 934 | /** | ||
| 935 | * group_first_cpu - Returns the first cpu in the cpumask of a sched_group. | ||
| 936 | * @group: The group whose first cpu is to be returned. | ||
| 937 | */ | ||
| 938 | static inline unsigned int group_first_cpu(struct sched_group *group) | ||
| 939 | { | ||
| 940 | return cpumask_first(sched_group_cpus(group)); | ||
| 941 | } | ||
| 942 | |||
| 928 | struct sched_domain_attr { | 943 | struct sched_domain_attr { |
| 929 | int relax_domain_level; | 944 | int relax_domain_level; |
| 930 | }; | 945 | }; |
| @@ -1315,8 +1330,8 @@ struct task_struct { | |||
| 1315 | * older sibling, respectively. (p->father can be replaced with | 1330 | * older sibling, respectively. (p->father can be replaced with |
| 1316 | * p->real_parent->pid) | 1331 | * p->real_parent->pid) |
| 1317 | */ | 1332 | */ |
| 1318 | struct task_struct *real_parent; /* real parent process */ | 1333 | struct task_struct __rcu *real_parent; /* real parent process */ |
| 1319 | struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */ | 1334 | struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */ |
| 1320 | /* | 1335 | /* |
| 1321 | * children/sibling forms the list of my natural children | 1336 | * children/sibling forms the list of my natural children |
| 1322 | */ | 1337 | */ |
| @@ -2070,6 +2085,14 @@ extern int sched_setscheduler(struct task_struct *, int, | |||
| 2070 | extern int sched_setscheduler_nocheck(struct task_struct *, int, | 2085 | extern int sched_setscheduler_nocheck(struct task_struct *, int, |
| 2071 | const struct sched_param *); | 2086 | const struct sched_param *); |
| 2072 | extern struct task_struct *idle_task(int cpu); | 2087 | extern struct task_struct *idle_task(int cpu); |
| 2088 | /** | ||
| 2089 | * is_idle_task - is the specified task an idle task? | ||
| 2090 | * @tsk: the task in question. | ||
| 2091 | */ | ||
| 2092 | static inline bool is_idle_task(struct task_struct *p) | ||
| 2093 | { | ||
| 2094 | return p->pid == 0; | ||
| 2095 | } | ||
| 2073 | extern struct task_struct *curr_task(int cpu); | 2096 | extern struct task_struct *curr_task(int cpu); |
| 2074 | extern void set_curr_task(int cpu, struct task_struct *p); | 2097 | extern void set_curr_task(int cpu, struct task_struct *p); |
| 2075 | 2098 | ||
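The sched.h hunks above add group_first_cpu() and an is_idle_task() helper that identifies the per-CPU idle task by PID 0. A hedged sketch of how callers might use them; my_account() and my_pick_balancer() are invented for illustration:

static void my_account(struct task_struct *p, u64 delta_ns)
{
        if (is_idle_task(p))
                return;                 /* the PID 0 idle task is not charged */
        /* ... charge delta_ns to p ... */
}

static unsigned int my_pick_balancer(struct sched_group *sg)
{
        return group_first_cpu(sg);     /* first CPU in the group's cpumask */
}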
diff --git a/include/linux/security.h b/include/linux/security.h index 19d8e04e1688..e8c619d39291 100644 --- a/include/linux/security.h +++ b/include/linux/security.h | |||
| @@ -2056,7 +2056,7 @@ static inline int security_old_inode_init_security(struct inode *inode, | |||
| 2056 | char **name, void **value, | 2056 | char **name, void **value, |
| 2057 | size_t *len) | 2057 | size_t *len) |
| 2058 | { | 2058 | { |
| 2059 | return 0; | 2059 | return -EOPNOTSUPP; |
| 2060 | } | 2060 | } |
| 2061 | 2061 | ||
| 2062 | static inline int security_inode_create(struct inode *dir, | 2062 | static inline int security_inode_create(struct inode *dir, |
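With the hunk above, the !CONFIG_SECURITY stub of security_old_inode_init_security() now returns -EOPNOTSUPP instead of 0, so callers must treat that value as "no LSM xattr to write". A hedged caller sketch (my_init_sec_xattr() is invented; the real callers live in the filesystems):

static int my_init_sec_xattr(struct inode *inode, struct inode *dir,
                             const struct qstr *qstr)
{
        char *name;
        void *value;
        size_t len;
        int err;

        err = security_old_inode_init_security(inode, dir, qstr,
                                               &name, &value, &len);
        if (err == -EOPNOTSUPP)
                return 0;               /* no security xattr to initialise */
        if (err)
                return err;
        /* ... store (name, value, len) as an xattr, then free both ... */
        return 0;
}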
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index fe864885c1ed..50db9b04a552 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
| @@ -30,6 +30,7 @@ | |||
| 30 | #include <linux/dmaengine.h> | 30 | #include <linux/dmaengine.h> |
| 31 | #include <linux/hrtimer.h> | 31 | #include <linux/hrtimer.h> |
| 32 | #include <linux/dma-mapping.h> | 32 | #include <linux/dma-mapping.h> |
| 33 | #include <linux/netdev_features.h> | ||
| 33 | 34 | ||
| 34 | /* Don't change this without changing skb_csum_unnecessary! */ | 35 | /* Don't change this without changing skb_csum_unnecessary! */ |
| 35 | #define CHECKSUM_NONE 0 | 36 | #define CHECKSUM_NONE 0 |
| @@ -87,7 +88,6 @@ | |||
| 87 | * at device setup time. | 88 | * at device setup time. |
| 88 | * NETIF_F_HW_CSUM - it is clever device, it is able to checksum | 89 | * NETIF_F_HW_CSUM - it is clever device, it is able to checksum |
| 89 | * everything. | 90 | * everything. |
| 90 | * NETIF_F_NO_CSUM - loopback or reliable single hop media. | ||
| 91 | * NETIF_F_IP_CSUM - device is dumb. It is able to csum only | 91 | * NETIF_F_IP_CSUM - device is dumb. It is able to csum only |
| 92 | * TCP/UDP over IPv4. Sigh. Vendors like this | 92 | * TCP/UDP over IPv4. Sigh. Vendors like this |
| 93 | * way by an unknown reason. Though, see comment above | 93 | * way by an unknown reason. Though, see comment above |
| @@ -128,13 +128,17 @@ struct sk_buff_head { | |||
| 128 | 128 | ||
| 129 | struct sk_buff; | 129 | struct sk_buff; |
| 130 | 130 | ||
| 131 | /* To allow 64K frame to be packed as single skb without frag_list. Since | 131 | /* To allow 64K frame to be packed as single skb without frag_list we |
| 132 | * GRO uses frags we allocate at least 16 regardless of page size. | 132 | * require 64K/PAGE_SIZE pages plus 1 additional page to allow for |
| 133 | * buffers which do not start on a page boundary. | ||
| 134 | * | ||
| 135 | * Since GRO uses frags we allocate at least 16 regardless of page | ||
| 136 | * size. | ||
| 133 | */ | 137 | */ |
| 134 | #if (65536/PAGE_SIZE + 2) < 16 | 138 | #if (65536/PAGE_SIZE + 1) < 16 |
| 135 | #define MAX_SKB_FRAGS 16UL | 139 | #define MAX_SKB_FRAGS 16UL |
| 136 | #else | 140 | #else |
| 137 | #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2) | 141 | #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1) |
| 138 | #endif | 142 | #endif |
| 139 | 143 | ||
| 140 | typedef struct skb_frag_struct skb_frag_t; | 144 | typedef struct skb_frag_struct skb_frag_t; |
| @@ -218,6 +222,9 @@ enum { | |||
| 218 | 222 | ||
| 219 | /* device driver supports TX zero-copy buffers */ | 223 | /* device driver supports TX zero-copy buffers */ |
| 220 | SKBTX_DEV_ZEROCOPY = 1 << 4, | 224 | SKBTX_DEV_ZEROCOPY = 1 << 4, |
| 225 | |||
| 226 | /* generate wifi status information (where possible) */ | ||
| 227 | SKBTX_WIFI_STATUS = 1 << 5, | ||
| 221 | }; | 228 | }; |
| 222 | 229 | ||
| 223 | /* | 230 | /* |
| @@ -235,15 +242,15 @@ struct ubuf_info { | |||
| 235 | * the end of the header data, ie. at skb->end. | 242 | * the end of the header data, ie. at skb->end. |
| 236 | */ | 243 | */ |
| 237 | struct skb_shared_info { | 244 | struct skb_shared_info { |
| 238 | unsigned short nr_frags; | 245 | unsigned char nr_frags; |
| 246 | __u8 tx_flags; | ||
| 239 | unsigned short gso_size; | 247 | unsigned short gso_size; |
| 240 | /* Warning: this field is not always filled in (UFO)! */ | 248 | /* Warning: this field is not always filled in (UFO)! */ |
| 241 | unsigned short gso_segs; | 249 | unsigned short gso_segs; |
| 242 | unsigned short gso_type; | 250 | unsigned short gso_type; |
| 243 | __be32 ip6_frag_id; | ||
| 244 | __u8 tx_flags; | ||
| 245 | struct sk_buff *frag_list; | 251 | struct sk_buff *frag_list; |
| 246 | struct skb_shared_hwtstamps hwtstamps; | 252 | struct skb_shared_hwtstamps hwtstamps; |
| 253 | __be32 ip6_frag_id; | ||
| 247 | 254 | ||
| 248 | /* | 255 | /* |
| 249 | * Warning : all fields before dataref are cleared in __alloc_skb() | 256 | * Warning : all fields before dataref are cleared in __alloc_skb() |
| @@ -352,6 +359,8 @@ typedef unsigned char *sk_buff_data_t; | |||
| 352 | * @ooo_okay: allow the mapping of a socket to a queue to be changed | 359 | * @ooo_okay: allow the mapping of a socket to a queue to be changed |
| 353 | * @l4_rxhash: indicate rxhash is a canonical 4-tuple hash over transport | 360 | * @l4_rxhash: indicate rxhash is a canonical 4-tuple hash over transport |
| 354 | * ports. | 361 | * ports. |
| 362 | * @wifi_acked_valid: wifi_acked was set | ||
| 363 | * @wifi_acked: whether frame was acked on wifi or not | ||
| 355 | * @dma_cookie: a cookie to one of several possible DMA operations | 364 | * @dma_cookie: a cookie to one of several possible DMA operations |
| 356 | * done by skb DMA functions | 365 | * done by skb DMA functions |
| 357 | * @secmark: security marking | 366 | * @secmark: security marking |
| @@ -445,10 +454,11 @@ struct sk_buff { | |||
| 445 | #endif | 454 | #endif |
| 446 | __u8 ooo_okay:1; | 455 | __u8 ooo_okay:1; |
| 447 | __u8 l4_rxhash:1; | 456 | __u8 l4_rxhash:1; |
| 457 | __u8 wifi_acked_valid:1; | ||
| 458 | __u8 wifi_acked:1; | ||
| 459 | /* 10/12 bit hole (depending on ndisc_nodetype presence) */ | ||
| 448 | kmemcheck_bitfield_end(flags2); | 460 | kmemcheck_bitfield_end(flags2); |
| 449 | 461 | ||
| 450 | /* 0/13 bit hole */ | ||
| 451 | |||
| 452 | #ifdef CONFIG_NET_DMA | 462 | #ifdef CONFIG_NET_DMA |
| 453 | dma_cookie_t dma_cookie; | 463 | dma_cookie_t dma_cookie; |
| 454 | #endif | 464 | #endif |
| @@ -540,6 +550,7 @@ extern void consume_skb(struct sk_buff *skb); | |||
| 540 | extern void __kfree_skb(struct sk_buff *skb); | 550 | extern void __kfree_skb(struct sk_buff *skb); |
| 541 | extern struct sk_buff *__alloc_skb(unsigned int size, | 551 | extern struct sk_buff *__alloc_skb(unsigned int size, |
| 542 | gfp_t priority, int fclone, int node); | 552 | gfp_t priority, int fclone, int node); |
| 553 | extern struct sk_buff *build_skb(void *data); | ||
| 543 | static inline struct sk_buff *alloc_skb(unsigned int size, | 554 | static inline struct sk_buff *alloc_skb(unsigned int size, |
| 544 | gfp_t priority) | 555 | gfp_t priority) |
| 545 | { | 556 | { |
| @@ -561,8 +572,9 @@ extern struct sk_buff *skb_clone(struct sk_buff *skb, | |||
| 561 | gfp_t priority); | 572 | gfp_t priority); |
| 562 | extern struct sk_buff *skb_copy(const struct sk_buff *skb, | 573 | extern struct sk_buff *skb_copy(const struct sk_buff *skb, |
| 563 | gfp_t priority); | 574 | gfp_t priority); |
| 564 | extern struct sk_buff *pskb_copy(struct sk_buff *skb, | 575 | extern struct sk_buff *__pskb_copy(struct sk_buff *skb, |
| 565 | gfp_t gfp_mask); | 576 | int headroom, gfp_t gfp_mask); |
| 577 | |||
| 566 | extern int pskb_expand_head(struct sk_buff *skb, | 578 | extern int pskb_expand_head(struct sk_buff *skb, |
| 567 | int nhead, int ntail, | 579 | int nhead, int ntail, |
| 568 | gfp_t gfp_mask); | 580 | gfp_t gfp_mask); |
| @@ -1662,38 +1674,6 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev, | |||
| 1662 | } | 1674 | } |
| 1663 | 1675 | ||
| 1664 | /** | 1676 | /** |
| 1665 | * __netdev_alloc_page - allocate a page for ps-rx on a specific device | ||
| 1666 | * @dev: network device to receive on | ||
| 1667 | * @gfp_mask: alloc_pages_node mask | ||
| 1668 | * | ||
| 1669 | * Allocate a new page. dev currently unused. | ||
| 1670 | * | ||
| 1671 | * %NULL is returned if there is no free memory. | ||
| 1672 | */ | ||
| 1673 | static inline struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask) | ||
| 1674 | { | ||
| 1675 | return alloc_pages_node(NUMA_NO_NODE, gfp_mask, 0); | ||
| 1676 | } | ||
| 1677 | |||
| 1678 | /** | ||
| 1679 | * netdev_alloc_page - allocate a page for ps-rx on a specific device | ||
| 1680 | * @dev: network device to receive on | ||
| 1681 | * | ||
| 1682 | * Allocate a new page. dev currently unused. | ||
| 1683 | * | ||
| 1684 | * %NULL is returned if there is no free memory. | ||
| 1685 | */ | ||
| 1686 | static inline struct page *netdev_alloc_page(struct net_device *dev) | ||
| 1687 | { | ||
| 1688 | return __netdev_alloc_page(dev, GFP_ATOMIC); | ||
| 1689 | } | ||
| 1690 | |||
| 1691 | static inline void netdev_free_page(struct net_device *dev, struct page *page) | ||
| 1692 | { | ||
| 1693 | __free_page(page); | ||
| 1694 | } | ||
| 1695 | |||
| 1696 | /** | ||
| 1697 | * skb_frag_page - retrieve the page refered to by a paged fragment | 1677 | * skb_frag_page - retrieve the page refered to by a paged fragment |
| 1698 | * @frag: the paged fragment | 1678 | * @frag: the paged fragment |
| 1699 | * | 1679 | * |
| @@ -1824,6 +1804,12 @@ static inline dma_addr_t skb_frag_dma_map(struct device *dev, | |||
| 1824 | frag->page_offset + offset, size, dir); | 1804 | frag->page_offset + offset, size, dir); |
| 1825 | } | 1805 | } |
| 1826 | 1806 | ||
| 1807 | static inline struct sk_buff *pskb_copy(struct sk_buff *skb, | ||
| 1808 | gfp_t gfp_mask) | ||
| 1809 | { | ||
| 1810 | return __pskb_copy(skb, skb_headroom(skb), gfp_mask); | ||
| 1811 | } | ||
| 1812 | |||
| 1827 | /** | 1813 | /** |
| 1828 | * skb_clone_writable - is the header of a clone writable | 1814 | * skb_clone_writable - is the header of a clone writable |
| 1829 | * @skb: buffer to check | 1815 | * @skb: buffer to check |
| @@ -2105,7 +2091,8 @@ extern void skb_split(struct sk_buff *skb, | |||
| 2105 | extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, | 2091 | extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, |
| 2106 | int shiftlen); | 2092 | int shiftlen); |
| 2107 | 2093 | ||
| 2108 | extern struct sk_buff *skb_segment(struct sk_buff *skb, u32 features); | 2094 | extern struct sk_buff *skb_segment(struct sk_buff *skb, |
| 2095 | netdev_features_t features); | ||
| 2109 | 2096 | ||
| 2110 | static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, | 2097 | static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, |
| 2111 | int len, void *buffer) | 2098 | int len, void *buffer) |
| @@ -2263,6 +2250,15 @@ static inline void skb_tx_timestamp(struct sk_buff *skb) | |||
| 2263 | sw_tx_timestamp(skb); | 2250 | sw_tx_timestamp(skb); |
| 2264 | } | 2251 | } |
| 2265 | 2252 | ||
| 2253 | /** | ||
| 2254 | * skb_complete_wifi_ack - deliver skb with wifi status | ||
| 2255 | * | ||
| 2256 | * @skb: the original outgoing packet | ||
| 2257 | * @acked: ack status | ||
| 2258 | * | ||
| 2259 | */ | ||
| 2260 | void skb_complete_wifi_ack(struct sk_buff *skb, bool acked); | ||
| 2261 | |||
| 2266 | extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len); | 2262 | extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len); |
| 2267 | extern __sum16 __skb_checksum_complete(struct sk_buff *skb); | 2263 | extern __sum16 __skb_checksum_complete(struct sk_buff *skb); |
| 2268 | 2264 | ||
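Among other things, the skbuff.h changes add SKBTX_WIFI_STATUS, the wifi_acked/wifi_acked_valid bits and skb_complete_wifi_ack(), plus build_skb() and a headroom-aware __pskb_copy(). A hedged sketch of the wifi TX-status flow as a driver might use it (my_xmit() and my_tx_done() are invented, and the hardware hand-off is elided):

static void my_xmit(struct sk_buff *skb)
{
        skb_shinfo(skb)->tx_flags |= SKBTX_WIFI_STATUS; /* request status */
        /* ... queue skb to the hardware ... */
}

static void my_tx_done(struct sk_buff *skb, bool acked)
{
        /* fills in wifi_acked_valid/wifi_acked and completes the skb */
        skb_complete_wifi_ack(skb, acked);
}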
diff --git a/include/linux/smscphy.h b/include/linux/smscphy.h new file mode 100644 index 000000000000..ce718cbce435 --- /dev/null +++ b/include/linux/smscphy.h | |||
| @@ -0,0 +1,25 @@ | |||
| 1 | #ifndef __LINUX_SMSCPHY_H__ | ||
| 2 | #define __LINUX_SMSCPHY_H__ | ||
| 3 | |||
| 4 | #define MII_LAN83C185_ISF 29 /* Interrupt Source Flags */ | ||
| 5 | #define MII_LAN83C185_IM 30 /* Interrupt Mask */ | ||
| 6 | #define MII_LAN83C185_CTRL_STATUS 17 /* Mode/Status Register */ | ||
| 7 | |||
| 8 | #define MII_LAN83C185_ISF_INT1 (1<<1) /* Auto-Negotiation Page Received */ | ||
| 9 | #define MII_LAN83C185_ISF_INT2 (1<<2) /* Parallel Detection Fault */ | ||
| 10 | #define MII_LAN83C185_ISF_INT3 (1<<3) /* Auto-Negotiation LP Ack */ | ||
| 11 | #define MII_LAN83C185_ISF_INT4 (1<<4) /* Link Down */ | ||
| 12 | #define MII_LAN83C185_ISF_INT5 (1<<5) /* Remote Fault Detected */ | ||
| 13 | #define MII_LAN83C185_ISF_INT6 (1<<6) /* Auto-Negotiation complete */ | ||
| 14 | #define MII_LAN83C185_ISF_INT7 (1<<7) /* ENERGYON */ | ||
| 15 | |||
| 16 | #define MII_LAN83C185_ISF_INT_ALL (0x0e) | ||
| 17 | |||
| 18 | #define MII_LAN83C185_ISF_INT_PHYLIB_EVENTS \ | ||
| 19 | (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4 | \ | ||
| 20 | MII_LAN83C185_ISF_INT7) | ||
| 21 | |||
| 22 | #define MII_LAN83C185_EDPWRDOWN (1 << 13) /* EDPWRDOWN */ | ||
| 23 | #define MII_LAN83C185_ENERGYON (1 << 1) /* ENERGYON */ | ||
| 24 | |||
| 25 | #endif /* __LINUX_SMSCPHY_H__ */ | ||
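A hedged sketch of how a PHY driver might use the new smscphy.h constants, assuming a valid struct phy_device and the generic phy_read() MDIO accessor; smsc_energy_detected() is invented:

static bool smsc_energy_detected(struct phy_device *phydev)
{
        int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);

        if (rc < 0)
                return false;                   /* MDIO read failed */
        return rc & MII_LAN83C185_ENERGYON;     /* energy seen on the wire */
}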
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h new file mode 100644 index 000000000000..251729a47880 --- /dev/null +++ b/include/linux/sock_diag.h | |||
| @@ -0,0 +1,48 @@ | |||
| 1 | #ifndef __SOCK_DIAG_H__ | ||
| 2 | #define __SOCK_DIAG_H__ | ||
| 3 | |||
| 4 | #include <linux/types.h> | ||
| 5 | |||
| 6 | #define SOCK_DIAG_BY_FAMILY 20 | ||
| 7 | |||
| 8 | struct sock_diag_req { | ||
| 9 | __u8 sdiag_family; | ||
| 10 | __u8 sdiag_protocol; | ||
| 11 | }; | ||
| 12 | |||
| 13 | enum { | ||
| 14 | SK_MEMINFO_RMEM_ALLOC, | ||
| 15 | SK_MEMINFO_RCVBUF, | ||
| 16 | SK_MEMINFO_WMEM_ALLOC, | ||
| 17 | SK_MEMINFO_SNDBUF, | ||
| 18 | SK_MEMINFO_FWD_ALLOC, | ||
| 19 | SK_MEMINFO_WMEM_QUEUED, | ||
| 20 | SK_MEMINFO_OPTMEM, | ||
| 21 | |||
| 22 | SK_MEMINFO_VARS, | ||
| 23 | }; | ||
| 24 | |||
| 25 | #ifdef __KERNEL__ | ||
| 26 | struct sk_buff; | ||
| 27 | struct nlmsghdr; | ||
| 28 | struct sock; | ||
| 29 | |||
| 30 | struct sock_diag_handler { | ||
| 31 | __u8 family; | ||
| 32 | int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh); | ||
| 33 | }; | ||
| 34 | |||
| 35 | int sock_diag_register(struct sock_diag_handler *h); | ||
| 36 | void sock_diag_unregister(struct sock_diag_handler *h); | ||
| 37 | |||
| 38 | void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh)); | ||
| 39 | void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh)); | ||
| 40 | |||
| 41 | int sock_diag_check_cookie(void *sk, __u32 *cookie); | ||
| 42 | void sock_diag_save_cookie(void *sk, __u32 *cookie); | ||
| 43 | |||
| 44 | int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr); | ||
| 45 | |||
| 46 | extern struct sock *sock_diag_nlsk; | ||
| 47 | #endif /* KERNEL */ | ||
| 48 | #endif | ||
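A hedged sketch of how a protocol module might plug into the new sock_diag core; my_diag_dump() and the init/exit boilerplate are invented, only the handler structure and register/unregister calls come from the header above:

static int my_diag_dump(struct sk_buff *skb, struct nlmsghdr *nlh)
{
        /* the sock_diag_req follows the netlink header; parse and reply */
        return 0;
}

static struct sock_diag_handler my_diag_handler = {
        .family = AF_UNIX,
        .dump   = my_diag_dump,
};

static int __init my_diag_init(void)
{
        return sock_diag_register(&my_diag_handler);
}

static void __exit my_diag_exit(void)
{
        sock_diag_unregister(&my_diag_handler);
}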
diff --git a/include/linux/srcu.h b/include/linux/srcu.h index 58971e891f48..e1b005918bbb 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #define _LINUX_SRCU_H | 28 | #define _LINUX_SRCU_H |
| 29 | 29 | ||
| 30 | #include <linux/mutex.h> | 30 | #include <linux/mutex.h> |
| 31 | #include <linux/rcupdate.h> | ||
| 31 | 32 | ||
| 32 | struct srcu_struct_array { | 33 | struct srcu_struct_array { |
| 33 | int c[2]; | 34 | int c[2]; |
| @@ -60,18 +61,10 @@ int __init_srcu_struct(struct srcu_struct *sp, const char *name, | |||
| 60 | __init_srcu_struct((sp), #sp, &__srcu_key); \ | 61 | __init_srcu_struct((sp), #sp, &__srcu_key); \ |
| 61 | }) | 62 | }) |
| 62 | 63 | ||
| 63 | # define srcu_read_acquire(sp) \ | ||
| 64 | lock_acquire(&(sp)->dep_map, 0, 0, 2, 1, NULL, _THIS_IP_) | ||
| 65 | # define srcu_read_release(sp) \ | ||
| 66 | lock_release(&(sp)->dep_map, 1, _THIS_IP_) | ||
| 67 | |||
| 68 | #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | 64 | #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
| 69 | 65 | ||
| 70 | int init_srcu_struct(struct srcu_struct *sp); | 66 | int init_srcu_struct(struct srcu_struct *sp); |
| 71 | 67 | ||
| 72 | # define srcu_read_acquire(sp) do { } while (0) | ||
| 73 | # define srcu_read_release(sp) do { } while (0) | ||
| 74 | |||
| 75 | #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | 68 | #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
| 76 | 69 | ||
| 77 | void cleanup_srcu_struct(struct srcu_struct *sp); | 70 | void cleanup_srcu_struct(struct srcu_struct *sp); |
| @@ -90,12 +83,32 @@ long srcu_batches_completed(struct srcu_struct *sp); | |||
| 90 | * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, | 83 | * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, |
| 91 | * this assumes we are in an SRCU read-side critical section unless it can | 84 | * this assumes we are in an SRCU read-side critical section unless it can |
| 92 | * prove otherwise. | 85 | * prove otherwise. |
| 86 | * | ||
| 87 | * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot | ||
| 88 | * and while lockdep is disabled. | ||
| 89 | * | ||
| 90 | * Note that if the CPU is in the idle loop from an RCU point of view | ||
| 91 | * (ie: that we are in the section between rcu_idle_enter() and | ||
| 92 | * rcu_idle_exit()) then srcu_read_lock_held() returns false even if | ||
| 93 | * the CPU did an srcu_read_lock(). The reason for this is that RCU | ||
| 94 | * ignores CPUs that are in such a section, considering these as in | ||
| 95 | * extended quiescent state, so such a CPU is effectively never in an | ||
| 96 | * RCU read-side critical section regardless of what RCU primitives it | ||
| 97 | * invokes. This state of affairs is required --- we need to keep an | ||
| 98 | * RCU-free window in idle where the CPU may possibly enter into low | ||
| 99 | * power mode. This way we can notice an extended quiescent state to | ||
| 100 | * other CPUs that started a grace period. Otherwise we would delay any | ||
| 101 | * grace period as long as we run in the idle task. | ||
| 93 | */ | 102 | */ |
| 94 | static inline int srcu_read_lock_held(struct srcu_struct *sp) | 103 | static inline int srcu_read_lock_held(struct srcu_struct *sp) |
| 95 | { | 104 | { |
| 96 | if (debug_locks) | 105 | if (rcu_is_cpu_idle()) |
| 97 | return lock_is_held(&sp->dep_map); | 106 | return 0; |
| 98 | return 1; | 107 | |
| 108 | if (!debug_lockdep_rcu_enabled()) | ||
| 109 | return 1; | ||
| 110 | |||
| 111 | return lock_is_held(&sp->dep_map); | ||
| 99 | } | 112 | } |
| 100 | 113 | ||
| 101 | #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | 114 | #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
| @@ -145,12 +158,17 @@ static inline int srcu_read_lock_held(struct srcu_struct *sp) | |||
| 145 | * one way to indirectly wait on an SRCU grace period is to acquire | 158 | * one way to indirectly wait on an SRCU grace period is to acquire |
| 146 | * a mutex that is held elsewhere while calling synchronize_srcu() or | 159 | * a mutex that is held elsewhere while calling synchronize_srcu() or |
| 147 | * synchronize_srcu_expedited(). | 160 | * synchronize_srcu_expedited(). |
| 161 | * | ||
| 162 | * Note that srcu_read_lock() and the matching srcu_read_unlock() must | ||
| 163 | * occur in the same context, for example, it is illegal to invoke | ||
| 164 | * srcu_read_unlock() in an irq handler if the matching srcu_read_lock() | ||
| 165 | * was invoked in process context. | ||
| 148 | */ | 166 | */ |
| 149 | static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) | 167 | static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) |
| 150 | { | 168 | { |
| 151 | int retval = __srcu_read_lock(sp); | 169 | int retval = __srcu_read_lock(sp); |
| 152 | 170 | ||
| 153 | srcu_read_acquire(sp); | 171 | rcu_lock_acquire(&(sp)->dep_map); |
| 154 | return retval; | 172 | return retval; |
| 155 | } | 173 | } |
| 156 | 174 | ||
| @@ -164,8 +182,51 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) | |||
| 164 | static inline void srcu_read_unlock(struct srcu_struct *sp, int idx) | 182 | static inline void srcu_read_unlock(struct srcu_struct *sp, int idx) |
| 165 | __releases(sp) | 183 | __releases(sp) |
| 166 | { | 184 | { |
| 167 | srcu_read_release(sp); | 185 | rcu_lock_release(&(sp)->dep_map); |
| 186 | __srcu_read_unlock(sp, idx); | ||
| 187 | } | ||
| 188 | |||
| 189 | /** | ||
| 190 | * srcu_read_lock_raw - register a new reader for an SRCU-protected structure. | ||
| 191 | * @sp: srcu_struct in which to register the new reader. | ||
| 192 | * | ||
| 193 | * Enter an SRCU read-side critical section. Similar to srcu_read_lock(), | ||
| 194 | * but avoids the RCU-lockdep checking. This means that it is legal to | ||
| 195 | * use srcu_read_lock_raw() in one context, for example, in an exception | ||
| 196 | * handler, and then have the matching srcu_read_unlock_raw() in another | ||
| 197 | * context, for example in the task that took the exception. | ||
| 198 | * | ||
| 199 | * However, the entire SRCU read-side critical section must reside within a | ||
| 200 | * single task. For example, beware of using srcu_read_lock_raw() in | ||
| 201 | * a device interrupt handler and srcu_read_unlock() in the interrupted | ||
| 202 | * task: This will not work if interrupts are threaded. | ||
| 203 | */ | ||
| 204 | static inline int srcu_read_lock_raw(struct srcu_struct *sp) | ||
| 205 | { | ||
| 206 | unsigned long flags; | ||
| 207 | int ret; | ||
| 208 | |||
| 209 | local_irq_save(flags); | ||
| 210 | ret = __srcu_read_lock(sp); | ||
| 211 | local_irq_restore(flags); | ||
| 212 | return ret; | ||
| 213 | } | ||
| 214 | |||
| 215 | /** | ||
| 216 | * srcu_read_unlock_raw - unregister reader from an SRCU-protected structure. | ||
| 217 | * @sp: srcu_struct in which to unregister the old reader. | ||
| 218 | * @idx: return value from corresponding srcu_read_lock_raw(). | ||
| 219 | * | ||
| 220 | * Exit an SRCU read-side critical section without lockdep-RCU checking. | ||
| 221 | * See srcu_read_lock_raw() for more details. | ||
| 222 | */ | ||
| 223 | static inline void srcu_read_unlock_raw(struct srcu_struct *sp, int idx) | ||
| 224 | { | ||
| 225 | unsigned long flags; | ||
| 226 | |||
| 227 | local_irq_save(flags); | ||
| 168 | __srcu_read_unlock(sp, idx); | 228 | __srcu_read_unlock(sp, idx); |
| 229 | local_irq_restore(flags); | ||
| 169 | } | 230 | } |
| 170 | 231 | ||
| 171 | #endif | 232 | #endif |
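The srcu.h changes move the lockdep annotation onto rcu_lock_acquire()/rcu_lock_release() and introduce srcu_read_lock_raw()/srcu_read_unlock_raw(), whose lock and unlock may run in different contexts as long as both ends run in the same task. A hedged sketch (my_srcu and the two entry points are invented; init_srcu_struct(&my_srcu) is assumed to have run earlier):

static struct srcu_struct my_srcu;
static int my_idx;

void my_exception_entry(void)
{
        my_idx = srcu_read_lock_raw(&my_srcu);  /* no lockdep-RCU checking */
}

void my_exception_done(void)
{
        srcu_read_unlock_raw(&my_srcu, my_idx); /* later, but same task */
}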
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h index 061e560251b4..dcf35b0f303a 100644 --- a/include/linux/ssb/ssb.h +++ b/include/linux/ssb/ssb.h | |||
| @@ -94,6 +94,15 @@ struct ssb_sprom { | |||
| 94 | } ghz5; /* 5GHz band */ | 94 | } ghz5; /* 5GHz band */ |
| 95 | } antenna_gain; | 95 | } antenna_gain; |
| 96 | 96 | ||
| 97 | struct { | ||
| 98 | struct { | ||
| 99 | u8 tssipos, extpa_gain, pdet_range, tr_iso, antswlut; | ||
| 100 | } ghz2; | ||
| 101 | struct { | ||
| 102 | u8 tssipos, extpa_gain, pdet_range, tr_iso, antswlut; | ||
| 103 | } ghz5; | ||
| 104 | } fem; | ||
| 105 | |||
| 97 | /* TODO - add any parameters needed from rev 2, 3, 4, 5 or 8 SPROMs */ | 106 | /* TODO - add any parameters needed from rev 2, 3, 4, 5 or 8 SPROMs */ |
| 98 | }; | 107 | }; |
| 99 | 108 | ||
diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h index 98941203a27f..c814ae6eeb22 100644 --- a/include/linux/ssb/ssb_regs.h +++ b/include/linux/ssb/ssb_regs.h | |||
| @@ -432,6 +432,23 @@ | |||
| 432 | #define SSB_SPROM8_RXPO2G 0x00FF /* 2GHz RX power offset */ | 432 | #define SSB_SPROM8_RXPO2G 0x00FF /* 2GHz RX power offset */ |
| 433 | #define SSB_SPROM8_RXPO5G 0xFF00 /* 5GHz RX power offset */ | 433 | #define SSB_SPROM8_RXPO5G 0xFF00 /* 5GHz RX power offset */ |
| 434 | #define SSB_SPROM8_RXPO5G_SHIFT 8 | 434 | #define SSB_SPROM8_RXPO5G_SHIFT 8 |
| 435 | #define SSB_SPROM8_FEM2G 0x00AE | ||
| 436 | #define SSB_SPROM8_FEM5G 0x00B0 | ||
| 437 | #define SSB_SROM8_FEM_TSSIPOS 0x0001 | ||
| 438 | #define SSB_SROM8_FEM_TSSIPOS_SHIFT 0 | ||
| 439 | #define SSB_SROM8_FEM_EXTPA_GAIN 0x0006 | ||
| 440 | #define SSB_SROM8_FEM_EXTPA_GAIN_SHIFT 1 | ||
| 441 | #define SSB_SROM8_FEM_PDET_RANGE 0x00F8 | ||
| 442 | #define SSB_SROM8_FEM_PDET_RANGE_SHIFT 3 | ||
| 443 | #define SSB_SROM8_FEM_TR_ISO 0x0700 | ||
| 444 | #define SSB_SROM8_FEM_TR_ISO_SHIFT 8 | ||
| 445 | #define SSB_SROM8_FEM_ANTSWLUT 0xF800 | ||
| 446 | #define SSB_SROM8_FEM_ANTSWLUT_SHIFT 11 | ||
| 447 | #define SSB_SPROM8_THERMAL 0x00B2 | ||
| 448 | #define SSB_SPROM8_MPWR_RAWTS 0x00B4 | ||
| 449 | #define SSB_SPROM8_TS_SLP_OPT_CORRX 0x00B6 | ||
| 450 | #define SSB_SPROM8_FOC_HWIQ_IQSWP 0x00B8 | ||
| 451 | #define SSB_SPROM8_PHYCAL_TEMPDELTA 0x00BA | ||
| 435 | #define SSB_SPROM8_MAXP_BG 0x00C0 /* Max Power 2GHz in path 1 */ | 452 | #define SSB_SPROM8_MAXP_BG 0x00C0 /* Max Power 2GHz in path 1 */ |
| 436 | #define SSB_SPROM8_MAXP_BG_MASK 0x00FF /* Mask for Max Power 2GHz */ | 453 | #define SSB_SPROM8_MAXP_BG_MASK 0x00FF /* Mask for Max Power 2GHz */ |
| 437 | #define SSB_SPROM8_ITSSI_BG 0xFF00 /* Mask for path 1 itssi_bg */ | 454 | #define SSB_SPROM8_ITSSI_BG 0xFF00 /* Mask for path 1 itssi_bg */ |
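A hedged sketch of unpacking the new rev-8 FEM word into the ssb_sprom.fem fields added earlier in this diff, using the mask/shift constants above; sprom_extract_fem2g() is invented and "in" is assumed to hold the raw SPROM image as 16-bit words:

static void sprom_extract_fem2g(struct ssb_sprom *out, const u16 *in)
{
        u16 v = in[SSB_SPROM8_FEM2G / sizeof(u16)];

        out->fem.ghz2.tssipos    = (v & SSB_SROM8_FEM_TSSIPOS) >> SSB_SROM8_FEM_TSSIPOS_SHIFT;
        out->fem.ghz2.extpa_gain = (v & SSB_SROM8_FEM_EXTPA_GAIN) >> SSB_SROM8_FEM_EXTPA_GAIN_SHIFT;
        out->fem.ghz2.pdet_range = (v & SSB_SROM8_FEM_PDET_RANGE) >> SSB_SROM8_FEM_PDET_RANGE_SHIFT;
        out->fem.ghz2.tr_iso     = (v & SSB_SROM8_FEM_TR_ISO) >> SSB_SROM8_FEM_TR_ISO_SHIFT;
        out->fem.ghz2.antswlut   = (v & SSB_SROM8_FEM_ANTSWLUT) >> SSB_SROM8_FEM_ANTSWLUT_SHIFT;
}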
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 3d8f9c44e27d..2c5993a17c33 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h | |||
| @@ -215,7 +215,7 @@ static inline bool __rpc_copy_addr4(struct sockaddr *dst, | |||
| 215 | return true; | 215 | return true; |
| 216 | } | 216 | } |
| 217 | 217 | ||
| 218 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 218 | #if IS_ENABLED(CONFIG_IPV6) |
| 219 | static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1, | 219 | static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1, |
| 220 | const struct sockaddr *sap2) | 220 | const struct sockaddr *sap2) |
| 221 | { | 221 | { |
| @@ -237,10 +237,10 @@ static inline bool __rpc_copy_addr6(struct sockaddr *dst, | |||
| 237 | struct sockaddr_in6 *dsin6 = (struct sockaddr_in6 *) dst; | 237 | struct sockaddr_in6 *dsin6 = (struct sockaddr_in6 *) dst; |
| 238 | 238 | ||
| 239 | dsin6->sin6_family = ssin6->sin6_family; | 239 | dsin6->sin6_family = ssin6->sin6_family; |
| 240 | ipv6_addr_copy(&dsin6->sin6_addr, &ssin6->sin6_addr); | 240 | dsin6->sin6_addr = ssin6->sin6_addr; |
| 241 | return true; | 241 | return true; |
| 242 | } | 242 | } |
| 243 | #else /* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */ | 243 | #else /* !(IS_ENABLED(CONFIG_IPV6) */ |
| 244 | static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1, | 244 | static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1, |
| 245 | const struct sockaddr *sap2) | 245 | const struct sockaddr *sap2) |
| 246 | { | 246 | { |
| @@ -252,7 +252,7 @@ static inline bool __rpc_copy_addr6(struct sockaddr *dst, | |||
| 252 | { | 252 | { |
| 253 | return false; | 253 | return false; |
| 254 | } | 254 | } |
| 255 | #endif /* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */ | 255 | #endif /* !(IS_ENABLED(CONFIG_IPV6) */ |
| 256 | 256 | ||
| 257 | /** | 257 | /** |
| 258 | * rpc_cmp_addr - compare the address portion of two sockaddrs. | 258 | * rpc_cmp_addr - compare the address portion of two sockaddrs. |
diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 7f59ee946983..46a85c9e1f25 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h | |||
| @@ -238,6 +238,11 @@ struct tcp_sack_block { | |||
| 238 | u32 end_seq; | 238 | u32 end_seq; |
| 239 | }; | 239 | }; |
| 240 | 240 | ||
| 241 | /*These are used to set the sack_ok field in struct tcp_options_received */ | ||
| 242 | #define TCP_SACK_SEEN (1 << 0) /*1 = peer is SACK capable, */ | ||
| 243 | #define TCP_FACK_ENABLED (1 << 1) /*1 = FACK is enabled locally*/ | ||
| 244 | #define TCP_DSACK_SEEN (1 << 2) /*1 = DSACK was received from peer*/ | ||
| 245 | |||
| 241 | struct tcp_options_received { | 246 | struct tcp_options_received { |
| 242 | /* PAWS/RTTM data */ | 247 | /* PAWS/RTTM data */ |
| 243 | long ts_recent_stamp;/* Time we stored ts_recent (for aging) */ | 248 | long ts_recent_stamp;/* Time we stored ts_recent (for aging) */ |
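A hedged sketch of using the new symbolic sack_ok bits in place of the old magic numbers; my_record_dsack() is invented and "opt" is a struct tcp_options_received:

static void my_record_dsack(struct tcp_options_received *opt)
{
        if (opt->sack_ok & TCP_SACK_SEEN)       /* peer advertised SACK */
                opt->sack_ok |= TCP_DSACK_SEEN; /* remember a DSACK was received */
}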
diff --git a/include/linux/tick.h b/include/linux/tick.h index b232ccc0ee29..ab8be90b5cc9 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h | |||
| @@ -7,6 +7,7 @@ | |||
| 7 | #define _LINUX_TICK_H | 7 | #define _LINUX_TICK_H |
| 8 | 8 | ||
| 9 | #include <linux/clockchips.h> | 9 | #include <linux/clockchips.h> |
| 10 | #include <linux/irqflags.h> | ||
| 10 | 11 | ||
| 11 | #ifdef CONFIG_GENERIC_CLOCKEVENTS | 12 | #ifdef CONFIG_GENERIC_CLOCKEVENTS |
| 12 | 13 | ||
| @@ -121,14 +122,16 @@ static inline int tick_oneshot_mode_active(void) { return 0; } | |||
| 121 | #endif /* !CONFIG_GENERIC_CLOCKEVENTS */ | 122 | #endif /* !CONFIG_GENERIC_CLOCKEVENTS */ |
| 122 | 123 | ||
| 123 | # ifdef CONFIG_NO_HZ | 124 | # ifdef CONFIG_NO_HZ |
| 124 | extern void tick_nohz_stop_sched_tick(int inidle); | 125 | extern void tick_nohz_idle_enter(void); |
| 125 | extern void tick_nohz_restart_sched_tick(void); | 126 | extern void tick_nohz_idle_exit(void); |
| 127 | extern void tick_nohz_irq_exit(void); | ||
| 126 | extern ktime_t tick_nohz_get_sleep_length(void); | 128 | extern ktime_t tick_nohz_get_sleep_length(void); |
| 127 | extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); | 129 | extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); |
| 128 | extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time); | 130 | extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time); |
| 129 | # else | 131 | # else |
| 130 | static inline void tick_nohz_stop_sched_tick(int inidle) { } | 132 | static inline void tick_nohz_idle_enter(void) { } |
| 131 | static inline void tick_nohz_restart_sched_tick(void) { } | 133 | static inline void tick_nohz_idle_exit(void) { } |
| 134 | |||
| 132 | static inline ktime_t tick_nohz_get_sleep_length(void) | 135 | static inline ktime_t tick_nohz_get_sleep_length(void) |
| 133 | { | 136 | { |
| 134 | ktime_t len = { .tv64 = NSEC_PER_SEC/HZ }; | 137 | ktime_t len = { .tv64 = NSEC_PER_SEC/HZ }; |
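The tick.h hunk renames the nohz entry points: tick_nohz_stop_sched_tick()/tick_nohz_restart_sched_tick() become tick_nohz_idle_enter()/tick_nohz_idle_exit(), with tick_nohz_irq_exit() exported for the irq-exit path. A rough, simplified sketch of an arch idle loop after the rename (arch_do_idle() is invented; preemption and RCU-idle details are omitted):

void cpu_idle(void)
{
        while (1) {
                tick_nohz_idle_enter();
                while (!need_resched())
                        arch_do_idle();         /* e.g. wfi/hlt */
                tick_nohz_idle_exit();
                schedule();
        }
}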
diff --git a/include/linux/unix_diag.h b/include/linux/unix_diag.h new file mode 100644 index 000000000000..b1d2bf16b33c --- /dev/null +++ b/include/linux/unix_diag.h | |||
| @@ -0,0 +1,54 @@ | |||
| 1 | #ifndef __UNIX_DIAG_H__ | ||
| 2 | #define __UNIX_DIAG_H__ | ||
| 3 | |||
| 4 | #include <linux/types.h> | ||
| 5 | |||
| 6 | struct unix_diag_req { | ||
| 7 | __u8 sdiag_family; | ||
| 8 | __u8 sdiag_protocol; | ||
| 9 | __u16 pad; | ||
| 10 | __u32 udiag_states; | ||
| 11 | __u32 udiag_ino; | ||
| 12 | __u32 udiag_show; | ||
| 13 | __u32 udiag_cookie[2]; | ||
| 14 | }; | ||
| 15 | |||
| 16 | #define UDIAG_SHOW_NAME 0x00000001 /* show name (not path) */ | ||
| 17 | #define UDIAG_SHOW_VFS 0x00000002 /* show VFS inode info */ | ||
| 18 | #define UDIAG_SHOW_PEER 0x00000004 /* show peer socket info */ | ||
| 19 | #define UDIAG_SHOW_ICONS 0x00000008 /* show pending connections */ | ||
| 20 | #define UDIAG_SHOW_RQLEN 0x00000010 /* show skb receive queue len */ | ||
| 21 | #define UDIAG_SHOW_MEMINFO 0x00000020 /* show memory info of a socket */ | ||
| 22 | |||
| 23 | struct unix_diag_msg { | ||
| 24 | __u8 udiag_family; | ||
| 25 | __u8 udiag_type; | ||
| 26 | __u8 udiag_state; | ||
| 27 | __u8 pad; | ||
| 28 | |||
| 29 | __u32 udiag_ino; | ||
| 30 | __u32 udiag_cookie[2]; | ||
| 31 | }; | ||
| 32 | |||
| 33 | enum { | ||
| 34 | UNIX_DIAG_NAME, | ||
| 35 | UNIX_DIAG_VFS, | ||
| 36 | UNIX_DIAG_PEER, | ||
| 37 | UNIX_DIAG_ICONS, | ||
| 38 | UNIX_DIAG_RQLEN, | ||
| 39 | UNIX_DIAG_MEMINFO, | ||
| 40 | |||
| 41 | UNIX_DIAG_MAX, | ||
| 42 | }; | ||
| 43 | |||
| 44 | struct unix_diag_vfs { | ||
| 45 | __u32 udiag_vfs_ino; | ||
| 46 | __u32 udiag_vfs_dev; | ||
| 47 | }; | ||
| 48 | |||
| 49 | struct unix_diag_rqlen { | ||
| 50 | __u32 udiag_rqueue; | ||
| 51 | __u32 udiag_wqueue; | ||
| 52 | }; | ||
| 53 | |||
| 54 | #endif | ||
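A hedged userspace sketch of building a request for the new unix_diag interface: the nlmsghdr carries SOCK_DIAG_BY_FAMILY and is followed by a unix_diag_req; sending it over the sock_diag netlink socket and all error handling are omitted:

struct {
        struct nlmsghdr nlh;
        struct unix_diag_req req;
} msg = {
        .nlh = {
                .nlmsg_len   = sizeof(msg),
                .nlmsg_type  = SOCK_DIAG_BY_FAMILY,
                .nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
        },
        .req = {
                .sdiag_family = AF_UNIX,
                .udiag_states = -1,             /* all socket states */
                .udiag_show   = UDIAG_SHOW_NAME | UDIAG_SHOW_PEER |
                                UDIAG_SHOW_RQLEN,
        },
};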
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index e9e72bda1b72..5206d6541da5 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h | |||
| @@ -102,6 +102,10 @@ | |||
| 102 | * vdev: the virtio_device | 102 | * vdev: the virtio_device |
| 103 | * This gives the final feature bits for the device: it can change | 103 | * This gives the final feature bits for the device: it can change |
| 104 | * the dev->feature bits if it wants. | 104 | * the dev->feature bits if it wants. |
| 105 | * @bus_name: return the bus name associated with the device | ||
| 106 | * vdev: the virtio_device | ||
| 107 | * This returns a pointer to the bus name a la pci_name from which | ||
| 108 | * the caller can then copy. | ||
| 105 | */ | 109 | */ |
| 106 | typedef void vq_callback_t(struct virtqueue *); | 110 | typedef void vq_callback_t(struct virtqueue *); |
| 107 | struct virtio_config_ops { | 111 | struct virtio_config_ops { |
| @@ -119,6 +123,7 @@ struct virtio_config_ops { | |||
| 119 | void (*del_vqs)(struct virtio_device *); | 123 | void (*del_vqs)(struct virtio_device *); |
| 120 | u32 (*get_features)(struct virtio_device *vdev); | 124 | u32 (*get_features)(struct virtio_device *vdev); |
| 121 | void (*finalize_features)(struct virtio_device *vdev); | 125 | void (*finalize_features)(struct virtio_device *vdev); |
| 126 | const char *(*bus_name)(struct virtio_device *vdev); | ||
| 122 | }; | 127 | }; |
| 123 | 128 | ||
| 124 | /* If driver didn't advertise the feature, it will never appear. */ | 129 | /* If driver didn't advertise the feature, it will never appear. */ |
| @@ -184,5 +189,14 @@ struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev, | |||
| 184 | return ERR_PTR(err); | 189 | return ERR_PTR(err); |
| 185 | return vq; | 190 | return vq; |
| 186 | } | 191 | } |
| 192 | |||
| 193 | static inline | ||
| 194 | const char *virtio_bus_name(struct virtio_device *vdev) | ||
| 195 | { | ||
| 196 | if (!vdev->config->bus_name) | ||
| 197 | return "virtio"; | ||
| 198 | return vdev->config->bus_name(vdev); | ||
| 199 | } | ||
| 200 | |||
| 187 | #endif /* __KERNEL__ */ | 201 | #endif /* __KERNEL__ */ |
| 188 | #endif /* _LINUX_VIRTIO_CONFIG_H */ | 202 | #endif /* _LINUX_VIRTIO_CONFIG_H */ |
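A hedged sketch of a virtio transport providing the new bus_name() callback that virtio_bus_name() falls back on; my_vdev_bus_name() and my_transport_name() are invented, and the other config ops are elided:

static const char *my_vdev_bus_name(struct virtio_device *vdev)
{
        return my_transport_name(vdev); /* stable name owned by the transport */
}

static struct virtio_config_ops my_config_ops = {
        /* .get, .set, .get_status, .find_vqs, ... */
        .bus_name = my_vdev_bus_name,
};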
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 4bde182fcf93..dcdfc2bda922 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h | |||
| @@ -131,6 +131,7 @@ extern long vwrite(char *buf, char *addr, unsigned long count); | |||
| 131 | */ | 131 | */ |
| 132 | extern rwlock_t vmlist_lock; | 132 | extern rwlock_t vmlist_lock; |
| 133 | extern struct vm_struct *vmlist; | 133 | extern struct vm_struct *vmlist; |
| 134 | extern __init void vm_area_add_early(struct vm_struct *vm); | ||
| 134 | extern __init void vm_area_register_early(struct vm_struct *vm, size_t align); | 135 | extern __init void vm_area_register_early(struct vm_struct *vm, size_t align); |
| 135 | 136 | ||
| 136 | #ifdef CONFIG_SMP | 137 | #ifdef CONFIG_SMP |
diff --git a/include/linux/wait.h b/include/linux/wait.h index 3efc9f3f43a0..a9ce45e8501c 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h | |||
| @@ -77,13 +77,13 @@ struct task_struct; | |||
| 77 | #define __WAIT_BIT_KEY_INITIALIZER(word, bit) \ | 77 | #define __WAIT_BIT_KEY_INITIALIZER(word, bit) \ |
| 78 | { .flags = word, .bit_nr = bit, } | 78 | { .flags = word, .bit_nr = bit, } |
| 79 | 79 | ||
| 80 | extern void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *); | 80 | extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *); |
| 81 | 81 | ||
| 82 | #define init_waitqueue_head(q) \ | 82 | #define init_waitqueue_head(q) \ |
| 83 | do { \ | 83 | do { \ |
| 84 | static struct lock_class_key __key; \ | 84 | static struct lock_class_key __key; \ |
| 85 | \ | 85 | \ |
| 86 | __init_waitqueue_head((q), &__key); \ | 86 | __init_waitqueue_head((q), #q, &__key); \ |
| 87 | } while (0) | 87 | } while (0) |
| 88 | 88 | ||
| 89 | #ifdef CONFIG_LOCKDEP | 89 | #ifdef CONFIG_LOCKDEP |
diff --git a/include/linux/wl12xx.h b/include/linux/wl12xx.h index 4b697395326e..0d6373195d32 100644 --- a/include/linux/wl12xx.h +++ b/include/linux/wl12xx.h | |||
| @@ -54,6 +54,9 @@ struct wl12xx_platform_data { | |||
| 54 | int board_ref_clock; | 54 | int board_ref_clock; |
| 55 | int board_tcxo_clock; | 55 | int board_tcxo_clock; |
| 56 | unsigned long platform_quirks; | 56 | unsigned long platform_quirks; |
| 57 | bool pwr_in_suspend; | ||
| 58 | |||
| 59 | struct wl1271_if_operations *ops; | ||
| 57 | }; | 60 | }; |
| 58 | 61 | ||
| 59 | /* Platform does not support level trigger interrupts */ | 62 | /* Platform does not support level trigger interrupts */ |
| @@ -73,6 +76,6 @@ int wl12xx_set_platform_data(const struct wl12xx_platform_data *data) | |||
| 73 | 76 | ||
| 74 | #endif | 77 | #endif |
| 75 | 78 | ||
| 76 | const struct wl12xx_platform_data *wl12xx_get_platform_data(void); | 79 | struct wl12xx_platform_data *wl12xx_get_platform_data(void); |
| 77 | 80 | ||
| 78 | #endif | 81 | #endif |
